@@ -100,7 +100,16 @@ def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
         tempNamedTuple = namedtuple("Callsite", "function file linenum")
         self._callsite = tempNamedTuple(function=None, file=None, linenum=None)
         SparkContext._ensure_initialized(self, gateway=gateway)
-
+        try:
+            self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
+                          conf)
+        except:
+            # If an error occurs, clean up in order to allow future SparkContext creation:
+            self.stop()
+            raise
+
+    def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
+                 conf):
         self.environment = environment or {}
         self._conf = conf or SparkConf(_jvm=self._jvm)
         self._batchSize = batchSize  # -1 represents an unlimited batch size
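The hunk above moves the real initialization work into a private _do_init helper so that __init__ can wrap it in a single try/except, release any partially acquired state via stop(), and then re-raise. A minimal standalone sketch of that pattern, using an illustrative Resource class and _handle attribute that are not part of Spark:

import os

class Resource(object):
    def __init__(self):
        try:
            self._do_init()
        except:
            # Clean up whatever was acquired, then re-raise so the
            # caller still sees the original failure.
            self.stop()
            raise

    def _do_init(self):
        self._handle = open(os.devnull, "w")  # acquire a real resource
        raise RuntimeError("simulated failure after partial init")

    def stop(self):
        # getattr-based guard: _handle may never have been assigned.
        if getattr(self, "_handle", None):
            self._handle.close()
            self._handle = None

Constructing Resource() still raises the RuntimeError, but the open file handle is closed first, so a later construction attempt starts from a clean slate.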
@@ -249,17 +258,14 @@ def defaultMinPartitions(self):
         """
         return self._jsc.sc().defaultMinPartitions()
 
-    def __del__(self):
-        self.stop()
-
     def stop(self):
         """
         Shut down the SparkContext.
         """
-        if self._jsc:
+        if getattr(self, "_jsc", None):
             self._jsc.stop()
             self._jsc = None
-        if self._accumulatorServer:
+        if getattr(self, "_accumulatorServer", None):
             self._accumulatorServer.shutdown()
             self._accumulatorServer = None
         with SparkContext._lock:
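Because stop() is now also called from the except branch in __init__, it can run on a partially constructed instance where _do_init failed before _jsc or _accumulatorServer were ever assigned; a plain `if self._jsc:` would raise AttributeError there, which is what the getattr guards prevent. A small illustrative sketch (names hypothetical, not Spark's API):

class Partial(object):
    def stop(self):
        # self._jsc was never assigned; plain `if self._jsc:` would
        # raise AttributeError here.
        if getattr(self, "_jsc", None):
            self._jsc.stop()
            self._jsc = None

Partial().stop()  # safe no-op on a half-initialized object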