@@ -86,6 +86,7 @@ class FileScanRDD(
   protected val fileScanMaxThreadNum = sparkSession.sessionState.conf.fileScanMaxThreadNum
   protected val maxInConsumingObjNum = sparkSession.sessionState.conf.parquetColumnBatchNum
   @volatile protected var usingMultiThread = false
+  @transient protected var fileScanExecutor: Option[ThreadPoolExecutor] = None

   protected val indexEnabled = sparkSession.sessionState.conf.indexEnabled
@@ -373,20 +374,20 @@ class FileScanRDD(
       if (fileNum <= maxThreadNum) {
         workersNum = fileNum
       }
-
-      private val executor = getExecutor
+      if (fileScanExecutor == null || fileScanExecutor.isEmpty) {
+        fileScanExecutor = Some(createExecutor)
+      }
+      private val executor = fileScanExecutor.get
       private val notCompleteFiles = new AtomicInteger(fileNum)

       @volatile private var scanException: Exception = null
       import scala.collection.JavaConverters._
       private val workingInputStatsMap = new ConcurrentHashMap[String, (Long, Long)].asScala
       private val completedInputStats = (new LongAccumulator, new LongAccumulator)

-      private def getExecutor: ThreadPoolExecutor = {
-        context.getOrCreateObjFromEnv("fileScanExecutor",
-          {ThreadUtils.newDaemonFixedThreadPool(workersNum,
-            s"FileScanForPartition${context.partitionId()}")})
-          .asInstanceOf[ThreadPoolExecutor]
+      private def createExecutor: ThreadPoolExecutor = {
+        ThreadUtils.newDaemonFixedThreadPool(workersNum,
+          s"FileScanForPartition${context.partitionId()}")
       }

       // start to submit scan task
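The net effect of the diff: instead of fetching a shared pool through `context.getOrCreateObjFromEnv`, the RDD now caches its own daemon thread pool in a `@transient` field, created lazily on first use. Below is a minimal, self-contained sketch of that pattern using plain `java.util.concurrent` in place of Spark's `ThreadUtils`; the class and member names are illustrative, not from `FileScanRDD`. Note the double guard: after Java deserialization a `@transient` `Option` field comes back as `null` rather than `None`, which is why the diff checks both.

```scala
import java.util.concurrent.{Executors, ThreadFactory, ThreadPoolExecutor}
import java.util.concurrent.atomic.AtomicInteger

// Minimal sketch (not FileScanRDD itself): a serializable holder whose
// thread pool is @transient, so each deserialized copy on an executor
// rebuilds its own pool lazily instead of shipping one over the wire.
class LazyPoolHolder(workersNum: Int, label: String) extends Serializable {

  // Not serialized; after Java deserialization this field is null, not None,
  // which is why the guard below checks both cases (mirroring the diff).
  @transient private var pool: Option[ThreadPoolExecutor] = None

  // Stand-in for ThreadUtils.newDaemonFixedThreadPool: a fixed-size pool
  // of named daemon threads built from plain java.util.concurrent.
  private def createExecutor: ThreadPoolExecutor = {
    val count = new AtomicInteger(0)
    val factory = new ThreadFactory {
      override def newThread(r: Runnable): Thread = {
        val t = new Thread(r, s"$label-${count.getAndIncrement()}")
        t.setDaemon(true) // daemon threads never block JVM exit
        t
      }
    }
    Executors.newFixedThreadPool(workersNum, factory).asInstanceOf[ThreadPoolExecutor]
  }

  def executor: ThreadPoolExecutor = {
    if (pool == null || pool.isEmpty) {
      pool = Some(createExecutor)
    }
    pool.get
  }
}
```

One consequence worth noting: unlike the removed `getOrCreateObjFromEnv` path, nothing here shares the pool across task instances or shuts it down explicitly; daemon threads keep it from blocking JVM exit, but each deserialized copy now owns a separate pool sized to its own `workersNum`.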