@@ -180,20 +180,13 @@ private[spark] class TaskSetManager(
       }
     }
 
-    var hadAliveLocations = false
     for (loc <- tasks(index).preferredLocations) {
       for (execId <- loc.executorId) {
         addTo(pendingTasksForExecutor.getOrElseUpdate(execId, new ArrayBuffer))
       }
-      if (sched.hasExecutorsAliveOnHost(loc.host)) {
-        hadAliveLocations = true
-      }
       addTo(pendingTasksForHost.getOrElseUpdate(loc.host, new ArrayBuffer))
       for (rack <- sched.getRackForHost(loc.host)) {
         addTo(pendingTasksForRack.getOrElseUpdate(rack, new ArrayBuffer))
-        if (sched.hasHostAliveOnRack(rack)){
-          hadAliveLocations = true
-        }
       }
     }
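With `hadAliveLocations` and its liveness checks removed, this part of `addPendingTask` simply fans each preferred location out into the per-executor, per-host, and per-rack pending lists. A minimal standalone sketch of that fan-out (simplified, hypothetical types and a fake rack lookup, not the actual `TaskSetManager` code):

```scala
import scala.collection.mutable.{ArrayBuffer, HashMap}

// Simplified stand-in for the real task location type.
case class TaskLocation(host: String, executorId: Option[String])

object PendingListSketch {
  val pendingTasksForExecutor = new HashMap[String, ArrayBuffer[Int]]
  val pendingTasksForHost = new HashMap[String, ArrayBuffer[Int]]
  val pendingTasksForRack = new HashMap[String, ArrayBuffer[Int]]

  // Hypothetical rack lookup; the real code asks the scheduler for this.
  def rackForHost(host: String): Option[String] = Some("rack-" + host.last)

  // Fan a task index out into every pending list its preferred locations map to.
  def addPendingTask(index: Int, preferredLocations: Seq[TaskLocation]): Unit = {
    for (loc <- preferredLocations) {
      for (execId <- loc.executorId) {
        pendingTasksForExecutor.getOrElseUpdate(execId, new ArrayBuffer) += index
      }
      pendingTasksForHost.getOrElseUpdate(loc.host, new ArrayBuffer) += index
      for (rack <- rackForHost(loc.host)) {
        pendingTasksForRack.getOrElseUpdate(rack, new ArrayBuffer) += index
      }
    }
  }

  def main(args: Array[String]): Unit = {
    addPendingTask(0, Seq(TaskLocation("host1", Some("exec-1")), TaskLocation("host2", None)))
    println(pendingTasksForExecutor) // exec-1 -> ArrayBuffer(0)
    println(pendingTasksForHost)     // host1 -> ArrayBuffer(0), host2 -> ArrayBuffer(0)
  }
}
```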
@@ -286,7 +279,7 @@ private[spark] class TaskSetManager(
       !hasAttemptOnHost(index, host) && !executorIsBlacklisted(execId, index)
 
     if (!speculatableTasks.isEmpty) {
-      // Check for process-local or preference-less tasks; note that tasks can be process-local
+      // Check for process-local tasks; note that tasks can be process-local
       // on multiple nodes when we replicate cached blocks, as in Spark Streaming
       for (index <- speculatableTasks if canRunOnHost(index)) {
         val prefs = tasks(index).preferredLocations
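The reworded comment narrows this first pass to process-local candidates only; the no-preference pass now has its own step (see the next hunk). A quick illustrative check of what "process-local on this offer" means here, using a hypothetical helper and a simplified location type rather than the real code:

```scala
object ProcessLocalSketch {
  case class TaskLocation(host: String, executorId: Option[String])

  // A speculatable task counts as process-local for an offer when one of its
  // preferred locations names the offered executor.
  def isProcessLocal(prefs: Seq[TaskLocation], execId: String): Boolean =
    prefs.flatMap(_.executorId).contains(execId)

  def main(args: Array[String]): Unit = {
    val prefs = Seq(TaskLocation("host1", Some("exec-1")), TaskLocation("host2", None))
    println(isProcessLocal(prefs, "exec-1")) // true
    println(isProcessLocal(prefs, "exec-9")) // false
  }
}
```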
@@ -308,6 +301,7 @@ private[spark] class TaskSetManager(
       }
     }
 
+    // Check for no-preference tasks
     if (TaskLocality.isAllowed(locality, TaskLocality.NO_PREF)) {
       for (index <- speculatableTasks if canRunOnHost(index)) {
         val locations = tasks(index).preferredLocations
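The new `// Check for no-preference tasks` comment labels a pass that only runs when the offered locality level admits NO_PREF. As a reminder of how such a gate can work, here is a sketch that mirrors the shape of `TaskLocality.isAllowed`; the enumeration and its ordering below are a simplified assumption, not necessarily the project's exact definition:

```scala
object LocalityGateSketch {
  // Ordered from strictest to most relaxed; assumed ordering for illustration.
  object TaskLocality extends Enumeration {
    val PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY = Value
  }

  // A condition is allowed when it is no more relaxed than the constraint.
  def isAllowed(constraint: TaskLocality.Value, condition: TaskLocality.Value): Boolean =
    condition <= constraint

  def main(args: Array[String]): Unit = {
    println(isAllowed(TaskLocality.NODE_LOCAL, TaskLocality.NO_PREF)) // false
    println(isAllowed(TaskLocality.ANY, TaskLocality.NO_PREF))        // true
  }
}
```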
@@ -350,7 +344,7 @@ private[spark] class TaskSetManager(
    * @return An option containing (task index within the task set, locality, is speculative?)
    */
   private def findTask(execId: String, host: String, maxLocality: TaskLocality.Value)
-    : Option[(Int, TaskLocality.Value, Boolean)] =
+    : Option[(Int, TaskLocality.Value, Boolean)] =
   {
     for (index <- findTaskFromList(execId, getPendingTasksForExecutor(execId))) {
       return Some((index, TaskLocality.PROCESS_LOCAL, false))
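For context, the visible `findTaskFromList` call is the first step of a cascade: the executor-specific list is consulted first (yielding PROCESS_LOCAL), then progressively broader lists. A rough standalone sketch of that first-hit cascade, with simplified state and string locality labels standing in for the real types:

```scala
import scala.collection.mutable.ArrayBuffer

object FindTaskCascadeSketch {
  // Pop indices off the back of a list until one is still pending.
  def findTaskFromList(list: ArrayBuffer[Int], stillPending: Set[Int]): Option[Int] = {
    while (list.nonEmpty) {
      val index = list.remove(list.size - 1)
      if (stillPending.contains(index)) return Some(index)
    }
    None
  }

  // Consult the narrowest list first and fall through to broader ones.
  def findTask(forExecutor: ArrayBuffer[Int], forHost: ArrayBuffer[Int],
      stillPending: Set[Int]): Option[(Int, String)] = {
    for (index <- findTaskFromList(forExecutor, stillPending)) {
      return Some((index, "PROCESS_LOCAL"))
    }
    for (index <- findTaskFromList(forHost, stillPending)) {
      return Some((index, "NODE_LOCAL"))
    }
    None // no-pref, rack-local and "any" lists would follow the same pattern
  }

  def main(args: Array[String]): Unit = {
    println(findTask(ArrayBuffer(3), ArrayBuffer(3, 5), stillPending = Set(3, 5)))
    // Some((3,PROCESS_LOCAL))
  }
}
```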
@@ -392,7 +386,7 @@ private[spark] class TaskSetManager(
   /**
    * Respond to an offer of a single executor from the scheduler by finding a task
    *
-   * NOTE: this function is either called with a real preferredLocality level which
+   * NOTE: this function is either called with a maxLocality which
    * would be adjusted by delay scheduling algorithm or it will be with a special
    * NO_PREF locality which will be not modified
    *
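The NOTE distinguishes two call paths: a maxLocality that the delay scheduling algorithm has already adjusted, and a NO_PREF level that is passed through untouched. A self-contained sketch of the adjustment side, under assumed names and a made-up single per-level wait (not Spark's configuration handling):

```scala
object DelaySchedulingSketch {
  object TaskLocality extends Enumeration {
    val PROCESS_LOCAL, NODE_LOCAL, RACK_LOCAL, ANY = Value
  }

  // Hypothetical single wait applied at every level; the real scheduler reads
  // per-level waits from configuration. NO_PREF is deliberately not in this
  // ladder because it bypasses the adjustment.
  val localityWaitMs = 3000L
  val levels = Seq(TaskLocality.PROCESS_LOCAL, TaskLocality.NODE_LOCAL,
    TaskLocality.RACK_LOCAL, TaskLocality.ANY)

  private var currentLevelIndex = 0
  private var lastLaunchTime = System.currentTimeMillis()

  // Relax the allowed level one step for every full wait that has elapsed
  // since a task was last launched at the current level.
  def allowedLocality(now: Long): TaskLocality.Value = {
    while (currentLevelIndex < levels.length - 1 && now - lastLaunchTime >= localityWaitMs) {
      lastLaunchTime += localityWaitMs
      currentLevelIndex += 1
    }
    levels(currentLevelIndex)
  }

  def main(args: Array[String]): Unit = {
    val start = System.currentTimeMillis()
    println(allowedLocality(start))         // PROCESS_LOCAL
    println(allowedLocality(start + 7000))  // relaxed two steps: RACK_LOCAL
  }
}
```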