
Commit 4e831e2

Reviewer's comments.

1 parent: a68c3bf

core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala

Lines changed: 6 additions & 8 deletions

@@ -21,11 +21,11 @@ import java.io.File
 import java.util.{Collections, List => JList}
 import java.util.concurrent.locks.ReentrantLock
 
-import com.google.common.collect.HashBiMap
-
 import scala.collection.JavaConversions._
 import scala.collection.mutable.{HashMap, HashSet}
 
+import com.google.common.collect.HashBiMap
+
 import org.apache.mesos.Protos.{TaskInfo => MesosTaskInfo, _}
 import org.apache.mesos.{Scheduler => MScheduler, _}
 import org.apache.spark.scheduler.TaskSchedulerImpl

@@ -99,7 +99,7 @@ private[spark] class CoarseMesosSchedulerBackend(
     startScheduler(master, CoarseMesosSchedulerBackend.this, fwInfo)
   }
 
-  def createCommand(offer: Offer, numCores: Int): CommandInfo = {
+  def createCommand(offer: Offer, numCores: Int, taskId: Int): CommandInfo = {
     val executorSparkHome = conf.getOption("spark.mesos.executor.home")
       .orElse(sc.getSparkHome())
       .getOrElse {
@@ -155,7 +155,7 @@ private[spark] class CoarseMesosSchedulerBackend(
155155
s"cd $basename*; $prefixEnv " +
156156
"./bin/spark-class org.apache.spark.executor.CoarseGrainedExecutorBackend" +
157157
s" --driver-url $driverURL" +
158-
s" --executor-id ${offer.getSlaveId.getValue}" +
158+
s" --executor-id ${sparkExecutorId(offer.getSlaveId.getValue, taskId.toString)}" +
159159
s" --hostname ${offer.getHostname}" +
160160
s" --cores $numCores" +
161161
s" --app-id $appId")

@@ -213,7 +213,7 @@ private[spark] class CoarseMesosSchedulerBackend(
           val task = MesosTaskInfo.newBuilder()
             .setTaskId(TaskID.newBuilder().setValue(taskId.toString).build())
             .setSlaveId(offer.getSlaveId)
-            .setCommand(createCommand(offer, cpusToUse + extraCoresPerSlave))
+            .setCommand(createCommand(offer, cpusToUse + extraCoresPerSlave, taskId))
             .setName("Task " + taskId)
             .addResources(createResource("cpus", cpusToUse))
             .addResources(createResource("mem",

@@ -345,14 +345,12 @@ private[spark] class CoarseMesosSchedulerBackend(
       }
     }
 
-    assert(pendingRemovedSlaveIds.size <= taskIdToSlaveId.size)
-
     // We cannot simply decrement from the existing executor limit as we may not able to
     // launch as much executors as the limit. But we assume if we are notified to kill
     // executors, that means the scheduler wants to set the limit that is less than
     // the amount of the executors that has been launched. Therefore, we take the existing
     // amount of executors launched and deduct the executors killed as the new limit.
-    executorLimitOption = Option(taskIdToSlaveId.size - pendingRemovedSlaveIds.size)
+    executorLimitOption = Option(Math.max(0, taskIdToSlaveId.size - pendingRemovedSlaveIds.size))
     true
   }
 }
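
Note: the Math.max(0, ...) clamp keeps the recomputed executor limit from going negative when more kills are pending than there are tracked tasks, the situation the removed assert used to turn into a hard failure. A minimal sketch with hypothetical sizes:

  // Hypothetical values, for illustration only.
  val launched = 3      // stands in for taskIdToSlaveId.size
  val pendingKills = 5  // stands in for pendingRemovedSlaveIds.size

  // Before this change: Option(launched - pendingKills) could yield Some(-2).
  // After: the difference is clamped at zero.
  val newLimit: Option[Int] = Option(Math.max(0, launched - pendingKills))  // Some(0)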
