@@ -25,7 +25,7 @@ import org.apache.spark.{JobExecutionStatus, SparkConf}
 import org.apache.spark.status.api.v1
 import org.apache.spark.ui.scope._
 import org.apache.spark.util.{Distribution, Utils}
-import org.apache.spark.util.kvstore.{InMemoryStore, KVStore, LevelDB}
+import org.apache.spark.util.kvstore.{InMemoryStore, KVStore}
 
 /**
  * A wrapper around a KVStore that provides methods for accessing the API data stored within.
@@ -148,18 +148,18 @@ private[spark] class AppStatusStore(
     // cheaper for disk stores (avoids deserialization).
     val count = {
       Utils.tryWithResource(
-        if (store.isInstanceOf[LevelDB]) {
+        if (store.isInstanceOf[InMemoryStore]) {
           store.view(classOf[TaskDataWrapper])
             .parent(stageKey)
-            .index(TaskIndexNames.EXEC_RUN_TIME)
-            .first(0L)
+            .index(TaskIndexNames.STATUS)
+            .first("SUCCESS")
+            .last("SUCCESS")
             .closeableIterator()
         } else {
           store.view(classOf[TaskDataWrapper])
             .parent(stageKey)
-            .index(TaskIndexNames.STATUS)
-            .first("SUCCESS")
-            .last("SUCCESS")
+            .index(TaskIndexNames.EXEC_RUN_TIME)
+            .first(0L)
             .closeableIterator()
         }
       ) { it =>
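
For context on the counting idiom in this hunk: bounding a `KVStoreView` with `first`/`last` on an index enumerates only the matching entries, so the count is a pure index walk. Below is a minimal, self-contained sketch against the `org.apache.spark.util.kvstore` API; the `TaskLike` class and `CountByIndexSketch` object are made up for illustration (in the real code `TaskDataWrapper` plays this role), and this is not part of the patch.

```scala
import org.apache.spark.util.kvstore.{InMemoryStore, KVIndex}

// Hypothetical stand-in for TaskDataWrapper: a natural key plus a secondary
// "status" index, both exposed as @KVIndex-annotated no-arg methods.
class TaskLike(taskId: Long, taskStatus: String) {
  @KVIndex
  def id: Long = taskId

  @KVIndex("status")
  def status: String = taskStatus
}

object CountByIndexSketch {
  def main(args: Array[String]): Unit = {
    val store = new InMemoryStore()
    Seq(new TaskLike(1L, "SUCCESS"), new TaskLike(2L, "FAILED"), new TaskLike(3L, "SUCCESS"))
      .foreach(store.write)

    // Bounding the view with first/last on the "status" index yields only
    // the "SUCCESS" entries, so counting never touches the metric values.
    val it = store.view(classOf[TaskLike])
      .index("status")
      .first("SUCCESS")
      .last("SUCCESS")
      .closeableIterator()
    try {
      var count = 0
      while (it.hasNext) { it.next(); count += 1 }
      println(count) // 2
    } finally {
      it.close()
    }
  }
}
```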
@@ -230,14 +230,26 @@ private[spark] class AppStatusStore(
     // stabilize once the stage finishes. It's also slow, especially with disk stores.
     val indices = quantiles.map { q => math.min((q * count).toLong, count - 1) }
 
-    // TODO Summary metrics needs to display all the successful tasks' metrics (SPARK-26119).
+    // TODO: Summary metrics needs to display all the successful tasks' metrics (SPARK-26119).
     // For InMemory case, it is efficient to find using the following code. But for diskStore case
     // we need an efficient solution to avoid deserialization time overhead. For that, we need to
     // rework on the way indexing works, so that we can index by specific metrics for successful
     // and failed tasks differently (would be tricky). Also would require changing the disk store
     // version (to invalidate old stores).
     def scanTasks(index: String)(fn: TaskDataWrapper => Long): IndexedSeq[Double] = {
-      if (store.isInstanceOf[LevelDB]) {
+      if (store.isInstanceOf[InMemoryStore]) {
+        val quantileTasks = store.view(classOf[TaskDataWrapper])
+          .parent(stageKey)
+          .index(index)
+          .first(0L)
+          .asScala
+          .filter { _.status == "SUCCESS" } // Filter "SUCCESS" tasks
+          .toIndexedSeq
+
+        indices.map { index =>
+          fn(quantileTasks(index.toInt)).toDouble
+        }.toIndexedSeq
+      } else {
         Utils.tryWithResource(
           store.view(classOf[TaskDataWrapper])
             .parent(stageKey)
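
The in-memory branch added above reads a quantile straight out of the sorted sequence: the view ordered by `.index(index)` yields metric values ascending, and `indices` picks the positions. A tiny worked sketch with made-up run times (not part of the patch), using the same index formula; note the `math.min` clamp keeps `q = 1.0` in bounds:

```scala
// Ascending metric values, standing in for the view sorted by .index(index).
val sortedRunTimes = IndexedSeq(10L, 20L, 40L, 80L, 160L, 320L, 640L, 1280L)
val count = sortedRunTimes.size.toLong
val quantiles = Seq(0.0, 0.25, 0.5, 0.75, 1.0)
// Same position formula as the patch: clamp (q * count) to the last element.
val indices = quantiles.map { q => math.min((q * count).toLong, count - 1) }
val summary = indices.map { i => sortedRunTimes(i.toInt).toDouble }
// indices == List(0, 2, 4, 6, 7)
// summary == List(10.0, 40.0, 160.0, 640.0, 1280.0)
```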
@@ -262,18 +274,6 @@ private[spark] class AppStatusStore(
             }
           }.toIndexedSeq
         }
-      } else {
-        val quantileTasks = store.view(classOf[TaskDataWrapper])
-          .parent(stageKey)
-          .index(index)
-          .first(0L)
-          .asScala
-          .filter { _.status == "SUCCESS" } // Filter "SUCCESS" tasks
-          .toIndexedSeq
-
-        indices.map { index =>
-          fn(quantileTasks(index.toInt)).toDouble
-        }.toIndexedSeq
       }
     }
 
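
A note on the inverted type check that runs through all of these hunks: by testing for `InMemoryStore` rather than `LevelDB`, the in-memory store becomes the special case, and every other `KVStore` implementation falls through to the branch written for disk-backed stores, which avoids per-element deserialization. A one-line sketch of the resulting dispatch (a hypothetical helper, not in the patch):

```scala
import org.apache.spark.util.kvstore.{InMemoryStore, KVStore}

// Hypothetical helper: with the check inverted, any store that is not an
// InMemoryStore (LevelDB today, or another disk-backed implementation
// later) takes the deserialization-avoiding path.
def takesDiskStorePath(store: KVStore): Boolean = !store.isInstanceOf[InMemoryStore]
```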