Skip to content

Commit a24e29f

Browse files
SPARK-3278 refactored weightedlabeledpoint to (double, double, double) and updated api
1 parent deb0f17 commit a24e29f

File tree

1 file changed

+3
-7
lines changed

1 file changed

+3
-7
lines changed

mllib/src/test/scala/org/apache/spark/mllib/regression/IsotonicRegressionSuite.scala

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -195,17 +195,14 @@ class IsotonicRegressionSuite
195195

196196
class IsotonicRegressionClusterSuite
197197
extends FunSuite
198-
with LocalClusterSparkContext
199-
with MLlibTestSparkContext
200-
with Matchers{
198+
with LocalClusterSparkContext {
201199

202200
test("task size should be small in both training and prediction") {
203201
val n = 5
204202

205-
206203
val trainData = (0 to n).map(i => (i.toDouble, i.toDouble, 1.toDouble))
207204

208-
val points = sc.parallelize(trainData, 2)
205+
val points = sc.parallelize(trainData, 1)
209206

210207
/*val points = sc.parallelize(0 until n, 2).mapPartitionsWithIndex { (idx, iter) =>
211208
val random = new Random(idx)
@@ -215,7 +212,6 @@ class IsotonicRegressionClusterSuite
215212
// If we serialize data directly in the task closure, the size of the serialized task would be
216213
// greater than 1MB and hence Spark would throw an error.
217214
val model = IsotonicRegression.train(points, true)
218-
219-
model.predict(0)
215+
val predictions = model.predict(points.map(_._2))
220216
}
221217
}

0 commit comments

Comments (0)