Commit a836a37

Move object RDD to the front of RDD.scala.
I ran into multiple cases where the SBT/Scala compiler was confused by the implicits in continuous compilation mode. Moving them to the beginning of the file seems to have fixed the problem.
1 parent: 77be8b9 · commit: a836a37
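For background, here is a minimal, self-contained sketch of the language mechanism the moved code relies on: implicits defined in a type's companion object are part of that type's implicit scope, so the compiler finds them without any explicit import. This snippet is not part of the commit; `Box`, `IntBoxOps`, and `Demo` are made up for illustration.

class Box[T](val value: T)

object Box {
  // Because this conversion lives in Box's companion object, it is in the
  // implicit scope of every Box value; call sites need no `import Box._`.
  implicit class IntBoxOps(box: Box[Int]) {
    def doubled: Int = box.value * 2
  }
}

object Demo extends App {
  println(new Box(21).doubled) // prints 42 without importing Box._
}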

File tree

  • core/src/main/scala/org/apache/spark/rdd

1 file changed: +38 −28 lines

core/src/main/scala/org/apache/spark/rdd/RDD.scala

Lines changed: 38 additions & 28 deletions
@@ -47,6 +47,44 @@ import org.apache.spark.util.collection.OpenHashMap
 import org.apache.spark.util.random.{BernoulliSampler, PoissonSampler, BernoulliCellSampler,
   SamplingUtils}
 
+
+object RDD {
+
+  // The following implicit functions were in SparkContext before 1.2 and users had to
+  // `import SparkContext._` to enable them. Now we move them here to make the compiler find
+  // them automatically. However, we still keep the old functions in SparkContext for backward
+  // compatibility and forward to the following functions directly.
+
+  implicit def rddToPairRDDFunctions[K, V](rdd: RDD[(K, V)])
+      (implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null): PairRDDFunctions[K, V] = {
+    new PairRDDFunctions(rdd)
+  }
+
+  implicit def rddToAsyncRDDActions[T: ClassTag](rdd: RDD[T]): AsyncRDDActions[T] = {
+    new AsyncRDDActions(rdd)
+  }
+
+  implicit def rddToSequenceFileRDDFunctions[K <% Writable: ClassTag, V <% Writable: ClassTag](
+      rdd: RDD[(K, V)]): SequenceFileRDDFunctions[K, V] = {
+    new SequenceFileRDDFunctions(rdd)
+  }
+
+  implicit def rddToOrderedRDDFunctions[K : Ordering : ClassTag, V: ClassTag](rdd: RDD[(K, V)])
+    : OrderedRDDFunctions[K, V, (K, V)] = {
+    new OrderedRDDFunctions[K, V, (K, V)](rdd)
+  }
+
+  implicit def doubleRDDToDoubleRDDFunctions(rdd: RDD[Double]): DoubleRDDFunctions = {
+    new DoubleRDDFunctions(rdd)
+  }
+
+  implicit def numericRDDToDoubleRDDFunctions[T](rdd: RDD[T])(implicit num: Numeric[T])
+    : DoubleRDDFunctions = {
+    new DoubleRDDFunctions(rdd.map(x => num.toDouble(x)))
+  }
+}
+
+
 /**
  * A Resilient Distributed Dataset (RDD), the basic abstraction in Spark. Represents an immutable,
  * partitioned collection of elements that can be operated on in parallel. This class contains the

@@ -1385,31 +1423,3 @@ abstract class RDD[T: ClassTag](
     new JavaRDD(this)(elementClassTag)
   }
 }
-
-object RDD {
-
-  // The following implicit functions were in SparkContext before 1.2 and users had to
-  // `import SparkContext._` to enable them. Now we move them here to make the compiler find
-  // them automatically. However, we still keep the old functions in SparkContext for backward
-  // compatibility and forward to the following functions directly.
-
-  implicit def rddToPairRDDFunctions[K, V](rdd: RDD[(K, V)])
-      (implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null) = {
-    new PairRDDFunctions(rdd)
-  }
-
-  implicit def rddToAsyncRDDActions[T: ClassTag](rdd: RDD[T]) = new AsyncRDDActions(rdd)
-
-  implicit def rddToSequenceFileRDDFunctions[K <% Writable: ClassTag, V <% Writable: ClassTag](
-      rdd: RDD[(K, V)]) =
-    new SequenceFileRDDFunctions(rdd)
-
-  implicit def rddToOrderedRDDFunctions[K : Ordering : ClassTag, V: ClassTag](
-      rdd: RDD[(K, V)]) =
-    new OrderedRDDFunctions[K, V, (K, V)](rdd)
-
-  implicit def doubleRDDToDoubleRDDFunctions(rdd: RDD[Double]) = new DoubleRDDFunctions(rdd)
-
-  implicit def numericRDDToDoubleRDDFunctions[T](rdd: RDD[T])(implicit num: Numeric[T]) =
-    new DoubleRDDFunctions(rdd.map(x => num.toDouble(x)))
-}
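For context, a hedged usage sketch (illustrative, not from this commit; the object name, app name, and sample data are made up): because the conversions live in `object RDD`, pair-RDD methods such as `reduceByKey` resolve on Spark 1.2+ without `import SparkContext._`.

import org.apache.spark.{SparkConf, SparkContext}

object ImplicitDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("implicit-demo").setMaster("local[2]"))
    // reduceByKey is defined on PairRDDFunctions; the compiler finds the
    // rddToPairRDDFunctions conversion in RDD's companion object automatically.
    val counts = sc.parallelize(Seq("a", "b", "a"))
      .map(word => (word, 1))
      .reduceByKey(_ + _)
    counts.collect().foreach(println)
    sc.stop()
  }
}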

Comments (0)