
Commit 11d0c2b

Integer -> java.lang.Integer
1 parent 737819a commit 11d0c2b
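
For context (an inference, not stated in the commit): Scala auto-imports java.lang._, so the unqualified Integer in these signatures already resolved to java.lang.Integer; spelling out the fully qualified name simply makes the boxed type explicit in the Java-facing API. A minimal sketch of the types involved, not taken from this commit:

// Unqualified Integer in Scala is already java.lang.Integer, and
// Predef supplies the box/unbox conversions at Int boundaries.
object IntegerBoxingSketch {
  val boxed: java.lang.Integer = 42      // boxed via Predef.int2Integer
  val unqualified: Integer = boxed       // same type, shorter spelling
  val unboxed: Int = boxed               // unboxed via Predef.Integer2int
}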

2 files changed, +3 -3 lines changed

2 files changed

+3
-3
lines changed

core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
    * of the original partition.
    */
   def mapPartitionsWithIndex[R: ClassTag](
-      f: JFunction2[Integer, java.util.Iterator[T], java.util.Iterator[R]],
+      f: JFunction2[java.lang.Integer, java.util.Iterator[T], java.util.Iterator[R]],
       preservesPartitioning: Boolean = false): JavaRDD[R] =
     new JavaRDD(rdd.mapPartitionsWithIndex(((a,b) => f(a,asJavaIterator(b))),
       preservesPartitioning))
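
A hedged usage sketch of the changed signature, assuming JFunction2 is this file's alias for org.apache.spark.api.java.function.Function2 and using hypothetical setup names; the callback now spells its partition-index parameter as the boxed java.lang.Integer:

import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.api.java.function.{Function2 => JFunction2}
import scala.collection.JavaConverters._

object MapPartitionsWithIndexSketch {
  def main(args: Array[String]): Unit = {
    val jsc = new JavaSparkContext("local", "sketch")  // hypothetical local context
    val rdd = jsc.parallelize(java.util.Arrays.asList[java.lang.Integer](1, 2, 3, 4), 2)
    // Tag each element with the index of the partition it came from.
    val tagged = rdd.mapPartitionsWithIndex(
      new JFunction2[java.lang.Integer, java.util.Iterator[java.lang.Integer], java.util.Iterator[String]] {
        override def call(idx: java.lang.Integer, it: java.util.Iterator[java.lang.Integer]) =
          it.asScala.map(x => s"partition $idx: $x").asJava
      },
      false)  // preservesPartitioning
    tagged.collect().asScala.foreach(println)
    jsc.stop()
  }
}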

core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala

Lines changed: 2 additions & 2 deletions
@@ -106,10 +106,10 @@ class JavaSparkContext(val sc: SparkContext) extends JavaSparkContextVarargsWorkaround {
   def startTime: java.lang.Long = sc.startTime
 
   /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
-  def defaultParallelism: Integer = sc.defaultParallelism
+  def defaultParallelism: java.lang.Integer = sc.defaultParallelism
 
   /** Default min number of partitions for Hadoop RDDs when not given by user */
-  def defaultMinSplits: Integer = sc.defaultMinSplits
+  def defaultMinSplits: java.lang.Integer = sc.defaultMinSplits
 
   /** Distribute a local Scala collection to form an RDD. */
   def parallelize[T](list: java.util.List[T], numSlices: Int): JavaRDD[T] = {
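
A minimal sketch (hypothetical setup) of the two accessors after this change; Java callers now see the boxed java.lang.Integer, while Scala callers can still unbox implicitly via Predef.Integer2int:

import org.apache.spark.api.java.JavaSparkContext

object DefaultParallelismSketch {
  def main(args: Array[String]): Unit = {
    val jsc = new JavaSparkContext("local", "sketch")  // hypothetical local context
    val parallelism: java.lang.Integer = jsc.defaultParallelism
    val minSplits: java.lang.Integer = jsc.defaultMinSplits
    // Implicit unboxing supplies the scala Int that parallelize expects.
    val rdd = jsc.parallelize(java.util.Arrays.asList[java.lang.Integer](1, 2, 3), parallelism)
    println(s"parallelism=$parallelism minSplits=$minSplits count=${rdd.count()}")
    jsc.stop()
  }
}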

0 commit comments
