
Commit 767aec4 (parent 61ab157)

Change visibility for CancelableRangePartitioner

3 files changed: 7 additions, 7 deletions


core/src/main/scala/org/apache/spark/Partitioner.scala

Lines changed: 5 additions & 5 deletions
@@ -110,7 +110,7 @@ class RangePartitioner[K : Ordering : ClassTag, V](
   // We allow partitions = 0, which happens when sorting an empty RDD under the default settings.
   require(partitions >= 0, s"Number of partitions cannot be negative but found $partitions.")
 
-  private var ordering = implicitly[Ordering[K]]
+  protected var ordering = implicitly[Ordering[K]]
 
   // An array of upper bounds for the first (partitions - 1) partitions
   private var rangeBounds: Array[K] = {
@@ -156,7 +156,7 @@ class RangePartitioner[K : Ordering : ClassTag, V](
 
   def numPartitions = rangeBounds.length + 1
 
-  private var binarySearch: ((Array[K], K) => Int) = CollectionsUtils.makeBinarySearch[K]
+  protected var binarySearch: ((Array[K], K) => Int) = CollectionsUtils.makeBinarySearch[K]
 
   def getPartition(key: Any): Int = {
     val k = key.asInstanceOf[K]
@@ -204,7 +204,7 @@ class RangePartitioner[K : Ordering : ClassTag, V](
   }
 
   @throws(classOf[IOException])
-  private def writeObject(out: ObjectOutputStream): Unit = Utils.tryOrIOException {
+  protected def writeObject(out: ObjectOutputStream): Unit = Utils.tryOrIOException {
     val sfactory = SparkEnv.get.serializer
     sfactory match {
       case js: JavaSerializer => out.defaultWriteObject()
@@ -222,7 +222,7 @@ class RangePartitioner[K : Ordering : ClassTag, V](
   }
 
   @throws(classOf[IOException])
-  private def readObject(in: ObjectInputStream): Unit = Utils.tryOrIOException {
+  protected def readObject(in: ObjectInputStream): Unit = Utils.tryOrIOException {
     val sfactory = SparkEnv.get.serializer
     sfactory match {
       case js: JavaSerializer => in.defaultReadObject()
@@ -240,7 +240,7 @@ class RangePartitioner[K : Ordering : ClassTag, V](
   }
 }
 
-private[spark] object RangePartitioner {
+object RangePartitioner {
 
   /**
    * Sketches the input RDD via reservoir sampling on each partition.
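
The members made protected above (ordering, binarySearch) become reusable from subclasses, which is what the CancelableRangePartitioner named in the commit message appears to need. That class is not part of this commit, so the following is only a hypothetical sketch of the kind of subclass the relaxed visibility permits; its name, cancellation flag, and behavior are assumptions:

    import scala.reflect.ClassTag

    import org.apache.spark.RangePartitioner
    import org.apache.spark.rdd.RDD

    // Hypothetical sketch only: the real CancelableRangePartitioner is not in
    // this commit; everything beyond the visibility change is an assumption.
    class CancelableRangePartitioner[K : Ordering : ClassTag, V](
        partitions: Int,
        rdd: RDD[_ <: Product2[K, V]])
      extends RangePartitioner[K, V](partitions, rdd) {

      // Cooperative cancellation flag, flipped from another thread.
      @volatile private var canceled = false

      def cancel(): Unit = { canceled = true }

      // The now-protected `ordering` (and likewise `binarySearch`) is directly
      // accessible from the subclass.
      def keyBefore(a: K, b: K): Boolean = ordering.lt(a, b)

      override def getPartition(key: Any): Int = {
        // Fail fast once canceled; otherwise delegate to the range lookup.
        if (canceled) {
          throw new IllegalStateException("range partitioning was canceled")
        }
        super.getPartition(key)
      }
    }

Note that rangeBounds stays private in this commit, so a subclass can refine getPartition only by delegating to super.getPartition rather than by re-reading the bounds directly.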

core/src/main/scala/org/apache/spark/util/CollectionsUtils.scala

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ import java.util
 
 import scala.reflect.{classTag, ClassTag}
 
-private[spark] object CollectionsUtils {
+object CollectionsUtils {
   def makeBinarySearch[K : Ordering : ClassTag] : (Array[K], K) => Int = {
     // For primitive keys, we can use the natural ordering. Otherwise, use the Ordering comparator.
     classTag[K] match {
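
Dropping private[spark] makes makeBinarySearch callable from outside the org.apache.spark package, e.g. from a partitioner defined in user code. A minimal usage sketch, assuming the returned function keeps the java.util.Arrays.binarySearch convention (a non-negative index on a hit, -(insertionPoint) - 1 on a miss) that RangePartitioner.getPartition compensates for:

    import org.apache.spark.util.CollectionsUtils

    // Build a search function specialized for Int keys.
    val search: (Array[Int], Int) => Int = CollectionsUtils.makeBinarySearch[Int]

    val bounds = Array(10, 20, 30)    // must be sorted
    search(bounds, 20)                // 1: exact hit returns the index
    val miss = search(bounds, 25)     // -3: -(insertion point) - 1
    val insertionPoint = -miss - 1    // 2: where 25 would be inserted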

core/src/main/scala/org/apache/spark/util/random/SamplingUtils.scala

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ package org.apache.spark.util.random
 import scala.reflect.ClassTag
 import scala.util.Random
 
-private[spark] object SamplingUtils {
+object SamplingUtils {
 
   /**
    * Reservoir sampling implementation that also returns the input size.
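
With SamplingUtils public, its reservoir sampler is likewise usable outside Spark's own packages. A usage sketch, assuming the signature reservoirSampleAndCount[T: ClassTag](input: Iterator[T], k: Int, seed: Long): (Array[T], Long); only the scaladoc is visible in this hunk, so the exact shape is an assumption:

    import org.apache.spark.util.random.SamplingUtils

    // One pass over the iterator: keep a uniform sample of 10 elements while
    // counting the input at the same time.
    val input = (1 to 1000).iterator
    val (sample, total) = SamplingUtils.reservoirSampleAndCount(input, 10, 42L)
    // sample: Array[Int] of length 10; total: 1000L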
