Commit 017c8ba

Author: Andrew Or

Merge branch 'master' of github.com:apache/spark into demarcate-tests

Conflicts:
  core/src/test/scala/org/apache/spark/status/api/v1/SimpleDateParamSuite.scala
  mllib/src/test/scala/org/apache/spark/ml/feature/OneHotEncoderSuite.scala
  mllib/src/test/scala/org/apache/spark/ml/feature/VectorAssemblerSuite.scala
  sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/DictionaryEncodingSuite.scala
  sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/IntegralDeltaSuite.scala

2 parents: 7790b6c + e7b6177

300 files changed: +1497 additions, -1497 deletions


core/pom.xml

Lines changed: 7 additions & 1 deletion
@@ -383,9 +383,15 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.spark-project</groupId>
+      <groupId>net.razorvine</groupId>
       <artifactId>pyrolite</artifactId>
       <version>4.4</version>
+      <exclusions>
+        <exclusion>
+          <groupId>net.razorvine</groupId>
+          <artifactId>serpent</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>net.sf.py4j</groupId>

core/src/main/scala/org/apache/spark/Accumulators.scala

Lines changed: 1 addition & 1 deletion
@@ -228,7 +228,7 @@ GrowableAccumulableParam[R <% Growable[T] with TraversableOnce[T] with Serializa
  * @tparam T result type
  */
 class Accumulator[T](@transient initialValue: T, param: AccumulatorParam[T], name: Option[String])
-  extends Accumulable[T,T](initialValue, param, name) {
+  extends Accumulable[T, T](initialValue, param, name) {
 
   def this(initialValue: T, param: AccumulatorParam[T]) = this(initialValue, param, None)
 }
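
The change above is whitespace-only; Accumulator[T] still just specializes Accumulable[T, T]. For context, a rough illustrative sketch of how the class is used against the Spark 1.x API (the app name and sample data are invented):

import org.apache.spark.{SparkConf, SparkContext}

object AccumulatorExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("accumulator-example").setMaster("local[2]"))
    // sc.accumulator returns an Accumulator[Int], i.e. an Accumulable[Int, Int]
    val parseErrors = sc.accumulator(0, "parse errors")
    sc.parallelize(Seq("1", "2", "x", "4")).foreach { s =>
      if (scala.util.Try(s.toInt).isFailure) parseErrors += 1  // updated on executors
    }
    println(parseErrors.value)  // value is only readable on the driver
    sc.stop()
  }
}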

core/src/main/scala/org/apache/spark/Aggregator.scala

Lines changed: 2 additions & 2 deletions
@@ -45,7 +45,7 @@ case class Aggregator[K, V, C] (
   def combineValuesByKey(iter: Iterator[_ <: Product2[K, V]],
                          context: TaskContext): Iterator[(K, C)] = {
     if (!isSpillEnabled) {
-      val combiners = new AppendOnlyMap[K,C]
+      val combiners = new AppendOnlyMap[K, C]
       var kv: Product2[K, V] = null
       val update = (hadValue: Boolean, oldValue: C) => {
         if (hadValue) mergeValue(oldValue, kv._2) else createCombiner(kv._2)
@@ -76,7 +76,7 @@ case class Aggregator[K, V, C] (
     : Iterator[(K, C)] =
   {
     if (!isSpillEnabled) {
-      val combiners = new AppendOnlyMap[K,C]
+      val combiners = new AppendOnlyMap[K, C]
       var kc: Product2[K, C] = null
       val update = (hadValue: Boolean, oldValue: C) => {
         if (hadValue) mergeCombiners(oldValue, kc._2) else kc._2
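
These are also spacing-only edits, but as context for what the no-spill path of combineValuesByKey computes, here is a sketch that stands in a plain mutable.HashMap for Spark's internal AppendOnlyMap; the parameters mirror the Aggregator's createCombiner/mergeValue functions and everything else is illustrative:

import scala.collection.mutable

// Illustrative only: the no-spill combine-values-by-key pattern with a standard HashMap.
def combineValuesByKey[K, V, C](
    iter: Iterator[(K, V)],
    createCombiner: V => C,
    mergeValue: (C, V) => C): Iterator[(K, C)] = {
  val combiners = mutable.HashMap.empty[K, C]
  iter.foreach { case (k, v) =>
    combiners(k) = combiners.get(k) match {
      case Some(c) => mergeValue(c, v)   // key seen before: fold the new value in
      case None    => createCombiner(v)  // first value for this key
    }
  }
  combiners.iterator
}

// e.g. word count: combineValuesByKey(words.map(_ -> 1).iterator, identity[Int], (c: Int, v: Int) => c + v)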

core/src/main/scala/org/apache/spark/Partitioner.scala

Lines changed: 4 additions & 4 deletions
@@ -103,7 +103,7 @@ class HashPartitioner(partitions: Int) extends Partitioner {
  */
 class RangePartitioner[K : Ordering : ClassTag, V](
     @transient partitions: Int,
-    @transient rdd: RDD[_ <: Product2[K,V]],
+    @transient rdd: RDD[_ <: Product2[K, V]],
     private var ascending: Boolean = true)
   extends Partitioner {
 
@@ -185,7 +185,7 @@ class RangePartitioner[K : Ordering : ClassTag, V](
   }
 
   override def equals(other: Any): Boolean = other match {
-    case r: RangePartitioner[_,_] =>
+    case r: RangePartitioner[_, _] =>
       r.rangeBounds.sameElements(rangeBounds) && r.ascending == ascending
     case _ =>
       false
@@ -249,7 +249,7 @@ private[spark] object RangePartitioner {
    * @param sampleSizePerPartition max sample size per partition
    * @return (total number of items, an array of (partitionId, number of items, sample))
    */
-  def sketch[K:ClassTag](
+  def sketch[K : ClassTag](
       rdd: RDD[K],
       sampleSizePerPartition: Int): (Long, Array[(Int, Int, Array[K])]) = {
     val shift = rdd.id
@@ -272,7 +272,7 @@ private[spark] object RangePartitioner {
    * @param partitions number of partitions
    * @return selected bounds
    */
-  def determineBounds[K:Ordering:ClassTag](
+  def determineBounds[K : Ordering : ClassTag](
       candidates: ArrayBuffer[(K, Float)],
       partitions: Int): Array[K] = {
     val ordering = implicitly[Ordering[K]]
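
Again spacing-only, but for orientation: RangePartitioner samples the keys of a pair RDD (sketch) and picks boundaries (determineBounds) so that each output partition holds a contiguous key range. A small illustrative use of the public constructor against the Spark 1.x API; the data and app name are invented:

import org.apache.spark.{RangePartitioner, SparkConf, SparkContext}

object RangePartitionerExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("range-partitioner-example").setMaster("local[2]"))
    val pairs = sc.parallelize(Seq("banana" -> 2, "apple" -> 1, "cherry" -> 3), 3)
    // Samples the keys, then assigns each key to a partition by key range.
    val byRange = pairs.partitionBy(new RangePartitioner(2, pairs))
    println(byRange.partitions.length)  // at most 2; may be fewer for tiny key sets
    sc.stop()
  }
}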

core/src/main/scala/org/apache/spark/SizeEstimator.scala

Lines changed: 0 additions & 44 deletions
This file was deleted.

core/src/main/scala/org/apache/spark/SparkConf.scala

Lines changed: 1 addition & 1 deletion
@@ -481,7 +481,7 @@ private[spark] object SparkConf extends Logging {
         "are no longer accepted. To specify the equivalent now, one may use '64k'.")
     )
 
-    Map(configs.map { cfg => (cfg.key -> cfg) }:_*)
+    Map(configs.map { cfg => (cfg.key -> cfg) } : _*)
   }
 
   /**
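
The only change here is spacing around the ": _*" ascription, which splats a sequence into varargs when building the Map. A standalone sketch of the same idiom; ConfigEntry and the sample keys are hypothetical stand-ins for the private config descriptors in SparkConf:

// Hypothetical stand-in for the deprecated-config descriptors in SparkConf.
case class ConfigEntry(key: String, doc: String)

val configs = Seq(
  ConfigEntry("spark.ui.port", "Port for the application UI"),
  ConfigEntry("spark.app.name", "Name of the application"))

// ": _*" expands the Seq[(String, ConfigEntry)] into the varargs that Map.apply expects.
val byKey: Map[String, ConfigEntry] = Map(configs.map(cfg => cfg.key -> cfg): _*)

println(byKey("spark.ui.port").doc)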

core/src/main/scala/org/apache/spark/SparkContext.scala

Lines changed: 5 additions & 5 deletions
@@ -389,7 +389,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
 
     _conf.set("spark.executor.id", SparkContext.DRIVER_IDENTIFIER)
 
-    _jars =_conf.getOption("spark.jars").map(_.split(",")).map(_.filter(_.size != 0)).toSeq.flatten
+    _jars = _conf.getOption("spark.jars").map(_.split(",")).map(_.filter(_.size != 0)).toSeq.flatten
     _files = _conf.getOption("spark.files").map(_.split(",")).map(_.filter(_.size != 0))
       .toSeq.flatten
 
@@ -438,7 +438,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
     _ui =
       if (conf.getBoolean("spark.ui.enabled", true)) {
         Some(SparkUI.createLiveUI(this, _conf, listenerBus, _jobProgressListener,
-          _env.securityManager,appName, startTime = startTime))
+          _env.securityManager, appName, startTime = startTime))
       } else {
         // For tests, do not enable the UI
         None
@@ -917,7 +917,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
       classOf[FixedLengthBinaryInputFormat],
       classOf[LongWritable],
       classOf[BytesWritable],
-      conf=conf)
+      conf = conf)
     val data = br.map { case (k, v) =>
       val bytes = v.getBytes
       assert(bytes.length == recordLength, "Byte array does not have correct length")
@@ -1267,7 +1267,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
    */
   def accumulableCollection[R <% Growable[T] with TraversableOnce[T] with Serializable: ClassTag, T]
       (initialValue: R): Accumulable[R, T] = {
-    val param = new GrowableAccumulableParam[R,T]
+    val param = new GrowableAccumulableParam[R, T]
     val acc = new Accumulable(initialValue, param)
     cleaner.foreach(_.registerAccumulatorForCleanup(acc))
     acc
@@ -1316,7 +1316,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
     val uri = new URI(path)
     val schemeCorrectedPath = uri.getScheme match {
       case null | "local" => new File(path).getCanonicalFile.toURI.toString
-      case _ =>  path
+      case _ => path
     }
 
     val hadoopPath = new Path(schemeCorrectedPath)
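
Among the touched lines, accumulableCollection is the one with a public-facing use: it wraps any growable collection as an Accumulable so tasks can append elements to it. A brief illustrative use against the Spark 1.x API (the data is made up):

import scala.collection.mutable.ArrayBuffer
import org.apache.spark.{SparkConf, SparkContext}

object AccumulableCollectionExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("acc-collection-example").setMaster("local[2]"))
    // Any Growable-with-TraversableOnce collection can accumulate elements added by tasks.
    val badRecords = sc.accumulableCollection(ArrayBuffer.empty[String])
    sc.parallelize(Seq("1", "oops", "3")).foreach { s =>
      if (scala.util.Try(s.toInt).isFailure) badRecords += s
    }
    println(badRecords.value)  // ArrayBuffer(oops); readable only on the driver
    sc.stop()
  }
}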

core/src/main/scala/org/apache/spark/SparkEnv.scala

Lines changed: 2 additions & 2 deletions
@@ -298,7 +298,7 @@ object SparkEnv extends Logging {
       }
     }
 
-    val mapOutputTracker =  if (isDriver) {
+    val mapOutputTracker = if (isDriver) {
       new MapOutputTrackerMaster(conf)
     } else {
       new MapOutputTrackerWorker(conf)
@@ -348,7 +348,7 @@ object SparkEnv extends Logging {
       val fileServerPort = conf.getInt("spark.fileserver.port", 0)
       val server = new HttpFileServer(conf, securityManager, fileServerPort)
       server.initialize()
-      conf.set("spark.fileserver.uri",  server.serverUri)
+      conf.set("spark.fileserver.uri", server.serverUri)
       server
     } else {
       null

core/src/main/scala/org/apache/spark/SparkHadoopWriter.scala

Lines changed: 5 additions & 5 deletions
@@ -50,8 +50,8 @@ class SparkHadoopWriter(@transient jobConf: JobConf)
   private var jID: SerializableWritable[JobID] = null
   private var taID: SerializableWritable[TaskAttemptID] = null
 
-  @transient private var writer: RecordWriter[AnyRef,AnyRef] = null
-  @transient private var format: OutputFormat[AnyRef,AnyRef] = null
+  @transient private var writer: RecordWriter[AnyRef, AnyRef] = null
+  @transient private var format: OutputFormat[AnyRef, AnyRef] = null
   @transient private var committer: OutputCommitter = null
   @transient private var jobContext: JobContext = null
   @transient private var taskContext: TaskAttemptContext = null
@@ -114,10 +114,10 @@ class SparkHadoopWriter(@transient jobConf: JobConf)
 
   // ********* Private Functions *********
 
-  private def getOutputFormat(): OutputFormat[AnyRef,AnyRef] = {
+  private def getOutputFormat(): OutputFormat[AnyRef, AnyRef] = {
     if (format == null) {
       format = conf.value.getOutputFormat()
-        .asInstanceOf[OutputFormat[AnyRef,AnyRef]]
+        .asInstanceOf[OutputFormat[AnyRef, AnyRef]]
     }
     format
   }
@@ -138,7 +138,7 @@ class SparkHadoopWriter(@transient jobConf: JobConf)
 
   private def getTaskContext(): TaskAttemptContext = {
     if (taskContext == null) {
-      taskContext =  newTaskAttemptContext(conf.value, taID.value)
+      taskContext = newTaskAttemptContext(conf.value, taID.value)
     }
     taskContext
   }

core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala

Lines changed: 1 addition & 1 deletion
@@ -96,7 +96,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
   def mapPartitionsWithIndex[R](
       f: JFunction2[jl.Integer, java.util.Iterator[T], java.util.Iterator[R]],
       preservesPartitioning: Boolean = false): JavaRDD[R] =
-    new JavaRDD(rdd.mapPartitionsWithIndex(((a,b) => f(a,asJavaIterator(b))),
+    new JavaRDD(rdd.mapPartitionsWithIndex(((a, b) => f(a, asJavaIterator(b))),
       preservesPartitioning)(fakeClassTag))(fakeClassTag)
 
   /**
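
The Java wrapper above delegates to the Scala-side RDD.mapPartitionsWithIndex; for reference, a minimal illustrative call from Scala (Spark 1.x API, invented data):

import org.apache.spark.{SparkConf, SparkContext}

object MapPartitionsWithIndexExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("mpwi-example").setMaster("local[2]"))
    val rdd = sc.parallelize(1 to 6, 3)
    // Tag each element with the index of the partition that produced it.
    val tagged = rdd.mapPartitionsWithIndex { (partIndex, iter) =>
      iter.map(x => s"partition $partIndex -> $x")
    }
    tagged.collect().foreach(println)
    sc.stop()
  }
}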
