Commit 494d5c2

Andrew Or committed
Revert a few unintended style changes
1 parent: 9fac6f3

File tree: 3 files changed, +50 -101 lines


core/src/main/scala/org/apache/spark/SparkContext.scala
Lines changed: 2 additions & 2 deletions

@@ -2004,10 +2004,10 @@ object SparkContext extends Logging {
   }

   private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description"
+
   private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id"
+
   private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel"
-  private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope"
-  private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride"

   /**
    * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was
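For context, the three SPARK_JOB_* constants kept above are the keys Spark stores as thread-local job properties. A minimal sketch of how they are populated through the public setJobGroup API (the app name, group id, and description below are illustrative, not from the commit):

    import org.apache.spark.{SparkConf, SparkContext}

    object JobGroupExample {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setAppName("demo").setMaster("local[*]"))
        // setJobGroup records "spark.jobGroup.id", "spark.job.description" and
        // "spark.job.interruptOnCancel" as local properties for the jobs that follow.
        sc.setJobGroup("nightly-etl", "Recompute aggregates", interruptOnCancel = true)
        sc.parallelize(1 to 100).sum()
        sc.cancelJobGroup("nightly-etl") // cancels every job tagged with this group id
        sc.stop()
      }
    }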

core/src/main/scala/org/apache/spark/rdd/RDD.scala
Lines changed: 47 additions & 98 deletions

@@ -315,17 +315,14 @@ abstract class RDD[T: ClassTag](
    * Return a new RDD containing the distinct elements in this RDD.
    */
   @RDDScope
-  def distinct(numPartitions: Int)(implicit ord: Ordering[T] = null): RDD[T] = {
+  def distinct(numPartitions: Int)(implicit ord: Ordering[T] = null): RDD[T] =
     map(x => (x, null)).reduceByKey((x, y) => x, numPartitions).map(_._1)
-  }

   /**
    * Return a new RDD containing the distinct elements in this RDD.
    */
   @RDDScope
-  def distinct(): RDD[T] = {
-    distinct(partitions.length)
-  }
+  def distinct(): RDD[T] = distinct(partitions.length)

   /**
    * Return a new RDD that has exactly numPartitions partitions.
@@ -362,10 +359,8 @@ abstract class RDD[T: ClassTag](
    * data distributed using a hash partitioner.
    */
   @RDDScope
-  def coalesce(
-      numPartitions: Int,
-      shuffle: Boolean = false)
-      (implicit ord: Ordering[T] = null): RDD[T] = {
+  def coalesce(numPartitions: Int, shuffle: Boolean = false)(implicit ord: Ordering[T] = null)
+      : RDD[T] = {
     if (shuffle) {
       /** Distributes elements evenly across output partitions, starting from a random partition. */
       val distributePartition = (index: Int, items: Iterator[T]) => {
@@ -419,9 +414,7 @@ abstract class RDD[T: ClassTag](
    * @return split RDDs in an array
    */
   @RDDScope
-  def randomSplit(
-      weights: Array[Double],
-      seed: Long = Utils.random.nextLong): Array[RDD[T]] = {
+  def randomSplit(weights: Array[Double], seed: Long = Utils.random.nextLong): Array[RDD[T]] = {
     val sum = weights.sum
     val normalizedCumWeights = weights.map(_ / sum).scanLeft(0.0d)(_ + _)
     normalizedCumWeights.sliding(2).map { x =>
@@ -502,9 +495,7 @@ abstract class RDD[T: ClassTag](
    * times (use `.distinct()` to eliminate them).
    */
   @RDDScope
-  def ++(other: RDD[T]): RDD[T] = {
-    this.union(other)
-  }
+  def ++(other: RDD[T]): RDD[T] = this.union(other)

   /**
    * Return this RDD sorted by the given key function.
@@ -514,11 +505,10 @@ abstract class RDD[T: ClassTag](
       f: (T) => K,
       ascending: Boolean = true,
       numPartitions: Int = this.partitions.length)
-      (implicit ord: Ordering[K], ctag: ClassTag[K]): RDD[T] = {
+      (implicit ord: Ordering[K], ctag: ClassTag[K]): RDD[T] =
     this.keyBy[K](f)
-      .sortByKey(ascending, numPartitions)
-      .values
-  }
+        .sortByKey(ascending, numPartitions)
+        .values

   /**
    * Return the intersection of this RDD and another one. The output will not contain any duplicate
@@ -529,8 +519,8 @@ abstract class RDD[T: ClassTag](
   @RDDScope
   def intersection(other: RDD[T]): RDD[T] = {
     this.map(v => (v, null)).cogroup(other.map(v => (v, null)))
-      .filter { case (_, (leftGroup, rightGroup)) => leftGroup.nonEmpty && rightGroup.nonEmpty }
-      .keys
+        .filter { case (_, (leftGroup, rightGroup)) => leftGroup.nonEmpty && rightGroup.nonEmpty }
+        .keys
   }

   /**
@@ -542,13 +532,11 @@ abstract class RDD[T: ClassTag](
    * @param partitioner Partitioner to use for the resulting RDD
    */
   @RDDScope
-  def intersection(
-      other: RDD[T],
-      partitioner: Partitioner)
-      (implicit ord: Ordering[T] = null): RDD[T] = {
+  def intersection(other: RDD[T], partitioner: Partitioner)(implicit ord: Ordering[T] = null)
+      : RDD[T] = {
     this.map(v => (v, null)).cogroup(other.map(v => (v, null)), partitioner)
-      .filter { case (_, (leftGroup, rightGroup)) => leftGroup.nonEmpty && rightGroup.nonEmpty }
-      .keys
+        .filter { case (_, (leftGroup, rightGroup)) => leftGroup.nonEmpty && rightGroup.nonEmpty }
+        .keys
   }

   /**
@@ -577,9 +565,7 @@ abstract class RDD[T: ClassTag](
    * elements (a, b) where a is in `this` and b is in `other`.
    */
   @RDDScope
-  def cartesian[U: ClassTag](other: RDD[U]): RDD[(T, U)] = {
-    new CartesianRDD(sc, this, other)
-  }
+  def cartesian[U: ClassTag](other: RDD[U]): RDD[(T, U)] = new CartesianRDD(sc, this, other)

   /**
    * Return an RDD of grouped items. Each group consists of a key and a sequence of elements
@@ -591,9 +577,8 @@ abstract class RDD[T: ClassTag](
    * or [[PairRDDFunctions.reduceByKey]] will provide much better performance.
    */
   @RDDScope
-  def groupBy[K](f: T => K)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] = {
+  def groupBy[K](f: T => K)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] =
     groupBy[K](f, defaultPartitioner(this))
-  }

   /**
    * Return an RDD of grouped elements. Each group consists of a key and a sequence of elements
@@ -605,11 +590,8 @@ abstract class RDD[T: ClassTag](
    * or [[PairRDDFunctions.reduceByKey]] will provide much better performance.
    */
   @RDDScope
-  def groupBy[K](
-      f: T => K,
-      numPartitions: Int)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] = {
+  def groupBy[K](f: T => K, numPartitions: Int)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] =
     groupBy(f, new HashPartitioner(numPartitions))
-  }

   /**
    * Return an RDD of grouped items. Each group consists of a key and a sequence of elements
@@ -621,10 +603,8 @@ abstract class RDD[T: ClassTag](
    * or [[PairRDDFunctions.reduceByKey]] will provide much better performance.
    */
   @RDDScope
-  def groupBy[K](
-      f: T => K,
-      p: Partitioner)
-      (implicit kt: ClassTag[K], ord: Ordering[K] = null): RDD[(K, Iterable[T])] = {
+  def groupBy[K](f: T => K, p: Partitioner)(implicit kt: ClassTag[K], ord: Ordering[K] = null)
+      : RDD[(K, Iterable[T])] = {
     val cleanF = sc.clean(f)
     this.map(t => (cleanF(t), t)).groupByKey(p)
   }
@@ -633,17 +613,14 @@ abstract class RDD[T: ClassTag](
    * Return an RDD created by piping elements to a forked external process.
    */
   @RDDScope
-  def pipe(command: String): RDD[String] = {
-    new PipedRDD(this, command)
-  }
+  def pipe(command: String): RDD[String] = new PipedRDD(this, command)

   /**
    * Return an RDD created by piping elements to a forked external process.
    */
   @RDDScope
-  def pipe(command: String, env: Map[String, String]): RDD[String] = {
+  def pipe(command: String, env: Map[String, String]): RDD[String] =
     new PipedRDD(this, command, env)
-  }

   /**
    * Return an RDD created by piping elements to a forked external process.
@@ -685,8 +662,7 @@ abstract class RDD[T: ClassTag](
    */
   @RDDScope
   def mapPartitions[U: ClassTag](
-      f: Iterator[T] => Iterator[U],
-      preservesPartitioning: Boolean = false): RDD[U] = {
+      f: Iterator[T] => Iterator[U], preservesPartitioning: Boolean = false): RDD[U] = {
     val func = (context: TaskContext, index: Int, iter: Iterator[T]) => f(iter)
     new MapPartitionsRDD(this, sc.clean(func), preservesPartitioning)
   }
@@ -700,8 +676,7 @@ abstract class RDD[T: ClassTag](
    */
   @RDDScope
   def mapPartitionsWithIndex[U: ClassTag](
-      f: (Int, Iterator[T]) => Iterator[U],
-      preservesPartitioning: Boolean = false): RDD[U] = {
+      f: (Int, Iterator[T]) => Iterator[U], preservesPartitioning: Boolean = false): RDD[U] = {
     val func = (context: TaskContext, index: Int, iter: Iterator[T]) => f(index, iter)
     new MapPartitionsRDD(this, sc.clean(func), preservesPartitioning)
   }
@@ -731,8 +706,7 @@ abstract class RDD[T: ClassTag](
   @deprecated("use mapPartitionsWithIndex", "0.7.0")
   @RDDScope
   def mapPartitionsWithSplit[U: ClassTag](
-      f: (Int, Iterator[T]) => Iterator[U],
-      preservesPartitioning: Boolean = false): RDD[U] = {
+      f: (Int, Iterator[T]) => Iterator[U], preservesPartitioning: Boolean = false): RDD[U] = {
     mapPartitionsWithIndex(f, preservesPartitioning)
   }

@@ -826,44 +800,39 @@ abstract class RDD[T: ClassTag](
   @RDDScope
   def zipPartitions[B: ClassTag, V: ClassTag]
       (rdd2: RDD[B], preservesPartitioning: Boolean)
-      (f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] = {
+      (f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] =
     new ZippedPartitionsRDD2(sc, sc.clean(f), this, rdd2, preservesPartitioning)
-  }

   @RDDScope
   def zipPartitions[B: ClassTag, V: ClassTag]
       (rdd2: RDD[B])
-      (f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] = {
+      (f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] =
     zipPartitions(rdd2, preservesPartitioning = false)(f)
-  }

   @RDDScope
   def zipPartitions[B: ClassTag, C: ClassTag, V: ClassTag]
       (rdd2: RDD[B], rdd3: RDD[C], preservesPartitioning: Boolean)
-      (f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] = {
+      (f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] =
     new ZippedPartitionsRDD3(sc, sc.clean(f), this, rdd2, rdd3, preservesPartitioning)
-  }

   @RDDScope
   def zipPartitions[B: ClassTag, C: ClassTag, V: ClassTag]
       (rdd2: RDD[B], rdd3: RDD[C])
-      (f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] = {
+      (f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] =
     zipPartitions(rdd2, rdd3, preservesPartitioning = false)(f)
-  }

   @RDDScope
   def zipPartitions[B: ClassTag, C: ClassTag, D: ClassTag, V: ClassTag]
       (rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D], preservesPartitioning: Boolean)
-      (f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] = {
+      (f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] =
     new ZippedPartitionsRDD4(sc, sc.clean(f), this, rdd2, rdd3, rdd4, preservesPartitioning)
-  }

   @RDDScope
   def zipPartitions[B: ClassTag, C: ClassTag, D: ClassTag, V: ClassTag]
       (rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D])
-      (f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] = {
+      (f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] =
     zipPartitions(rdd2, rdd3, rdd4, preservesPartitioning = false)(f)
-  }
+

   // Actions (launch a job to return a value to the user program)

@@ -929,26 +898,21 @@ abstract class RDD[T: ClassTag](
    * RDD will be <= us.
    */
   @RDDScope
-  def subtract(other: RDD[T]): RDD[T] = {
+  def subtract(other: RDD[T]): RDD[T] =
     subtract(other, partitioner.getOrElse(new HashPartitioner(partitions.length)))
-  }

   /**
    * Return an RDD with the elements from `this` that are not in `other`.
    */
   @RDDScope
-  def subtract(other: RDD[T], numPartitions: Int): RDD[T] = {
+  def subtract(other: RDD[T], numPartitions: Int): RDD[T] =
     subtract(other, new HashPartitioner(numPartitions))
-  }

   /**
    * Return an RDD with the elements from `this` that are not in `other`.
    */
   @RDDScope
-  def subtract(
-      other: RDD[T],
-      p: Partitioner)
-      (implicit ord: Ordering[T] = null): RDD[T] = {
+  def subtract(other: RDD[T], p: Partitioner)(implicit ord: Ordering[T] = null): RDD[T] = {
     if (partitioner == Some(p)) {
       // Our partitioner knows how to handle T (which, since we have a partitioner, is
       // really (K, V)) so make a new Partitioner that will de-tuple our fake tuples
@@ -1108,9 +1072,7 @@ abstract class RDD[T: ClassTag](
    */
   @Experimental
   @RDDScope
-  def countApprox(
-      timeout: Long,
-      confidence: Double = 0.95): PartialResult[BoundedDouble] = {
+  def countApprox(timeout: Long, confidence: Double = 0.95): PartialResult[BoundedDouble] = {
     val countElements: (TaskContext, Iterator[T]) => Long = { (ctx, iter) =>
       var result = 0L
       while (iter.hasNext) {
@@ -1144,7 +1106,8 @@ abstract class RDD[T: ClassTag](
   @RDDScope
   def countByValueApprox(timeout: Long, confidence: Double = 0.95)
       (implicit ord: Ordering[T] = null)
-      : PartialResult[Map[T, BoundedDouble]] = {
+      : PartialResult[Map[T, BoundedDouble]] =
+  {
     if (elementClassTag.runtimeClass.isArray) {
       throw new SparkException("countByValueApprox() does not support arrays")
     }
@@ -1224,9 +1187,7 @@ abstract class RDD[T: ClassTag](
    * the same index assignments, you should sort the RDD with sortByKey() or save it to a file.
    */
   @RDDScope
-  def zipWithIndex(): RDD[(T, Long)] = {
-    new ZippedWithIndexRDD(this)
-  }
+  def zipWithIndex(): RDD[(T, Long)] = new ZippedWithIndexRDD(this)

   /**
    * Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k,
@@ -1297,11 +1258,9 @@ abstract class RDD[T: ClassTag](
    * Return the first element in this RDD.
    */
   @RDDScope
-  def first(): T = {
-    take(1) match {
-      case Array(t) => t
-      case _ => throw new UnsupportedOperationException("empty collection")
-    }
+  def first(): T = take(1) match {
+    case Array(t) => t
+    case _ => throw new UnsupportedOperationException("empty collection")
   }

   /**
@@ -1320,9 +1279,7 @@ abstract class RDD[T: ClassTag](
    * @return an array of top elements
    */
   @RDDScope
-  def top(num: Int)(implicit ord: Ordering[T]): Array[T] = {
-    takeOrdered(num)(ord.reverse)
-  }
+  def top(num: Int)(implicit ord: Ordering[T]): Array[T] = takeOrdered(num)(ord.reverse)

   /**
    * Returns the first k (smallest) elements from this RDD as defined by the specified
@@ -1367,18 +1324,14 @@ abstract class RDD[T: ClassTag](
    * @return the maximum element of the RDD
    * */
   @RDDScope
-  def max()(implicit ord: Ordering[T]): T = {
-    this.reduce(ord.max)
-  }
+  def max()(implicit ord: Ordering[T]): T = this.reduce(ord.max)

   /**
    * Returns the min of this RDD as defined by the implicit Ordering[T].
    * @return the minimum element of the RDD
    * */
   @RDDScope
-  def min()(implicit ord: Ordering[T]): T = {
-    this.reduce(ord.min)
-  }
+  def min()(implicit ord: Ordering[T]): T = this.reduce(ord.min)

   /**
    * @note due to complications in the internal implementation, this method will raise an
@@ -1389,9 +1342,7 @@ abstract class RDD[T: ClassTag](
    * may be empty even when it has at least 1 partition.
    */
   @RDDScope
-  def isEmpty(): Boolean = {
-    partitions.length == 0 || take(1).length == 0
-  }
+  def isEmpty(): Boolean = partitions.length == 0 || take(1).length == 0

   /**
    * Save this RDD as a text file, using string representations of elements.
@@ -1425,9 +1376,7 @@ abstract class RDD[T: ClassTag](
    * Save this RDD as a compressed text file, using string representations of elements.
    */
   @RDDScope
-  def saveAsTextFile(
-      path: String,
-      codec: Class[_ <: CompressionCodec]): Unit = {
+  def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]): Unit = {
     // https://issues.apache.org/jira/browse/SPARK-2075
     val nullWritableClassTag = implicitly[ClassTag[NullWritable]]
     val textClassTag = implicitly[ClassTag[Text]]
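Nearly all of the RDD.scala churn above is the same mechanical revert: methods whose bodies are a single expression go back to Scala's brace-less, expression-bodied form that an earlier commit had wrapped in braces. The two forms compile identically; a standalone sketch of the pattern (the names here are illustrative, not from the commit):

    object StyleSketch {
      // Braced form, as introduced by the unintended style change:
      def doubledBraced(xs: Seq[Int]): Seq[Int] = {
        xs.map(_ * 2)
      }

      // Expression-bodied form, as restored by this commit:
      def doubled(xs: Seq[Int]): Seq[Int] = xs.map(_ * 2)

      def main(args: Array[String]): Unit = {
        assert(doubledBraced(Seq(1, 2, 3)) == doubled(Seq(1, 2, 3)))
      }
    }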

core/src/main/scala/org/apache/spark/ui/viz/VizGraph.scala
Lines changed: 1 addition & 1 deletion

@@ -155,4 +155,4 @@ private[ui] object VizGraph {
     subgraph.append(indent + "}\n")
     subgraph.toString()
   }
-}
+}
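A note on the VizGraph.scala hunk: when a diff removes and re-adds the same final line, that usually reflects a trailing-whitespace or end-of-file newline change rather than a code change; the rendered page does not indicate which.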
