
Commit 9d48cbf

Initial pass

1 parent: d666053


48 files changed: +166, -35 lines

core/src/main/scala/org/apache/spark/Aggregator.scala

Lines changed: 2 additions & 0 deletions

@@ -20,6 +20,8 @@ package org.apache.spark
 import org.apache.spark.util.collection.{AppendOnlyMap, ExternalAppendOnlyMap}
 
 /**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
  * A set of functions used to aggregate data.
  *
  * @param createCombiner function to create the initial value of the aggregation.
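
A quick sketch of how this semi-private class is constructed, assuming Aggregator's constructor takes the three functions its @param tags describe (createCombiner, mergeValue, mergeCombiners); the key and value types here are made up for illustration:

import org.apache.spark.Aggregator

// Combines Int values per key into a (sum, count) pair.
val sumAndCount = new Aggregator[String, Int, (Long, Long)](
  createCombiner = v => (v.toLong, 1L),                        // first value seen for a key
  mergeValue = (c, v) => (c._1 + v, c._2 + 1),                 // fold another value into a combiner
  mergeCombiners = (c1, c2) => (c1._1 + c2._1, c1._2 + c2._2)  // merge partial results across partitions
)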

core/src/main/scala/org/apache/spark/Dependency.scala

Lines changed: 10 additions & 0 deletions

@@ -21,12 +21,16 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.serializer.Serializer
 
 /**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
  * Base class for dependencies.
  */
 abstract class Dependency[T](val rdd: RDD[T]) extends Serializable
 
 
 /**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
  * Base class for dependencies where each partition of the parent RDD is used by at most one
  * partition of the child RDD. Narrow dependencies allow for pipelined execution.
  */
@@ -41,6 +45,8 @@ abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {
 
 
 /**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
  * Represents a dependency on the output of a shuffle stage.
  * @param rdd the parent RDD
 * @param partitioner partitioner used to partition the shuffle output
@@ -59,6 +65,8 @@ class ShuffleDependency[K, V](
 
 
 /**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
  * Represents a one-to-one dependency between partitions of the parent and child RDDs.
  */
 class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
@@ -67,6 +75,8 @@ class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
 
 
 /**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
  * Represents a one-to-one dependency between ranges of partitions in the parent and child RDDs.
  * @param rdd the parent RDD
  * @param inStart the start of the range in the parent RDD
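
To illustrate the narrow-dependency contract documented above (each child partition depends on at most one parent partition), here is a minimal sketch; it assumes NarrowDependency's abstract getParents(partitionId) method, which is not shown in this diff:

import org.apache.spark.NarrowDependency
import org.apache.spark.rdd.RDD

// Child partition i reads exactly parent partition i, which is what lets the
// scheduler pipeline the parent and child stages together.
class IdentityDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
  override def getParents(partitionId: Int): Seq[Int] = List(partitionId)
}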

core/src/main/scala/org/apache/spark/FutureAction.scala

Lines changed: 4 additions & 0 deletions

@@ -25,6 +25,8 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.scheduler.{JobFailed, JobSucceeded, JobWaiter}
 
 /**
+ * <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span>
+ *
  * A future for the result of an action to support cancellation. This is an extension of the
  * Scala Future interface to support cancellation.
  */
@@ -148,6 +150,8 @@ class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc:
 
 
 /**
+ * <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span>
+ *
  * A [[FutureAction]] for actions that could trigger multiple Spark jobs. Examples include take,
  * takeSample. Cancellation works by setting the cancelled flag to true and interrupting the
  * action thread if it is being blocked by a job.
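
A short usage sketch of the cancellation behaviour this Scaladoc describes, assuming an existing SparkContext named sc and the asynchronous countAsync action provided through AsyncRDDActions:

import org.apache.spark.SparkContext._   // brings the async action conversions into scope

val future = sc.parallelize(1 to 1000000).countAsync()  // a FutureAction[Long]
// ... later, if the result is no longer needed:
future.cancel()  // sets the cancelled flag and interrupts the job if it is still running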

core/src/main/scala/org/apache/spark/SerializableWritable.scala

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.io.ObjectWritable
 import org.apache.hadoop.io.Writable
 
-class SerializableWritable[T <: Writable](@transient var t: T) extends Serializable {
+private[spark] class SerializableWritable[T <: Writable](@transient var t: T) extends Serializable {
   def value = t
   override def toString = t.toString
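
This change (and the similar ones to SparkEnv, JavaUtils and ExecutorSource below) relies on Scala's package-qualified access modifier. A minimal illustration with made-up package and class names:

// private[spark] makes a definition visible throughout the org.apache.spark
// package and its subpackages, but removes it from the public API surface.
package org.apache.spark.example {
  private[spark] class InternalHelper  // usable anywhere under org.apache.spark, hidden elsewhere
}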

core/src/main/scala/org/apache/spark/SparkEnv.scala

Lines changed: 1 addition & 1 deletion

@@ -41,7 +41,7 @@ import org.apache.spark.util.{AkkaUtils, Utils}
  * objects needs to have the right SparkEnv set. You can get the current environment with
  * SparkEnv.get (e.g. after creating a SparkContext) and set it with SparkEnv.set.
  */
-class SparkEnv private[spark] (
+private[spark] class SparkEnv private[spark] (
     val executorId: String,
     val actorSystem: ActorSystem,
     val serializer: Serializer,

core/src/main/scala/org/apache/spark/TaskContext.scala

Lines changed: 5 additions & 0 deletions

@@ -21,6 +21,11 @@ import scala.collection.mutable.ArrayBuffer
 
 import org.apache.spark.executor.TaskMetrics
 
+/**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
+ * Contextual information about a task which can be read or mutated during execution.
+ */
 class TaskContext(
   val stageId: Int,
   val partitionId: Int,
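
A sketch of reading the fields visible in this diff (stageId, partitionId) from inside a running task; it assumes RDD.mapPartitionsWithContext, which passes the current TaskContext to the user function, and an existing SparkContext named sc:

val tagged = sc.parallelize(1 to 100, 4).mapPartitionsWithContext { (context, iter) =>
  // Tag each element with the stage and partition it was processed in.
  iter.map(x => (context.stageId, context.partitionId, x))
}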

core/src/main/scala/org/apache/spark/api/java/JavaUtils.scala

Lines changed: 1 addition & 1 deletion

@@ -19,7 +19,7 @@ package org.apache.spark.api.java
 
 import com.google.common.base.Optional
 
-object JavaUtils {
+private[spark] object JavaUtils {
   def optionToOptional[T](option: Option[T]): Optional[T] =
     option match {
       case Some(value) => Optional.of(value)

core/src/main/scala/org/apache/spark/executor/ExecutorSource.scala

Lines changed: 1 addition & 1 deletion

@@ -24,7 +24,7 @@ import org.apache.hadoop.fs.FileSystem
 
 import org.apache.spark.metrics.source.Source
 
-class ExecutorSource(val executor: Executor, executorId: String) extends Source {
+private[spark] class ExecutorSource(val executor: Executor, executorId: String) extends Source {
   private def fileStats(scheme: String) : Option[FileSystem.Statistics] =
     FileSystem.getAllStatistics().filter(s => s.getScheme.equals(scheme)).headOption

core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala

Lines changed: 15 additions & 0 deletions

@@ -19,6 +19,11 @@ package org.apache.spark.executor
 
 import org.apache.spark.storage.{BlockId, BlockStatus}
 
+/**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
+ * Metrics tracked during the execution of a task.
+ */
 class TaskMetrics extends Serializable {
   /**
    * Host's name the task runs on
@@ -82,6 +87,11 @@ object TaskMetrics {
 }
 
 
+/**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
+ * Metrics pertaining to shuffle data read in a given task.
+ */
 class ShuffleReadMetrics extends Serializable {
   /**
    * Absolute time when this task finished reading shuffle data
@@ -116,6 +126,11 @@ class ShuffleReadMetrics extends Serializable {
   var remoteBytesRead: Long = _
 }
 
+/**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
+ * Metrics pertaining to shuffle data written in a given task.
+ */
 class ShuffleWriteMetrics extends Serializable {
   /**
    * Number of bytes written for the shuffle by this task
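
Task metrics are typically consumed through a SparkListener. A sketch, assuming SparkListenerTaskEnd exposes the finished task's metrics as taskMetrics and that shuffleReadMetrics is an Option (event field names have shifted between Spark versions, so treat this as illustrative):

import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}

class ShuffleReadLogger extends SparkListener {
  override def onTaskEnd(taskEnd: SparkListenerTaskEnd) {
    // Log how many remote shuffle bytes each finished task read.
    for (metrics <- Option(taskEnd.taskMetrics); shuffle <- metrics.shuffleReadMetrics) {
      println("task read " + shuffle.remoteBytesRead + " remote shuffle bytes")
    }
  }
}

// sc.addSparkListener(new ShuffleReadLogger())  // sc: an existing SparkContext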

core/src/main/scala/org/apache/spark/io/CompressionCodec.scala

Lines changed: 18 additions & 0 deletions

@@ -25,8 +25,14 @@ import org.xerial.snappy.{SnappyInputStream, SnappyOutputStream}
 import org.apache.spark.SparkConf
 
 /**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
  * CompressionCodec allows the customization of choosing different compression implementations
  * to be used in block storage.
+ *
+ * Note: The wire protocol for a codec is not guaranteed compatible across versions of Spark.
+ * This is intended for use as an internal compression utility within a single
+ * Spark application.
  */
 trait CompressionCodec {
 
@@ -52,7 +58,13 @@ private[spark] object CompressionCodec {
 
 
 /**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
  * LZF implementation of [[org.apache.spark.io.CompressionCodec]].
+ *
+ * Note: The wire protocol for this codec is not guaranteed to be compatible across versions
+ * of Spark. This is intended for use as an internal compression utility within a single Spark
+ * application.
  */
 class LZFCompressionCodec(conf: SparkConf) extends CompressionCodec {
 
@@ -65,8 +77,14 @@ class LZFCompressionCodec(conf: SparkConf) extends CompressionCodec {
 
 
 /**
+ * <span class="badge badge-red" style="float: right;">SEMI-PRIVATE</span>
+ *
  * Snappy implementation of [[org.apache.spark.io.CompressionCodec]].
  * Block size can be configured by spark.io.compression.snappy.block.size.
+ *
+ * Note: The wire protocol for this codec is not guaranteed to be compatible across versions
+ * of Spark. This is intended for use as an internal compression utility within a single Spark
+ * application.
  */
 class SnappyCompressionCodec(conf: SparkConf) extends CompressionCodec {
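
Since the CompressionCodec trait remains visible, a custom implementation can be plugged in. A sketch under the assumptions that the trait declares compressedOutputStream/compressedInputStream and that implementations take a SparkConf constructor argument (as LZFCompressionCodec above does); the class name and the choice of GZIP are illustrative only:

import java.io.{InputStream, OutputStream}
import java.util.zip.{GZIPInputStream, GZIPOutputStream}

import org.apache.spark.SparkConf
import org.apache.spark.io.CompressionCodec

class GzipCompressionCodec(conf: SparkConf) extends CompressionCodec {
  override def compressedOutputStream(s: OutputStream): OutputStream = new GZIPOutputStream(s)
  override def compressedInputStream(s: InputStream): InputStream = new GZIPInputStream(s)
}

// Selected by fully-qualified class name, e.g.
//   conf.set("spark.io.compression.codec", "com.example.GzipCompressionCodec")
// but note the wire-format caveat added in this commit: a codec is only meant for
// use within a single Spark application.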
