[Fix #274] Document + fix annotation usages #470

Closed · wants to merge 1 commit
1 change: 1 addition & 0 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -1110,6 +1110,7 @@ class SparkContext(config: SparkConf) extends Logging {
   }
 
   /**
+   * :: Experimental ::
    * Submit a job for execution and return a FutureJob holding the result.
    */
   @Experimental
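As context for the member being tagged, a sketch of how submitJob is typically invoked; the RDD and the lambdas below are illustrative only, not part of this patch:

// Asynchronously count the elements of two partitions of an RDD[Int] (sketch).
val rdd = sc.parallelize(1 to 100, 4)
val future = sc.submitJob(
  rdd,
  (iter: Iterator[Int]) => iter.size,                        // processPartition
  Seq(0, 1),                                                 // partitions to run on
  (index: Int, count: Int) => println(s"$index -> $count"),  // per-partition result handler
  ())                                                        // resultFunc: overall result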
core/src/main/scala/org/apache/spark/annotation/AlphaComponent.java
@@ -19,7 +19,14 @@

 import java.lang.annotation.*;
 
-/** A new component of Spark which may have unstable APIs. */
+/**
+ * A new component of Spark which may have unstable APIs.
+ *
+ * NOTE: If there exists a Scaladoc comment that immediately precedes this annotation, the first
+ * line of the comment must be ":: AlphaComponent ::" with no trailing blank line. This is because
+ * of the known issue that Scaladoc displays only either the annotation or the comment, whichever
+ * comes first.
+ */
 @Retention(RetentionPolicy.RUNTIME)
 @Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
     ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
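To make the required ordering concrete, a minimal sketch of a correctly annotated class (the class here is hypothetical):

import org.apache.spark.annotation.AlphaComponent

/**
 * :: AlphaComponent ::
 * A hypothetical alpha-quality component. The ":: AlphaComponent ::" marker is the first
 * line of the Scaladoc comment, with no blank line after it, so that the generated docs
 * display both the marker and the description.
 */
@AlphaComponent
class ShinyNewEngine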
core/src/main/scala/org/apache/spark/annotation/DeveloperApi.java
@@ -23,6 +23,11 @@
  * A lower-level, unstable API intended for developers.
  *
  * Developer APIs might change or be removed in minor versions of Spark.
+ *
+ * NOTE: If there exists a Scaladoc comment that immediately precedes this annotation, the first
+ * line of the comment must be ":: DeveloperApi ::" with no trailing blank line. This is because
+ * of the known issue that Scaladoc displays only either the annotation or the comment, whichever
+ * comes first.
  */
 @Retention(RetentionPolicy.RUNTIME)
 @Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
core/src/main/scala/org/apache/spark/annotation/Experimental.java
@@ -24,6 +24,11 @@
  *
  * Experimental APIs might change or be removed in minor versions of Spark, or be adopted as
  * first-class Spark APIs.
+ *
+ * NOTE: If there exists a Scaladoc comment that immediately precedes this annotation, the first
+ * line of the comment must be ":: Experimental ::" with no trailing blank line. This is because
+ * of the known issue that Scaladoc displays only either the annotation or the comment, whichever
+ * comes first.
  */
 @Retention(RetentionPolicy.RUNTIME)
 @Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
6 changes: 0 additions & 6 deletions core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -94,26 +94,20 @@ abstract class RDD[T: ClassTag](
   def compute(split: Partition, context: TaskContext): Iterator[T]
 
   /**
-   * :: DeveloperApi ::
    * Implemented by subclasses to return the set of partitions in this RDD. This method will only
    * be called once, so it is safe to implement a time-consuming computation in it.
    */
-  @DeveloperApi
   protected def getPartitions: Array[Partition]
 
   /**
-   * :: DeveloperApi ::
    * Implemented by subclasses to return how this RDD depends on parent RDDs. This method will only
    * be called once, so it is safe to implement a time-consuming computation in it.
    */
-  @DeveloperApi
   protected def getDependencies: Seq[Dependency[_]] = deps
 
   /**
-   * :: DeveloperApi ::
    * Optionally overridden by subclasses to specify placement preferences.
    */
-  @DeveloperApi
   protected def getPreferredLocations(split: Partition): Seq[String] = Nil
 
   /** Optionally overridden by subclasses to specify how they are partitioned. */
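For reference, these protected hooks are what a custom RDD subclass implements; a minimal sketch (the class is hypothetical):

import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD

// A hypothetical RDD with numParts partitions that yield no data, showing the
// two hooks every subclass must provide.
class EmptyPartsRDD(sc: SparkContext, numParts: Int) extends RDD[Int](sc, Nil) {

  override def compute(split: Partition, context: TaskContext): Iterator[Int] =
    Iterator.empty

  override protected def getPartitions: Array[Partition] =
    Array.tabulate(numParts) { i =>
      new Partition { override def index: Int = i }
    }
}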
core/src/main/scala/org/apache/spark/scheduler/SplitInfo.scala
@@ -24,8 +24,12 @@ import org.apache.spark.annotation.DeveloperApi
 // information about a specific split instance : handles both split instances.
 // So that we do not need to worry about the differences.
 @DeveloperApi
-class SplitInfo(val inputFormatClazz: Class[_], val hostLocation: String, val path: String,
-  val length: Long, val underlyingSplit: Any) {
+class SplitInfo(
+    val inputFormatClazz: Class[_],
+    val hostLocation: String,
+    val path: String,
+    val length: Long,
+    val underlyingSplit: Any) {
   override def toString(): String = {
     "SplitInfo " + super.toString + " .. inputFormatClazz " + inputFormatClazz +
       ", hostLocation : " + hostLocation + ", path : " + path +
mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
@@ -391,9 +391,9 @@ object KMeans {
    * Returns the squared Euclidean distance between two vectors computed by
    * [[org.apache.spark.mllib.util.MLUtils#fastSquaredDistance]].
    */
-  private[clustering]
-  def fastSquaredDistance(v1: BreezeVectorWithNorm, v2: BreezeVectorWithNorm)
-    : Double = {
+  private[clustering] def fastSquaredDistance(
+      v1: BreezeVectorWithNorm,
+      v2: BreezeVectorWithNorm): Double = {
     MLUtils.fastSquaredDistance(v1.vector, v1.norm, v2.vector, v2.norm)
   }

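The reformatted helper delegates to MLUtils.fastSquaredDistance, which can use the cached norms via the expansion ||v1 - v2||^2 = ||v1||^2 + ||v2||^2 - 2 * <v1, v2>. A naive sketch of that identity, not the actual MLUtils implementation (the real one also handles numerical precision concerns):

// With precomputed norms, only the dot product needs a pass over the data.
def naiveFastSquaredDistance(
    v1: Array[Double], norm1: Double,
    v2: Array[Double], norm2: Double): Double = {
  var dot = 0.0
  var i = 0
  while (i < v1.length) {
    dot += v1(i) * v2(i)
    i += 1
  }
  norm1 * norm1 + norm2 * norm2 - 2.0 * dot
}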
mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala
@@ -21,9 +21,6 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.mllib.linalg.Vector
 import org.apache.spark.annotation.Experimental
 
-/**
- * :: Experimental ::
- */
 @Experimental
 trait RegressionModel extends Serializable {
   /**
2 changes: 0 additions & 2 deletions sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -249,11 +249,9 @@ class SQLContext(@transient val sparkContext: SparkContext)
   }
 
   /**
-   * :: DeveloperApi ::
    * The primary workflow for executing relational queries using Spark. Designed to allow easy
    * access to the intermediate phases of query execution for developers.
    */
-  @DeveloperApi
   protected abstract class QueryExecution {
     def logical: LogicalPlan

sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql
 import net.razorvine.pickle.Pickler
 
 import org.apache.spark.{Dependency, OneToOneDependency, Partition, TaskContext}
-import org.apache.spark.annotation.{AlphaComponent, Experimental, DeveloperApi}
+import org.apache.spark.annotation.{AlphaComponent, Experimental}
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.expressions._
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -239,7 +239,6 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
     sparkContext.parallelize(Seq(new GenericRow(Array[Any]()): Row), 1)
 
   /** Extends QueryExecution with Hive-specific features. */
-  @DeveloperApi
   protected[sql] abstract class QueryExecution extends super.QueryExecution {
     // TODO: Create mixin for the analyzer instead of overriding things here.
     override lazy val optimizedPlan =