@@ -17,40 +17,40 @@
 
 package org.apache.spark
 
+import scala.language.implicitConversions
+
 import java.io._
 import java.net.URI
-import java.util.UUID.randomUUID
 import java.util.concurrent.atomic.AtomicInteger
 import java.util.{Properties, UUID}
-
+import java.util.UUID.randomUUID
+import scala.collection.{Map, Set}
+import scala.collection.JavaConversions._
+import scala.collection.generic.Growable
+import scala.collection.mutable.HashMap
+import scala.reflect.{ClassTag, classTag}
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
 import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}
-import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
 import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
+import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
 import org.apache.mesos.MesosNativeLibrary
+
 import org.apache.spark.annotation.{DeveloperApi, Experimental}
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
-import org.apache.spark.input.{ByteInputFormat, FixedLengthBinaryInputFormat, StreamInputFormat, WholeTextFileInputFormat}
+import org.apache.spark.input.{StreamInputFormat, StreamFileInputFormat, WholeTextFileInputFormat, ByteInputFormat, FixedLengthBinaryInputFormat}
 import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
 import org.apache.spark.rdd._
 import org.apache.spark.scheduler._
+import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, SparkDeploySchedulerBackend, SimrSchedulerBackend}
 import org.apache.spark.scheduler.cluster.mesos.{CoarseMesosSchedulerBackend, MesosSchedulerBackend}
-import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, SimrSchedulerBackend, SparkDeploySchedulerBackend}
 import org.apache.spark.scheduler.local.LocalBackend
 import org.apache.spark.storage.{BlockManagerSource, RDDInfo, StorageStatus, StorageUtils}
 import org.apache.spark.ui.SparkUI
 import org.apache.spark.util.{CallSite, ClosureCleaner, MetadataCleaner, MetadataCleanerType, TimeStampedWeakValueHashMap, Utils}
 
-import scala.collection.JavaConversions._
-import scala.collection.generic.Growable
-import scala.collection.mutable.HashMap
-import scala.collection.{Map, Set}
-import scala.language.implicitConversions
-import scala.reflect.{ClassTag, classTag}
-
 /**
  * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
  * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
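
For context, the scaladoc above describes SparkContext as the entry point for creating RDDs, accumulators and broadcast variables. A minimal usage sketch follows; it is not part of this diff, and the app name, master URL and values are illustrative only.

import org.apache.spark.{SparkConf, SparkContext}

object SparkContextExample {
  def main(args: Array[String]): Unit = {
    // Hypothetical configuration: app name and local master are placeholders.
    val conf = new SparkConf().setAppName("SparkContextExample").setMaster("local[2]")
    val sc = new SparkContext(conf)

    // Broadcast variable shared with tasks, and an accumulator they update.
    val factor = sc.broadcast(10)
    val processed = sc.accumulator(0)

    // RDD built from a local collection; the map uses both shared variables.
    val scaled = sc.parallelize(1 to 5).map { x =>
      processed += 1
      x * factor.value
    }
    println(scaled.collect().mkString(", "))          // 10, 20, 30, 40, 50
    println(s"elements processed: ${processed.value}") // read on the driver after the action

    sc.stop()
  }
}

In this 1.x-era API, sc.broadcast and sc.accumulator construct the shared variables the scaladoc mentions, and sc.stop() releases the connection to the cluster.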