Commit f4841dc

un-optimizing imports, silly intellij
1 parent eacfaa6 commit f4841dc
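
The change itself only reorders import statements: an IDE "optimize imports" pass had resorted them (moving the scala.* imports to the end of each file's import list, among other shuffles), and this commit puts them back. In the restored SparkContext.scala layout below, the java.* and scala.* groups come first, followed by third-party packages (Hadoop, Mesos) and then org.apache.spark.* packages. A minimal sketch of that grouping in a hypothetical, unrelated file (the package, object, and method names are illustrative only; it assumes spark-core and hadoop-common on the classpath):

package org.apache.spark.example  // hypothetical package, not part of this commit

import java.util.UUID

import scala.collection.mutable.HashMap

import org.apache.hadoop.conf.Configuration

import org.apache.spark.rdd.RDD

// Hypothetical helper showing the restored group order: java, scala, third-party, spark.
object ImportOrderSketch {
  def tag(rdd: RDD[Int], conf: Configuration): HashMap[String, String] =
    HashMap(
      "job"   -> UUID.randomUUID().toString,
      "fs"    -> conf.get("fs.defaultFS", "file:///"),
      "parts" -> rdd.partitions.length.toString)
}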

3 files changed: +25, -20 lines changed

core/src/main/scala/org/apache/spark/SparkContext.scala

Lines changed: 12 additions & 12 deletions
@@ -17,40 +17,40 @@
 
 package org.apache.spark
 
+import scala.language.implicitConversions
+
 import java.io._
 import java.net.URI
-import java.util.UUID.randomUUID
 import java.util.concurrent.atomic.AtomicInteger
 import java.util.{Properties, UUID}
-
+import java.util.UUID.randomUUID
+import scala.collection.{Map, Set}
+import scala.collection.JavaConversions._
+import scala.collection.generic.Growable
+import scala.collection.mutable.HashMap
+import scala.reflect.{ClassTag, classTag}
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
 import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}
-import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
 import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
+import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
 import org.apache.mesos.MesosNativeLibrary
+
 import org.apache.spark.annotation.{DeveloperApi, Experimental}
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
-import org.apache.spark.input.{ByteInputFormat, FixedLengthBinaryInputFormat, StreamInputFormat, WholeTextFileInputFormat}
+import org.apache.spark.input.{StreamInputFormat, StreamFileInputFormat, WholeTextFileInputFormat, ByteInputFormat, FixedLengthBinaryInputFormat}
 import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
 import org.apache.spark.rdd._
 import org.apache.spark.scheduler._
+import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, SparkDeploySchedulerBackend, SimrSchedulerBackend}
 import org.apache.spark.scheduler.cluster.mesos.{CoarseMesosSchedulerBackend, MesosSchedulerBackend}
-import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, SimrSchedulerBackend, SparkDeploySchedulerBackend}
 import org.apache.spark.scheduler.local.LocalBackend
 import org.apache.spark.storage.{BlockManagerSource, RDDInfo, StorageStatus, StorageUtils}
 import org.apache.spark.ui.SparkUI
 import org.apache.spark.util.{CallSite, ClosureCleaner, MetadataCleaner, MetadataCleanerType, TimeStampedWeakValueHashMap, Utils}
 
-import scala.collection.JavaConversions._
-import scala.collection.generic.Growable
-import scala.collection.mutable.HashMap
-import scala.collection.{Map, Set}
-import scala.language.implicitConversions
-import scala.reflect.{ClassTag, classTag}
-
 /**
  * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
  * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.

core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala

Lines changed: 12 additions & 7 deletions
@@ -17,25 +17,30 @@
 
 package org.apache.spark.api.java
 
-import java.io.DataInputStream
 import java.util
 import java.util.{Map => JMap}
 
+import java.io.DataInputStream
+
+import org.apache.hadoop.io.{BytesWritable, LongWritable}
+import org.apache.spark.input.FixedLengthBinaryInputFormat
+
+import scala.collection.JavaConversions
+import scala.collection.JavaConversions._
+import scala.language.implicitConversions
+import scala.reflect.ClassTag
+
 import com.google.common.base.Optional
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.mapred.{InputFormat, JobConf}
 import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
-import org.apache.spark.SparkContext.{DoubleAccumulatorParam, IntAccumulatorParam}
+
 import org.apache.spark._
+import org.apache.spark.SparkContext.{DoubleAccumulatorParam, IntAccumulatorParam}
 import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.rdd.{EmptyRDD, RDD}
 
-import scala.collection.JavaConversions
-import scala.collection.JavaConversions._
-import scala.language.implicitConversions
-import scala.reflect.ClassTag
-
 /**
  * A Java-friendly version of [[org.apache.spark.SparkContext]] that returns
  * [[org.apache.spark.api.java.JavaRDD]]s and works with Java collections instead of Scala ones.

core/src/main/scala/org/apache/spark/input/FixedLengthBinaryRecordReader.scala

Lines changed: 1 addition & 1 deletion
@@ -22,8 +22,8 @@ import java.io.IOException
 import org.apache.hadoop.fs.FSDataInputStream
 import org.apache.hadoop.io.compress.CompressionCodecFactory
 import org.apache.hadoop.io.{BytesWritable, LongWritable}
-import org.apache.hadoop.mapreduce.lib.input.FileSplit
 import org.apache.hadoop.mapreduce.{InputSplit, RecordReader, TaskAttemptContext}
+import org.apache.hadoop.mapreduce.lib.input.FileSplit
 
 /**
  *
