
Commit 39d9df2 (parent e4ad8b5)

Fix the code style

2 files changed: +2 -4 lines changed

2 files changed

+2
-4
lines changed

core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala

Lines changed: 1 addition & 2 deletions
@@ -34,8 +34,7 @@ private[spark] object FixedLengthBinaryInputFormat {
 
   /** Retrieves the record length property from a Hadoop configuration */
   def getRecordLength(context: JobContext): Int = {
-    SparkHadoopUtil.get.getConfigurationFromJobContext(context).
-      get(RECORD_LENGTH_PROPERTY).toInt
+    SparkHadoopUtil.get.getConfigurationFromJobContext(context).get(RECORD_LENGTH_PROPERTY).toInt
   }
 }
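The style fix above collapses a wrapped method chain onto a single line; behavior is unchanged. As a minimal sketch of what that chain computes, using a hypothetical stand-in key for RECORD_LENGTH_PROPERTY (the constant's actual value is defined elsewhere in this file and is not visible in this diff):

import org.apache.hadoop.conf.Configuration

object RecordLengthSketch {
  // Hypothetical stand-in for RECORD_LENGTH_PROPERTY; the real key string
  // lives in FixedLengthBinaryInputFormat and is not shown in this diff.
  val recordLengthKey = "fixedlengthbinary.record.length"

  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    conf.set(recordLengthKey, "512")
    // Same shape as the one-liner in the diff, minus the JobContext lookup:
    // read the property as a String, then convert it to Int.
    val recordLength: Int = conf.get(recordLengthKey).toInt
    println(recordLength) // prints 512
  }
}

Note the .toInt at the end: if the property is unset, conf.get returns null and the conversion throws, so callers are expected to set the record length before using this input format.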

core/src/main/scala/org/apache/spark/input/PortableDataStream.scala

Lines changed: 1 addition & 2 deletions
@@ -19,8 +19,6 @@ package org.apache.spark.input
 
 import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream}
 
-import org.apache.spark.deploy.SparkHadoopUtil
-
 import scala.collection.JavaConversions._
 
 import com.google.common.io.ByteStreams
@@ -30,6 +28,7 @@ import org.apache.hadoop.mapreduce.{InputSplit, JobContext, RecordReader, TaskAt
 import org.apache.hadoop.mapreduce.lib.input.{CombineFileInputFormat, CombineFileRecordReader, CombineFileSplit}
 
 import org.apache.spark.annotation.Experimental
+import org.apache.spark.deploy.SparkHadoopUtil
 
 /**
  * A general format for reading whole files in as streams, byte arrays,
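The second change only relocates the SparkHadoopUtil import: Spark's Scala style convention groups imports as java/javax, then scala, then third-party, then org.apache.spark, alphabetized within each group, and the import had been sitting ahead of the scala group. The resulting ordering in PortableDataStream.scala, taken from the diff above (Hadoop third-party imports omitted):

// Import groups after this commit: java, scala, third-party, org.apache.spark.
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream}

import scala.collection.JavaConversions._

import com.google.common.io.ByteStreams

import org.apache.spark.annotation.Experimental
import org.apache.spark.deploy.SparkHadoopUtil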
