Skip to content

Commit 3c49a30

Browse files
committed
fixing WholeTextFileInputFormat so it has the same setMinPartitions function as in BinaryFileInput
1 parent 359a096 commit 3c49a30

File tree

2 files changed

+4
-3
lines changed

2 files changed

+4
-3
lines changed

core/src/main/scala/org/apache/spark/input/WholeTextFileInputFormat.scala

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,9 +48,10 @@ private[spark] class WholeTextFileInputFormat extends CombineFileInputFormat[Str
4848
}
4949

5050
/**
51-
* Allow minPartitions set by end-user in order to keep compatibility with old Hadoop API.
51+
* Allow minPartitions set by end-user in order to keep compatibility with old Hadoop API,
52+
* which is set through setMaxSplitSize
5253
*/
53-
def setMaxSplitSize(context: JobContext, minPartitions: Int) {
54+
def setMinPartitions(context: JobContext, minPartitions: Int) {
5455
val files = listStatus(context)
5556
val totalLen = files.map { file =>
5657
if (file.isDir) 0L else file.getLen

core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,7 @@ private[spark] class WholeTextFileRDD(
182182
case _ =>
183183
}
184184
val jobContext = newJobContext(conf, jobId)
185-
inputFormat.setMaxSplitSize(jobContext, minPartitions)
185+
inputFormat.setMinPartitions(jobContext, minPartitions)
186186
val rawSplits = inputFormat.getSplits(jobContext).toArray
187187
val result = new Array[Partition](rawSplits.size)
188188
for (i <- 0 until rawSplits.size) {

0 commit comments

Comments
 (0)