2 files changed under core/src/main/scala/org/apache/spark: +4 −3 lines changed
@@ -48,9 +48,10 @@ private[spark] class WholeTextFileInputFormat extends CombineFileInputFormat[Str
   }
 
   /**
-   * Allow minPartitions set by end-user in order to keep compatibility with old Hadoop API.
+   * Allow minPartitions set by end-user in order to keep compatibility with old Hadoop API,
+   * which is set through setMaxSplitSize
    */
-  def setMaxSplitSize(context: JobContext, minPartitions: Int) {
+  def setMinPartitions(context: JobContext, minPartitions: Int) {
     val files = listStatus(context)
     val totalLen = files.map { file =>
       if (file.isDir) 0L else file.getLen
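
For orientation, here is a sketch of how the renamed method plausibly completes beyond the hunk's visible context: the per-file lengths are summed and divided by minPartitions to derive a maximum split size, which is handed to the inherited setMaxSplitSize mentioned in the doc comment. Everything after the visible lines (the .sum, the maxSplitSize computation, the super call) is an assumption for illustration, not part of this diff.

  // Sketch (assumed continuation, not shown in the diff): translate the
  // requested minimum partition count into a maximum combined-split size.
  def setMinPartitions(context: JobContext, minPartitions: Int) {
    val files = listStatus(context)
    val totalLen = files.map { file =>
      if (file.isDir) 0L else file.getLen
    }.sum
    // Dividing the total input size by minPartitions yields the largest
    // split size that still produces at least minPartitions splits.
    val maxSplitSize = Math.ceil(totalLen * 1.0 /
      (if (minPartitions == 0) 1 else minPartitions)).toLong
    // Delegate to the old Hadoop API referenced in the doc comment above.
    super.setMaxSplitSize(maxSplitSize)
  }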
@@ -182,7 +182,7 @@ private[spark] class WholeTextFileRDD(
       case _ =>
     }
     val jobContext = newJobContext(conf, jobId)
-    inputFormat.setMaxSplitSize(jobContext, minPartitions)
+    inputFormat.setMinPartitions(jobContext, minPartitions)
     val rawSplits = inputFormat.getSplits(jobContext).toArray
     val result = new Array[Partition](rawSplits.size)
     for (i <- 0 until rawSplits.size) {