Skip to content

Commit 66fc600

Browse files
committed
Fix and minimize regression test (verified that it still fails)
1 parent 1d8d125 commit 66fc600

File tree

2 files changed

+6
-10
lines changed

2 files changed

+6
-10
lines changed

core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,7 @@ class SparkHadoopUtil extends Logging {
179179
Seq.empty
180180
} else {
181181
FileSystem.getAllStatistics
182-
.filter { stats => scheme.equals(stats.getScheme) }
182+
.filter { stats => stats.getScheme.equals(scheme) }
183183
.map(Utils.invoke(classOf[Statistics], _, "getThreadStatistics"))
184184
}
185185
}

core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ import java.io.{FileWriter, PrintWriter, File}
2222
import org.apache.spark.SharedSparkContext
2323
import org.apache.spark.deploy.SparkHadoopUtil
2424
import org.apache.spark.scheduler.{SparkListenerTaskEnd, SparkListener}
25+
import org.apache.spark.util.Utils
2526

2627
import org.scalatest.FunSuite
2728
import org.scalatest.matchers.ShouldMatchers
@@ -108,26 +109,21 @@ class InputOutputMetricsSuite extends FunSuite with SharedSparkContext with Shou
108109
}
109110

110111
test("exceptions while getting IO thread statistics should not fail tasks / jobs (SPARK-8062)") {
111-
// For some reason, the following code needs to be called in order for this regression test to
112-
// fail and reproduce the bug. The fact that this is necessary suggests that there may be other
113-
// bugs in our InputOutputMetrics code; SPARK-8086 tracks progress towards investigating this
114-
// issue, since fixing it is out of scope for SPARK-8062.
115-
val fs = FileSystem.getLocal(new Configuration())
116-
val outPath = new Path(fs.getWorkingDirectory, "outdir")
117-
SparkHadoopUtil.get.getFSBytesWrittenOnThreadCallback(outPath, fs.getConf)
112+
val tempDir = Utils.createTempDir()
113+
val outPath = new File(tempDir, "outfile")
118114

119115
// Intentionally call this method with a null scheme, which will store an entry for a FileSystem
120116
// with a null scheme into Hadoop's global `FileSystem.statisticsTable`.
121117
FileSystem.getStatistics(null, classOf[FileSystem])
122118

123119
// Prior to fixing SPARK-8062, this would fail with a NullPointerException in
124120
// SparkHadoopUtil.getFileSystemThreadStatistics
125-
val rdd = sc.parallelize(Array("a", "b", "c", "d"), 2)
126121
try {
122+
val rdd = sc.parallelize(Array("a", "b", "c", "d"), 2)
127123
rdd.saveAsTextFile(outPath.toString)
128124
sc.textFile(outPath.toString).count()
129125
} finally {
130-
fs.delete(outPath, true)
126+
Utils.deleteRecursively(tempDir)
131127
}
132128
}
133129
}

0 commit comments

Comments (0)