@@ -41,10 +41,10 @@ class RecordingManagedBuffer(underlyingBuffer: NioManagedBuffer) extends Managed
   var callsToRetain = 0
   var callsToRelease = 0
 
-  override def size() = underlyingBuffer.size()
-  override def nioByteBuffer() = underlyingBuffer.nioByteBuffer()
-  override def createInputStream() = underlyingBuffer.createInputStream()
-  override def convertToNetty() = underlyingBuffer.convertToNetty()
+  override def size(): Long = underlyingBuffer.size()
+  override def nioByteBuffer(): ByteBuffer = underlyingBuffer.nioByteBuffer()
+  override def createInputStream(): InputStream = underlyingBuffer.createInputStream()
+  override def convertToNetty(): AnyRef = underlyingBuffer.convertToNetty()
 
   override def retain(): ManagedBuffer = {
     callsToRetain += 1
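Note on the hunk above: without an annotation, as in "override def size() = underlyingBuffer.size()", Scala infers the result type from the right-hand side, so the override's signature silently tracks whatever the delegate returns. The added annotations state the intended signature at the definition site. A minimal sketch of the difference (Buffer and Wrapper are illustrative names, not from this suite):

    trait Buffer { def size(): Long }

    class Wrapper(underlying: Buffer) extends Buffer {
      // Inferred form: the compiler derives the result type from the delegate,
      // so a change to Buffer.size() silently changes this signature too:
      //   override def size() = underlying.size()
      // Annotated form: the intended type is stated and checked right here:
      override def size(): Long = underlying.size()
    }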
@@ -81,7 +81,7 @@ class HashShuffleReaderSuite extends SparkFunSuite with LocalSparkContext {
     // Create a return function to use for the mocked wrapForCompression method that just returns
     // the original input stream.
     val dummyCompressionFunction = new Answer[InputStream] {
-      override def answer(invocation: InvocationOnMock) =
+      override def answer(invocation: InvocationOnMock): InputStream =
         invocation.getArguments()(1).asInstanceOf[InputStream]
     }
 
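The annotation on answer spells out what the Answer[InputStream] interface already requires: answer must return an InputStream, and getArguments()(1) picks out the second argument of the stubbed call. A hedged sketch of how such a pass-through Answer is typically wired up with Mockito 1.x (the Wrapper trait and wrap method below are illustrative stand-ins, not the suite's real wrapForCompression stub):

    import java.io.{ByteArrayInputStream, InputStream}
    import org.mockito.Matchers.any
    import org.mockito.Mockito.{mock, when}
    import org.mockito.invocation.InvocationOnMock
    import org.mockito.stubbing.Answer

    trait Wrapper { def wrap(tag: String, in: InputStream): InputStream }

    val passThrough = new Answer[InputStream] {
      // Return the second argument of whatever call was stubbed, unchanged.
      override def answer(invocation: InvocationOnMock): InputStream =
        invocation.getArguments()(1).asInstanceOf[InputStream]
    }

    val wrapper = mock(classOf[Wrapper])
    when(wrapper.wrap(any[String](), any[InputStream]())).thenAnswer(passThrough)

    val in = new ByteArrayInputStream(Array[Byte](1, 2, 3))
    assert(wrapper.wrap("ignored", in) eq in)  // the stream comes back untouched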
@@ -118,7 +118,7 @@ class HashShuffleReaderSuite extends SparkFunSuite with LocalSparkContext {
     // Test a scenario where all data is local, just to avoid creating a bunch of additional mocks
     // for the code to read data over the network.
     val statuses: Array[(BlockManagerId, Long)] =
-      Array.fill(numMaps)((localBlockManagerId, byteOutputStream.size()))
+      Array.fill(numMaps)((localBlockManagerId, byteOutputStream.size().toLong))
     when(mapOutputTracker.getServerStatuses(shuffleId, reduceId)).thenReturn(statuses)
 
     // Create a mocked shuffle handle to pass into HashShuffleReader.
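The .toLong in this hunk makes a previously implicit numeric widening explicit: assuming byteOutputStream is a java.io.ByteArrayOutputStream (as its name suggests), size() returns an Int, while statuses is typed Array[(BlockManagerId, Long)]. Scala widens Int to Long implicitly, so both forms compile; the new one just makes the conversion visible:

    val out = new java.io.ByteArrayOutputStream()
    out.write(Array[Byte](1, 2, 3))
    val n: Int = out.size()          // ByteArrayOutputStream.size() is declared as Int
    val widened: Long = n            // compiles via implicit Int-to-Long widening
    val explicit: Long = n.toLong    // the form this change adopts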