@@ -35,6 +35,12 @@ import org.apache.spark.network._
 import org.apache.spark.serializer.Serializer
 import org.apache.spark.util._
 
+sealed trait Values
+
+case class ByteBufferValues(buffer: ByteBuffer) extends Values
+case class IteratorValues(iterator: Iterator[Any]) extends Values
+case class ArrayBufferValues(buffer: ArrayBuffer[Any]) extends Values
+
 private[spark] class BlockManager(
     executorId: String,
     actorSystem: ActorSystem,
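Editor's note: the hunk above replaces the nested Either[Either[Iterator[Any], ArrayBuffer[Any]], ByteBuffer] payload type with a sealed trait. A self-contained sketch of the payoff follows, outside the patch: because Values is sealed, the compiler checks matches over it for exhaustiveness, which the nested Either encoding obscured. The ValuesDemo object and its describe helper are hypothetical, added only for illustration.

import java.nio.ByteBuffer
import scala.collection.mutable.ArrayBuffer

sealed trait Values
case class ByteBufferValues(buffer: ByteBuffer) extends Values
case class IteratorValues(iterator: Iterator[Any]) extends Values
case class ArrayBufferValues(buffer: ArrayBuffer[Any]) extends Values

object ValuesDemo {
  // Dropping any of the three cases below triggers a non-exhaustive-match
  // warning at compile time, since Values is sealed in this file.
  def describe(v: Values): String = v match {
    case ByteBufferValues(b)  => s"already serialized: ${b.remaining()} bytes"
    case IteratorValues(it)   => "lazy iterator of values"
    case ArrayBufferValues(a) => s"materialized buffer of ${a.size} values"
  }

  def main(args: Array[String]): Unit = {
    println(describe(ByteBufferValues(ByteBuffer.allocate(16))))
    println(describe(IteratorValues(Iterator(1, 2, 3))))
    println(describe(ArrayBufferValues(ArrayBuffer[Any]("a", "b"))))
  }
}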
@@ -455,7 +461,7 @@ private[spark] class BlockManager(
 
   def put(blockId: BlockId, values: Iterator[Any], level: StorageLevel, tellMaster: Boolean)
     : Long = {
-    doPut(blockId, Left(Left(values)), level, tellMaster)
+    doPut(blockId, IteratorValues(values), level, tellMaster)
   }
 
   /**
@@ -477,7 +483,7 @@ private[spark] class BlockManager(
   def put(blockId: BlockId, values: ArrayBuffer[Any], level: StorageLevel,
           tellMaster: Boolean = true): Long = {
     require(values != null, "Values is null")
-    doPut(blockId, Left(Right(values)), level, tellMaster)
+    doPut(blockId, ArrayBufferValues(values), level, tellMaster)
   }
 
   /**
@@ -486,11 +492,11 @@ private[spark] class BlockManager(
   def putBytes(blockId: BlockId, bytes: ByteBuffer, level: StorageLevel,
                tellMaster: Boolean = true) {
     require(bytes != null, "Bytes is null")
-    doPut(blockId, Right(bytes), level, tellMaster)
+    doPut(blockId, ByteBufferValues(bytes), level, tellMaster)
   }
 
   private def doPut(blockId: BlockId,
-                    data: Either[Either[Iterator[Any], ArrayBuffer[Any]], ByteBuffer],
+                    data: Values,
                     level: StorageLevel, tellMaster: Boolean = true): Long = {
     require(blockId != null, "BlockId is null")
     require(level != null && level.isValid, "StorageLevel is null or invalid")
@@ -533,8 +539,9 @@ private[spark] class BlockManager(
 
     // If we're storing bytes, then initiate the replication before storing them locally.
     // This is faster as data is already serialized and ready to send.
-    val replicationFuture = if (data.isRight && level.replication > 1) {
-      val bufferView = data.right.get.duplicate() // Doesn't copy the bytes, just creates a wrapper
+    val replicationFuture = if (data.isInstanceOf[ByteBufferValues] && level.replication > 1) {
+      // Duplicate doesn't copy the bytes, just creates a wrapper
+      val bufferView = data.asInstanceOf[ByteBufferValues].buffer.duplicate()
       Future {
         replicate(blockId, bufferView, level)
       }
@@ -548,42 +555,43 @@ private[spark] class BlockManager(
 
     var marked = false
     try {
-      data match {
-        case Left(values) => {
-          if (level.useMemory) {
-            // Save it just to memory first, even if it also has useDisk set to true; we will
-            // drop it to disk later if the memory store can't hold it.
-            val res = values match {
-              case Left(values_i) => memoryStore.putValues(blockId, values_i, level, true)
-              case Right(values_a) => memoryStore.putValues(blockId, values_a, level, true)
-            }
-            size = res.size
-            res.data match {
-              case Right(newBytes) => bytesAfterPut = newBytes
-              case Left(newIterator) => valuesAfterPut = newIterator
-            }
-          } else {
-            // Save directly to disk.
-            // Don't get back the bytes unless we replicate them.
-            val askForBytes = level.replication > 1
-
-            val res = values match {
-              case Left(values_i) => diskStore.putValues(blockId, values_i, level, askForBytes)
-              case Right(values_a) => diskStore.putValues(blockId, values_a, level, askForBytes)
-            }
-
-            size = res.size
-            res.data match {
-              case Right(newBytes) => bytesAfterPut = newBytes
-              case _ =>
-            }
+      if (level.useMemory) {
+        // Save it just to memory first, even if it also has useDisk set to true; we will
+        // drop it to disk later if the memory store can't hold it.
+        val res = data match {
+          case IteratorValues(values_i) =>
+            memoryStore.putValues(blockId, values_i, level, true)
+          case ArrayBufferValues(values_a) =>
+            memoryStore.putValues(blockId, values_a, level, true)
+          case ByteBufferValues(value_bytes) => {
+            value_bytes.rewind();
+            memoryStore.putBytes(blockId, value_bytes, level)
+          }
+        }
+        size = res.size
+        res.data match {
+          case Right(newBytes) => bytesAfterPut = newBytes
+          case Left(newIterator) => valuesAfterPut = newIterator
+        }
+      } else {
+        // Save directly to disk.
+        // Don't get back the bytes unless we replicate them.
+        val askForBytes = level.replication > 1
+
+        val res = data match {
+          case IteratorValues(values_i) =>
+            diskStore.putValues(blockId, values_i, level, askForBytes)
+          case ArrayBufferValues(values_a) =>
+            diskStore.putValues(blockId, values_a, level, askForBytes)
+          case ByteBufferValues(value_bytes) => {
+            value_bytes.rewind();
+            diskStore.putBytes(blockId, value_bytes, level)
           }
         }
-        case Right(bytes) => {
-          bytes.rewind()
-          // Store it only in memory at first, even if useDisk is also set to true
-          (if (level.useMemory) memoryStore else diskStore).putBytes(blockId, bytes, level)
-          size = bytes.limit
+        size = res.size
+        res.data match {
+          case Right(newBytes) => bytesAfterPut = newBytes
+          case _ =>
         }
       }
 
@@ -612,8 +620,8 @@ private[spark] class BlockManager(
     // values and need to serialize and replicate them now:
     if (level.replication > 1) {
       data match {
-        case Right(bytes) => Await.ready(replicationFuture, Duration.Inf)
-        case Left(values) => {
+        case ByteBufferValues(bytes) => Await.ready(replicationFuture, Duration.Inf)
+        case _ => {
           val remoteStartTime = System.currentTimeMillis
           // Serialize the block if not already done
           if (bytesAfterPut == null) {
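Editor's note: the patch leans on two ByteBuffer behaviors. duplicate() shares the underlying bytes but keeps an independent position, so the replication future can consume its view without disturbing the buffer handed to the local store; rewind() resets the position so putBytes reads from the beginning even after a prior read. A standalone sketch, illustrative only (the BufferDemo object and its sample string are not from the patch):

import java.nio.ByteBuffer

object BufferDemo {
  def main(args: Array[String]): Unit = {
    val original = ByteBuffer.wrap("block-data".getBytes("UTF-8"))

    // Replication path: consume a duplicate, leaving `original` untouched.
    val view = original.duplicate()
    view.get(new Array[Byte](view.remaining()))
    println(s"view exhausted, original still has ${original.remaining()} bytes")

    // Store path: after any prior read, rewind before handing the buffer over.
    original.get(new Array[Byte](original.remaining()))
    original.rewind()
    println(s"after rewind, ${original.remaining()} bytes readable again")
  }
}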