
Commit 1a40a2a

darabos authored and markhamstra committed
SPARK-1188: Do not re-use objects in the EdgePartition/EdgeTriplet iterators.
This avoids a silent data corruption issue (https://spark-project.atlassian.net/browse/SPARK-1188) and has no performance impact in my measurements. It also simplifies the code. As far as I can tell, the object re-use was nothing but premature optimization.

I did actual benchmarks for all the included changes, and there is no performance difference. I am not sure where to put the benchmarks. Does Spark not have a benchmark suite? This is an example benchmark I did:

```scala
test("benchmark") {
  val builder = new EdgePartitionBuilder[Int]
  for (i <- (1 to 10000000)) {
    builder.add(i.toLong, i.toLong, i)
  }
  val p = builder.toEdgePartition
  p.map(_.attr + 1).iterator.toList
}
```

It ran for 10 seconds both before and after this change.

Author: Daniel Darabos <[email protected]>

Closes apache#276 from darabos/spark-1188 and squashes the following commits:

- 574302b [Daniel Darabos] Restore "manual" copying in EdgePartition.map(Iterator). Add a comment to discourage novices like myself from trying to simplify the code.
- 4117a64 [Daniel Darabos] Revert EdgePartitionSuite.
- 4955697 [Daniel Darabos] Create a copy of the Edge objects in EdgeRDD.compute(). This avoids exposing the object re-use, while still enabling the more efficient behavior for internal code.
- 4ec77f8 [Daniel Darabos] Add comments about object re-use to the affected functions.
- 2da5e87 [Daniel Darabos] Restore object re-use in EdgePartition.
- 0182f2b [Daniel Darabos] Do not re-use objects in the EdgePartition/EdgeTriplet iterators. This avoids a silent data corruption issue (SPARK-1188) and has no performance impact in my measurements. It also simplifies the code.
- c55f52f [Daniel Darabos] Tests that reproduce the problems from SPARK-1188.

(cherry picked from commit 7823633)
Signed-off-by: Reynold Xin <[email protected]>
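To make the failure mode concrete, here is a minimal, self-contained sketch (not Spark code; `Box` and `reusingIterator` are made-up names for illustration) of how a re-using iterator silently corrupts any collection built from it, and how copying at the boundary fixes it:

```scala
// Hypothetical illustration of the SPARK-1188 failure mode.
// An iterator that mutates and re-yields one shared object corrupts any
// collection built from it: every slot references the same instance,
// which ends up holding the values of the last element.
class Box(var value: Int)

object ReuseDemo extends App {
  def reusingIterator(n: Int): Iterator[Box] = new Iterator[Box] {
    private val box = new Box(0) // the single shared, mutable object
    private var i = 0
    def hasNext: Boolean = i < n
    def next(): Box = { box.value = i; i += 1; box }
  }

  // Corrupted: prints List(2, 2, 2), because all three list entries
  // are the same Box, last written with value 2.
  println(reusingIterator(3).toList.map(_.value))

  // Safe: prints List(0, 1, 2). Copying each element before the next
  // mutation (the same idea as `.map(_.copy())` in EdgeRDD.compute
  // below) hands the caller distinct objects.
  println(reusingIterator(3).map(b => new Box(b.value)).toList.map(_.value))
}
```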
1 parent: 121f4c7 · commit: 1a40a2a

4 files changed (+58, −10)

graphx/src/main/scala/org/apache/spark/graphx/EdgeRDD.scala (2 additions, 1 deletion)

```diff
@@ -45,7 +45,8 @@ class EdgeRDD[@specialized ED: ClassTag](
     partitionsRDD.partitioner.orElse(Some(Partitioner.defaultPartitioner(partitionsRDD)))
 
   override def compute(part: Partition, context: TaskContext): Iterator[Edge[ED]] = {
-    firstParent[(PartitionID, EdgePartition[ED])].iterator(part, context).next._2.iterator
+    val p = firstParent[(PartitionID, EdgePartition[ED])].iterator(part, context)
+    p.next._2.iterator.map(_.copy())
   }
 
   override def collect(): Array[Edge[ED]] = this.map(_.copy()).collect()
```
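The `.map(_.copy())` on the returned iterator is where the internal object re-use stops being visible to callers: `Edge` is a case class, so `copy()` yields a fresh instance with the same fields, and anything that collects or caches the edges from `compute` now holds distinct objects. Internal GraphX code can still call `EdgePartition.iterator` directly and keep the allocation-free fast path.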

graphx/src/main/scala/org/apache/spark/graphx/impl/EdgePartition.scala (12 additions, 3 deletions)

```diff
@@ -56,6 +56,9 @@ class EdgePartition[@specialized(Char, Int, Boolean, Byte, Long, Float, Double)
    * Construct a new edge partition by applying the function f to all
    * edges in this partition.
    *
+   * Be careful not to keep references to the objects passed to `f`.
+   * To improve GC performance the same object is re-used for each call.
+   *
    * @param f a function from an edge to a new attribute
    * @tparam ED2 the type of the new attribute
    * @return a new edge partition with the result of the function `f`
@@ -84,12 +87,12 @@ class EdgePartition[@specialized(Char, Int, Boolean, Byte, Long, Float, Double)
    * order of the edges returned by `EdgePartition.iterator` and
    * should return attributes equal to the number of edges.
    *
-   * @param f a function from an edge to a new attribute
+   * @param iter an iterator for the new attribute values
    * @tparam ED2 the type of the new attribute
-   * @return a new edge partition with the result of the function `f`
-   *         applied to each edge
+   * @return a new edge partition with the attribute values replaced
    */
   def map[ED2: ClassTag](iter: Iterator[ED2]): EdgePartition[ED2] = {
+    // Faster than iter.toArray, because the expected size is known.
     val newData = new Array[ED2](data.size)
     var i = 0
     while (iter.hasNext) {
@@ -188,6 +191,9 @@ class EdgePartition[@specialized(Char, Int, Boolean, Byte, Long, Float, Double)
   /**
    * Get an iterator over the edges in this partition.
    *
+   * Be careful not to keep references to the objects from this iterator.
+   * To improve GC performance the same object is re-used in `next()`.
+   *
    * @return an iterator over edges in the partition
    */
   def iterator = new Iterator[Edge[ED]] {
@@ -216,6 +222,9 @@ class EdgePartition[@specialized(Char, Int, Boolean, Byte, Long, Float, Double)
   /**
    * Get an iterator over the cluster of edges in this partition with source vertex id `srcId`. The
    * cluster must start at position `index`.
+   *
+   * Be careful not to keep references to the objects from this iterator. To improve GC performance
+   * the same object is re-used in `next()`.
    */
   private def clusterIterator(srcId: VertexId, index: Int) = new Iterator[Edge[ED]] {
     private[this] val edge = new Edge[ED]
```
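The new comment in `map(Iterator)` explains why the manual while-loop is kept instead of `iter.toArray`: the element count is known up front. Here is a small sketch of that pattern, assuming a hypothetical `drainKnownSize` helper (not part of Spark):

```scala
import scala.reflect.ClassTag

// Copy an iterator whose length is known in advance into a pre-allocated
// array. Unlike iter.toArray on an iterator of unknown size, this never
// has to grow and re-copy an internal buffer: the destination is sized
// exactly once.
def drainKnownSize[T: ClassTag](iter: Iterator[T], size: Int): Array[T] = {
  val out = new Array[T](size)
  var i = 0
  while (iter.hasNext) {
    out(i) = iter.next()
    i += 1
  }
  require(i == size, s"expected $size elements, got $i")
  out
}

// Example: drainKnownSize(Iterator(1, 2, 3), 3) produces Array(1, 2, 3).
```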

graphx/src/main/scala/org/apache/spark/graphx/impl/EdgeTripletIterator.scala (1 addition, 6 deletions)

```diff
@@ -37,20 +37,15 @@ class EdgeTripletIterator[VD: ClassTag, ED: ClassTag](
   // Current position in the array.
   private var pos = 0
 
-  // A triplet object that this iterator.next() call returns. We reuse this object to avoid
-  // allocating too many temporary Java objects.
-  private val triplet = new EdgeTriplet[VD, ED]
-
   private val vmap = new PrimitiveKeyOpenHashMap[VertexId, VD](vidToIndex, vertexArray)
 
   override def hasNext: Boolean = pos < edgePartition.size
 
   override def next() = {
+    val triplet = new EdgeTriplet[VD, ED]
     triplet.srcId = edgePartition.srcIds(pos)
-    // assert(vmap.containsKey(e.src.id))
     triplet.srcAttr = vmap(triplet.srcId)
     triplet.dstId = edgePartition.dstIds(pos)
-    // assert(vmap.containsKey(e.dst.id))
     triplet.dstAttr = vmap(triplet.dstId)
     triplet.attr = edgePartition.data(pos)
     pos += 1
```
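Allocating a fresh `EdgeTriplet` on every `next()` call is the straightforward fix: each triplet handed out is independent, so `iterator.toList` (exercised by the new test below) sees distinct objects. Per the benchmarks quoted in the commit message, the extra short-lived allocations showed no measurable cost, which is consistent with cheap young-generation allocation on the JVM.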
graphx/src/test/scala/org/apache/spark/graphx/impl/EdgeTripletIteratorSuite.scala (new file, 43 additions)

```diff
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.graphx.impl
+
+import scala.reflect.ClassTag
+import scala.util.Random
+
+import org.scalatest.FunSuite
+
+import org.apache.spark.graphx._
+
+class EdgeTripletIteratorSuite extends FunSuite {
+  test("iterator.toList") {
+    val builder = new EdgePartitionBuilder[Int]
+    builder.add(1, 2, 0)
+    builder.add(1, 3, 0)
+    builder.add(1, 4, 0)
+    val vidmap = new VertexIdToIndexMap
+    vidmap.add(1)
+    vidmap.add(2)
+    vidmap.add(3)
+    vidmap.add(4)
+    val vs = Array.fill(vidmap.capacity)(0)
+    val iter = new EdgeTripletIterator[Int, Int](vidmap, vs, builder.toEdgePartition)
+    val result = iter.toList.map(et => (et.srcId, et.dstId))
+    assert(result === Seq((1, 2), (1, 3), (1, 4)))
+  }
+}
```
