File tree Expand file tree Collapse file tree 2 files changed +2
-7
lines changed Expand file tree Collapse file tree 2 files changed +2
-7
lines changed Original file line number Diff line number Diff line change 21
21
<parent>
22
22
<groupId>org.apache.spark</groupId>
23
23
<artifactId>spark-parent</artifactId>
24
- <<<<<<< HEAD
25
24
<version>1.2.0-SNAPSHOT</version>
26
- =======
27
- <version>1.1.0-SNAPSHOT</version>
28
- >>>>>>> modified the code base on comment in https://github.com/tdas/spark/pull/10
29
25
<relativePath>../pom.xml</relativePath>
30
26
</parent>
31
27
Original file line number Diff line number Diff line change 22
22
from pyspark.serializers import NoOpSerializer,\
23
23
BatchedSerializer, CloudPickleSerializer, pack_long,\
24
24
CompressedSerializer
25
- from pyspark.rdd import _JavaStackTrace
26
25
from pyspark.storagelevel import StorageLevel
27
26
from pyspark.resultiterable import ResultIterable
28
27
from pyspark.streaming.util import rddToFileName, RDDFunction
29
-
28
+ from pyspark.traceback_utils import SCCallSiteSync
30
29
31
30
from py4j.java_collections import ListConverter, MapConverter
32
31
@@ -187,7 +186,7 @@ def add_shuffle_key(split, iterator):
187
186
yield outputSerializer.dumps(items)
188
187
keyed = PipelinedDStream(self, add_shuffle_key)
189
188
keyed._bypass_serializer = True
190
- with _JavaStackTrace(self.ctx) as st:
189
+ with SCCallSiteSync(self.context) as css:
191
190
partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
192
191
id(partitionFunc))
193
192
jdstream = self.ctx._jvm.PythonPairwiseDStream(keyed._jdstream.dstream(),
You can’t perform that action at this time.
0 commit comments