@@ -53,13 +53,13 @@ class FlumePollingInputDStream[T: ClassTag](
     val maxBatchSize: Int,
     val parallelism: Int,
     storageLevel: StorageLevel
-  ) extends ReceiverInputDStream[SparkPollingEvent](ssc_) {
+  ) extends ReceiverInputDStream[SparkFlumePollingEvent](ssc_) {
   /**
    * Gets the receiver object that will be sent to the worker nodes
    * to receive data. This method needs to defined by any specific implementation
    * of a NetworkInputDStream.
    */
-  override def getReceiver(): Receiver[SparkPollingEvent] = {
+  override def getReceiver(): Receiver[SparkFlumePollingEvent] = {
     new FlumePollingReceiver(addresses, maxBatchSize, parallelism, storageLevel)
   }
 }
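For context, the rename only changes the event type parameter; the DStream/receiver contract itself is unchanged. A minimal sketch of that contract, using Spark Streaming's public API (the `Example*` names are illustrative, not from this PR):

```scala
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.Receiver

// Illustrative only: a ReceiverInputDStream is essentially a factory for a
// Receiver of the same element type, which Spark ships to a worker to run.
class ExampleDStream(ssc: StreamingContext)
  extends ReceiverInputDStream[String](ssc) {
  override def getReceiver(): Receiver[String] = new ExampleReceiver
}

class ExampleReceiver extends Receiver[String](StorageLevel.MEMORY_ONLY) {
  override def onStart(): Unit = store("hello") // push data into Spark
  override def onStop(): Unit = ()
}
```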
@@ -69,7 +69,7 @@ private[streaming] class FlumePollingReceiver(
     maxBatchSize: Int,
     parallelism: Int,
     storageLevel: StorageLevel
-  ) extends Receiver[SparkPollingEvent](storageLevel) with Logging {
+  ) extends Receiver[SparkFlumePollingEvent](storageLevel) with Logging {
 
   lazy val channelFactoryExecutor =
     Executors.newCachedThreadPool(new ThreadFactoryBuilder().setDaemon(true).
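The receiver builds its Netty channel factory on a cached thread pool whose threads are daemons, so the pool can never keep the JVM alive at shutdown. A sketch of that Guava idiom (the name-format string below is an assumption for illustration, not taken from this diff):

```scala
import java.util.concurrent.Executors
import com.google.common.util.concurrent.ThreadFactoryBuilder

// Guava's ThreadFactoryBuilder: daemon threads plus a recognizable name
// pattern for thread dumps. The name format here is illustrative.
val channelThreadFactory = new ThreadFactoryBuilder()
  .setDaemon(true)
  .setNameFormat("Flume Receiver Channel Thread - %d")
  .build()
val pool = Executors.newCachedThreadPool(channelThreadFactory)
```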
@@ -105,7 +105,7 @@ private[streaming] class FlumePollingReceiver(
         logDebug("Received batch of " + events.size() + " events with sequence number: " + seq)
         try {
           // Convert each Flume event to a serializable SparkPollingEvent
-          events.foreach(event => store(SparkPollingEvent.fromSparkSinkEvent(event)))
+          events.foreach(event => store(SparkFlumePollingEvent.fromSparkSinkEvent(event)))
           // Send an ack to Flume so that Flume discards the events from its channels.
           client.ack(seq)
         } catch {
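The store-then-ack ordering is the core of the polling receiver's delivery guarantee: a batch is acknowledged, and thus dropped from Flume's channel, only after every event has been stored, so a failed batch remains available for redelivery. A sketch of that loop, assuming the `SparkFlumeProtocol.Callback` from the companion sink exposes `getEventBatch`/`ack`/`nack` (only `ack` appears in this hunk; `drainBatch` and its parameters are illustrative):

```scala
import scala.collection.JavaConversions._

// Illustrative pull-then-ack loop; method names on the Avro callback other
// than ack(seq) are assumptions based on the companion spark-sink module.
def drainBatch(client: SparkFlumeProtocol.Callback,
    receiver: FlumePollingReceiver, maxBatchSize: Int): Unit = {
  val batch = client.getEventBatch(maxBatchSize)
  val seq = batch.getSequenceNumber
  try {
    batch.getEvents.foreach { e =>
      receiver.store(SparkFlumePollingEvent.fromSparkSinkEvent(e))
    }
    client.ack(seq)    // Flume may now discard the events from its channel
  } catch {
    case t: Throwable =>
      client.nack(seq) // leave the events in the channel for redelivery
      throw t
  }
}
```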
@@ -129,7 +129,7 @@ private[streaming] class FlumePollingReceiver(
     }
   }
 
-  override def store(dataItem: SparkPollingEvent) {
+  override def store(dataItem: SparkFlumePollingEvent) {
     // Not entirely sure store is thread-safe for all storage levels - so wrap it in synchronized
     // This takes a performance hit, since the parallelism is useful only for pulling data now.
     this.synchronized {
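Because up to `parallelism` pulling threads can call `store` concurrently, and the comment above doubts that `store` is thread-safe for every storage level, the override funnels all stores through a single lock: pulling from Flume stays parallel, but handing data to Spark becomes sequential. The hunk is cut off at `this.synchronized {`; a sketch assuming the body simply delegates to the inherited `store`:

```scala
// Illustrative completion of the override above; the delegation to
// super.store is an assumption, not shown in this hunk.
override def store(dataItem: SparkFlumePollingEvent) {
  this.synchronized {
    super.store(dataItem)
  }
}
```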
@@ -155,9 +155,9 @@ private[streaming] class FlumePollingReceiver(
 private class FlumeConnection(val transceiver: NettyTransceiver,
   val client: SparkFlumeProtocol.Callback)
 
-private[streaming] object SparkPollingEvent {
-  def fromSparkSinkEvent(in: SparkSinkEvent): SparkPollingEvent = {
-    val event = new SparkPollingEvent()
+private[streaming] object SparkFlumePollingEvent {
+  def fromSparkSinkEvent(in: SparkSinkEvent): SparkFlumePollingEvent = {
+    val event = new SparkFlumePollingEvent()
     event.event = in
     event
   }
@@ -167,7 +167,7 @@ private[streaming] object SparkPollingEvent {
  * SparkSinkEvent is identical to AvroFlumeEvent, we need to create a new class and a wrapper
  * around that to make it externalizable.
  */
-class SparkPollingEvent() extends Externalizable with Logging {
+class SparkFlumePollingEvent() extends Externalizable with Logging {
   var event: SparkSinkEvent = new SparkSinkEvent()
 
   /* De-serialize from bytes. */
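The wrapper exists because the Avro-generated `SparkSinkEvent` is not Java-serializable on its own, so the class hand-rolls its wire format via `java.io.Externalizable`. A sketch of how such a wrapper typically writes itself out, body bytes first and then the header map (the exact field layout is an assumption; this hunk ends before the serialization code):

```scala
import java.io.{Externalizable, ObjectInput, ObjectOutput}
import scala.collection.JavaConversions._

// Illustrative Externalizable plumbing for an event wrapper. The field
// order (body length, body, header count, key/value pairs) is assumed.
class ExternalizableEventSketch extends Externalizable {
  var body: Array[Byte] = Array.empty
  var headers: java.util.Map[String, String] = new java.util.HashMap()

  override def writeExternal(out: ObjectOutput): Unit = {
    out.writeInt(body.length)
    out.write(body)
    out.writeInt(headers.size())
    headers.foreach { case (k, v) => out.writeUTF(k); out.writeUTF(v) }
  }

  override def readExternal(in: ObjectInput): Unit = {
    body = new Array[Byte](in.readInt())
    in.readFully(body)
    val numHeaders = in.readInt()
    val map = new java.util.HashMap[String, String]()
    for (_ <- 0 until numHeaders) {
      map.put(in.readUTF(), in.readUTF())
    }
    headers = map
  }
}
```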