This repository was archived by the owner on May 9, 2024. It is now read-only.

Commit 37af91a

Merge remote-tracking branch 'upstream/master' into ldaonline
2 parents 20328d1 + 49c7a8f commit 37af91a

File tree

8 files changed (+11 / -10 lines)

core/src/main/scala/org/apache/spark/ContextCleaner.scala

Lines changed: 2 additions & 2 deletions
@@ -188,10 +188,10 @@ private[spark] class ContextCleaner(sc: SparkContext) extends Logging {
   /** Perform broadcast cleanup. */
   def doCleanupBroadcast(broadcastId: Long, blocking: Boolean) {
     try {
-      logDebug("Cleaning broadcast " + broadcastId)
+      logDebug(s"Cleaning broadcast $broadcastId")
       broadcastManager.unbroadcast(broadcastId, true, blocking)
       listeners.foreach(_.broadcastCleaned(broadcastId))
-      logInfo("Cleaned broadcast " + broadcastId)
+      logDebug(s"Cleaned broadcast $broadcastId")
     } catch {
       case e: Exception => logError("Error cleaning broadcast " + broadcastId, e)
     }
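Note: these hunks lower the broadcast-cleanup messages to debug level and switch to string interpolation. A minimal, self-contained sketch of why this stays cheap when debug logging is off, assuming a by-name message parameter (as Spark's Logging helpers take); the LogSketch object and debugEnabled flag below are illustrative assumptions, not Spark code:

    object LogSketch {
      // Hypothetical flag standing in for the logger's isDebugEnabled check.
      var debugEnabled: Boolean = false

      // The message is passed by name, so the interpolated string is only
      // built when debug logging is actually enabled.
      def logDebug(msg: => String): Unit =
        if (debugEnabled) println(s"DEBUG $msg")

      def main(args: Array[String]): Unit = {
        val broadcastId = 42L
        logDebug(s"Cleaning broadcast $broadcastId")  // disabled: string never constructed
        debugEnabled = true
        logDebug(s"Cleaned broadcast $broadcastId")   // now evaluated and printed
      }
    }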

core/src/main/scala/org/apache/spark/storage/BlockManager.scala

Lines changed: 2 additions & 2 deletions
@@ -1074,7 +1074,7 @@ private[spark] class BlockManager(
    * Remove all blocks belonging to the given broadcast.
    */
   def removeBroadcast(broadcastId: Long, tellMaster: Boolean): Int = {
-    logInfo(s"Removing broadcast $broadcastId")
+    logDebug(s"Removing broadcast $broadcastId")
     val blocksToRemove = blockInfo.keys.collect {
       case bid @ BroadcastBlockId(`broadcastId`, _) => bid
     }
@@ -1086,7 +1086,7 @@ private[spark] class BlockManager(
    * Remove a block from both memory and disk.
    */
   def removeBlock(blockId: BlockId, tellMaster: Boolean = true): Unit = {
-    logInfo(s"Removing block $blockId")
+    logDebug(s"Removing block $blockId")
     val info = blockInfo.get(blockId).orNull
     if (info != null) {
       info.synchronized {

core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ class BlockManagerMaster(
       tachyonSize: Long): Boolean = {
     val res = askDriverWithReply[Boolean](
       UpdateBlockInfo(blockManagerId, blockId, storageLevel, memSize, diskSize, tachyonSize))
-    logInfo("Updated info of block " + blockId)
+    logDebug(s"Updated info of block $blockId")
     res
   }

core/src/main/scala/org/apache/spark/storage/MemoryStore.scala

Lines changed: 1 addition & 1 deletion
@@ -184,7 +184,7 @@ private[spark] class MemoryStore(blockManager: BlockManager, maxMemory: Long)
     val entry = entries.remove(blockId)
     if (entry != null) {
       currentMemory -= entry.size
-      logInfo(s"Block $blockId of size ${entry.size} dropped from memory (free $freeMemory)")
+      logDebug(s"Block $blockId of size ${entry.size} dropped from memory (free $freeMemory)")
       true
     } else {
       false

examples/scala-2.10/src/main/scala/org/apache/spark/examples/streaming/DirectKafkaWordCount.scala

Lines changed: 2 additions & 1 deletion
@@ -30,7 +30,8 @@ import org.apache.spark.SparkConf
  *   <topics> is a list of one or more kafka topics to consume from
  *
  * Example:
- *    $ bin/run-example streaming.KafkaWordCount broker1-host:port,broker2-host:port topic1,topic2
+ *    $ bin/run-example streaming.DirectKafkaWordCount broker1-host:port,broker2-host:port \
+ *    topic1,topic2
  */
 object DirectKafkaWordCount {
   def main(args: Array[String]) {
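For context, a hedged sketch of how a direct (receiver-less) Kafka word count like the one documented above is typically wired up; the object name DirectKafkaWordCountSketch, the 2-second batch interval, and the variable names are assumptions, not quoted from the example file:

    import kafka.serializer.StringDecoder

    import org.apache.spark.SparkConf
    import org.apache.spark.streaming.{Seconds, StreamingContext}
    import org.apache.spark.streaming.kafka.KafkaUtils

    object DirectKafkaWordCountSketch {
      def main(args: Array[String]): Unit = {
        // args: <brokers> <topics>, e.g. "broker1-host:port,broker2-host:port" "topic1,topic2"
        val Array(brokers, topics) = args

        val sparkConf = new SparkConf().setAppName("DirectKafkaWordCountSketch")
        val ssc = new StreamingContext(sparkConf, Seconds(2))

        // Direct stream: no receiver; one RDD partition per Kafka partition.
        val topicsSet = topics.split(",").toSet
        val kafkaParams = Map[String, String]("metadata.broker.list" -> brokers)
        val messages = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
          ssc, kafkaParams, topicsSet)

        // Classic word count over the message values.
        val words = messages.map(_._2).flatMap(_.split(" "))
        val wordCounts = words.map(x => (x, 1L)).reduceByKey(_ + _)
        wordCounts.print()

        ssc.start()
        ssc.awaitTermination()
      }
    }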

examples/src/main/python/streaming/kafka_wordcount.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@
     http://kafka.apache.org/documentation.html#quickstart

    and then run the example
-      `$ bin/spark-submit --driver-class-path external/kafka-assembly/target/scala-*/\
+      `$ bin/spark-submit --jars external/kafka-assembly/target/scala-*/\
       spark-streaming-kafka-assembly-*.jar examples/src/main/python/streaming/kafka_wordcount.py \
       localhost:2181 test`
 """

graphx/src/main/scala/org/apache/spark/graphx/impl/EdgeRDDImpl.scala

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ package org.apache.spark.graphx.impl

 import scala.reflect.{classTag, ClassTag}

-import org.apache.spark.{OneToOneDependency, HashPartitioner, TaskContext}
+import org.apache.spark.{OneToOneDependency, HashPartitioner}
 import org.apache.spark.rdd.RDD
 import org.apache.spark.storage.StorageLevel

python/pyspark/streaming/kafka.py

Lines changed: 1 addition & 1 deletion
@@ -82,7 +82,7 @@ def createStream(ssc, zkQuorum, groupId, topics, kafkaParams={},

    2. Download the JAR of the artifact from Maven Central http://search.maven.org/,
       Group Id = org.apache.spark, Artifact Id = spark-streaming-kafka-assembly, Version = %s.
-      Then, innclude the jar in the spark-submit command as
+      Then, include the jar in the spark-submit command as

       $ bin/spark-submit --jars <spark-streaming-kafka-assembly.jar> ...
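The docstring fixed above belongs to the receiver-based createStream helper, run with the assembly jar supplied through --jars. For reference, a hedged Scala sketch of the equivalent receiver-based stream; the object name, ZooKeeper address, group id, and topic map are placeholder assumptions:

    import org.apache.spark.SparkConf
    import org.apache.spark.streaming.{Seconds, StreamingContext}
    import org.apache.spark.streaming.kafka.KafkaUtils

    object ReceiverKafkaSketch {
      def main(args: Array[String]): Unit = {
        val sparkConf = new SparkConf().setAppName("ReceiverKafkaSketch")
        val ssc = new StreamingContext(sparkConf, Seconds(2))

        // Receiver-based stream: connects through ZooKeeper and maps each
        // topic to a number of receiver threads.
        val topicMap = Map("test" -> 1)
        val lines = KafkaUtils.createStream(ssc, "localhost:2181", "my-consumer-group", topicMap)
          .map(_._2)
        lines.print()

        ssc.start()
        ssc.awaitTermination()
      }
    }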

0 commit comments
