Skip to content

Commit 658fe8f

Browse files
scwf authored and marmbrus committed
[SPARK-4695][SQL] Get result using executeCollect
Using ```executeCollect``` to collect the result, because executeCollect is a custom implementation of collect in Spark SQL which is better than RDD's collect. Author: wangfei <[email protected]> Closes #3547 from scwf/executeCollect and squashes the following commits: a5ab68e [wangfei] Revert "adding debug info" a60d680 [wangfei] fix test failure 0db7ce8 [wangfei] adding debug info 184c594 [wangfei] using executeCollect instead collect (cherry picked from commit 3ae0cda) Signed-off-by: Michael Armbrust <[email protected]>
1 parent adc5d6f commit 658fe8f

File tree

1 file changed

+3
-1
lines changed

1 file changed

+3
-1
lines changed

sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -377,7 +377,7 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
377377
command.executeCollect().map(_.head.toString)
378378

379379
case other =>
380-
val result: Seq[Seq[Any]] = toRdd.map(_.copy()).collect().toSeq
380+
val result: Seq[Seq[Any]] = other.executeCollect().toSeq
381381
// We need the types so we can output struct field names
382382
val types = analyzed.output.map(_.dataType)
383383
// Reformat to match hive tab delimited output.
@@ -416,6 +416,8 @@ object HiveContext {
416416
case (bin: Array[Byte], BinaryType) => new String(bin, "UTF-8")
417417
case (decimal: Decimal, DecimalType()) => // Hive strips trailing zeros so use its toString
418418
HiveShim.createDecimal(decimal.toBigDecimal.underlying()).toString
419+
case (decimal: BigDecimal, DecimalType()) =>
420+
HiveShim.createDecimal(decimal.underlying()).toString
419421
case (other, tpe) if primitiveTypes contains tpe => other.toString
420422
}
421423

0 commit comments

Comments (0)