
Commit 888968f (parent: 27540ba)

Fix code style issues

4 files changed (+21, -14 lines)


sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
(11 additions, 5 deletions)

@@ -51,8 +51,10 @@ private[hive] sealed trait TableReader {
  * data warehouse directory.
  */
 private[hive]
-class HadoopTableReader(@transient attributes: Seq[Attribute],
-    @transient relation: MetastoreRelation, @transient sc: HiveContext)
+class HadoopTableReader(
+    @transient attributes: Seq[Attribute],
+    @transient relation: MetastoreRelation,
+    @transient sc: HiveContext)
   extends TableReader {
 
   // Choose the minimum number of splits. If mapred.map.tasks is set, then use that unless

@@ -135,7 +137,8 @@ class HadoopTableReader(@transient attributes: Seq[Attribute],
    * subdirectory of each partition being read. If None, then all files are accepted.
    */
   def makeRDDForPartitionedTable(
-      partitionToDeserializer: Map[HivePartition, Class[_ <: Deserializer]],
+      partitionToDeserializer: Map[HivePartition,
+        Class[_ <: Deserializer]],
       filterOpt: Option[PathFilter]): RDD[Row] = {
     val hivePartitionRDDs = partitionToDeserializer.map { case (partition, partDeserializer) =>
       val partDesc = Utilities.getPartitionDesc(partition)

@@ -261,8 +264,11 @@ private[hive] object HadoopTableReader extends HiveInspectors {
    *
    * @return Iterable Row object that transformed from the given iterable input.
    */
-  def fillObject(iter: Iterator[Writable], deserializer: Deserializer,
-      attrs: Seq[(Attribute, Int)], row: GenericMutableRow): Iterator[Row] = {
+  def fillObject(
+      iter: Iterator[Writable],
+      deserializer: Deserializer,
+      attrs: Seq[(Attribute, Int)],
+      row: GenericMutableRow): Iterator[Row] = {
     val soi = deserializer.getObjectInspector().asInstanceOf[StructObjectInspector]
     // get the field references according to the attributes(output of the reader) required
     val fieldRefs = attrs.map { case (attr, idx) => (soi.getStructFieldRef(attr.name), idx) }
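
The reformatting above applies the usual Spark Scala style rule: when a signature's parameters do not fit on one line, each parameter moves to its own line at four spaces of indentation, and the extends clause sits at two spaces. A minimal, self-contained sketch of the same convention (the class and parameter names here are invented for illustration):

    // Short signature: keep it on one line.
    class ShortReader(name: String)

    // Long signature: break after the opening parenthesis, one parameter
    // per line at four spaces of indentation; the extends clause sits at
    // two spaces, visually separating it from the parameter list.
    class WideTableReader(
        tableName: String,
        partitionColumns: Seq[String],
        minSplits: Int)
      extends Serializable {
      // class body
    }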

sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala
(1 addition, 1 deletion)

@@ -77,7 +77,7 @@ case class HiveTableScan(
     val columnInternalNames = neededColumnIDs.map(HiveConf.getColumnInternalName(_)).mkString(",")
 
     if (attributes.size == relation.output.size) {
-      // TODO what if duplicated attributes queried?
+      // SQLContext#pruneFilterProject guarantees no duplicated value in `attributes`
       ColumnProjectionUtils.setFullyReadColumns(hiveConf)
     } else {
       ColumnProjectionUtils.appendReadColumnIDs(hiveConf, neededColumnIDs)
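
The rewritten comment records the invariant that makes the size comparison sound: since `attributes` contains no duplicates, matching `relation.output` in size implies it covers every column of the relation. If duplicates were possible, the sizes could agree without full coverage, and the fully-read branch would read more columns than the query needs. A tiny hypothetical illustration of that failure mode (the column names are invented):

    // Hypothetical: why duplicates would break the size check.
    val relationOutput = Seq("key", "value", "ds")    // 3 distinct columns
    val dupAttributes  = Seq("key", "key", "value")   // also size 3
    assert(dupAttributes.size == relationOutput.size)           // sizes agree,
    assert(dupAttributes.distinct.size < relationOutput.size)   // coverage doesn't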

sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
(9 additions, 8 deletions)

@@ -17,9 +17,10 @@
 
 package org.apache.spark.sql.hive.execution
 
+import org.scalatest.{BeforeAndAfterAll, FunSuite}
+
 import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.sql.hive.test.TestHive
-import org.scalatest.{BeforeAndAfterAll, FunSuite}
 
 class HiveTableScanSuite extends HiveComparisonTest {
   // MINOR HACK: You must run a query before calling reset the first time.

@@ -31,17 +32,17 @@ class HiveTableScanSuite extends HiveComparisonTest {
       | 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
       | STORED AS RCFILE
     """.stripMargin)
-    TestHive.hql("""from src
-      | insert into table part_scan_test PARTITION (ds='2010-01-01')
-      | select 100,100 limit 1
+    TestHive.hql("""FROM src
+      | INSERT INTO TABLE part_scan_test PARTITION (ds='2010-01-01')
+      | SELECT 100,100 LIMIT 1
     """.stripMargin)
-    TestHive.hql("""ALTER TABLE part_scan_test set SERDE
+    TestHive.hql("""ALTER TABLE part_scan_test SET SERDE
       | 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
     """.stripMargin)
-    TestHive.hql("""from src insert into table part_scan_test PARTITION (ds='2010-01-02')
-      | select 200,200 limit 1
+    TestHive.hql("""FROM src INSERT INTO TABLE part_scan_test PARTITION (ds='2010-01-02')
+      | SELECT 200,200 LIMIT 1
     """.stripMargin)
 
   createQueryTest("partition_based_table_scan_with_different_serde",
-    "select * from part_scan_test", false)
+    "SELECT * from part_scan_test", false)
 }
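
For reference, a hedged sketch of how a sibling test could follow the uppercase-keyword style this commit adopts; the table name, partition value, and test name below are hypothetical, and the trailing boolean passed to createQueryTest simply mirrors its use in the diff above:

    // Hypothetical companion test in the commit's preferred style.
    TestHive.hql("""CREATE TABLE tmp_scan_test (key INT, value INT)
      | PARTITIONED BY (ds STRING)
    """.stripMargin)
    TestHive.hql("""FROM src
      | INSERT INTO TABLE tmp_scan_test PARTITION (ds='2010-01-03')
      | SELECT 300, 300 LIMIT 1
    """.stripMargin)
    createQueryTest("partition_scan_uppercase_style",
      "SELECT * FROM tmp_scan_test", false)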
