Skip to content

[SPARK-2890][SQL] Allow reading of data when case insensitive resolution could cause possible ambiguity. #2209

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -308,13 +308,9 @@ case class StructField(name: String, dataType: DataType, nullable: Boolean) {
// Companion object for StructType.
// NOTE(review): this span is captured from a GitHub diff view with the +/- markers
// stripped; `validateFields` below is the code this PR (SPARK-2890) removes, so the
// text mixes pre- and post-change lines — do not treat it as the file's final state.
object StructType {
// Converts resolved attributes into an equivalent StructType schema, carrying
// over each attribute's name, data type, and nullability.
protected[sql] def fromAttributes(attributes: Seq[Attribute]): StructType =
StructType(attributes.map(a => StructField(a.name, a.dataType, a.nullable)))

// True iff every field name is distinct; duplicate names would make name-based
// field resolution ambiguous. (Removed by this PR to permit such schemas.)
private def validateFields(fields: Seq[StructField]): Boolean =
fields.map(field => field.name).distinct.size == fields.size
}

case class StructType(fields: Seq[StructField]) extends DataType {
require(StructType.validateFields(fields), "Found fields with the same name.")

/**
* Returns all field names in a [[Seq]].
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,47 +17,68 @@

package org.apache.spark.sql.hive.execution

import org.apache.spark.sql.hive.test.TestHive
import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkContext._
import java.io.{DataOutput, DataInput}
import java.util
import org.apache.hadoop.fs.{FileSystem, Path}
import java.util.Properties

import org.apache.spark.util.Utils

import scala.collection.JavaConversions._

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.serde2.{SerDeStats, AbstractSerDe}
import org.apache.hadoop.io.{NullWritable, Writable}
import org.apache.hadoop.io.Writable
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorFactory, ObjectInspector}
import java.util.Properties

import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import scala.collection.JavaConversions._
import java.io.{DataOutput, DataInput}
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject

import org.apache.spark.sql.Row
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.hive.test.TestHive._

// Test fixture: a struct of five Int fields. Registered as the return type of the
// "getStruct" Scala UDF in HiveUdfSuite so the suite can SELECT individual struct
// fields (getStruct(1).f1 … .f5) produced by a UDF.
case class Fields(f1: Int, f2: Int, f3: Int, f4: Int, f5: Int)

/**
 * A test suite for Hive custom UDFs.
 *
 * NOTE(review): this span is captured from a GitHub "changes" diff view with the
 * +/- markers stripped, so removed (pre-PR) and added (post-PR) lines are
 * interleaved below — the text is NOT compilable Scala as-is. From the visible
 * lines, the post-PR suite appears to contain: (1) a test registering a Scala
 * UDF ("getStruct") that returns a five-field struct and asserting each field
 * via a SELECT over src, and (2) a test creating an external Hive table with a
 * STRUCT<id,value> column served by PairSerDe, adding a partition from the
 * data/files/testUdf resource, then running the temporary UDF PairUdf over it
 * and dropping the function. Confirm against the merged file before relying on
 * this reconstruction. No comments are inserted into the body below because the
 * interleaving makes string-literal boundaries ambiguous.
 */
class HiveUdfSuite extends HiveComparisonTest {

TestHive.sql(
"""
test("spark sql udf test that returns a struct") {
registerFunction("getStruct", (_: Int) => Fields(1, 2, 3, 4, 5))
assert(sql(
"""
|SELECT getStruct(1).f1,
| getStruct(1).f2,
| getStruct(1).f3,
| getStruct(1).f4,
| getStruct(1).f5 FROM src LIMIT 1
""".stripMargin).first() === Row(1, 2, 3, 4, 5))
}

test("hive struct udf") {
sql(
"""
|CREATE EXTERNAL TABLE hiveUdfTestTable (
| pair STRUCT<id: INT, value: INT>
|)
|PARTITIONED BY (partition STRING)
|ROW FORMAT SERDE '%s'
|STORED AS SEQUENCEFILE
""".stripMargin.format(classOf[PairSerDe].getName)
)

TestHive.sql(
"ALTER TABLE hiveUdfTestTable ADD IF NOT EXISTS PARTITION(partition='testUdf') LOCATION '%s'"
.format(this.getClass.getClassLoader.getResource("data/files/testUdf").getFile)
)

TestHive.sql("CREATE TEMPORARY FUNCTION testUdf AS '%s'".format(classOf[PairUdf].getName))

TestHive.sql("SELECT testUdf(pair) FROM hiveUdfTestTable")

TestHive.sql("DROP TEMPORARY FUNCTION IF EXISTS testUdf")
""".
stripMargin.format(classOf[PairSerDe].getName))

val location = Utils.getSparkClassLoader.getResource("data/files/testUdf").getFile
sql(s"""
ALTER TABLE hiveUdfTestTable
ADD IF NOT EXISTS PARTITION(partition='testUdf')
LOCATION '$location'""")

sql(s"CREATE TEMPORARY FUNCTION testUdf AS '${classOf[PairUdf].getName}'")
sql("SELECT testUdf(pair) FROM hiveUdfTestTable")
sql("DROP TEMPORARY FUNCTION IF EXISTS testUdf")
}
}

class TestPair(x: Int, y: Int) extends Writable with Serializable {
Expand Down