@@ -30,25 +30,21 @@ import org.apache.spark.sql.hive.test.TestHive
 */
class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
  // TODO: bundle in jar files... get from classpath
-  lazy val hiveQueryDir = TestHive.getHiveFile("ql" + File.separator + "src" +
-    File.separator + "test" + File.separator + "queries" + File.separator + "clientpositive")
+  lazy val hiveQueryDir = TestHive.getHiveFile(
+    "ql/src/test/queries/clientpositive".split("/").mkString(File.separator))

-  var originalTimeZone: TimeZone = _
-  var originalLocale: Locale = _
-  var originalColumnBatchSize: Int = TestHive.columnBatchSize
+  private val originalTimeZone = TimeZone.getDefault
+  private val originalLocale = Locale.getDefault
+  private val originalColumnBatchSize = TestHive.columnBatchSize

  def testCases = hiveQueryDir.listFiles.map(f => f.getName.stripSuffix(".q") -> f)

  override def beforeAll() {
    TestHive.cacheTables = true
    // Timezone is fixed to America/Los_Angeles for those timezone sensitive tests (timestamp_*)
-    originalTimeZone = TimeZone.getDefault
    TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"))
-
    // Add Locale setting
-    originalLocale = Locale.getDefault
    Locale.setDefault(Locale.US)
-
    // Set a relatively small column batch size for testing purposes
    TestHive.setConf(SQLConf.COLUMN_BATCH_SIZE, "5")
  }
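
Note: the change turns the mutable `original*` vars (previously assigned inside `beforeAll`) into private vals that capture the JVM defaults and TestHive's column batch size when the suite is constructed. The hunk does not show the corresponding `afterAll`, but presumably it restores these saved values; a minimal sketch of that counterpart, assuming the same field names, could look like:

  override def afterAll() {
    // Restore the JVM-wide and TestHive settings captured before the suite ran
    TimeZone.setDefault(originalTimeZone)
    Locale.setDefault(originalLocale)
    TestHive.setConf(SQLConf.COLUMN_BATCH_SIZE, originalColumnBatchSize.toString)
  }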