@@ -27,7 +27,7 @@ import org.scalatest.{BeforeAndAfter, PrivateMethodTester}
 import org.apache.spark.SparkException
 import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row}
 import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
-import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
+import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeTestUtils}
 import org.apache.spark.sql.execution.DataSourceScanExec
 import org.apache.spark.sql.execution.command.ExplainCommand
 import org.apache.spark.sql.execution.datasources.LogicalRelation
@@ -1525,27 +1525,34 @@ class JDBCSuite extends QueryTest
   }
 
   test("parsing timestamp bounds") {
-    Seq(
-      ("1972-07-04 03:30:00", "1972-07-15 20:50:32.5", "1972-07-27 14:11:05"),
-      ("2019-01-20 12:00:00.502", "2019-01-20 12:00:00.751", "2019-01-20 12:00:01.000"),
-      ("2019-01-20T00:00:00.123456", "2019-01-20 00:05:00.123456", "2019-01-20T00:10:00.123456"),
-      ("1500-01-20T00:00:00.123456", "1500-01-20 00:05:00.123456", "1500-01-20T00:10:00.123456")
-    ).foreach { case (lower, middle, upper) =>
-      val df = spark.read.format("jdbc")
-        .option("url", urlWithUserAndPass)
-        .option("dbtable", "TEST.DATETIME")
-        .option("partitionColumn", "t")
-        .option("lowerBound", lower)
-        .option("upperBound", upper)
-        .option("numPartitions", 2)
-        .load()
-
-      df.logicalPlan match {
-        case LogicalRelation(JDBCRelation(_, parts, _), _, _, _) =>
-          val whereClauses = parts.map(_.asInstanceOf[JDBCPartition].whereClause).toSet
-          assert(whereClauses === Set(
-            s""""T" < '$middle' or "T" is null""",
-            s""""T" >= '$middle'"""))
+    DateTimeTestUtils.outstandingTimezonesIds.foreach { timeZone =>
+      withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> timeZone) {
+        Seq(
+          ("1972-07-04 03:30:00", "1972-07-15 20:50:32.5", "1972-07-27 14:11:05"),
+          ("2019-01-20 12:00:00.502", "2019-01-20 12:00:00.751", "2019-01-20 12:00:01.000"),
+          (
+            "2019-01-20T00:00:00.123456",
+            "2019-01-20 00:05:00.123456",
+            "2019-01-20T00:10:00.123456"),
+          ("1500-01-20T00:00:00.123456", "1500-01-20 00:05:00.123456", "1500-01-20T00:10:00.123456")
+        ).foreach { case (lower, middle, upper) =>
+          val df = spark.read.format("jdbc")
+            .option("url", urlWithUserAndPass)
+            .option("dbtable", "TEST.DATETIME")
+            .option("partitionColumn", "t")
+            .option("lowerBound", lower)
+            .option("upperBound", upper)
+            .option("numPartitions", 2)
+            .load()
+
+          df.logicalPlan match {
+            case LogicalRelation(JDBCRelation(_, parts, _), _, _, _) =>
+              val whereClauses = parts.map(_.asInstanceOf[JDBCPartition].whereClause).toSet
+              assert(whereClauses === Set(
+                s""""T" < '$middle' or "T" is null""",
+                s""""T" >= '$middle'"""))
+          }
+        }
       }
     }
   }
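
The change reruns the existing bound-parsing assertions once per session-local timezone instead of only under the JVM default, so timezone-dependent regressions in timestamp partition bounds become visible. A minimal sketch of that pattern outside the test harness follows; it assumes an active SparkSession named spark, withSessionTimeZone is a hypothetical stand-in for the suite's withSQLConf helper, and the timezone list is illustrative rather than the actual contents of DateTimeTestUtils.outstandingTimezonesIds.

// Illustrative sketch: run a timezone-sensitive check under several session
// timezones. Assumes an active SparkSession `spark`. withSessionTimeZone is a
// hypothetical helper standing in for SQLTestUtils.withSQLConf, and the
// timezone list below is an example, not the real outstandingTimezonesIds.
import org.apache.spark.sql.SparkSession

def withSessionTimeZone(spark: SparkSession, tz: String)(body: => Unit): Unit = {
  // "spark.sql.session.timeZone" is the key behind SQLConf.SESSION_LOCAL_TIMEZONE.
  val key = "spark.sql.session.timeZone"
  val saved = spark.conf.getOption(key)
  spark.conf.set(key, tz)
  try body
  finally saved match {
    case Some(v) => spark.conf.set(key, v)  // restore the previous value
    case None    => spark.conf.unset(key)   // or clear it if it was unset
  }
}

Seq("UTC", "America/Los_Angeles", "Europe/Amsterdam").foreach { tz =>
  withSessionTimeZone(spark, tz) {
    // timezone-sensitive assertions go here, e.g. checking how JDBC
    // partition bounds are parsed under the current session timezone
  }
}

The point of sweeping several representative zones rather than one is to catch code paths that silently fall back to the JVM default timezone and therefore pass by coincidence on a single-zone CI machine.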