 package com.redislabs.provider.redis.df
 
 import java.sql.{Date, Timestamp}
+import java.util.UUID
 
 import com.redislabs.provider.redis.toRedisContext
 import com.redislabs.provider.redis.util.Person.{data, _}
 import com.redislabs.provider.redis.util.TestUtils._
 import com.redislabs.provider.redis.util.{EntityId, Person}
 import org.apache.spark.SparkException
-import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.{DataFrame, Row}
 import org.apache.spark.sql.redis.RedisSourceRelation.tableDataKeyPattern
 import org.apache.spark.sql.redis._
-import org.apache.spark.sql.types._
+import org.apache.spark.sql.types.{StructField, _}
 import org.scalatest.Matchers
 
+import scala.util.Random
+
 /**
   * @author The Viet Nguyen
   */
@@ -295,6 +298,37 @@ trait HashDataframeSuite extends RedisDataframeSuite with Matchers {
     }
   }
 
+  /**
+    * A test case for https://github.com/RedisLabs/spark-redis/issues/132
+    */
+  test("RedisSourceRelation.buildScan columns ordering") {
+    val schema = {
+      StructType(Array(
+        StructField("id", StringType),
+        StructField("int", IntegerType),
+        StructField("float", FloatType),
+        StructField("double", DoubleType),
+        StructField("str", StringType)))
+    }
+
+    val rowsNum = 8
+    val rdd = spark.sparkContext.parallelize(1 to rowsNum, 2).map { _ =>
+      def genStr = UUID.randomUUID().toString
+      def genInt = Random.nextInt()
+      def genDouble = Random.nextDouble()
+      def genFloat = Random.nextFloat()
+      Row.fromSeq(Seq(genStr, genInt, genFloat, genDouble, genStr))
+    }
+
+    val df = spark.createDataFrame(rdd, schema)
+    val tableName = generateTableName("cols-ordering")
+    df.write.format(RedisFormat).option(SqlOptionTableName, tableName).save()
+    val loadedDf = spark.read.format(RedisFormat).option(SqlOptionTableName, tableName).load()
+    loadedDf.schema shouldBe schema
+    loadedDf.collect().length shouldBe rowsNum
+    loadedDf.show()
+  }
+
   def saveMap(tableName: String): Unit = {
     Person.dataMaps.foreach { person =>
       saveMap(tableName, person("name"), person)
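
A note on what this test guards against (editor's context, not part of the diff): the linked issue #132 concerns column values getting mixed up when a DataFrame is read back from Redis. A Redis hash does not guarantee any field order, so a scan that builds rows in whatever order the hash fields happen to arrive can place values under the wrong columns. The idea the test exercises is that RedisSourceRelation.buildScan must position each value at the index its column holds in the requested schema. A minimal sketch of that pattern, with a hypothetical rowInSchemaOrder helper (not part of the spark-redis API), assuming the hash fields arrive as a field-to-value map:

import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType

// Hypothetical sketch: order values by the schema's field names rather than
// by the (unspecified) order in which Redis returns hash fields.
def rowInSchemaOrder(schema: StructType, hashFields: Map[String, Any]): Row =
  Row.fromSeq(schema.fieldNames.toSeq.map(hashFields))

The test round-trips randomly generated rows through Redis and asserts both the schema and the row count; the columns deliberately mix string, integer, float, and double types, so materializing the rows with collect() would also tend to surface mis-ordered values as type conversion errors.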