
Commit b07e20c

Merge branch 'master' of https://github.com/apache/spark into CollectEnoughPrefixes
2 parents: 095aa3a + ba33096

110 files changed: +3,129 −843 lines

Large commits hide some content by default; only a subset of the 110 changed files is shown below.

R/pkg/R/DataFrame.R

Lines changed: 1 addition & 1 deletion
@@ -1328,7 +1328,7 @@ setMethod("write.df",
   jmode <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "saveMode", mode)
   options <- varargsToEnv(...)
   if (!is.null(path)) {
-    options[['path']] = path
+    options[['path']] <- path
   }
   callJMethod(df@sdf, "save", source, jmode, options)
 })
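
Most of the R changes in this commit replace '=' with '<-' for assignment, the operator preferred by common R style guides. The two are not interchangeable: at the top level both assign, but inside a function call '=' binds an argument instead. A minimal illustration (the variable names are ours, not from the commit):

x <- 5                   # assignment
y = 5                    # also assigns at top level, though style guides discourage it
mean(z <- c(1, 2, 3))    # assigns z in the calling scope, then passes it to mean()
# mean(z = c(1, 2, 3))   # error: z is taken as an argument name, so x is missing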

R/pkg/R/client.R

Lines changed: 2 additions & 2 deletions
@@ -36,9 +36,9 @@ connectBackend <- function(hostname, port, timeout = 6000) {
 
 determineSparkSubmitBin <- function() {
   if (.Platform$OS.type == "unix") {
-    sparkSubmitBinName = "spark-submit"
+    sparkSubmitBinName <- "spark-submit"
   } else {
-    sparkSubmitBinName = "spark-submit.cmd"
+    sparkSubmitBinName <- "spark-submit.cmd"
   }
   sparkSubmitBinName
 }

R/pkg/R/deserialize.R

Lines changed: 1 addition & 0 deletions
@@ -23,6 +23,7 @@
 # Int -> integer
 # String -> character
 # Boolean -> logical
+# Float -> double
 # Double -> double
 # Long -> double
 # Array[Byte] -> raw
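
The new "Float -> double" mapping reflects that R has no single-precision numeric type: every R floating-point value is stored as a 64-bit double, so a JVM Float can only arrive in R as a double. A quick check in plain R:

typeof(176.5)     # "double" -- the only floating-point storage mode R has
is.double(176.5)  # TRUE; there is no 32-bit float type on the R side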

R/pkg/R/group.R

Lines changed: 2 additions & 2 deletions
@@ -87,7 +87,7 @@ setMethod("count",
 setMethod("agg",
           signature(x = "GroupedData"),
           function(x, ...) {
-            cols = list(...)
+            cols <- list(...)
             stopifnot(length(cols) > 0)
             if (is.character(cols[[1]])) {
               cols <- varargsToEnv(...)
@@ -97,7 +97,7 @@ setMethod("agg",
             if (!is.null(ns)) {
               for (n in ns) {
                 if (n != "") {
-                  cols[[n]] = alias(cols[[n]], n)
+                  cols[[n]] <- alias(cols[[n]], n)
                 }
               }
             }

R/pkg/R/schema.R

Lines changed: 1 addition & 0 deletions
@@ -123,6 +123,7 @@ structField.character <- function(x, type, nullable = TRUE) {
   }
   options <- c("byte",
                "integer",
+               "float",
                "double",
                "numeric",
                "character",

R/pkg/R/utils.R

Lines changed: 2 additions & 2 deletions
@@ -41,8 +41,8 @@ convertJListToRList <- function(jList, flatten, logicalUpperBound = NULL,
     if (isInstanceOf(obj, "scala.Tuple2")) {
       # JavaPairRDD[Array[Byte], Array[Byte]].
 
-      keyBytes = callJMethod(obj, "_1")
-      valBytes = callJMethod(obj, "_2")
+      keyBytes <- callJMethod(obj, "_1")
+      valBytes <- callJMethod(obj, "_2")
       res <- list(unserialize(keyBytes),
                   unserialize(valBytes))
     } else {

R/pkg/inst/tests/test_binaryFile.R

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ context("functions on binary files")
 # JavaSparkContext handle
 sc <- sparkR.init()
 
-mockFile = c("Spark is pretty.", "Spark is awesome.")
+mockFile <- c("Spark is pretty.", "Spark is awesome.")
 
 test_that("saveAsObjectFile()/objectFile() following textFile() works", {
   fileName1 <- tempfile(pattern="spark-test", fileext=".tmp")

R/pkg/inst/tests/test_binary_function.R

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ test_that("zipPartitions() on RDDs", {
   expect_equal(actual,
                list(list(1, c(1,2), c(1,2,3)), list(2, c(3,4), c(4,5,6))))
 
-  mockFile = c("Spark is pretty.", "Spark is awesome.")
+  mockFile <- c("Spark is pretty.", "Spark is awesome.")
   fileName <- tempfile(pattern="spark-test", fileext=".tmp")
   writeLines(mockFile, fileName)
8282

R/pkg/inst/tests/test_rdd.R

Lines changed: 2 additions & 2 deletions
@@ -447,7 +447,7 @@ test_that("zipRDD() on RDDs", {
   expect_equal(actual,
                list(list(0, 1000), list(1, 1001), list(2, 1002), list(3, 1003), list(4, 1004)))
 
-  mockFile = c("Spark is pretty.", "Spark is awesome.")
+  mockFile <- c("Spark is pretty.", "Spark is awesome.")
   fileName <- tempfile(pattern="spark-test", fileext=".tmp")
   writeLines(mockFile, fileName)
 
@@ -483,7 +483,7 @@ test_that("cartesian() on RDDs", {
   actual <- collect(cartesian(rdd, emptyRdd))
   expect_equal(actual, list())
 
-  mockFile = c("Spark is pretty.", "Spark is awesome.")
+  mockFile <- c("Spark is pretty.", "Spark is awesome.")
   fileName <- tempfile(pattern="spark-test", fileext=".tmp")
   writeLines(mockFile, fileName)

R/pkg/inst/tests/test_sparkSQL.R

Lines changed: 26 additions & 0 deletions
@@ -108,6 +108,32 @@ test_that("create DataFrame from RDD", {
   expect_equal(count(df), 10)
   expect_equal(columns(df), c("a", "b"))
   expect_equal(dtypes(df), list(c("a", "int"), c("b", "string")))
+
+  df <- jsonFile(sqlContext, jsonPathNa)
+  hiveCtx <- tryCatch({
+    newJObject("org.apache.spark.sql.hive.test.TestHiveContext", ssc)
+  }, error = function(err) {
+    skip("Hive is not build with SparkSQL, skipped")
+  })
+  sql(hiveCtx, "CREATE TABLE people (name string, age double, height float)")
+  insertInto(df, "people")
+  expect_equal(sql(hiveCtx, "SELECT age from people WHERE name = 'Bob'"), c(16))
+  expect_equal(sql(hiveCtx, "SELECT height from people WHERE name ='Bob'"), c(176.5))
+
+  schema <- structType(structField("name", "string"), structField("age", "integer"),
+                       structField("height", "float"))
+  df2 <- createDataFrame(sqlContext, df.toRDD, schema)
+  expect_equal(columns(df2), c("name", "age", "height"))
+  expect_equal(dtypes(df2), list(c("name", "string"), c("age", "int"), c("height", "float")))
+  expect_equal(collect(where(df2, df2$name == "Bob")), c("Bob", 16, 176.5))
+
+  localDF <- data.frame(name=c("John", "Smith", "Sarah"), age=c(19, 23, 18), height=c(164.10, 181.4, 173.7))
+  df <- createDataFrame(sqlContext, localDF, schema)
+  expect_is(df, "DataFrame")
+  expect_equal(count(df), 3)
+  expect_equal(columns(df), c("name", "age", "height"))
+  expect_equal(dtypes(df), list(c("name", "string"), c("age", "int"), c("height", "float")))
+  expect_equal(collect(where(df, df$name == "John")), c("John", 19, 164.10))
 })
 
 test_that("convert NAs to null type in DataFrames", {
