Commit cde086c

[SPARK-13817][SQL][MINOR] Renames Dataset.newDataFrame to Dataset.ofRows
## What changes were proposed in this pull request?

This PR does the renaming as suggested by marmbrus in [this comment][1].

## How was this patch tested?

Existing tests.

[1]: 6d37e1e#commitcomment-16654694

Author: Cheng Lian <[email protected]>

Closes #11889 from liancheng/spark-13817-follow-up.
1 parent 7d11750 commit cde086c
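The change is mechanical at every call site. A minimal sketch of the before/after shape follows; the wrapper object and method here are hypothetical, and since the `Dataset` companion object is `private[sql]`, real callers live inside the `org.apache.spark.sql` package:

```scala
package org.apache.spark.sql

import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan

// Hypothetical helper illustrating the shape of every call-site change in this commit.
object RenameSketch {
  def wrap(sqlContext: SQLContext, plan: LogicalPlan): DataFrame = {
    // Before this commit: Dataset.newDataFrame(sqlContext, plan)
    Dataset.ofRows(sqlContext, plan)
  }
}
```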

File tree: 22 files changed, +44 -44 lines changed

sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala

Lines changed: 4 additions & 4 deletions
```diff
@@ -129,7 +129,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
       userSpecifiedSchema = userSpecifiedSchema,
       className = source,
       options = extraOptions.toMap)
-    Dataset.newDataFrame(sqlContext, LogicalRelation(dataSource.resolveRelation()))
+    Dataset.ofRows(sqlContext, LogicalRelation(dataSource.resolveRelation()))
   }
 
   /**
@@ -176,7 +176,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
       userSpecifiedSchema = userSpecifiedSchema,
       className = source,
       options = extraOptions.toMap)
-    Dataset.newDataFrame(sqlContext, StreamingRelation(dataSource.createSource()))
+    Dataset.ofRows(sqlContext, StreamingRelation(dataSource.createSource()))
   }
 
   /**
@@ -376,7 +376,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
         parsedOptions)
     }
 
-    Dataset.newDataFrame(
+    Dataset.ofRows(
       sqlContext,
       LogicalRDD(
         schema.toAttributes,
@@ -424,7 +424,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
    * @since 1.4.0
    */
   def table(tableName: String): DataFrame = {
-    Dataset.newDataFrame(sqlContext,
+    Dataset.ofRows(sqlContext,
       sqlContext.sessionState.catalog.lookupRelation(
         sqlContext.sessionState.sqlParser.parseTableIdentifier(tableName)))
   }
```

sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala

Lines changed: 2 additions & 2 deletions
```diff
@@ -53,7 +53,7 @@ private[sql] object Dataset {
     new Dataset(sqlContext, logicalPlan, implicitly[Encoder[T]])
   }
 
-  def newDataFrame(sqlContext: SQLContext, logicalPlan: LogicalPlan): DataFrame = {
+  def ofRows(sqlContext: SQLContext, logicalPlan: LogicalPlan): DataFrame = {
     val qe = sqlContext.executePlan(logicalPlan)
     qe.assertAnalyzed()
     new Dataset[Row](sqlContext, logicalPlan, RowEncoder(qe.analyzed.schema))
@@ -2322,7 +2322,7 @@ class Dataset[T] private[sql](
 
   /** A convenient function to wrap a logical plan and produce a DataFrame. */
   @inline private def withPlan(logicalPlan: => LogicalPlan): DataFrame = {
-    Dataset.newDataFrame(sqlContext, logicalPlan)
+    Dataset.ofRows(sqlContext, logicalPlan)
   }
 
   /** A convenient function to wrap a logical plan and produce a Dataset. */
```
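As the `ofRows` definition above shows, the factory analyzes the plan eagerly (`qe.assertAnalyzed()`) before wrapping it in an untyped `Dataset[Row]`. A minimal sketch of that behavior, assuming Spark 2.0-era internals; the table name and caller are hypothetical:

```scala
package org.apache.spark.sql

import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation

object EagerAnalysisSketch {
  def lookup(sqlContext: SQLContext): DataFrame =
    // If "events" does not resolve against the catalog, assertAnalyzed()
    // throws inside Dataset.ofRows, at construction time, rather than when
    // the first action runs on the returned DataFrame.
    Dataset.ofRows(sqlContext, UnresolvedRelation(TableIdentifier("events")))
}
```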

sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala

Lines changed: 1 addition & 1 deletion
```diff
@@ -59,7 +59,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
 
   private def groupedData = {
     new RelationalGroupedDataset(
-      Dataset.newDataFrame(sqlContext, logicalPlan),
+      Dataset.ofRows(sqlContext, logicalPlan),
       groupingAttributes,
       RelationalGroupedDataset.GroupByType)
   }
```

sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala

Lines changed: 4 additions & 4 deletions
```diff
@@ -52,17 +52,17 @@ class RelationalGroupedDataset protected[sql](
 
     groupType match {
       case RelationalGroupedDataset.GroupByType =>
-        Dataset.newDataFrame(
+        Dataset.ofRows(
           df.sqlContext, Aggregate(groupingExprs, aliasedAgg, df.logicalPlan))
       case RelationalGroupedDataset.RollupType =>
-        Dataset.newDataFrame(
+        Dataset.ofRows(
           df.sqlContext, Aggregate(Seq(Rollup(groupingExprs)), aliasedAgg, df.logicalPlan))
       case RelationalGroupedDataset.CubeType =>
-        Dataset.newDataFrame(
+        Dataset.ofRows(
           df.sqlContext, Aggregate(Seq(Cube(groupingExprs)), aliasedAgg, df.logicalPlan))
       case RelationalGroupedDataset.PivotType(pivotCol, values) =>
         val aliasedGrps = groupingExprs.map(alias)
-        Dataset.newDataFrame(
+        Dataset.ofRows(
           df.sqlContext, Pivot(aliasedGrps, pivotCol, values, aggExprs, df.logicalPlan))
     }
   }
```

sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala

Lines changed: 13 additions & 13 deletions
```diff
@@ -351,7 +351,7 @@ class SQLContext private[sql](
     val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
     val attributeSeq = schema.toAttributes
     val rowRDD = RDDConversions.productToRowRdd(rdd, schema.map(_.dataType))
-    Dataset.newDataFrame(self, LogicalRDD(attributeSeq, rowRDD)(self))
+    Dataset.ofRows(self, LogicalRDD(attributeSeq, rowRDD)(self))
   }
 
   /**
@@ -366,7 +366,7 @@ class SQLContext private[sql](
     SQLContext.setActive(self)
     val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
     val attributeSeq = schema.toAttributes
-    Dataset.newDataFrame(self, LocalRelation.fromProduct(attributeSeq, data))
+    Dataset.ofRows(self, LocalRelation.fromProduct(attributeSeq, data))
   }
 
   /**
@@ -376,7 +376,7 @@ class SQLContext private[sql](
    * @since 1.3.0
    */
   def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame = {
-    Dataset.newDataFrame(this, LogicalRelation(baseRelation))
+    Dataset.ofRows(this, LogicalRelation(baseRelation))
   }
 
   /**
@@ -431,7 +431,7 @@ class SQLContext private[sql](
       rowRDD.map{r: Row => InternalRow.fromSeq(r.toSeq)}
     }
     val logicalPlan = LogicalRDD(schema.toAttributes, catalystRows)(self)
-    Dataset.newDataFrame(this, logicalPlan)
+    Dataset.ofRows(this, logicalPlan)
   }
 
 
@@ -466,7 +466,7 @@ class SQLContext private[sql](
     // TODO: use MutableProjection when rowRDD is another DataFrame and the applied
     // schema differs from the existing schema on any field data type.
     val logicalPlan = LogicalRDD(schema.toAttributes, catalystRows)(self)
-    Dataset.newDataFrame(this, logicalPlan)
+    Dataset.ofRows(this, logicalPlan)
   }
 
   /**
@@ -494,7 +494,7 @@ class SQLContext private[sql](
    */
   @DeveloperApi
   def createDataFrame(rows: java.util.List[Row], schema: StructType): DataFrame = {
-    Dataset.newDataFrame(self, LocalRelation.fromExternalRows(schema.toAttributes, rows.asScala))
+    Dataset.ofRows(self, LocalRelation.fromExternalRows(schema.toAttributes, rows.asScala))
   }
 
   /**
@@ -513,7 +513,7 @@ class SQLContext private[sql](
       val localBeanInfo = Introspector.getBeanInfo(Utils.classForName(className))
       SQLContext.beansToRows(iter, localBeanInfo, attributeSeq)
     }
-    Dataset.newDataFrame(this, LogicalRDD(attributeSeq, rowRdd)(this))
+    Dataset.ofRows(this, LogicalRDD(attributeSeq, rowRdd)(this))
   }
 
   /**
@@ -541,7 +541,7 @@ class SQLContext private[sql](
     val className = beanClass.getName
     val beanInfo = Introspector.getBeanInfo(beanClass)
     val rows = SQLContext.beansToRows(data.asScala.iterator, beanInfo, attrSeq)
-    Dataset.newDataFrame(self, LocalRelation(attrSeq, rows.toSeq))
+    Dataset.ofRows(self, LocalRelation(attrSeq, rows.toSeq))
   }
 
   /**
@@ -759,7 +759,7 @@ class SQLContext private[sql](
    * @since 1.3.0
    */
   def sql(sqlText: String): DataFrame = {
-    Dataset.newDataFrame(this, parseSql(sqlText))
+    Dataset.ofRows(this, parseSql(sqlText))
   }
 
   /**
@@ -782,7 +782,7 @@ class SQLContext private[sql](
   }
 
   private def table(tableIdent: TableIdentifier): DataFrame = {
-    Dataset.newDataFrame(this, sessionState.catalog.lookupRelation(tableIdent))
+    Dataset.ofRows(this, sessionState.catalog.lookupRelation(tableIdent))
   }
 
   /**
@@ -794,7 +794,7 @@ class SQLContext private[sql](
    * @since 1.3.0
    */
   def tables(): DataFrame = {
-    Dataset.newDataFrame(this, ShowTablesCommand(None))
+    Dataset.ofRows(this, ShowTablesCommand(None))
  }
 
   /**
@@ -806,7 +806,7 @@ class SQLContext private[sql](
    * @since 1.3.0
    */
   def tables(databaseName: String): DataFrame = {
-    Dataset.newDataFrame(this, ShowTablesCommand(Some(databaseName)))
+    Dataset.ofRows(this, ShowTablesCommand(Some(databaseName)))
   }
 
   /**
@@ -871,7 +871,7 @@ class SQLContext private[sql](
       schema: StructType): DataFrame = {
 
     val rowRdd = rdd.map(r => python.EvaluatePython.fromJava(r, schema).asInstanceOf[InternalRow])
-    Dataset.newDataFrame(this, LogicalRDD(schema.toAttributes, rowRdd)(self))
+    Dataset.ofRows(this, LogicalRDD(schema.toAttributes, rowRdd)(self))
   }
 
   /**
```

sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala

Lines changed: 1 addition & 1 deletion
```diff
@@ -261,7 +261,7 @@ case class CacheTableCommand(
 
   override def run(sqlContext: SQLContext): Seq[Row] = {
     plan.foreach { logicalPlan =>
-      sqlContext.registerDataFrameAsTable(Dataset.newDataFrame(sqlContext, logicalPlan), tableName)
+      sqlContext.registerDataFrameAsTable(Dataset.ofRows(sqlContext, logicalPlan), tableName)
     }
     sqlContext.cacheTable(tableName)
 
```

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala

Lines changed: 1 addition & 1 deletion
```diff
@@ -154,7 +154,7 @@ case class DataSource(
     }
 
     def dataFrameBuilder(files: Array[String]): DataFrame = {
-      Dataset.newDataFrame(
+      Dataset.ofRows(
        sqlContext,
        LogicalRelation(
          DataSource(
```

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoDataSource.scala

Lines changed: 1 addition & 1 deletion
```diff
@@ -34,7 +34,7 @@ private[sql] case class InsertIntoDataSource(
 
   override def run(sqlContext: SQLContext): Seq[Row] = {
     val relation = logicalRelation.relation.asInstanceOf[InsertableRelation]
-    val data = Dataset.newDataFrame(sqlContext, query)
+    val data = Dataset.ofRows(sqlContext, query)
     // Apply the schema of the existing table to the new data.
     val df = sqlContext.internalCreateDataFrame(data.queryExecution.toRdd, logicalRelation.schema)
     relation.insert(df, overwrite)
```

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelation.scala

Lines changed: 1 addition & 1 deletion
```diff
@@ -114,7 +114,7 @@ private[sql] case class InsertIntoHadoopFsRelation(
     val partitionSet = AttributeSet(partitionColumns)
     val dataColumns = query.output.filterNot(partitionSet.contains)
 
-    val queryExecution = Dataset.newDataFrame(sqlContext, query).queryExecution
+    val queryExecution = Dataset.ofRows(sqlContext, query).queryExecution
     SQLExecution.withNewExecutionId(sqlContext, queryExecution) {
       val relation =
         WriteRelation(
```

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ddl.scala

Lines changed: 4 additions & 4 deletions
```diff
@@ -101,7 +101,7 @@ case class CreateTempTableUsing(
       options = options)
     sqlContext.sessionState.catalog.registerTable(
       tableIdent,
-      Dataset.newDataFrame(sqlContext, LogicalRelation(dataSource.resolveRelation())).logicalPlan)
+      Dataset.ofRows(sqlContext, LogicalRelation(dataSource.resolveRelation())).logicalPlan)
 
     Seq.empty[Row]
   }
@@ -116,7 +116,7 @@ case class CreateTempTableUsingAsSelect(
     query: LogicalPlan) extends RunnableCommand {
 
   override def run(sqlContext: SQLContext): Seq[Row] = {
-    val df = Dataset.newDataFrame(sqlContext, query)
+    val df = Dataset.ofRows(sqlContext, query)
     val dataSource = DataSource(
       sqlContext,
       className = provider,
@@ -126,7 +126,7 @@ case class CreateTempTableUsingAsSelect(
     val result = dataSource.write(mode, df)
     sqlContext.sessionState.catalog.registerTable(
       tableIdent,
-      Dataset.newDataFrame(sqlContext, LogicalRelation(result)).logicalPlan)
+      Dataset.ofRows(sqlContext, LogicalRelation(result)).logicalPlan)
 
     Seq.empty[Row]
   }
@@ -147,7 +147,7 @@ case class RefreshTable(tableIdent: TableIdentifier)
     if (isCached) {
       // Create a data frame to represent the table.
       // TODO: Use uncacheTable once it supports database name.
-      val df = Dataset.newDataFrame(sqlContext, logicalPlan)
+      val df = Dataset.ofRows(sqlContext, logicalPlan)
       // Uncache the logicalPlan.
       sqlContext.cacheManager.tryUncacheQuery(df, blocking = true)
       // Cache it again.
```
