Repository: spark
Updated Branches:
  refs/heads/master 3d09ceeef -> f86f71763


[MINOR][EXAMPLE] Use SparkSession instead of SQLContext in RDDRelation.scala

## What changes were proposed in this pull request?

`SQLContext` is now kept only for backward compatibility, so we had better use `SparkSession` in the Spark 2.0 examples.
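
For context, here is a minimal sketch of the Spark 2.0 entry point obtained through the builder API. It is illustrative only: the object name, the `local[*]` master, and the toy data are assumptions, and this patch itself constructs the session directly from an existing `SparkContext` rather than through the builder.

```scala
// Sketch only (not part of this patch): obtaining the unified Spark 2.0
// entry point via SparkSession.builder instead of creating a SQLContext.
import org.apache.spark.sql.SparkSession

object SparkSessionSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("RDDRelation")
      .master("local[*]")  // assumption: local mode for a quick run
      .getOrCreate()

    // Brings in toDF() and the $"column" syntax, as in the example below.
    import spark.implicits._

    val df = spark.sparkContext
      .parallelize((1 to 100).map(i => (i, s"val_$i")))
      .toDF("key", "value")
    df.registerTempTable("records")
    spark.sql("SELECT COUNT(*) FROM records").collect().foreach(println)

    spark.stop()
  }
}
```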

## How was this patch tested?

This is just an example change. After building, run `bin/run-example org.apache.spark.examples.sql.RDDRelation`.

Author: Dongjoon Hyun <[email protected]>

Closes #12808 from dongjoon-hyun/rddrelation.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/f86f7176
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/f86f7176
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/f86f7176

Branch: refs/heads/master
Commit: f86f71763c014aa23940510e1e4af5a9244271e6
Parents: 3d09cee
Author: Dongjoon Hyun <[email protected]>
Authored: Sat Apr 30 00:15:04 2016 -0700
Committer: Reynold Xin <[email protected]>
Committed: Sat Apr 30 00:15:04 2016 -0700

----------------------------------------------------------------------
 .../apache/spark/examples/sql/RDDRelation.scala | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/f86f7176/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index 94b67cb..8ce4427 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -19,7 +19,7 @@
 package org.apache.spark.examples.sql
 
 import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.sql.{SaveMode, SQLContext}
+import org.apache.spark.sql.{SaveMode, SparkSession}
 
 // One method for defining the schema of an RDD is to make a case class with the desired column
 // names and types.
@@ -29,10 +29,10 @@ object RDDRelation {
   def main(args: Array[String]) {
     val sparkConf = new SparkConf().setAppName("RDDRelation")
     val sc = new SparkContext(sparkConf)
-    val sqlContext = new SQLContext(sc)
+    val spark = new SparkSession(sc)
 
-    // Importing the SQL context gives access to all the SQL functions and implicit conversions.
-    import sqlContext.implicits._
+    // Importing the SparkSession gives access to all the SQL functions and implicit conversions.
+    import spark.implicits._
 
     val df = sc.parallelize((1 to 100).map(i => Record(i, s"val_$i"))).toDF()
     // Any RDD containing case classes can be registered as a table.  The schema of the table is
@@ -41,15 +41,15 @@ object RDDRelation {
 
     // Once tables have been registered, you can run SQL queries over them.
     println("Result of SELECT *:")
-    sqlContext.sql("SELECT * FROM records").collect().foreach(println)
+    spark.sql("SELECT * FROM records").collect().foreach(println)
 
     // Aggregation queries are also supported.
-    val count = sqlContext.sql("SELECT COUNT(*) FROM records").collect().head.getLong(0)
+    val count = spark.sql("SELECT COUNT(*) FROM records").collect().head.getLong(0)
     println(s"COUNT(*): $count")
 
-    // The results of SQL queries are themselves RDDs and support all normal RDD functions.  The
+    // The results of SQL queries are themselves RDDs and support all normal RDD functions. The
     // items in the RDD are of type Row, which allows you to access each column by ordinal.
-    val rddFromSql = sqlContext.sql("SELECT key, value FROM records WHERE key < 10")
+    val rddFromSql = spark.sql("SELECT key, value FROM records WHERE key < 10")
 
     println("Result of RDD.map:")
     rddFromSql.rdd.map(row => s"Key: ${row(0)}, Value: ${row(1)}").collect().foreach(println)
@@ -61,14 +61,14 @@ object RDDRelation {
     df.write.mode(SaveMode.Overwrite).parquet("pair.parquet")
 
     // Read in parquet file.  Parquet files are self-describing so the schema is preserved.
-    val parquetFile = sqlContext.read.parquet("pair.parquet")
+    val parquetFile = spark.read.parquet("pair.parquet")
 
     // Queries can be run using the DSL on parquet files just like the original RDD.
     parquetFile.where($"key" === 1).select($"value".as("a")).collect().foreach(println)
 
     // These files can also be registered as tables.
     parquetFile.registerTempTable("parquetFile")
-    sqlContext.sql("SELECT * FROM parquetFile").collect().foreach(println)
+    spark.sql("SELECT * FROM parquetFile").collect().foreach(println)
 
     sc.stop()
   }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
