This is an automated email from the ASF dual-hosted git repository.

zjffdu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/zeppelin.git


The following commit(s) were added to refs/heads/master by this push:
     new e5d13d8  [ZEPPELIN-4576]. Unable to create DataSet from case class in Spark Interpreter
e5d13d8 is described below

commit e5d13d8e683bf8ae7624f32cd876afbd8f5b3c23
Author: Jeff Zhang <zjf...@apache.org>
AuthorDate: Fri Jan 24 14:33:21 2020 +0800

    [ZEPPELIN-4576]. Unable to create DataSet from case class in Spark Interpreter
    
    ### What is this PR for?
    This bug is caused by the Spark classes not being imported properly. This PR fixes the issue and adds a related unit test.
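    
    For illustration, the kind of paragraph that previously failed is sketched below (a minimal example mirroring the new unit test; the `Person` class and its values are illustrative):
    
    ```scala
    // Before this fix, a case class defined in a Zeppelin paragraph could not
    // be used to build a DataFrame/Dataset, because the interpreter's startup
    // imports were not applied to the user's Scala session.
    case class Person(id: Int, name: String, age: Int, country: String)
    
    val df2 = spark.createDataFrame(Seq(
      Person(1, "andy", 20, "USA"),
      Person(2, "jeff", 23, "China"),
      Person(3, "james", 18, "USA")))
    df2.printSchema
    df2.show()
    ```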
    
    ### What type of PR is it?
    [Bug Fix]
    
    ### Todos
    * [ ] - Task
    
    ### What is the Jira issue?
    * https://issues.apache.org/jira/browse/ZEPPELIN-4576
    
    ### How should this be tested?
    * CI pass
    
    ### Screenshots (if appropriate)
    
    ### Questions:
    * Do the license files need to be updated? No
    * Are there breaking changes for older versions? No
    * Does this need documentation? No
    
    Author: Jeff Zhang <zjf...@apache.org>
    
    Closes #3624 from zjffdu/ZEPPELIN-4576 and squashes the following commits:
    
    2d9ee73b0 [Jeff Zhang] [ZEPPELIN-4576]. Unable to create DataSet from case class in Spark Interpreter
---
 .../apache/zeppelin/spark/SparkInterpreterTest.java  |  9 +++++++++
 .../zeppelin/spark/BaseSparkScalaInterpreter.scala   | 20 ++++++++++----------
 2 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkInterpreterTest.java b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkInterpreterTest.java
index b453b13..441af36 100644
--- a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkInterpreterTest.java
+++ b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkInterpreterTest.java
@@ -198,6 +198,15 @@ public class SparkInterpreterTest {
             ").toDF()", getInterpreterContext());
     assertEquals(InterpreterResult.Code.SUCCESS, result.code());
 
+    // create dataset from case class
+    context = getInterpreterContext();
+    result = interpreter.interpret("case class Person(id:Int, name:String, age:Int, country:String)\n" +
+            "val df2 = spark.createDataFrame(Seq(Person(1, \"andy\", 20, \"USA\"), " +
+            "Person(2, \"jeff\", 23, \"China\"), Person(3, \"james\", 18, \"USA\")))\n" +
+            "df2.printSchema\n" +
+            "df2.show() ", context);
+    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+
     // spark version
     result = interpreter.interpret("sc.version", getInterpreterContext());
     assertEquals(InterpreterResult.Code.SUCCESS, result.code());
diff --git a/spark/spark-scala-parent/src/main/scala/org/apache/zeppelin/spark/BaseSparkScalaInterpreter.scala b/spark/spark-scala-parent/src/main/scala/org/apache/zeppelin/spark/BaseSparkScalaInterpreter.scala
index 6b22dd8..9b3fc72 100644
--- a/spark/spark-scala-parent/src/main/scala/org/apache/zeppelin/spark/BaseSparkScalaInterpreter.scala
+++ b/spark/spark-scala-parent/src/main/scala/org/apache/zeppelin/spark/BaseSparkScalaInterpreter.scala
@@ -222,13 +222,13 @@ abstract class BaseSparkScalaInterpreter(val conf: SparkConf,
     bind("sc", "org.apache.spark.SparkContext", sc, List("""@transient"""))
     bind("sqlContext", sqlContext.getClass.getCanonicalName, sqlContext, 
List("""@transient"""))
 
-    interpret("import org.apache.spark.SparkContext._")
-    interpret("import sqlContext.implicits._")
-    interpret("import sqlContext.sql")
-    interpret("import org.apache.spark.sql.functions._")
+    scalaInterpret("import org.apache.spark.SparkContext._")
+    scalaInterpret("import sqlContext.implicits._")
+    scalaInterpret("import sqlContext.sql")
+    scalaInterpret("import org.apache.spark.sql.functions._")
     // print empty string otherwise the last statement's output of this method
     // (aka. import org.apache.spark.sql.functions._) will mix with the output of user code
-    interpret("print(\"\")")
+    scalaInterpret("print(\"\")")
   }
 
   private def spark2CreateContext(): Unit = {
@@ -278,13 +278,13 @@ abstract class BaseSparkScalaInterpreter(val conf: SparkConf,
     bind("sc", "org.apache.spark.SparkContext", sc, List("""@transient"""))
     bind("sqlContext", "org.apache.spark.sql.SQLContext", sqlContext, 
List("""@transient"""))
 
-    interpret("import org.apache.spark.SparkContext._")
-    interpret("import spark.implicits._")
-    interpret("import spark.sql")
-    interpret("import org.apache.spark.sql.functions._")
+    scalaInterpret("import org.apache.spark.SparkContext._")
+    scalaInterpret("import spark.implicits._")
+    scalaInterpret("import spark.sql")
+    scalaInterpret("import org.apache.spark.sql.functions._")
     // print empty string otherwise the last statement's output of this method
     // (aka. import org.apache.spark.sql.functions._) will mix with the output of user code
-    interpret("print(\"\")")
+    scalaInterpret("print(\"\")")
   }
 
   protected def createZeppelinContext(): Unit = {
