This is an automated email from the ASF dual-hosted git repository.

alexott pushed a commit to branch branch-0.9
in repository https://gitbox.apache.org/repos/asf/zeppelin.git


The following commit(s) were added to refs/heads/branch-0.9 by this push:
     new 9002bc1  [ZEPPELIN-4864] Cleanup Travis-CI config
9002bc1 is described below

commit 9002bc1b2fd9eaa58bba03e17792fb48030d2683
Author: Alex Ott <alex...@apache.org>
AuthorDate: Tue Jun 9 11:20:20 2020 +0200

    [ZEPPELIN-4864] Cleanup Travis-CI config
    
    ### What is this PR for?
    
    The existing Travis-CI config contained references to earlier Ubuntu
    distributions (trusty apt sources, etc.); this PR cleans them up.
    It also fixes a problem with the Livy interpreter, where tests were failing
    because R wasn't enabled for them.
    
    ### What type of PR is it?
    Bug Fix / Improvement
    
    ### What is the Jira issue?
    * https://issues.apache.org/jira/browse/ZEPPELIN-4864
    
    ### How should this be tested?
    * https://travis-ci.org/github/alexott/zeppelin/builds/696385230
    
    Author: Alex Ott <alex...@apache.org>
    
    Closes #3793 from alexott/ZEPPELIN-4864 and squashes the following commits:
    
    3fccb1465 [Alex Ott] [ZEPPELIN-4864] enable R for Livy tests
    91a30a303 [Alex Ott] [ZEPPELIN-4864] improve debuggability of Livy integration test
    82233446e [Alex Ott] [ZEPPELIN-4864] Cleanup Travis-CI config
    
    (cherry picked from commit 5cdb51bf7899c4d5de5cf84c0048d252741abe2f)
    Signed-off-by: Alex Ott <alex...@apache.org>
---
 .travis.yml                                        | 18 +---
 .../apache/zeppelin/livy/LivyInterpreterIT.java    | 99 ++++++++++------------
 testing/install_external_dependencies.sh           |  8 +-
 3 files changed, 51 insertions(+), 74 deletions(-)
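The key functional change in .travis.yml is adding R="true" to the env line of
the two Livy jobs. A minimal sketch of what such a job entry looks like
(trimmed to the relevant variables; the full env lines appear in the diff
below, and the R flag is presumably what makes the install script set up R):

    - dist: xenial
      jdk: "openjdk8"
      env: LIVY_VER="0.5.0-incubating" R="true" MODULES="-pl livy -am"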

diff --git a/.travis.yml b/.travis.yml
index cdf1f93..ecb2799 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -28,20 +28,6 @@ cache:
     -  zeppelin-web/node_modules
     -  zeppelin-web/bower_components
 
-addons:
-  apt:
-    sources:
-    - r-source
-    - sourceline: 'deb http://cran.rstudio.com/bin/linux/ubuntu trusty/'
-      key_url: 'keyserver.ubuntu.com/pks/lookup?op=get&search=0x51716619E084DAB9'
-    - r-packages-trusty
-    - mysql-5.7-trusty
-    packages:
-    - r-base
-    - r-base-dev
-    - mysql-server
-    - mysql-client
-
 services:
   - mysql
 
@@ -154,12 +140,12 @@ jobs:
     # Test python/pyspark with python 2, livy 0.5
     - dist: xenial
       jdk: "openjdk8"
-      env: PYTHON="2" SCALA_VER="2.10" SPARK_VER="1.6.3" HADOOP_VER="2.6" 
LIVY_VER="0.5.0-incubating" PROFILE="-Pspark-1.6 -Phadoop2 -Pscala-2.10" 
BUILD_FLAG="install -DskipTests -DskipRat" TEST_FLAG="verify -DskipRat" 
MODULES="-pl livy -am" TEST_PROJECTS=""
+      env: PYTHON="2" SCALA_VER="2.10" SPARK_VER="1.6.3" HADOOP_VER="2.6" 
LIVY_VER="0.5.0-incubating" R="true" PROFILE="-Pspark-1.6 -Phadoop2 
-Pscala-2.10" BUILD_FLAG="install -DskipTests -DskipRat" TEST_FLAG="verify 
-DskipRat" MODULES="-pl livy -am" TEST_PROJECTS=""
 
     # Test livy 0.5 with spark 2.2.0 under python3
     - dist: xenial
       jdk: "openjdk8"
-      env: PYTHON="3" SPARK_VER="2.2.0" HADOOP_VER="2.6" 
LIVY_VER="0.5.0-incubating" PROFILE="" BUILD_FLAG="install -DskipTests 
-DskipRat" TEST_FLAG="verify -DskipRat" MODULES="-pl livy -am" TEST_PROJECTS=""
+      env: PYTHON="3" SPARK_VER="2.2.0" HADOOP_VER="2.6" 
LIVY_VER="0.5.0-incubating" R="true" PROFILE="" BUILD_FLAG="install -DskipTests 
-DskipRat" TEST_FLAG="verify -DskipRat" MODULES="-pl livy -am" TEST_PROJECTS=""
 
 before_install:
  # check files included in commit range, clear bower_components if a bower.json file has changed.
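
The bulk of the LivyInterpreterIT.java changes below apply a single pattern:
switching to JUnit 4's three-argument assertEquals(message, expected, actual)
overload, so that a failing assertion prints the interpreter's full output
instead of only the mismatched codes. The idiom, in isolation:

    // On failure, JUnit prepends the message argument to the report, so the
    // build log shows the whole InterpreterResult rather than just
    // "expected:<SUCCESS> but was:<ERROR>".
    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());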
diff --git a/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java b/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java
index 4504089..9421f02 100644
--- a/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java
+++ b/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java
@@ -115,7 +115,7 @@ public class LivyInterpreterIT {
     try {
       // detect spark version
       InterpreterResult result = sparkInterpreter.interpret("sc.version", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
 
       boolean isSpark2 = isSpark2(sparkInterpreter, context);
@@ -141,20 +141,20 @@ public class LivyInterpreterIT {
     ;
 
     InterpreterResult result = sparkInterpreter.interpret("sc.parallelize(1 to 10).sum()", context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(1, result.message().size());
     assertTrue(result.message().get(0).getData().contains("Double = 55.0"));
 
     // single line comment
     String singleLineComment = "println(1)// my comment";
     result = sparkInterpreter.interpret(singleLineComment, context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(1, result.message().size());
 
     // multiple line comment
     String multipleLineComment = "println(1)/* multiple \n" + "line \n" + "comment */";
     result = sparkInterpreter.interpret(multipleLineComment, context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(1, result.message().size());
 
     // multi-line string
@@ -162,7 +162,7 @@ public class LivyInterpreterIT {
         "line\"\"\"\n" +
         "println(str)";
     result = sparkInterpreter.interpret(multiLineString, context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(1, result.message().size());
     assertTrue(result.message().get(0).getData().contains("multiple\nline"));
 
@@ -171,14 +171,14 @@ public class LivyInterpreterIT {
         "name:String)\n" +
         "val p=Person(1, \"name_a\")";
     result = sparkInterpreter.interpret(caseClassCode, context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(1, result.message().size());
     assertTrue(result.message().get(0).getData().contains("p: Person = Person(1,name_a)"));
 
     // object class
     String objectClassCode = "object Person {}";
     result = sparkInterpreter.interpret(objectClassCode, context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(1, result.message().size());
     if (!isSpark2) {
       assertTrue(result.message().get(0).getData().contains("defined module Person"));
@@ -189,7 +189,7 @@ public class LivyInterpreterIT {
     // html output
     String htmlCode = "println(\"%html <h1> hello </h1>\")";
     result = sparkInterpreter.interpret(htmlCode, context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(1, result.message().size());
     assertEquals(InterpreterResult.Type.HTML, result.message().get(0).getType());
 
@@ -249,7 +249,7 @@ public class LivyInterpreterIT {
       result = sparkInterpreter.interpret(
           "val 
df=sqlContext.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
               + "df.collect()", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
       assertEquals(1, result.message().size());
       assertTrue(result.message().get(0).getData()
           .contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
@@ -257,7 +257,7 @@ public class LivyInterpreterIT {
       result = sparkInterpreter.interpret(
           "val df=spark.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", 
\"col_2\")\n"
               + "df.collect()", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
       assertEquals(1, result.message().size());
       assertTrue(result.message().get(0).getData()
           .contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
@@ -265,25 +265,25 @@ public class LivyInterpreterIT {
     sparkInterpreter.interpret("df.registerTempTable(\"df\")", context);
     // test LivySparkSQLInterpreter which share the same SparkContext with LivySparkInterpreter
     result = sqlInterpreter.interpret("select * from df where col_1='hello'", context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
     assertEquals("col_1\tcol_2\nhello\t20", result.message().get(0).getData());
     // double quotes
     result = sqlInterpreter.interpret("select * from df where col_1=\"hello\"", context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
     assertEquals("col_1\tcol_2\nhello\t20", result.message().get(0).getData());
 
     // only enable this test in spark2 as spark1 doesn't work for this case
     if (isSpark2) {
       result = sqlInterpreter.interpret("select * from df where col_1=\"he\\\"llo\" ", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
     }
 
     // single quotes inside attribute value
     result = sqlInterpreter.interpret("select * from df where col_1=\"he'llo\"", context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
 
     // test sql with syntax error
@@ -328,7 +328,7 @@ public class LivyInterpreterIT {
           "val 
df=sqlContext.createDataFrame(Seq((\"12characters12characters\",20)))"
               + ".toDF(\"col_1\", \"col_2\")\n"
               + "df.collect()", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
       assertEquals(1, result.message().size());
       assertTrue(result.message().get(0).getData()
           .contains("Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
@@ -337,7 +337,7 @@ public class LivyInterpreterIT {
           "val 
df=spark.createDataFrame(Seq((\"12characters12characters\",20)))"
               + ".toDF(\"col_1\", \"col_2\")\n"
               + "df.collect()", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
       assertEquals(1, result.message().size());
       assertTrue(result.message().get(0).getData()
           .contains("Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
@@ -346,7 +346,7 @@ public class LivyInterpreterIT {
     // test LivySparkSQLInterpreter which share the same SparkContext with LivySparkInterpreter
     result = sqlInterpreter.interpret("select * from df where col_1='12characters12characters'",
         context);
-    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
     assertEquals("col_1\tcol_2\n12characters12cha...\t20", result.message().get(0).getData());
 
@@ -405,14 +405,14 @@ public class LivyInterpreterIT {
 
     try {
       InterpreterResult result = pysparkInterpreter.interpret("sc.version", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
 
       boolean isSpark2 = isSpark2(pysparkInterpreter, context);
 
       // test RDD api
       result = pysparkInterpreter.interpret("sc.range(1, 10).sum()", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
       assertTrue(result.message().get(0).getData().contains("45"));
 
@@ -422,7 +422,7 @@ public class LivyInterpreterIT {
             + "sqlContext = SQLContext(sc)", context);
         result = pysparkInterpreter.interpret("df=sqlContext.createDataFrame([(\"hello\",20)])\n"
             + "df.collect()", context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(1, result.message().size());
         //python2 has u and python3 don't have u
         assertTrue(result.message().get(0).getData().contains("[Row(_1=u'hello', _2=20)]")
@@ -430,7 +430,7 @@ public class LivyInterpreterIT {
       } else {
         result = pysparkInterpreter.interpret("df=spark.createDataFrame([(\"hello\",20)])\n"
             + "df.collect()", context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(1, result.message().size());
         //python2 has u and python3 don't have u
         assertTrue(result.message().get(0).getData().contains("[Row(_1=u'hello', _2=20)]")
@@ -441,7 +441,7 @@ public class LivyInterpreterIT {
       pysparkInterpreter.interpret("t = [{\"name\":\"userA\", 
\"role\":\"roleA\"},"
           + "{\"name\":\"userB\", \"role\":\"roleB\"}]", context);
       result = pysparkInterpreter.interpret("%table t", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
       assertEquals(1, result.message().size());
       assertEquals(InterpreterResult.Type.TABLE, 
result.message().get(0).getType());
       assertTrue(result.message().get(0).getData().contains("userA"));
@@ -515,7 +515,7 @@ public class LivyInterpreterIT {
 
     try {
       InterpreterResult result = sparkInterpreter.interpret("sc.version", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(2, result.message().size());
       // check yarn appId and ensure it is not null
       assertTrue(result.message().get(1).getData().contains("Spark Application Id: application_"));
@@ -523,13 +523,13 @@ public class LivyInterpreterIT {
       // html output
       String htmlCode = "println(\"%html <h1> hello </h1>\")";
       result = sparkInterpreter.interpret(htmlCode, context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(2, result.message().size());
       assertEquals(InterpreterResult.Type.HTML, result.message().get(0).getType());
 
       // detect spark version
       result = sparkInterpreter.interpret("sc.version", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(2, result.message().size());
 
       boolean isSpark2 = isSpark2(sparkInterpreter, context);
@@ -539,7 +539,7 @@ public class LivyInterpreterIT {
             "val 
df=sqlContext.createDataFrame(Seq((\"12characters12characters\",20)))"
                 + ".toDF(\"col_1\", \"col_2\")\n"
                 + "df.collect()", context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
         assertEquals(2, result.message().size());
         assertTrue(result.message().get(0).getData()
             .contains("Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
@@ -548,7 +548,7 @@ public class LivyInterpreterIT {
             "val 
df=spark.createDataFrame(Seq((\"12characters12characters\",20)))"
                 + ".toDF(\"col_1\", \"col_2\")\n"
                 + "df.collect()", context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
         assertEquals(2, result.message().size());
         assertTrue(result.message().get(0).getData()
             .contains("Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
@@ -557,7 +557,7 @@ public class LivyInterpreterIT {
       // test LivySparkSQLInterpreter which share the same SparkContext with LivySparkInterpreter
       result = sqlInterpreter.interpret("select * from df where col_1='12characters12characters'",
           context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
       assertEquals("col_1\tcol_2\n12characters12characters\t20", result.message().get(0).getData());
     } finally {
@@ -599,7 +599,7 @@ public class LivyInterpreterIT {
       // test DataFrame api
       if (isSpark2) {
         result = sparkRInterpreter.interpret("df <- 
as.DataFrame(faithful)\nhead(df)", context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
         assertEquals(1, result.message().size());
         assertTrue(result.message().get(0).getData().contains("eruptions 
waiting"));
 
@@ -628,7 +628,7 @@ public class LivyInterpreterIT {
       } else {
         result = sparkRInterpreter.interpret("df <- 
createDataFrame(sqlContext, faithful)" +
             "\nhead(df)", context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
         assertEquals(1, result.message().size());
         assertTrue(result.message().get(0).getData().contains("eruptions 
waiting"));
       }
@@ -673,11 +673,11 @@ public class LivyInterpreterIT {
 
       String p1 = IOUtils.toString(getClass().getResourceAsStream("/livy_tutorial_1.scala"));
       InterpreterResult result = sparkInterpreter.interpret(p1, context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
 
       String p2 = IOUtils.toString(getClass().getResourceAsStream("/livy_tutorial_2.sql"));
       result = sqlInterpreter.interpret(p2, context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
     } finally {
       sparkInterpreter.close();
@@ -734,7 +734,7 @@ public class LivyInterpreterIT {
           .build();
       // detect spark version
       InterpreterResult result = sparkInterpreter.interpret("sc.version", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
 
       boolean isSpark2 = isSpark2((BaseLivyInterpreter) sparkInterpreter.getInnerInterpreter(),
@@ -744,7 +744,7 @@ public class LivyInterpreterIT {
         result = sparkInterpreter.interpret(
             "val 
df=sqlContext.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
                 + "df.collect()", context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
         assertEquals(1, result.message().size());
         assertTrue(result.message().get(0).getData()
             .contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
@@ -753,7 +753,7 @@ public class LivyInterpreterIT {
         // access table from pyspark
         result = pysparkInterpreter.interpret("sqlContext.sql(\"select * from 
df\").show()",
             context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
         assertEquals(1, result.message().size());
         assertTrue(result.message().get(0).getData()
             .contains("+-----+-----+\n" +
@@ -765,14 +765,14 @@ public class LivyInterpreterIT {
         // access table from sparkr
         result = sparkRInterpreter.interpret("head(sql(sqlContext, \"select * 
from df\"))",
             context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
         assertEquals(1, result.message().size());
         assertTrue(result.message().get(0).getData().contains("col_1 col_2\n1 
hello    20"));
       } else {
         result = sparkInterpreter.interpret(
             "val df=spark.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", 
\"col_2\")\n"
                 + "df.collect()", context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
         assertEquals(1, result.message().size());
         assertTrue(result.message().get(0).getData()
             .contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
@@ -780,7 +780,7 @@ public class LivyInterpreterIT {
 
         // access table from pyspark
         result = pysparkInterpreter.interpret("spark.sql(\"select * from 
df\").show()", context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
         assertEquals(1, result.message().size());
         assertTrue(result.message().get(0).getData()
             .contains("+-----+-----+\n" +
@@ -791,7 +791,7 @@ public class LivyInterpreterIT {
 
         // access table from sparkr
         result = sparkRInterpreter.interpret("head(sql(\"select * from 
df\"))", context);
-        assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+        assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, 
result.code());
         assertEquals(1, result.message().size());
         assertTrue(result.message().get(0).getData().contains("col_1 col_2\n1 
hello    20"));
       }
@@ -804,14 +804,14 @@ public class LivyInterpreterIT {
               "plt.figure()\n" +
               "plt.plot(data)\n" +
               "%matplot plt", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
       assertEquals(InterpreterResult.Type.IMG, result.message().get(0).getType());
 
       // test plotting of R
       result = sparkRInterpreter.interpret(
           "hist(mtcars$mpg)", context);
-      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+      assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
       assertEquals(InterpreterResult.Type.IMG, result.message().get(0).getType());
 
@@ -828,22 +828,13 @@ public class LivyInterpreterIT {
   }
 
   private boolean isSpark2(BaseLivyInterpreter interpreter, InterpreterContext context) {
-    InterpreterResult result = null;
     if (interpreter instanceof LivySparkRInterpreter) {
-      result = interpreter.interpret("sparkR.session()", context);
+      InterpreterResult result = interpreter.interpret("sparkR.session()", context);
       // SparkRInterpreter would always return SUCCESS, it is due to bug of LIVY-313
-      if (result.message().get(0).getData().contains("Error")) {
-        return false;
-      } else {
-        return true;
-      }
+      return !result.message().get(0).getData().contains("Error");
     } else {
-      result = interpreter.interpret("spark", context);
-      if (result.code() == InterpreterResult.Code.SUCCESS) {
-        return true;
-      } else {
-        return false;
-      }
+      InterpreterResult result = interpreter.interpret("spark", context);
+      return result.code() == InterpreterResult.Code.SUCCESS;
     }
   }
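
The install-script changes below are standard shell hardening: quote variable
expansions so values containing spaces don't get word-split, and use $(...)
instead of backticks for command substitution. A small illustration with a
hypothetical path (not from this repo):

    dir="$HOME/my dir"            # quoted: stays one argument despite the space
    listing=$(ls "$dir")          # $() nests and reads better than `ls "$dir"`
    pip install "pkg==${VERSION}" # quoted so the pinned spec is a single word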
 
diff --git a/testing/install_external_dependencies.sh b/testing/install_external_dependencies.sh
index a1fdc32..e44815b 100755
--- a/testing/install_external_dependencies.sh
+++ b/testing/install_external_dependencies.sh
@@ -22,9 +22,9 @@ touch ~/.environ
 
 # Install Python dependencies for Python specific tests
 if [[ -n "$PYTHON" ]] ; then
-  wget https://repo.continuum.io/miniconda/Miniconda${PYTHON}-4.6.14-Linux-x86_64.sh -O miniconda.sh
+  wget "https://repo.continuum.io/miniconda/Miniconda${PYTHON}-4.6.14-Linux-x86_64.sh" -O miniconda.sh
 
-  bash miniconda.sh -b -p $HOME/miniconda
+  bash miniconda.sh -b -p "$HOME/miniconda"
   echo "export PATH='$HOME/miniconda/bin:$PATH'" >> ~/.environ
   source ~/.environ
 
@@ -44,9 +44,9 @@ if [[ -n "$PYTHON" ]] ; then
   fi
 
   if [[ -n "$TENSORFLOW" ]] ; then
-    check_results=`conda search -c conda-forge tensorflow`
+    check_results=$(conda search -c conda-forge tensorflow)
     echo "search tensorflow = $check_results"
-    pip install tensorflow==${TENSORFLOW}
+    pip install "tensorflow==${TENSORFLOW}"
   fi
 fi
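
For reference, the BUILD_FLAG/TEST_FLAG/MODULES values in the .travis.yml hunks
above translate into roughly this local run (a sketch; it assumes a reachable
Livy server provisioned the way the CI job sets one up):

    mvn install -DskipTests -DskipRat -pl livy -am
    mvn verify -DskipRat -pl livy -am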
 
