This is an automated email from the ASF dual-hosted git repository.

zjffdu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/zeppelin.git


The following commit(s) were added to refs/heads/master by this push:
     new a58902c  [ZEPPELIN-4850]. Include charting option for single value
a58902c is described below

commit a58902c44d62f2b0a5f5f8c8c5c5794425204dfe
Author: Jeff Zhang <zjf...@apache.org>
AuthorDate: Sat Jun 6 00:12:00 2020 +0800

    [ZEPPELIN-4850]. Include charting option for single value
    
    ### What is this PR for?
    
    This PR adds support for displaying a single-row result in the frontend.
    Currently we always use the table format for SQL output, but sometimes
    users want to display just a single value or a single row (e.g. a KPI
    value). This PR introduces `SingleRowInterpreterResult` to represent such
    SQL output and refactors the existing code to leverage this class for
    displaying a single row in the frontend. Besides that, this PR also
    supports the single-row result in Spark SQL.
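
    For example, a hypothetical paragraph (the paragraph syntax follows
    Zeppelin's local-property convention; the table name `events` is
    illustrative, not part of this patch) could render a KPI via the
    `template` local property, where `{0}` refers to the first column of
    the first returned row:

        %jdbc(template=Total count: <h1>{0}</h1>)
        select count(1) from events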
    
    ### What type of PR is it?
    [ Improvement | Feature | Documentation ]
    
    ### Todos
    * [ ] - Task
    
    ### What is the Jira issue?
    * https://issues.apache.org/jira/browse/ZEPPELIN-4850
    
    ### How should this be tested?
    * CI passes
    
    ### Screenshots (if appropriate)
    
    ### Questions:
    * Do the license files need an update? No
    * Are there breaking changes for older versions? No
    * Does this need documentation? No
    
    Author: Jeff Zhang <zjf...@apache.org>
    
    Closes #3791 from zjffdu/ZEPPELIN-4850 and squashes the following commits:
    
    10b992e70 [Jeff Zhang] [ZEPPELIN-4850]. Include charting option for single value
---
 .../zeppelin/flink/sql/SingleRowStreamSqlJob.java  | 34 ++++++-----
 .../org/apache/zeppelin/jdbc/JDBCInterpreter.java  | 40 ++++++------
 .../apache/zeppelin/spark/SparkSqlInterpreter.java |  5 +-
 .../org/apache/zeppelin/spark/SparkShimsTest.java  |  2 +-
 .../zeppelin/spark/SparkSqlInterpreterTest.java    | 26 ++++++++
 .../zeppelin/spark/SparkZeppelinContext.scala      |  2 +-
 .../org/apache/zeppelin/spark/SparkShims.java      |  2 +-
 .../org/apache/zeppelin/spark/Spark1Shims.java     | 21 ++++++-
 .../org/apache/zeppelin/spark/Spark2Shims.java     | 20 +++++-
 .../org/apache/zeppelin/spark/Spark3Shims.java     | 20 +++++-
 .../interpreter/SingleRowInterpreterResult.java    | 71 ++++++++++++++++++++++
 .../SingleRowInterpreterResultTest.java            | 48 +++++++++++++++
 12 files changed, 250 insertions(+), 41 deletions(-)

diff --git a/flink/interpreter/src/main/java/org/apache/zeppelin/flink/sql/SingleRowStreamSqlJob.java b/flink/interpreter/src/main/java/org/apache/zeppelin/flink/sql/SingleRowStreamSqlJob.java
index 05d8a4f..902ff42 100644
--- a/flink/interpreter/src/main/java/org/apache/zeppelin/flink/sql/SingleRowStreamSqlJob.java
+++ b/flink/interpreter/src/main/java/org/apache/zeppelin/flink/sql/SingleRowStreamSqlJob.java
@@ -24,10 +24,14 @@ import org.apache.flink.types.Row;
 import org.apache.zeppelin.flink.FlinkShims;
 import org.apache.zeppelin.flink.JobManager;
 import org.apache.zeppelin.interpreter.InterpreterContext;
+import org.apache.zeppelin.interpreter.SingleRowInterpreterResult;
 import org.apache.zeppelin.tabledata.TableDataUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.ArrayList;
+import java.util.List;
+
 
 public class SingleRowStreamSqlJob extends AbstractStreamSqlJob {
 
@@ -64,14 +68,10 @@ public class SingleRowStreamSqlJob extends AbstractStreamSqlJob {
 
   @Override
   protected String buildResult() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("%angular ");
-    String outputText = template;
-    for (int i = 0; i < latestRow.getArity(); ++i) {
-      outputText = outputText.replace("{" + i + "}", "{{value_" + i + "}}");
-    }
-    builder.append(outputText);
-    return builder.toString();
+    SingleRowInterpreterResult singleRowResult =
+            new SingleRowInterpreterResult(rowToList(latestRow), template, context);
+    singleRowResult.pushAngularObjects();
+    return singleRowResult.toAngular();
   }
 
   @Override
@@ -80,10 +80,11 @@ public class SingleRowStreamSqlJob extends AbstractStreamSqlJob {
       LOGGER.warn("Skip RefreshTask as no data available");
       return;
     }
+    SingleRowInterpreterResult singleRowResult =
+            new SingleRowInterpreterResult(rowToList(latestRow), template, context);
     if (isFirstRefresh) {
       context.out().clear(false);
-      String output = buildResult();
-      context.out.write(output);
+      context.out.write(singleRowResult.toAngular());
       context.out.flush();
      // should checkpoint the html output, otherwise frontend won't display the output
       // after recovering.
@@ -91,11 +92,14 @@ public class SingleRowStreamSqlJob extends AbstractStreamSqlJob {
       isFirstRefresh = false;
     }
 
-    for (int i = 0; i < latestRow.getArity(); ++i) {
-      context.getAngularObjectRegistry().add("value_" + i,
-              TableDataUtils.normalizeColumn(latestRow.getField(i)),
-              context.getNoteId(),
-              context.getParagraphId());
+    singleRowResult.pushAngularObjects();
+  }
+
+  private List rowToList(Row row) {
+    List list = new ArrayList<>();
+    for (int i = 0; i < row.getArity(); i++) {
+      list.add(row.getField(i));
     }
+    return list;
   }
 }
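
For the Flink streaming job above, a sketch of a hypothetical paragraph (the
`type=single`, `template`, and `refreshInterval` local properties follow
Zeppelin's Flink streaming SQL conventions; the table name `log` is
illustrative) that renders a continuously refreshed single value:

    %flink.ssql(type=single, template=Total: <h1>{0}</h1>, refreshInterval=3000)
    select count(*) from log
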
diff --git a/jdbc/src/main/java/org/apache/zeppelin/jdbc/JDBCInterpreter.java b/jdbc/src/main/java/org/apache/zeppelin/jdbc/JDBCInterpreter.java
index 24caf7d..8605d8b 100644
--- a/jdbc/src/main/java/org/apache/zeppelin/jdbc/JDBCInterpreter.java
+++ b/jdbc/src/main/java/org/apache/zeppelin/jdbc/JDBCInterpreter.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.alias.CredentialProvider;
 import org.apache.hadoop.security.alias.CredentialProviderFactory;
+import org.apache.zeppelin.interpreter.SingleRowInterpreterResult;
 import org.apache.zeppelin.interpreter.ZeppelinContext;
 import org.apache.zeppelin.interpreter.util.SqlSplitter;
 import org.apache.zeppelin.tabledata.TableDataUtils;
@@ -723,28 +724,17 @@ public class JDBCInterpreter extends KerberosInterpreter {
               String template = context.getLocalProperties().get("template");
               if (!StringUtils.isBlank(template)) {
                 resultSet.next();
-                ResultSetMetaData md = resultSet.getMetaData();
+                SingleRowInterpreterResult singleRowResult =
+                        new SingleRowInterpreterResult(getFirstRow(resultSet), template, context);
+
                 if (isFirstRefreshMap.get(context.getParagraphId())) {
-                  String angularTemplate = template;
-                  for (int j = 0; j < md.getColumnCount(); ++j) {
-                    angularTemplate = angularTemplate.replace("{" + j + "}", "{{value_" + j + "}}");
-                  }
-                  context.out.write("%angular " + angularTemplate);
+                  context.out.write(singleRowResult.toAngular());
                   context.out.write("\n%text ");
                   context.out.flush();
                   isFirstRefreshMap.put(context.getParagraphId(), false);
                 }
-                for (int j = 1; j <= md.getColumnCount(); ++j) {
-                  Object columnObject = resultSet.getObject(j);
-                  String columnValue = null;
-                  if (columnObject == null) {
-                    columnValue = "null";
-                  } else {
-                    columnValue = resultSet.getString(j);
-                  }
-                  context.getAngularObjectRegistry().add("value_" + (j - 1),
-                          columnValue, context.getNoteId(), context.getParagraphId());
-                }
+                singleRowResult.pushAngularObjects();
+
               } else {
                 String results = getResults(resultSet,
                         !containsIgnoreCase(sqlToExecute, EXPLAIN_PREDICATE));
@@ -792,6 +782,22 @@ public class JDBCInterpreter extends KerberosInterpreter {
     return new InterpreterResult(Code.SUCCESS);
   }
 
+  private List getFirstRow(ResultSet rs) throws SQLException {
+    List list = new ArrayList();
+    ResultSetMetaData md = rs.getMetaData();
+    for (int i = 1; i <= md.getColumnCount(); ++i) {
+      Object columnObject = rs.getObject(i);
+      String columnValue = null;
+      if (columnObject == null) {
+        columnValue = "null";
+      } else {
+        columnValue = rs.getString(i);
+      }
+      list.add(columnValue);
+    }
+    return list;
+  }
+
   /**
    * For %table response replace Tab and Newline characters from the content.
    */
diff --git a/spark/interpreter/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java b/spark/interpreter/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java
index 5b415a2..d440da1 100644
--- a/spark/interpreter/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java
+++ b/spark/interpreter/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java
@@ -19,7 +19,6 @@ package org.apache.zeppelin.spark;
 
 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.spark.SparkContext;
-import org.apache.spark.sql.SQLContext;
 import org.apache.zeppelin.interpreter.AbstractInterpreter;
 import org.apache.zeppelin.interpreter.ZeppelinContext;
 import org.apache.zeppelin.interpreter.InterpreterContext;
@@ -41,7 +40,7 @@ import java.util.Properties;
  * Spark SQL interpreter for Zeppelin.
  */
 public class SparkSqlInterpreter extends AbstractInterpreter {
-  private Logger logger = LoggerFactory.getLogger(SparkSqlInterpreter.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(SparkSqlInterpreter.class);
 
   private SparkInterpreter sparkInterpreter;
   private SqlSplitter sqlSplitter;
@@ -111,7 +110,7 @@ public class SparkSqlInterpreter extends AbstractInterpreter {
       if (Boolean.parseBoolean(getProperty("zeppelin.spark.sql.stacktrace", "false"))) {
         builder.append(ExceptionUtils.getStackTrace(e));
       } else {
-        logger.error("Invocation target exception", e);
+        LOGGER.error("Invocation target exception", e);
         String msg = e.getMessage()
                 + "\nset zeppelin.spark.sql.stacktrace = true to see full 
stacktrace";
         builder.append(msg);
diff --git a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java
index b7dd312..d428502 100644
--- a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java
+++ b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java
@@ -95,7 +95,7 @@ public class SparkShimsTest {
                                            InterpreterContext context) {}
 
             @Override
-            public String showDataFrame(Object obj, int maxResult) {
+            public String showDataFrame(Object obj, int maxResult, InterpreterContext context) {
               return null;
             }
 
diff --git a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java
index 740cc59..d32964f 100644
--- a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java
+++ b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java
@@ -174,6 +174,32 @@ public class SparkSqlInterpreterTest {
   }
 
   @Test
+  public void testSingleRowResult() throws InterpreterException {
+    sparkInterpreter.interpret("case class P(age:Int)", context);
+    sparkInterpreter.interpret(
+            "val gr = 
sc.parallelize(Seq(P(1),P(2),P(3),P(4),P(5),P(6),P(7),P(8),P(9),P(10)))",
+            context);
+    sparkInterpreter.interpret("gr.toDF.registerTempTable(\"gr\")", context);
+
+    context = InterpreterContext.builder()
+            .setNoteId("noteId")
+            .setParagraphId("paragraphId")
+            .setParagraphTitle("title")
+            .setAngularObjectRegistry(new AngularObjectRegistry(intpGroup.getId(), null))
+            .setResourcePool(new LocalResourcePool("id"))
+            .setInterpreterOut(new InterpreterOutput(null))
+            .setIntpEventClient(mock(RemoteInterpreterEventClient.class))
+            .build();
+    context.getLocalProperties().put("template", "Total count: <h1>{0}</h1>, Total age: <h1>{1}</h1>");
+
+    InterpreterResult ret = sqlInterpreter.interpret("select count(1), sum(age) from gr", context);
+    context.getLocalProperties().remove("template");
+    assertEquals(InterpreterResult.Code.SUCCESS, ret.code());
+    assertEquals(Type.HTML, ret.message().get(0).getType());
+    assertEquals("Total count: <h1>10</h1>, Total age: <h1>55</h1>", 
ret.message().get(0).getData());
+  }
+
+  @Test
   public void testMultipleStatements() throws InterpreterException {
     sparkInterpreter.interpret("case class P(age:Int)", context);
     sparkInterpreter.interpret(
diff --git a/spark/spark-scala-parent/src/main/scala/org/apache/zeppelin/spark/SparkZeppelinContext.scala b/spark/spark-scala-parent/src/main/scala/org/apache/zeppelin/spark/SparkZeppelinContext.scala
index 2cd3ec3..fa4188c 100644
--- a/spark/spark-scala-parent/src/main/scala/org/apache/zeppelin/spark/SparkZeppelinContext.scala
+++ b/spark/spark-scala-parent/src/main/scala/org/apache/zeppelin/spark/SparkZeppelinContext.scala
@@ -67,7 +67,7 @@ class SparkZeppelinContext(val sc: SparkContext,
 
   override def getInterpreterClassMap: util.Map[String, String] = interpreterClassMap.asJava
 
-  override def showData(obj: Any, maxResult: Int): String = sparkShims.showDataFrame(obj, maxResult)
+  override def showData(obj: Any, maxResult: Int): String = sparkShims.showDataFrame(obj, maxResult, interpreterContext)
 
   /**
    * create paragraph level of dynamic form of Select with no item selected.
diff --git a/spark/spark-shims/src/main/scala/org/apache/zeppelin/spark/SparkShims.java b/spark/spark-shims/src/main/scala/org/apache/zeppelin/spark/SparkShims.java
index 67b140a..306b031 100644
--- a/spark/spark-shims/src/main/scala/org/apache/zeppelin/spark/SparkShims.java
+++ b/spark/spark-shims/src/main/scala/org/apache/zeppelin/spark/SparkShims.java
@@ -99,7 +99,7 @@ public abstract class SparkShims {
                                           String sparkWebUrl,
                                           InterpreterContext context);
 
-  public abstract String showDataFrame(Object obj, int maxResult);
+  public abstract String showDataFrame(Object obj, int maxResult, InterpreterContext context);
 
   public abstract Object getAsDataFrame(String value);
 
diff --git a/spark/spark1-shims/src/main/scala/org/apache/zeppelin/spark/Spark1Shims.java b/spark/spark1-shims/src/main/scala/org/apache/zeppelin/spark/Spark1Shims.java
index ad0efe4..45c8618 100644
--- a/spark/spark1-shims/src/main/scala/org/apache/zeppelin/spark/Spark1Shims.java
+++ b/spark/spark1-shims/src/main/scala/org/apache/zeppelin/spark/Spark1Shims.java
@@ -29,6 +29,7 @@ import org.apache.spark.sql.types.StructType;
 import org.apache.spark.ui.jobs.JobProgressListener;
 import org.apache.zeppelin.interpreter.InterpreterContext;
 import org.apache.zeppelin.interpreter.ResultMessages;
+import org.apache.zeppelin.interpreter.SingleRowInterpreterResult;
 import org.apache.zeppelin.tabledata.TableDataUtils;
 
 import java.util.ArrayList;
@@ -60,7 +61,7 @@ public class Spark1Shims extends SparkShims {
   }
 
   @Override
-  public String showDataFrame(Object obj, int maxResult) {
+  public String showDataFrame(Object obj, int maxResult, InterpreterContext context) {
     if (obj instanceof DataFrame) {
       DataFrame df = (DataFrame) obj;
       String[] columns = df.columns();
@@ -68,8 +69,18 @@ public class Spark1Shims extends SparkShims {
       if (columns.length == 0) {
         return "";
       }
+
       // fetch maxResult+1 rows so that we can check whether it is larger than zeppelin.spark.maxResult
       List<Row> rows = df.takeAsList(maxResult + 1);
+      String template = context.getLocalProperties().get("template");
+      if (!StringUtils.isBlank(template)) {
+        if (rows.size() >= 1) {
+          return new SingleRowInterpreterResult(sparkRowToList(rows.get(0)), template, context).toHtml();
+        } else {
+          return "";
+        }
+      }
+
       StringBuilder msg = new StringBuilder();
       msg.append("\n%table ");
       msg.append(StringUtils.join(TableDataUtils.normalizeColumns(columns), "\t"));
@@ -100,6 +111,14 @@ public class Spark1Shims extends SparkShims {
     }
   }
 
+  private List sparkRowToList(Row row) {
+    List list = new ArrayList();
+    for (int i = 0; i< row.size(); i++) {
+      list.add(row.get(i));
+    }
+    return list;
+  }
+
   @Override
   public DataFrame getAsDataFrame(String value) {
     String[] lines = value.split("\\n");
diff --git a/spark/spark2-shims/src/main/scala/org/apache/zeppelin/spark/Spark2Shims.java b/spark/spark2-shims/src/main/scala/org/apache/zeppelin/spark/Spark2Shims.java
index 3adba1c..21fb149 100644
--- a/spark/spark2-shims/src/main/scala/org/apache/zeppelin/spark/Spark2Shims.java
+++ b/spark/spark2-shims/src/main/scala/org/apache/zeppelin/spark/Spark2Shims.java
@@ -29,6 +29,7 @@ import org.apache.spark.sql.catalyst.expressions.GenericRow;
 import org.apache.spark.sql.types.StructType;
 import org.apache.zeppelin.interpreter.InterpreterContext;
 import org.apache.zeppelin.interpreter.ResultMessages;
+import org.apache.zeppelin.interpreter.SingleRowInterpreterResult;
 import org.apache.zeppelin.tabledata.TableDataUtils;
 
 import java.util.ArrayList;
@@ -61,7 +62,7 @@ public class Spark2Shims extends SparkShims {
   }
 
   @Override
-  public String showDataFrame(Object obj, int maxResult) {
+  public String showDataFrame(Object obj, int maxResult, InterpreterContext context) {
     if (obj instanceof Dataset) {
       Dataset<Row> df = ((Dataset) obj).toDF();
       String[] columns = df.columns();
@@ -71,6 +72,15 @@ public class Spark2Shims extends SparkShims {
       }
       // fetch maxResult+1 rows so that we can check whether it is larger than zeppelin.spark.maxResult
       List<Row> rows = df.takeAsList(maxResult + 1);
+      String template = context.getLocalProperties().get("template");
+      if (!StringUtils.isBlank(template)) {
+        if (rows.size() >= 1) {
+          return new SingleRowInterpreterResult(sparkRowToList(rows.get(0)), template, context).toHtml();
+        } else {
+          return "";
+        }
+      }
+
       StringBuilder msg = new StringBuilder();
       msg.append("\n%table ");
       msg.append(StringUtils.join(TableDataUtils.normalizeColumns(columns), "\t"));
@@ -101,6 +111,14 @@ public class Spark2Shims extends SparkShims {
     }
   }
 
+  private List sparkRowToList(Row row) {
+    List list = new ArrayList();
+    for (int i = 0; i< row.size(); i++) {
+      list.add(row.get(i));
+    }
+    return list;
+  }
+
   @Override
   public Dataset<Row> getAsDataFrame(String value) {
     String[] lines = value.split("\\n");
diff --git a/spark/spark3-shims/src/main/scala/org/apache/zeppelin/spark/Spark3Shims.java b/spark/spark3-shims/src/main/scala/org/apache/zeppelin/spark/Spark3Shims.java
index b213041..544bd0a 100644
--- a/spark/spark3-shims/src/main/scala/org/apache/zeppelin/spark/Spark3Shims.java
+++ b/spark/spark3-shims/src/main/scala/org/apache/zeppelin/spark/Spark3Shims.java
@@ -29,6 +29,7 @@ import org.apache.spark.sql.catalyst.expressions.GenericRow;
 import org.apache.spark.sql.types.StructType;
 import org.apache.zeppelin.interpreter.InterpreterContext;
 import org.apache.zeppelin.interpreter.ResultMessages;
+import org.apache.zeppelin.interpreter.SingleRowInterpreterResult;
 import org.apache.zeppelin.tabledata.TableDataUtils;
 
 import java.util.ArrayList;
@@ -61,7 +62,7 @@ public class Spark3Shims extends SparkShims {
   }
 
   @Override
-  public String showDataFrame(Object obj, int maxResult) {
+  public String showDataFrame(Object obj, int maxResult, InterpreterContext context) {
     if (obj instanceof Dataset) {
       Dataset<Row> df = ((Dataset) obj).toDF();
       String[] columns = df.columns();
@@ -71,6 +72,15 @@ public class Spark3Shims extends SparkShims {
       }
       // fetch maxResult+1 rows so that we can check whether it is larger than zeppelin.spark.maxResult
       List<Row> rows = df.takeAsList(maxResult + 1);
+      String template = context.getLocalProperties().get("template");
+      if (!StringUtils.isBlank(template)) {
+        if (rows.size() >= 1) {
+          return new SingleRowInterpreterResult(sparkRowToList(rows.get(0)), template, context).toHtml();
+        } else {
+          return "";
+        }
+      }
+
       StringBuilder msg = new StringBuilder();
       msg.append("%table ");
      msg.append(StringUtils.join(TableDataUtils.normalizeColumns(columns), "\t"));
@@ -101,6 +111,14 @@ public class Spark3Shims extends SparkShims {
     }
   }
 
+  private List sparkRowToList(Row row) {
+    List list = new ArrayList();
+    for (int i = 0; i< row.size(); i++) {
+      list.add(row.get(i));
+    }
+    return list;
+  }
+
   @Override
   public Dataset<Row> getAsDataFrame(String value) {
     String[] lines = value.split("\\n");
diff --git a/zeppelin-interpreter/src/main/java/org/apache/zeppelin/interpreter/SingleRowInterpreterResult.java b/zeppelin-interpreter/src/main/java/org/apache/zeppelin/interpreter/SingleRowInterpreterResult.java
new file mode 100644
index 0000000..db975b2
--- /dev/null
+++ b/zeppelin-interpreter/src/main/java/org/apache/zeppelin/interpreter/SingleRowInterpreterResult.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.zeppelin.interpreter;
+
+import org.apache.zeppelin.tabledata.TableDataUtils;
+
+import java.util.List;
+
+/**
+ * Represents a single-row interpreter result, usually for SQL output,
+ * for the case where you would like to build a dashboard from just a
+ * single row, e.g. a KPI value.
+ */
+public class SingleRowInterpreterResult {
+
+  private String template;
+  private List values;
+  private InterpreterContext context;
+
+  public SingleRowInterpreterResult(List values, String template, InterpreterContext context) {
+    this.values = values;
+    this.template = template;
+    this.context = context;
+  }
+
+  public String toHtml() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("%html ");
+    String outputText = template;
+    for (int i = 0; i < values.size(); ++i) {
+      outputText = outputText.replace("{" + i + "}", values.get(i).toString());
+    }
+    builder.append(outputText);
+    return builder.toString();
+  }
+
+  public String toAngular() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("%angular ");
+    String outputText = template;
+    for (int i = 0; i < values.size(); ++i) {
+      outputText = outputText.replace("{" + i + "}", "{{value_" + i + "}}");
+    }
+    builder.append(outputText);
+    return builder.toString();
+  }
+
+  public void pushAngularObjects() {
+    for (int i = 0; i < values.size(); ++i) {
+      context.getAngularObjectRegistry().add("value_" + i,
+              TableDataUtils.normalizeColumn(values.get(i)),
+              context.getNoteId(),
+              context.getParagraphId());
+    }
+  }
+}
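
To illustrate the class added above, here is a minimal standalone sketch
(the wrapper class and `main` method are illustrative; the constructor,
`toHtml()`, and `toAngular()` calls follow the API introduced in this file):

    import java.util.Arrays;
    import java.util.List;

    import org.apache.zeppelin.interpreter.InterpreterContext;
    import org.apache.zeppelin.interpreter.SingleRowInterpreterResult;

    public class SingleRowSketch {
      public static void main(String[] args) {
        // One row of values; {0} and {1} in the template map to these.
        List values = Arrays.asList("2020-01-01", 10);
        InterpreterContext context = InterpreterContext.builder().build();
        SingleRowInterpreterResult result =
            new SingleRowInterpreterResult(values, "Total count:{1} for {0}", context);

        // One-shot rendering: values are substituted directly into the template.
        System.out.println(result.toHtml());    // %html Total count:10 for 2020-01-01

        // Streaming rendering: placeholders become Angular bindings, which
        // pushAngularObjects() updates on each refresh.
        System.out.println(result.toAngular()); // %angular Total count:{{value_1}} for {{value_0}}
      }
    }
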
diff --git a/zeppelin-interpreter/src/test/java/org/apache/zeppelin/interpreter/SingleRowInterpreterResultTest.java b/zeppelin-interpreter/src/test/java/org/apache/zeppelin/interpreter/SingleRowInterpreterResultTest.java
new file mode 100644
index 0000000..0b39ed2
--- /dev/null
+++ b/zeppelin-interpreter/src/test/java/org/apache/zeppelin/interpreter/SingleRowInterpreterResultTest.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.zeppelin.interpreter;
+
+import com.google.common.collect.Lists;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+public class SingleRowInterpreterResultTest {
+
+  @Test
+  public void testHtml() {
+    List list = Lists.newArrayList("2020-01-01", 10);
+    String template = "Total count:{1} for {0}";
+    InterpreterContext context = InterpreterContext.builder().build();
+    SingleRowInterpreterResult singleRowInterpreterResult = new SingleRowInterpreterResult(list, template, context);
+    String htmlOutput = singleRowInterpreterResult.toHtml();
+    assertEquals("%html Total count:10 for 2020-01-01", htmlOutput);
+  }
+
+  @Test
+  public void testAngular() {
+    List list = Lists.newArrayList("2020-01-01", 10);
+    String template = "Total count:{1} for {0}";
+    InterpreterContext context = InterpreterContext.builder().build();
+    SingleRowInterpreterResult singleRowInterpreterResult = new SingleRowInterpreterResult(list, template, context);
+    String angularOutput = singleRowInterpreterResult.toAngular();
+    assertEquals("%angular Total count:{{value_1}} for {{value_0}}", 
angularOutput);
+  }
+}
