Modified: kylin/site/development/index.html
URL: 
http://svn.apache.org/viewvc/kylin/site/development/index.html?rev=1851595&r1=1851594&r2=1851595&view=diff
==============================================================================
--- kylin/site/development/index.html (original)
+++ kylin/site/development/index.html Fri Jan 18 02:41:45 2019
@@ -6146,6 +6146,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/index.html" class="list-group-item-lay 
pjaxlink">Development Quick Guide</a></li>      
       
@@ -6172,6 +6176,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/dev_env.html" class="list-group-item-lay 
pjaxlink">Setup Development Env</a></li>      
       
@@ -6250,6 +6258,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_test.html" class="list-group-item-lay 
pjaxlink">How to Test</a></li>      
       
@@ -6286,6 +6298,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_contribute.html" 
class="list-group-item-lay pjaxlink">How to Contribute</a></li>      
       
@@ -6318,6 +6334,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_become_apache_committer.html" 
class="list-group-item-lay pjaxlink">How to become an Apache Committer</a></li> 
     
       
@@ -6358,6 +6378,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_docs.html" class="list-group-item-lay 
pjaxlink">How to Write Document</a></li>      
       
@@ -6406,6 +6430,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_package.html" class="list-group-item-lay 
pjaxlink">How to Build Binary Package</a></li>      
       
@@ -6450,6 +6478,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_hbase_branches.html" 
class="list-group-item-lay pjaxlink">How to Maintain Hadoop/HBase 
Branches</a></li>      
       
@@ -6502,6 +6534,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_release.html" class="list-group-item-lay 
pjaxlink">How to Make Release</a></li>      
       
@@ -6570,6 +6606,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/plugin_arch.html" class="list-group-item-lay 
pjaxlink">Plugin Architecture</a></li>      
       
@@ -6634,6 +6674,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/new_metadata.html" class="list-group-item-lay 
pjaxlink">New Metadata Model</a></li>      
       
@@ -6706,6 +6750,10 @@ Static Code Analysis: <a href="https://b
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/web_tech.html" class="list-group-item-lay 
pjaxlink">Kylin Web Summary</a></li>      
       
@@ -6729,6 +6777,32 @@ Static Code Analysis: <a href="https://b
       
 
 
+   
+  
+
+  
+    
+  
+
+
+
+  
+    
+  
+    
+  
+    
+  
+    
+  
+    
+  
+    
+      <li class="navlist">
+        <a href="/development/datasource_sdk.html" class="list-group-item-lay 
pjaxlink">Develop JDBC Data Source</a></li>      
+      
+
+
 
 
 

Modified: kylin/site/development/new_metadata.html
URL: 
http://svn.apache.org/viewvc/kylin/site/development/new_metadata.html?rev=1851595&r1=1851594&r2=1851595&view=diff
==============================================================================
--- kylin/site/development/new_metadata.html (original)
+++ kylin/site/development/new_metadata.html Fri Jan 18 02:41:45 2019
@@ -6142,6 +6142,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/index.html" class="list-group-item-lay 
pjaxlink">Development Quick Guide</a></li>      
       
@@ -6168,6 +6172,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/dev_env.html" class="list-group-item-lay 
pjaxlink">Setup Development Env</a></li>      
       
@@ -6246,6 +6254,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_test.html" class="list-group-item-lay 
pjaxlink">How to Test</a></li>      
       
@@ -6282,6 +6294,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_contribute.html" 
class="list-group-item-lay pjaxlink">How to Contribute</a></li>      
       
@@ -6314,6 +6330,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_become_apache_committer.html" 
class="list-group-item-lay pjaxlink">How to become an Apache Committer</a></li> 
     
       
@@ -6354,6 +6374,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_docs.html" class="list-group-item-lay 
pjaxlink">How to Write Document</a></li>      
       
@@ -6402,6 +6426,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_package.html" class="list-group-item-lay 
pjaxlink">How to Build Binary Package</a></li>      
       
@@ -6446,6 +6474,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_hbase_branches.html" 
class="list-group-item-lay pjaxlink">How to Maintain Hadoop/HBase 
Branches</a></li>      
       
@@ -6498,6 +6530,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_release.html" class="list-group-item-lay 
pjaxlink">How to Make Release</a></li>      
       
@@ -6566,6 +6602,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/plugin_arch.html" class="list-group-item-lay 
pjaxlink">Plugin Architecture</a></li>      
       
@@ -6630,6 +6670,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/new_metadata.html" class="list-group-item-lay 
pjaxlink">New Metadata Model</a></li>      
       
@@ -6702,6 +6746,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/web_tech.html" class="list-group-item-lay 
pjaxlink">Kylin Web Summary</a></li>      
       
@@ -6725,6 +6773,32 @@ var _hmt = _hmt || [];
       
 
 
+   
+  
+
+  
+    
+  
+
+
+
+  
+    
+  
+    
+  
+    
+  
+    
+  
+    
+  
+    
+      <li class="navlist">
+        <a href="/development/datasource_sdk.html" class="list-group-item-lay 
pjaxlink">Develop JDBC Data Source</a></li>      
+      
+
+
 
 
 

Modified: kylin/site/development/plugin_arch.html
URL: 
http://svn.apache.org/viewvc/kylin/site/development/plugin_arch.html?rev=1851595&r1=1851594&r2=1851595&view=diff
==============================================================================
--- kylin/site/development/plugin_arch.html (original)
+++ kylin/site/development/plugin_arch.html Fri Jan 18 02:41:45 2019
@@ -6164,6 +6164,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/index.html" class="list-group-item-lay 
pjaxlink">Development Quick Guide</a></li>      
       
@@ -6190,6 +6194,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/dev_env.html" class="list-group-item-lay 
pjaxlink">Setup Development Env</a></li>      
       
@@ -6268,6 +6276,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_test.html" class="list-group-item-lay 
pjaxlink">How to Test</a></li>      
       
@@ -6304,6 +6316,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_contribute.html" 
class="list-group-item-lay pjaxlink">How to Contribute</a></li>      
       
@@ -6336,6 +6352,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_become_apache_committer.html" 
class="list-group-item-lay pjaxlink">How to become an Apache Committer</a></li> 
     
       
@@ -6376,6 +6396,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_docs.html" class="list-group-item-lay 
pjaxlink">How to Write Document</a></li>      
       
@@ -6424,6 +6448,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_package.html" class="list-group-item-lay 
pjaxlink">How to Build Binary Package</a></li>      
       
@@ -6468,6 +6496,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_hbase_branches.html" 
class="list-group-item-lay pjaxlink">How to Maintain Hadoop/HBase 
Branches</a></li>      
       
@@ -6520,6 +6552,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_release.html" class="list-group-item-lay 
pjaxlink">How to Make Release</a></li>      
       
@@ -6588,6 +6624,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/plugin_arch.html" class="list-group-item-lay 
pjaxlink">Plugin Architecture</a></li>      
       
@@ -6652,6 +6692,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/new_metadata.html" class="list-group-item-lay 
pjaxlink">New Metadata Model</a></li>      
       
@@ -6724,6 +6768,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/web_tech.html" class="list-group-item-lay 
pjaxlink">Kylin Web Summary</a></li>      
       
@@ -6747,6 +6795,32 @@ var _hmt = _hmt || [];
       
 
 
+   
+  
+
+  
+    
+  
+
+
+
+  
+    
+  
+    
+  
+    
+  
+    
+  
+    
+  
+    
+      <li class="navlist">
+        <a href="/development/datasource_sdk.html" class="list-group-item-lay 
pjaxlink">Develop JDBC Data Source</a></li>      
+      
+
+
 
 
 

Modified: kylin/site/development/web_tech.html
URL: 
http://svn.apache.org/viewvc/kylin/site/development/web_tech.html?rev=1851595&r1=1851594&r2=1851595&view=diff
==============================================================================
--- kylin/site/development/web_tech.html (original)
+++ kylin/site/development/web_tech.html Fri Jan 18 02:41:45 2019
@@ -6172,6 +6172,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/index.html" class="list-group-item-lay 
pjaxlink">Development Quick Guide</a></li>      
       
@@ -6198,6 +6202,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/dev_env.html" class="list-group-item-lay 
pjaxlink">Setup Development Env</a></li>      
       
@@ -6276,6 +6284,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_test.html" class="list-group-item-lay 
pjaxlink">How to Test</a></li>      
       
@@ -6312,6 +6324,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_contribute.html" 
class="list-group-item-lay pjaxlink">How to Contribute</a></li>      
       
@@ -6344,6 +6360,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_become_apache_committer.html" 
class="list-group-item-lay pjaxlink">How to become an Apache Committer</a></li> 
     
       
@@ -6384,6 +6404,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_docs.html" class="list-group-item-lay 
pjaxlink">How to Write Document</a></li>      
       
@@ -6432,6 +6456,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_package.html" class="list-group-item-lay 
pjaxlink">How to Build Binary Package</a></li>      
       
@@ -6476,6 +6504,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_hbase_branches.html" 
class="list-group-item-lay pjaxlink">How to Maintain Hadoop/HBase 
Branches</a></li>      
       
@@ -6528,6 +6560,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/howto_release.html" class="list-group-item-lay 
pjaxlink">How to Make Release</a></li>      
       
@@ -6596,6 +6632,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/plugin_arch.html" class="list-group-item-lay 
pjaxlink">Plugin Architecture</a></li>      
       
@@ -6660,6 +6700,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/new_metadata.html" class="list-group-item-lay 
pjaxlink">New Metadata Model</a></li>      
       
@@ -6732,6 +6776,10 @@ var _hmt = _hmt || [];
     
   
     
+  
+    
+  
+    
       <li class="navlist">
         <a href="/development/web_tech.html" class="list-group-item-lay 
pjaxlink">Kylin Web Summary</a></li>      
       
@@ -6755,6 +6803,32 @@ var _hmt = _hmt || [];
       
 
 
+   
+  
+
+  
+    
+  
+
+
+
+  
+    
+  
+    
+  
+    
+  
+    
+  
+    
+  
+    
+      <li class="navlist">
+        <a href="/development/datasource_sdk.html" class="list-group-item-lay 
pjaxlink">Develop JDBC Data Source</a></li>      
+      
+
+
 
 
 

Modified: kylin/site/docs/release_notes.html
URL: 
http://svn.apache.org/viewvc/kylin/site/docs/release_notes.html?rev=1851595&r1=1851594&r2=1851595&view=diff
==============================================================================
--- kylin/site/docs/release_notes.html (original)
+++ kylin/site/docs/release_notes.html Fri Jan 18 02:41:45 2019
@@ -6035,8 +6035,8 @@ there are source code package, binary pa
 <p>or send to Apache Kylin mailing list:</p>
 
 <ul>
-  <li>User relative: <a 
href="mailto:u...@kylin.apache.org";>u...@kylin.apache.org</a></li>
-  <li>Development relative: <a 
href="mailto:d...@kylin.apache.org";>d...@kylin.apache.org</a></li>
+  <li>User relative: <a 
href="&#109;&#097;&#105;&#108;&#116;&#111;:&#117;&#115;&#101;&#114;&#064;&#107;&#121;&#108;&#105;&#110;&#046;&#097;&#112;&#097;&#099;&#104;&#101;&#046;&#111;&#114;&#103;">&#117;&#115;&#101;&#114;&#064;&#107;&#121;&#108;&#105;&#110;&#046;&#097;&#112;&#097;&#099;&#104;&#101;&#046;&#111;&#114;&#103;</a></li>
+  <li>Development relative: <a 
href="&#109;&#097;&#105;&#108;&#116;&#111;:&#100;&#101;&#118;&#064;&#107;&#121;&#108;&#105;&#110;&#046;&#097;&#112;&#097;&#099;&#104;&#101;&#046;&#111;&#114;&#103;">&#100;&#101;&#118;&#064;&#107;&#121;&#108;&#105;&#110;&#046;&#097;&#112;&#097;&#099;&#104;&#101;&#046;&#111;&#114;&#103;</a></li>
 </ul>
 
 <h2 id="v260---2019-01-12">v2.6.0 - 2019-01-12</h2>
@@ -8183,7 +8183,7 @@ This version includes many bug fixs/enha
   <li>[KYLIN-1396] - minor bug in BigDecimalSerializer - avoidVerbose should 
be incremented each time when input scale is larger than given scale</li>
   <li>[KYLIN-1419] - NullPointerException occurs when query from subqueries 
with order by</li>
   <li>[KYLIN-1445] - Kylin should throw error if HIVE_CONF dir cannot be 
found</li>
-  <li>[KYLIN-1466] - Some environment variables are not used in bin/kylin.sh 
<RUNNABLE_CLASS_NAME></RUNNABLE_CLASS_NAME></li>
+  <li>[KYLIN-1466] - Some environment variables are not used in bin/kylin.sh 
<runnable_class_name></runnable_class_name></li>
   <li>[KYLIN-1469] - Hive dependency jars are hard coded in test</li>
   <li>[KYLIN-1471] - LIMIT after having clause should not be pushed down to 
storage context</li>
   <li>[KYLIN-1473] - Cannot have comments in the end of New Query textbox</li>
@@ -8282,7 +8282,7 @@ This version includes many bug fixs/enha
   <li>[KYLIN-1443] - For setting Auto Merge Time Ranges, before sending them 
to backend, the related time ranges should be sorted increasingly</li>
   <li>[KYLIN-1445] - Kylin should throw error if HIVE_CONF dir cannot be 
found</li>
   <li>[KYLIN-1456] - Shouldn’t use “1970-01-01” as the default end 
date</li>
-  <li>[KYLIN-1466] - Some environment variables are not used in bin/kylin.sh 
<RUNNABLE_CLASS_NAME></RUNNABLE_CLASS_NAME></li>
+  <li>[KYLIN-1466] - Some environment variables are not used in bin/kylin.sh 
<runnable_class_name></runnable_class_name></li>
   <li>[KYLIN-1469] - Hive dependency jars are hard coded in test</li>
 </ul>
 

Modified: kylin/site/feed.xml
URL: 
http://svn.apache.org/viewvc/kylin/site/feed.xml?rev=1851595&r1=1851594&r2=1851595&view=diff
==============================================================================
--- kylin/site/feed.xml (original)
+++ kylin/site/feed.xml Fri Jan 18 02:41:45 2019
@@ -19,11 +19,49 @@
     <description>Apache Kylin Home</description>
     <link>http://kylin.apache.org/</link>
     <atom:link href="http://kylin.apache.org/feed.xml"; rel="self" 
type="application/rss+xml"/>
-    <pubDate>Sun, 13 Jan 2019 18:04:49 -0800</pubDate>
-    <lastBuildDate>Sun, 13 Jan 2019 18:04:49 -0800</lastBuildDate>
+    <pubDate>Thu, 17 Jan 2019 18:33:24 -0800</pubDate>
+    <lastBuildDate>Thu, 17 Jan 2019 18:33:24 -0800</lastBuildDate>
     <generator>Jekyll v2.5.3</generator>
     
       <item>
+        <title>Introduce data source SDK</title>
+        <description>&lt;h2 id=&quot;data-source-sdk&quot;&gt;Data source 
SDK&lt;/h2&gt;
+
+&lt;p&gt;Apache Kylin already supports several data sources, such as Amazon 
Redshift and SQL Server, through JDBC. But we found that it takes a lot of 
effort to develop an implementation for a new source engine, covering things 
like metadata sync, cube build, and query pushdown. That is mainly because 
the SQL dialects and JDBC implementations of source engines differ quite a 
bit.&lt;/p&gt;
+
+&lt;p&gt;So since 2.6.0, Kylin ships a new data source SDK, which provides 
APIs that help developers handle these dialect differences and easily 
implement a new data source through JDBC.&lt;/p&gt;
+
+&lt;p&gt;With this SDK, users can achieve the following with a JDBC 
source:&lt;/p&gt;
+
+&lt;ul&gt;
+  &lt;li&gt;Synchronize metadata and data from the JDBC source&lt;/li&gt;
+  &lt;li&gt;Build cubes from the JDBC source&lt;/li&gt;
+  &lt;li&gt;Push down queries to the JDBC source engine when no cube 
matches&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;h2 id=&quot;structure&quot;&gt;Structure&lt;/h2&gt;
+
+&lt;p class=&quot;center&quot;&gt;&lt;img 
src=&quot;/images/blog/data-source-sdk.png&quot; alt=&quot;&quot; 
/&gt;&lt;/p&gt;
+
+&lt;p&gt;When users want to synchronize metadata or fetch data from the data 
source, the request passes through the framework, and the framework finds the 
adaptor, which has an API for metadata and data.&lt;/p&gt;
+
+&lt;p&gt;To avoid complex adaptors, for a push-down query the framework 
provides SQL conversion from ANSI SQL to the target data source dialect 
(including SQL functions and SQL types), and the adaptor just provides a 
function &lt;em&gt;fixSql&lt;/em&gt; to fix the SQL after the 
conversion.&lt;/p&gt;
+
+&lt;h2 id=&quot;how-to-develop&quot;&gt;How to develop&lt;/h2&gt;
+
+&lt;p&gt;Please follow this &lt;a 
href=&quot;/development/datasource_sdk.html&quot;&gt;doc&lt;/a&gt;.&lt;/p&gt;
+
+</description>
+        <pubDate>Wed, 16 Jan 2019 12:00:00 -0800</pubDate>
+        
<link>http://kylin.apache.org/blog/2019/01/16/introduce-data-source-sdk-v2.6.0/</link>
+        <guid 
isPermaLink="true">http://kylin.apache.org/blog/2019/01/16/introduce-data-source-sdk-v2.6.0/</guid>
+        
+        
+        <category>blog</category>
+        
+      </item>
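To make the adaptor flow described in the post above concrete, here is a
minimal Java sketch. Only the fixSql hook is named in the post, so the class
name, the missing base class, and the particular rewrite shown are
illustrative assumptions rather than the SDK's verified API.

    // Hypothetical adaptor sketch; in the real SDK this would extend the
    // SDK's adaptor base class, which the post does not name.
    public class ExampleJdbcAdaptor {

        // Called after the framework has converted ANSI SQL into the target
        // dialect; the adaptor applies any remaining dialect-specific fixes.
        public String fixSql(String sql) {
            // Assumed example: a dialect that quotes identifiers with double
            // quotes instead of backticks.
            return sql.replace('`', '"');
        }
    }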
+    
+      <item>
         <title>Why did Meituan develop Kylin On Druid (part 1 of 2)?</title>
         <description>&lt;h2 id=&quot;preface&quot;&gt;Preface&lt;/h2&gt;
 
@@ -31,12 +69,12 @@
 
 &lt;p&gt;During the Apache Kylin Meetup in August 2018, the Meituan team 
shared their Kylin on Druid (KoD) solution. Why did they develop this hybrid 
system? What’s the rationale behind it? This article will answer these 
questions and help you to understand the differences and the pros and cons of 
each OLAP engine.&lt;/p&gt;
 
-&lt;h2 id=&quot;01-introduction-to-apache-kylin&quot;&gt;01 Introduction to 
Apache Kylin&lt;/h2&gt;
+&lt;h2 id=&quot;introduction-to-apache-kylin&quot;&gt;01 Introduction to 
Apache Kylin&lt;/h2&gt;
 &lt;p&gt;Apache Kylin is an open source distributed big data analytics engine. 
It constructs data models on top of huge datasets, builds pre-calculated Cubes 
to support multi-dimensional analysis, and provides a SQL query interface and 
multi-dimensional analysis on top of Hadoop, with general ODBC, JDBC, and 
RESTful API interfaces. Apache Kylin’s unique pre-calculation ability enables 
it to handle extremely large datasets with sub-second query response 
times.&lt;br /&gt;
 &lt;img src=&quot;/images/blog/Kylin-On-Durid/1 kylin_architecture.png&quot; 
alt=&quot;&quot; /&gt;&lt;br /&gt;
 Graphic  1 Kylin Architecture&lt;/p&gt;
 
-&lt;h2 id=&quot;02-apache-kylins-advantage&quot;&gt;02 Apache Kylin’s 
Advantage&lt;/h2&gt;
+&lt;h2 id=&quot;apache-kylins-advantage&quot;&gt;02 Apache Kylin’s 
Advantage&lt;/h2&gt;
 &lt;ol&gt;
   &lt;li&gt;The mature, Hadoop-based computing engines (MapReduce and Spark) 
that provide strong capability of pre-calculation on super large datasets, 
which can be deployed out-of-the-box on any mainstream Hadoop 
platform.&lt;/li&gt;
   &lt;li&gt;Support of ANSI SQL that allows users to do data analysis with SQL 
directly.&lt;/li&gt;
@@ -48,34 +86,34 @@ Graphic  1 Kylin Architecture&lt;/p&gt;
   &lt;li&gt;Support of both batch loading of super large historical datasets 
and micro-batches of data streams.&lt;/li&gt;
 &lt;/ol&gt;
 
-&lt;h2 id=&quot;03-introduction-to-apache-druid-incubating&quot;&gt;03 
Introduction to Apache Druid (incubating)&lt;/h2&gt;
+&lt;h2 id=&quot;introduction-to-apache-druid-incubating&quot;&gt;03 
Introduction to Apache Druid (incubating)&lt;/h2&gt;
 &lt;p&gt;Druid was created in 2012. It’s an open source distributed data 
store. Its core design combines the concept of analytical databases, 
time-series databases, and search systems, and it can support data collection 
and analytics on fairly large datasets. Druid uses an Apache V2 license and is 
an Apache incubator project.&lt;/p&gt;
 
 &lt;p&gt;Druid Architecture&lt;br /&gt;
 From the perspective of deployment architectures, Druid’s processes mostly 
fall into 3 categories based on their roles.&lt;/p&gt;
 
-&lt;h3 
id=&quot;-data-node-slave-node-for-data-ingestion-and-calculation&quot;&gt;•  
Data Node (Slave node for data ingestion and calculation)&lt;/h3&gt;
+&lt;h3 
id=&quot;data-node-slave-node-for-data-ingestion-and-calculation&quot;&gt;•   
Data Node (Slave node for data ingestion and calculation)&lt;/h3&gt;
 &lt;p&gt;The Historical node is in charge of loading segments (committed 
immutable data) and receiving queries on historical data.&lt;br /&gt;
 Middle Manager is in charge of data ingestion and committing segments. Each 
task is done by a separate JVM. &lt;br /&gt;
 Peon is in charge of completing a single task, which is managed and monitored 
by the Middle Manager.&lt;/p&gt;
 
-&lt;h3 id=&quot;-query-node&quot;&gt;•       Query Node&lt;/h3&gt;
+&lt;h3 id=&quot;query-node&quot;&gt;•        Query Node&lt;/h3&gt;
 &lt;p&gt;Broker receives query requests, determines on which segment the data 
resides, and distributes sub-queries and merges query results.&lt;/p&gt;
 
-&lt;h3 id=&quot;-master-node-task-coordinator-and-cluster-manager&quot;&gt;• 
Master Node (Task Coordinator and Cluster Manager)&lt;/h3&gt;
+&lt;h3 id=&quot;master-node-task-coordinator-and-cluster-manager&quot;&gt;•  
Master Node (Task Coordinator and Cluster Manager)&lt;/h3&gt;
 &lt;p&gt;Coordinator monitors Historical nodes, dispatches segments and 
monitors workload.&lt;br /&gt;
 Overlord monitors Middle Manager, dispatches tasks to Middle Manager, and 
assists the release of segments.&lt;/p&gt;
 
 &lt;h3 id=&quot;external-dependency&quot;&gt;External Dependency&lt;/h3&gt;
 &lt;p&gt;At the same time, Druid has 3 replaceable external 
dependencies.&lt;/p&gt;
 
-&lt;h3 id=&quot;-deep-storage-distributed-storage&quot;&gt;• Deep Storage 
(distributed storage)&lt;/h3&gt;
+&lt;h3 id=&quot;deep-storage-distributed-storage&quot;&gt;•  Deep Storage 
(distributed storage)&lt;/h3&gt;
 &lt;p&gt;Druid uses Deep storage to transfer data files between 
nodes.&lt;/p&gt;
 
-&lt;h3 id=&quot;-metadata-storage&quot;&gt;• Metadata Storage&lt;/h3&gt;
+&lt;h3 id=&quot;metadata-storage&quot;&gt;•  Metadata Storage&lt;/h3&gt;
 &lt;p&gt;Metadata Storage stores the metadata about segment positions and task 
output.&lt;/p&gt;
 
-&lt;h3 
id=&quot;-zookeeper-cluster-management-and-task-coordination&quot;&gt;•       
Zookeeper (cluster management and task coordination)&lt;/h3&gt;
+&lt;h3 
id=&quot;zookeeper-cluster-management-and-task-coordination&quot;&gt;•        
Zookeeper (cluster management and task coordination)&lt;/h3&gt;
 &lt;p&gt;Druid uses Zookeeper (ZK) to ensure consistency of the cluster 
status.&lt;br /&gt;
 &lt;img src=&quot;/images/blog/Kylin-On-Durid/2 druid_architecture.png&quot; 
alt=&quot;&quot; /&gt;&lt;br /&gt;
 Graphic 2 Druid Architecture&lt;/p&gt;
@@ -98,7 +136,7 @@ Graphic 4 Druid Schema&lt;/p&gt;
   &lt;li&gt;Separation of cold/hot data.&lt;/li&gt;
 &lt;/ol&gt;
 
-&lt;h2 id=&quot;04-why-did-meituan-develop-kylin-on-druid&quot;&gt;04 Why did 
Meituan develop Kylin on Druid?&lt;/h2&gt;
+&lt;h2 id=&quot;why-did-meituan-develop-kylin-on-druid&quot;&gt;04 Why did 
Meituan develop Kylin on Druid?&lt;/h2&gt;
 &lt;p&gt;Meituan deployed into production an offline OLAP platform with Apache 
Kylin as its core component in 2015. Since then the platform has served almost 
all business lines with fast growing data volume and query executions, and the 
stress on the cluster has increased accordingly. Throughout the time, the tech 
team in Meituan keeps exploring better solutions for some of Kylin’s 
challenges. The major one is Apache HBase, the storage that Kylin relies 
on.&lt;/p&gt;
 
 &lt;p&gt;Kylin stores its data in HBase by converting the Dimensions and 
Measures into HBase Keys and Values, respectively. As HBase doesn’t support 
secondary index and only has one RowKey index, Kylin’s Dimension values will 
be combined into a fixed sequence to store as RowKey. In this way, filtering on 
a Dimension in the front of the sequence will perform better than those at the 
back. Here’s an example:&lt;/p&gt;
@@ -112,13 +150,14 @@ Graphic 6 Cube2 RowKey Sequence&lt;/p&gt
 
 &lt;p&gt;&lt;strong&gt;Now let’s query each Cube with the same SQL and 
compare the response time.&lt;/strong&gt;&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;select S_SUPPKEY, C_CUSTKEY, 
sum(LO_EXTENDEDPRICE) as m1
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;select S_SUPPKEY, C_CUSTKEY, 
sum(LO_EXTENDEDPRICE) as m1
 from P_LINEORDER
     left join SUPPLIER on P_LINEORDER.LO_SUPPKEY = SUPPLIER.S_SUPPKEY
     left join CUSTOMER on P_LINEORDER.LO_CUSTKEY = CUSTOMER.C_CUSTKEY
 WHERE (LO_ORDERKEY &amp;gt; 1799905 and  LO_ORDERKEY &amp;lt; 1799915)  or 
(LO_ORDERKEY &amp;gt; 1999905 and  LO_ORDERKEY &amp;lt; 1999935)
 GROUP BY S_SUPPKEY, C_CUSTKEY;
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;&lt;strong&gt;Below shows the time consumed and data 
scanned:&lt;/strong&gt;&lt;br /&gt;
 &lt;img src=&quot;/images/blog/Kylin-On-Durid/7 cube1_query_log.png&quot; 
alt=&quot;&quot; /&gt;&lt;br /&gt;
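As a side note on the RowKey discussion above: a minimal sketch (hypothetical
names, not Kylin's actual classes) of why dimension order in the RowKey
matters. HBase can only bound a scan by a key prefix, so a filter on the
leading dimension narrows the scan, while the same filter on a trailing
dimension cannot.

    import java.nio.charset.StandardCharsets;

    public class RowKeyOrderSketch {
        // Dimension values are concatenated in a fixed order into the RowKey.
        static byte[] rowKey(String... dimValuesInFixedOrder) {
            return String.join("\u0000", dimValuesInFixedOrder)
                         .getBytes(StandardCharsets.UTF_8);
        }

        public static void main(String[] args) {
            // Cube1 puts LO_ORDERKEY first: WHERE LO_ORDERKEY ... becomes a
            // narrow prefix scan over the key range.
            byte[] cube1 = rowKey("1799906", "S_SUPPKEY_1", "C_CUSTKEY_1");
            // Cube2 puts LO_ORDERKEY last: the same filter yields no usable
            // prefix, so the whole key range is scanned and filtered.
            byte[] cube2 = rowKey("S_SUPPKEY_1", "C_CUSTKEY_1", "1799906");
            System.out.println(cube1.length + " / " + cube2.length);
        }
    }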
@@ -135,12 +174,11 @@ Graphic 8 Cube1 Query Log&lt;/p&gt;
 
 &lt;p&gt;Kylin’s query performance and user experience can be greatly 
improved with pure columnar storage and multiple indexes on Dimensions. As 
analyzed above, Druid happens to meet the requirements of columnar + 
multi-index. So the Meituan Kylin team decided to try replacing HBase with 
Druid.&lt;/p&gt;
 
-&lt;p&gt;Why not just use Druid then? Meituan’s engineers shared their 
thoughts:&lt;/p&gt;
+&lt;p&gt;Why not just use Druid then? Meituan’s engineers shared their 
thoughts:&lt;br /&gt;
+1.     Druid’s native query language is in its own specific JSON format, 
which is not as easy to pick up as SQL. Although the Druid community added SQL 
support later on, the support is not complete and does not meet the data 
analysts’ requirement of complex SQL queries. On the contrary, Kylin natively 
supports ANSI SQL, uses Apache Calcite for semantic parsing, and supports SQL 
features such as join, sub query, window functions, etc. In addition, it 
provides standard interfaces including ODBC/JDBC, and can directly connect with 
BI tools such as Tableau, Power BI, Superset, and Redash.&lt;/p&gt;
+
 &lt;ol&gt;
   &lt;li&gt;
-    &lt;p&gt;Druid’s native query language is in its own specific JSON 
format, which is not as easy to pick up as SQL. Although the Druid community 
added SQL support later on, the support is not complete and does not meet the 
data analysts’ requirement of complex SQL queries. On the contrary, Kylin 
natively supports ANSI SQL, uses Apache Calcite for semantic parsing, and 
supports SQL features such as join, sub query, window functions, etc. In 
addition, it provides standard interfaces including ODBC/JDBC, and can directly 
connect with BI tools such as Tableau, Power BI, Superset, and Redash.&lt;/p&gt;
-  &lt;/li&gt;
-  &lt;li&gt;
     &lt;p&gt;Druid can support only single-table query. Multi-table joins are 
very common in practice, but they cannot be supported by Druid. Kylin, however, 
supports Star Schema and Snowflake Schema, satisfying multi-table join 
requirements.&lt;/p&gt;
   &lt;/li&gt;
   &lt;li&gt;
@@ -167,9 +205,9 @@ Graphic 8 Cube1 Query Log&lt;/p&gt;
 
 &lt;p&gt;Therefore, it appears to be a promising OLAP solution to combine 
Druid’s excellent columnar storage with Kylin’s usability, compatibility, 
and completeness. Druid has columnar storage, inverted index, better filtering 
performance than HBase, native OLAP features, and good secondary aggregation 
capabilities. Meituan’s tech team decided to try replacing HBase with Druid 
as the storage for Kylin.&lt;/p&gt;
 
-&lt;h2 id=&quot;05&quot;&gt;05&lt;/h2&gt;
-&lt;h3 id=&quot;kylin-on-druid-design&quot;&gt;Kylin on Druid Design&lt;/h3&gt;
-&lt;p&gt;At v1.5, Apache Kylin introduced a pluggable architecture and 
de-coupled computing and storage components, which makes the replacement of 
HBase possible. Here is a brief introduction to the main design concept of 
Kylin on Druid based on Meituan engineer Kaisen Kang’s design doc. 
(Graphics 9 and 10 are from reference[1], and text is from reference[1] and 
[3])&lt;/p&gt;
+&lt;h2 id=&quot;section&quot;&gt;05&lt;/h2&gt;
+&lt;p&gt;### Kylin on Druid Design&lt;br /&gt;
+At v1.5, Apache Kylin introduced a pluggable architecture and de-coupled 
computing and storage components, which makes the replacement of HBase 
possible. Here is a brief introduction to the main design concept of Kylin on 
Druid based on Meituan engineer Kaisen Kang’s design doc. (Graphics 9 and 10 
are from reference[1], and text is from reference[1] and [3])&lt;/p&gt;
 
 &lt;h3 id=&quot;process-of-building-cube&quot;&gt;Process of Building 
Cube&lt;/h3&gt;
 &lt;ol&gt;
@@ -202,12 +240,12 @@ Graphic 10 Process of Querying Cube&lt;/
   &lt;li&gt;Kylin measure columns map to Druid measure columns.&lt;/li&gt;
 &lt;/ol&gt;
 
-&lt;h2 id=&quot;06-summary&quot;&gt;06 Summary&lt;/h2&gt;
+&lt;h2 id=&quot;summary&quot;&gt;06 Summary&lt;/h2&gt;
 &lt;p&gt;In this article, we first analyzed the features and pros/cons of 
both Kylin and Druid, and the reasons for the poor performance of HBase in 
Kylin in some cases. Then we searched for solutions and found the feasible 
option of using Druid as the Kylin storage engine. At last, we illustrated 
the Kylin-on-Druid architecture and the processes developed by 
Meituan.&lt;/p&gt;
 
 &lt;p&gt;Stay tuned for our next article about how to use Kylin on Druid, how 
it performs, and how it can be improved.&lt;/p&gt;
 
-&lt;h2 id=&quot;07-reference&quot;&gt;07 Reference&lt;/h2&gt;
+&lt;h2 id=&quot;reference&quot;&gt;07 Reference&lt;/h2&gt;
 
 &lt;ol&gt;
   &lt;li&gt;
@@ -225,7 +263,7 @@ Graphic 10 Process of Querying Cube&lt;/
 &lt;/ol&gt;
 
 </description>
-        <pubDate>Wed, 12 Dec 2018 17:30:00 +0000</pubDate>
+        <pubDate>Wed, 12 Dec 2018 09:30:00 -0800</pubDate>
         
<link>http://kylin.apache.org/blog/2018/12/12/why-did-meituan-develop-kylin-on-druid-part1-of-2/</link>
         <guid 
isPermaLink="true">http://kylin.apache.org/blog/2018/12/12/why-did-meituan-develop-kylin-on-druid-part1-of-2/</guid>
         
@@ -242,37 +280,37 @@ Graphic 10 Process of Querying Cube&lt;/
 
 &lt;p&gt;This is a new feature release following 2.4.0. It introduces many 
valuable improvements; for the complete change list please refer to the &lt;a 
href=&quot;https://kylin.apache.org/docs/release_notes.html&quot;&gt;release 
notes&lt;/a&gt;. Here we highlight some of the major improvements:&lt;/p&gt;
 
-&lt;h3 id=&quot;all-in-spark-的-cubing-引擎&quot;&gt;All-in-Spark Cubing 
engine&lt;/h3&gt;
+&lt;h3 id=&quot;all-in-spark--cubing-&quot;&gt;All-in-Spark Cubing 
engine&lt;/h3&gt;
 &lt;p&gt;Kylin's Spark engine now uses Spark to run all the distributed jobs 
in the cube computation, including fetching the distinct values of each 
dimension, converting cuboid files into HBase HFiles, merging segments, 
merging dictionaries, and so on. The default Spark configuration has also 
been tuned so that users get an out-of-the-box experience. The related 
development tasks are KYLIN-3427, KYLIN-3441, KYLIN-3442.&lt;/p&gt;
 
 &lt;p&gt;Spark job management has also been improved: once the Spark job 
starts running, you can get the job link on the web console; if you discard 
the job, Kylin kills the Spark job at once to release resources in time; if 
Kylin is restarted, it can resume from the previous job instead of 
resubmitting a new one.&lt;/p&gt;
 
-&lt;h3 id=&quot;mysql-做-kylin-元数据的存储&quot;&gt;MySQL as Kylin 
metadata storage&lt;/h3&gt;
+&lt;h3 id=&quot;mysql--kylin-&quot;&gt;MySQL as Kylin metadata 
storage&lt;/h3&gt;
 &lt;p&gt;In the past, HBase was the only choice for Kylin's metadata store. 
It is not suitable in some cases, for example when multiple HBase clusters 
are used to give Kylin cross-region high availability: the replicated HBase 
cluster is read-only, so it cannot serve as the metadata store. Now we 
introduce a MySQL metastore to satisfy this need. This feature is now in 
beta. See KYLIN-3488 for more details.&lt;/p&gt;
 
-&lt;h3 id=&quot;hybrid-model-图形界面&quot;&gt;Hybrid model 
GUI&lt;/h3&gt;
+&lt;h3 id=&quot;hybrid-model-&quot;&gt;Hybrid model GUI&lt;/h3&gt;
 &lt;p&gt;Hybrid is an advanced model used to assemble multiple cubes. It can 
be used when a cube's schema has to change. This feature had no graphical 
interface in the past, so only a small fraction of users knew about it. Now 
it is enabled on the web UI so that more users can try it.&lt;/p&gt;
 
-&lt;h3 id=&quot;默认开启-cube-planner&quot;&gt;Cube planner enabled by 
default&lt;/h3&gt;
+&lt;h3 id=&quot;cube-planner&quot;&gt;Cube planner enabled by 
default&lt;/h3&gt;
 &lt;p&gt;Cube planner can greatly optimize the cube structure and reduce the 
number of cuboids to build, thus saving computing/storage resources and 
improving query performance. It was introduced in v2.3 but was not enabled by 
default. To let more users see and try it, we enable it by default in v2.5. 
The algorithm will optimize the cuboid set automatically, based on data 
statistics, when the first segment is built.&lt;/p&gt;
 
-&lt;h3 id=&quot;改进的-segment-剪枝&quot;&gt;Improved segment 
pruning&lt;/h3&gt;
+&lt;h3 id=&quot;segment-&quot;&gt;Improved segment pruning&lt;/h3&gt;
 &lt;p&gt;Segment (partition) pruning can effectively reduce disk and network 
I/O, so it greatly improves query performance. In the past, Kylin pruned 
segments only by the value of the partition date column; if a query did not 
filter on the partition column, pruning did not apply and all segments were 
scanned.&lt;br /&gt;
Now from v2.5, Kylin records the min/max values of every dimension at the 
segment level. Before scanning a segment, the query's filter conditions are 
compared against the min/max index; if they do not match, the segment is 
skipped. Check KYLIN-3370 for more information.&lt;/p&gt;
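A minimal sketch of this min/max pruning idea, assuming one [min, max] pair
recorded per dimension per segment; the names are illustrative, not the
actual KYLIN-3370 implementation.

    import java.util.List;
    import java.util.stream.Collectors;

    class SegmentPruningSketch {
        static class Segment {
            final long min, max; // recorded for a dimension at build time
            Segment(long min, long max) { this.min = min; this.max = max; }
        }

        // Keep only segments whose [min, max] range can overlap the query's
        // filter range [lo, hi]; all others are skipped before scanning.
        static List<Segment> prune(List<Segment> segments, long lo, long hi) {
            return segments.stream()
                           .filter(s -> hi >= s.min && lo <= s.max)
                           .collect(Collectors.toList());
        }
    }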
 
-&lt;h3 id=&quot;在-yarn-上合并字典&quot;&gt;Merge dictionaries on 
YARN&lt;/h3&gt;
+&lt;h3 id=&quot;yarn-&quot;&gt;Merge dictionaries on YARN&lt;/h3&gt;
 &lt;p&gt;When segments are merged, their dictionaries need to be merged as 
well. In the past, dictionary merging happened inside Kylin's JVM, which 
consumed a lot of local memory and CPU; in extreme cases (with several 
concurrent jobs) it could crash the Kylin process. As a result, some users 
had to give the Kylin job node more memory, or run multiple job nodes to 
balance the workload.&lt;br /&gt;
Now from v2.5, Kylin hands this task over to Hadoop MapReduce and Spark, 
which resolves this bottleneck. Check KYLIN-3471 for more 
information.&lt;/p&gt;
 
-&lt;h3 id=&quot;改进使用全局字典的-cube-构建性能&quot;&gt;Improved cube 
build performance with global dictionary&lt;/h3&gt;
+&lt;h3 id=&quot;cube-&quot;&gt;Improved cube build performance with global 
dictionary&lt;/h3&gt;
 &lt;p&gt;The global dictionary (GD) is a prerequisite for precise count 
distinct with bitmaps. If the distinct-count column has very high 
cardinality, the GD can be very large. In the cube build phase, Kylin needs 
to convert non-integer values into integers through the GD. Although the GD 
has been split into slices that can be loaded into memory separately, the 
values of the column arrive unordered, so Kylin had to repeatedly swap slices 
in and out, which made the build job very slow.&lt;br /&gt;
This enhancement introduces a new step that builds a shrunken dictionary 
from the global dictionary for each data block. Each task then only needs to 
load the shrunken dictionary, avoiding frequent swap in/out. Performance can 
be 3x faster than before. Check KYLIN-3491 for more information.&lt;/p&gt;
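The shrunken-dictionary step can be pictured with this sketch (illustrative
only, not the actual KYLIN-3491 code): the distinct values of one data block
are looked up in the sliced global dictionary once, and each build task then
works from the small per-block map instead of swapping dictionary slices in
and out.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;
    import java.util.function.ToIntFunction;

    class ShrunkenDictionarySketch {
        // One global-dictionary lookup per distinct value in the block; the
        // resulting small map stays in memory for the whole task.
        static Map<String, Integer> shrink(Set<String> blockValues,
                                           ToIntFunction<String> globalDict) {
            Map<String, Integer> shrunken = new HashMap<>();
            for (String value : blockValues) {
                shrunken.put(value, globalDict.applyAsInt(value));
            }
            return shrunken;
        }
    }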
 
-&lt;h3 
id=&quot;改进含-topn-count-distinct-的-cube-大小的估计&quot;&gt;Improved 
cube size estimation with TOPN and COUNT DISTINCT&lt;/h3&gt;
+&lt;h3 id=&quot;topn-count-distinct--cube-&quot;&gt;Improved cube size 
estimation with TOPN and COUNT DISTINCT&lt;/h3&gt;
 &lt;p&gt;The cube size is estimated up front at build time and is used by 
several subsequent steps, such as deciding the number of partitions of the 
MR/Spark job and calculating HBase region splits, so its accuracy has a big 
impact on build performance. When there are COUNT DISTINCT or TOPN measures, 
whose sizes are flexible, the estimate could deviate a lot from the real 
size. In the past, users had to tune several parameters to bring the size 
estimate closer to the actual size, which was a bit difficult for ordinary 
users.&lt;br /&gt;
 Now Kylin adjusts the size estimate automatically based on the collected 
statistics, which brings the estimate much closer to the actual size. Check 
KYLIN-3453 for more information.&lt;/p&gt;
 
-&lt;h3 id=&quot;支持hadoop-30hbase-20&quot;&gt;Support for Hadoop 
3.0/HBase 2.0&lt;/h3&gt;
+&lt;h3 id=&quot;hadoop-30hbase-20&quot;&gt;Support for Hadoop 3.0/HBase 
2.0&lt;/h3&gt;
 &lt;p&gt;Hadoop 3 and HBase 2 are being adopted by many users. Kylin now 
provides new binary packages compiled with the new Hadoop and HBase APIs. We 
have tested them on Hortonworks HDP 3.0 and Cloudera CDH 6.0.&lt;/p&gt;
 
 &lt;p&gt;&lt;strong&gt;Download&lt;/strong&gt;&lt;/p&gt;
@@ -289,7 +327,7 @@ Graphic 10 Process of Querying Cube&lt;/
 
 &lt;p&gt;&lt;em&gt;Great thanks to everyone who contributed to Apache 
Kylin!&lt;/em&gt;&lt;/p&gt;
 </description>
-        <pubDate>Thu, 20 Sep 2018 20:00:00 +0000</pubDate>
+        <pubDate>Thu, 20 Sep 2018 13:00:00 -0700</pubDate>
         <link>http://kylin.apache.org/cn/blog/2018/09/20/release-v2.5.0/</link>
         <guid 
isPermaLink="true">http://kylin.apache.org/cn/blog/2018/09/20/release-v2.5.0/</guid>
         
@@ -361,7 +399,7 @@ Graphic 10 Process of Querying Cube&lt;/
 
 &lt;p&gt;&lt;em&gt;Great thanks to everyone who 
contributed!&lt;/em&gt;&lt;/p&gt;
 </description>
-        <pubDate>Thu, 20 Sep 2018 20:00:00 +0000</pubDate>
+        <pubDate>Thu, 20 Sep 2018 13:00:00 -0700</pubDate>
         <link>http://kylin.apache.org/blog/2018/09/20/release-v2.5.0/</link>
         <guid 
isPermaLink="true">http://kylin.apache.org/blog/2018/09/20/release-v2.5.0/</guid>
         
@@ -407,20 +445,22 @@ GRANT ROLE ssb_write_role TO GROUP ssb_w
 # Then add kylin_manager_user to kylin_manager_group in OpenLDAP, so 
kylin_manager_user has access to the ssb database.
 &lt;/pre&gt;
 &lt;p&gt;2 Assign HDFS directory /user/kylin_manager_user read and write 
permissions to kylin_manager_user user.&lt;br /&gt;
-3 Configure the HADOOP_STREAMING_JAR environment variable under the 
kylin_manager_user user home directory.&lt;/p&gt;
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;Export 
HADOOP_STREAMING_JAR=/opt/cloudera/parcels/CDH/lib/hadoop-mapreduce/hadoop-streaming.jar
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+3 Configure the HADOOP_STREAMING_JAR environment variable under the 
kylin_manager_user user home directory.&lt;br /&gt;
+&lt;code class=&quot;highlighter-rouge&quot;&gt;
+Export 
HADOOP_STREAMING_JAR=/opt/cloudera/parcels/CDH/lib/hadoop-mapreduce/hadoop-streaming.jar
+&lt;/code&gt;&lt;/p&gt;
 
 &lt;h2 id=&quot;download-the-ssb-tool-and-compile&quot;&gt;Download the SSB 
tool and compile&lt;/h2&gt;
 
 &lt;p&gt;You can quickly download and compile the ssb test tool by entering 
the following command in the linux terminal.&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;git clone 
https://github.com/jiangshouzhuang/ssb-kylin.git
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;git clone 
https://github.com/jiangshouzhuang/ssb-kylin.git
 cd ssb-kylin
 cd ssb-benchmark
 make clean
 make
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;h2 id=&quot;adjust-the-ssb-parameters&quot;&gt;Adjust the SSB 
parameters&lt;/h2&gt;
 
@@ -428,7 +468,7 @@ make
 
 &lt;p&gt;Part of the ssb.conf file is:&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;  # customer base, default value is 
30,000
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;  # customer base, default value is 
30,000
   customer_base = 30000
   # part base, default value is 200,000
   part_base = 200000
@@ -438,27 +478,30 @@ make
   date_base = 2556
   # lineorder base (purchase record), default value is 6,000,000
   lineorder_base = 6000000
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;Of course, the above base parameters can be adjusted according to 
actual needs; I use the default parameters.&lt;br /&gt;
 In the ssb.conf file, there are some more parameters, as follows.&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;# manufacturer max. The value range 
is (1 .. manu_max)
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;# manufacturer max. The value range 
is (1 .. manu_max)
 manu_max = 5
 # category max. The value range is (1 .. cat_max)
 cat_max = 5
 # brand max. The value range is (1 .. brand_max)
 brand_max = 40
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;&lt;strong&gt;The explanation is as follows:&lt;/strong&gt; &lt;br 
/&gt;
manu_max, cat_max and brand_max are used to define the hierarchical scale. 
For example, manu_max=10, cat_max=10, and brand_max=10 mean a total of 10 
manufacturers, each manufacturer has at most 10 part categories, and each 
category has up to 10 brands. Therefore, the cardinality of manufacturer is 
10, the cardinality of category is 100, and the cardinality of brand is 
1000.&lt;/p&gt;
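Applying the same rule to the values in the snippet above (manu_max = 5,
cat_max = 5, brand_max = 40) gives 5 manufacturers, 5 x 5 = 25 categories,
and 5 x 5 x 40 = 1000 brands.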
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;# customer: num of cities per 
country, default value is 100
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;# customer: num of cities per 
country, default value is 100
 cust_city_max = 9
 # supplier: num of cities per country, default value is 100
 supp_city_max = 9
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;&lt;strong&gt;The explanation is as follows:&lt;/strong&gt; &lt;br 
/&gt;
 cust_city_max and supp_city_max are used to define the number of cities per 
country in the customer and supplier tables. If the total number of countries 
is 30, and cust_city_max=100, supp_city_max=10, then the customer table will 
have 3000 different cities, and the supplier table will have 300 different 
cities.&lt;/p&gt;
@@ -489,17 +532,19 @@ ${KYLIN_INSTALL_USER_PASSWD} -d org.apac
 &lt;p&gt;If your CDH or other big data platform is not using beeline, but hive 
cli, please modify it yourself.&lt;br /&gt;
 Once everything is ready, we start running the program and generate test 
data:&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;cd ssb-kylin
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;cd ssb-kylin
 bin/run.sh --scale 20
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;We set the scale to 20; the program will run for a while, and the 
largest table, lineorder, ends up with more than 100 million rows. After the 
program finishes, we look at the tables in the hive database and the amount 
of data:&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;use ssb;
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;use ssb;
 show tables;
 select count(1) from lineorder;
 select count(1) from p_lineorder;
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;&lt;img src=&quot;/images/blog/2.1 generated tables.png&quot; 
alt=&quot;&quot; /&gt;&lt;/p&gt;
 
@@ -512,9 +557,10 @@ select count(1) from p_lineorder;
 &lt;p&gt;The ssb-kylin project has helped us build the project, model, and 
cube in advance. Just import the Kylin directly like the learn_kylin example. 
Cube Metadata’s directory is cubemeta, because our kylin integrates OpenLDAP, 
there is no ADMIN user, so the owner parameter in cubemeta/cube/ssb.json is set 
to null.&lt;br /&gt;
 Execute the following command to import cubemeta:&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;cd ssb-kylin
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;cd ssb-kylin
 $KYLIN_HOME/bin/metastore.sh restore cubemeta
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;Then log in to Kylin and execute Reload Metadata operation. This 
creates new project, model and cube in Kylin. Before building cube, first 
Disable, then Purge, delete old temporary files.&lt;/p&gt;
 
@@ -524,17 +570,19 @@ $KYLIN_HOME/bin/metastore.sh restore cub
 
 &lt;p&gt;Here I test the performance of building the Cube again with Spark: 
disable the previously created Cube, and then Purge it. Since the Cube has 
been purged, the now-useless HBase tables and HDFS files need to be deleted, 
so we manually clean up the junk files. First execute the following 
command:&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;${KYLIN_HOME}/bin/kylin.sh 
org.apache.kylin.tool.StorageCleanupJob --delete false
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;${KYLIN_HOME}/bin/kylin.sh 
org.apache.kylin.tool.StorageCleanupJob --delete false
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;Then check whether the listed HBase tables and HDFS files are 
indeed useless. After confirming, perform the delete operation:&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;${KYLIN_HOME}/bin/kylin.sh 
org.apache.kylin.tool.StorageCleanupJob --delete true
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;${KYLIN_HOME}/bin/kylin.sh 
org.apache.kylin.tool.StorageCleanupJob --delete true
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;When using Spark to build a cube, it consumes a lot of memory. After 
all, using memory resources improves the speed of cube building. Here I will 
list some of the parameters of Spark in the kylin.properties configuration 
file:&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;kylin.engine.spark-conf.spark.master=yarn
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;kylin.engine.spark-conf.spark.master=yarn
 kylin.engine.spark-conf.spark.submit.deployMode=cluster
 kylin.engine.spark-conf.spark.yarn.queue=root.kylin_manager_group
 # config Dynamic resource allocation
@@ -550,7 +598,8 @@ kylin.engine.spark-conf.spark.driver.mem
 kylin.engine.spark-conf.spark.executor.memory=4G 
 kylin.engine.spark-conf.spark.executor.cores=1
 kylin.engine.spark-conf.spark.network.timeout=600
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;The above parameters can meet most of the requirements, so users 
basically do not need to configure anything when designing the Cube. Of 
course, if the situation is special, you can still set Spark-related tuning 
parameters at the Cube level.&lt;/p&gt;
 
@@ -588,7 +637,7 @@ The query result of Scale=10 is as follo
 &lt;/ol&gt;
 
 </description>
-        <pubDate>Mon, 16 Jul 2018 12:28:00 +0000</pubDate>
+        <pubDate>Mon, 16 Jul 2018 05:28:00 -0700</pubDate>
         
<link>http://kylin.apache.org/blog/2018/07/16/Star-Schema-Benchmark-on-Apache-Kylin/</link>
         <guid 
isPermaLink="true">http://kylin.apache.org/blog/2018/07/16/Star-Schema-Benchmark-on-Apache-Kylin/</guid>
         
@@ -638,7 +687,7 @@ The query result of Scale=10 is as follo
 
 &lt;p&gt;Wish you have a good time with Redash-Kylin!&lt;/p&gt;
 </description>
-        <pubDate>Tue, 08 May 2018 20:00:00 +0000</pubDate>
+        <pubDate>Tue, 08 May 2018 13:00:00 -0700</pubDate>
         
<link>http://kylin.apache.org/blog/2018/05/08/redash-kylin-plugin-strikingly/</link>
         <guid 
isPermaLink="true">http://kylin.apache.org/blog/2018/05/08/redash-kylin-plugin-strikingly/</guid>
         
@@ -680,11 +729,11 @@ The query result of Scale=10 is as follo
 &lt;p&gt;Any issue or question,&lt;br /&gt;
 open JIRA to Apache Kylin project: &lt;a 
href=&quot;https://issues.apache.org/jira/browse/KYLIN/&quot;&gt;https://issues.apache.org/jira/browse/KYLIN/&lt;/a&gt;&lt;br
 /&gt;
 or&lt;br /&gt;
-send mail to Apache Kylin dev mailing list: &lt;a 
href=&quot;mailto:d...@kylin.apache.org&quot;&gt;d...@kylin.apache.org&lt;/a&gt;&lt;/p&gt;
+send mail to Apache Kylin dev mailing list: &lt;a 
href=&quot;&amp;#109;&amp;#097;&amp;#105;&amp;#108;&amp;#116;&amp;#111;:&amp;#100;&amp;#101;&amp;#118;&amp;#064;&amp;#107;&amp;#121;&amp;#108;&amp;#105;&amp;#110;&amp;#046;&amp;#097;&amp;#112;&amp;#097;&amp;#099;&amp;#104;&amp;#101;&amp;#046;&amp;#111;&amp;#114;&amp;#103;&quot;&gt;&amp;#100;&amp;#101;&amp;#118;&amp;#064;&amp;#107;&amp;#121;&amp;#108;&amp;#105;&amp;#110;&amp;#046;&amp;#097;&amp;#112;&amp;#097;&amp;#099;&amp;#104;&amp;#101;&amp;#046;&amp;#111;&amp;#114;&amp;#103;&lt;/a&gt;&lt;/p&gt;
 
 &lt;p&gt;&lt;em&gt;Great thanks to everyone who 
contributed!&lt;/em&gt;&lt;/p&gt;
 </description>
-        <pubDate>Sun, 04 Mar 2018 20:00:00 +0000</pubDate>
+        <pubDate>Sun, 04 Mar 2018 12:00:00 -0800</pubDate>
         <link>http://kylin.apache.org/blog/2018/03/04/release-v2.3.0/</link>
         <guid 
isPermaLink="true">http://kylin.apache.org/blog/2018/03/04/release-v2.3.0/</guid>
         
@@ -755,14 +804,15 @@ Figure 4: Build Cube in Apache Kylin&lt;
   &lt;li&gt;Execute SQL in the “Insight” tab, for example:&lt;/li&gt;
 &lt;/ol&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;  select part_dt,
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;  select part_dt,
          sum(price) as total_selled,
          count(distinct seller_id) as sellers
   from kylin_sales
   group by part_dt
   order by part_dt
 -- #This query will hit on the newly built Cube “Kylin_sales_cube”.
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;ol&gt;
   &lt;li&gt;Next, we will install Apache Superset and initialize it.&lt;br 
/&gt;
@@ -770,14 +820,15 @@ Figure 4: Build Cube in Apache Kylin&lt;
   &lt;li&gt;Install kylinpy&lt;/li&gt;
 &lt;/ol&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;   $ pip install kylinpy
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;   $ pip install kylinpy
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;ol&gt;
   &lt;li&gt;Verify your installation; if everything goes well, the Apache 
Superset daemon should be up and running.&lt;/li&gt;
 &lt;/ol&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;$ superset runserver -d
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;$ superset runserver -d
 Starting server with command:
 gunicorn -w 2 --timeout 60 -b  0.0.0.0:8088 --limit-request-line 0 
--limit-request-field_size 0 superset:app
 
@@ -786,19 +837,18 @@ gunicorn -w 2 --timeout 60 -b  0.0.0.0:8
 [2018-01-03 15:54:03 +0800] [73673] [INFO] Using worker: sync
 [2018-01-03 15:54:03 +0800] [73676] [INFO] Booting worker with pid: 73676
 [2018-01-03 15:54:03 +0800] [73679] [INFO] Booting worker with pid: 73679
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;h2 id=&quot;connect-apache-kylin-from-apachesuperset&quot;&gt;Connect 
Apache Kylin from ApacheSuperset&lt;/h2&gt;
 
-&lt;p&gt;Now everything you need is installed and ready to go. Let’s try to 
create an Apache Kylin data source in Apache Superset.&lt;/p&gt;
-&lt;ol&gt;
-  &lt;li&gt;
-    &lt;p&gt;Open up http://localhost:8088 in your web browser with the 
credential you set during Apache Superset installation.&lt;br /&gt;
+&lt;p&gt;Now everything you need is installed and ready to go. Let’s try to 
create an Apache Kylin data source in Apache Superset.&lt;br /&gt;
+1. Open up http://localhost:8088 in your web browser with the credential you 
set during Apache Superset installation.&lt;br /&gt;
   &lt;img src=&quot;/images/Kylin-and-Superset/png/5. superset_1.png&quot; 
alt=&quot;&quot; /&gt;&lt;br /&gt;
   Figure 5: Apache Superset Login Page&lt;/p&gt;
-  &lt;/li&gt;
-  &lt;li&gt;
-    &lt;p&gt;Go to Source -&amp;gt; Datasource to configure a new data 
source.&lt;/p&gt;
+
+&lt;ol&gt;
+  &lt;li&gt;Go to Source -&amp;gt; Datasource to configure a new data source.
     &lt;ul&gt;
       &lt;li&gt;SQLAlchemy URI pattern is : 
kylin://&lt;username&gt;:&lt;password&gt;@&lt;hostname&gt;:&lt;port&gt;/&lt;project_name&gt;&lt;/li&gt;
       &lt;li&gt;Check “Expose in SQL Lab” if you want to expose this data 
source in SQL Lab.&lt;/li&gt;
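For reference, the URI pattern above filled in with Kylin's default
credentials and sample project (illustrative values; substitute your own):

    kylin://ADMIN:KYLIN@localhost:7070/learn_kylin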
@@ -844,8 +894,9 @@ Figure 11 Query multiple tables from Apa
 &lt;img src=&quot;/images/Kylin-and-Superset/png/12. SQL_Lab_2.png&quot; 
alt=&quot;&quot; /&gt;&lt;br /&gt;
 Figure 12 Define your query and visualize it immediately&lt;/p&gt;
 
-&lt;p&gt;You may copy the entire SQL below to experience how you can query 
Kylin Cube in SQL Lab.&lt;/p&gt;
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;select
+&lt;p&gt;You may copy the entire SQL below to experience how you can query 
Kylin Cube in SQL Lab. &lt;br /&gt;
+&lt;code class=&quot;highlighter-rouge&quot;&gt;
+select
 YEAR_BEG_DT,
 MONTH_BEG_DT,
 WEEK_BEG_DT,
@@ -863,8 +914,8 @@ join KYLIN_CATEGORY_GROUPINGS on SITE_ID
 join KYLIN_ACCOUNT on ACCOUNT_ID=BUYER_ID
 join KYLIN_COUNTRY on ACCOUNT_COUNTRY=COUNTRY
 group by YEAR_BEG_DT, 
MONTH_BEG_DT,WEEK_BEG_DT,META_CATEG_NAME,CATEG_LVL2_NAME, 
CATEG_LVL3_NAME, OPS_REGION, NAME
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
-&lt;h2 
id=&quot;experience-all-features-in-apache-superset-with-apache-kylin&quot;&gt;Experience
 All Features in Apache Superset with Apache Kylin&lt;/h2&gt;
+&lt;/code&gt;&lt;br /&gt;
+## Experience All Features in Apache Superset with Apache Kylin&lt;/p&gt;
 
 &lt;p&gt;Most of the common reporting features are available in Apache 
Superset. Now let’s see how we can use those features to analyze data from 
Apache Kylin.&lt;/p&gt;
 
@@ -877,14 +928,13 @@ group by YEAR_BEG_DT, MONTH_BEG_DT,
 Figure 13 Sort by&lt;/p&gt;
 
 &lt;h3 id=&quot;filtering&quot;&gt;Filtering&lt;/h3&gt;
-&lt;p&gt;There are multiple ways you may filter data from Apache 
Kylin.&lt;/p&gt;
-&lt;ol&gt;
-  &lt;li&gt;
-    &lt;p&gt;Date Filter&lt;br /&gt;
+&lt;p&gt;There are multiple ways you may filter data from Apache Kylin.&lt;br 
/&gt;
+1. Date Filter&lt;br /&gt;
  You may filter date and time dimensions with the calendar filter. &lt;br /&gt;
   &lt;img src=&quot;/images/Kylin-and-Superset/png/14. time_filter.png&quot; 
alt=&quot;&quot; /&gt;&lt;br /&gt;
   Figure 14  Filtering time&lt;/p&gt;
-  &lt;/li&gt;
+
+&lt;ol&gt;
   &lt;li&gt;
     &lt;p&gt;Dimension Filter&lt;br /&gt;
  For other dimensions, you may filter them with SQL conditions such as “in, not 
in, equal to, not equal to, greater than and equal to, smaller than and equal 
to, greater than, smaller than, like” (a programmatic sketch of such filters 
appears at the end of this excerpt).&lt;br /&gt;
@@ -952,7 +1002,7 @@ Figure 13 Sort by&lt;/p&gt;
 &lt;/ol&gt;
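
&lt;p&gt;The filter conditions above are ordinary SQL predicates, so the same filters 
can be reproduced outside the UI. This is a hedged sketch, not from the original 
post: it reuses the joins from the sample query earlier and the kylin:// dialect 
from the kylinpy package; the credentials, host, project and filter values are all 
hypothetical placeholders.&lt;/p&gt;

&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;# Hedged sketch; assumes `pip install kylinpy`. All names below are placeholders.
from sqlalchemy import create_engine

engine = create_engine(&#39;kylin://ADMIN:KYLIN@localhost:7070/learn_kylin&#39;)
# &quot;in&quot; and &quot;like&quot; dimension filters written as a plain WHERE clause,
# reusing the joins from the sample SQL Lab query above.
sql = (&quot;SELECT NAME, SUM(PRICE) AS GMV &quot;
       &quot;FROM KYLIN_SALES &quot;
       &quot;JOIN KYLIN_ACCOUNT ON ACCOUNT_ID = BUYER_ID &quot;
       &quot;JOIN KYLIN_COUNTRY ON ACCOUNT_COUNTRY = COUNTRY &quot;
       &quot;WHERE NAME IN (&#39;United States&#39;, &#39;China&#39;) OR NAME LIKE &#39;U%&#39; &quot;
       &quot;GROUP BY NAME&quot;)
for name, gmv in engine.execute(sql):
    print(name, gmv)
&lt;/code&gt;&lt;/pre&gt;
&lt;/div&gt;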
 
 </description>
-        <pubDate>Mon, 01 Jan 2018 12:28:00 +0000</pubDate>
+        <pubDate>Mon, 01 Jan 2018 04:28:00 -0800</pubDate>
         
<link>http://kylin.apache.org/blog/2018/01/01/kylin-and-superset/</link>
         <guid 
isPermaLink="true">http://kylin.apache.org/blog/2018/01/01/kylin-and-superset/</guid>
         
@@ -981,11 +1031,12 @@ Figure 13 Sort by&lt;/p&gt;
 &lt;h3 id=&quot;make-spark-connect-hbase-with-kerberos-enabled&quot;&gt;Make 
Spark connect HBase with Kerberos enabled&lt;/h3&gt;
 &lt;p&gt;If we just want to run Spark Cubing in YARN client mode, we only need to 
add three lines of code before new SparkConf() in SparkCubingByLayer:&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;        Configuration configuration 
= HBaseConnection.getCurrentHBaseConfiguration();        
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;        Configuration configuration 
= HBaseConnection.getCurrentHBaseConfiguration();        
         HConnection connection = 
HConnectionManager.createConnection(configuration);
         //Obtain an authentication token for the given user and add it to the 
user&#39;s credentials.
         TokenUtil.obtainAndCacheToken(connection, 
UserProvider.instantiate(configuration).create(UserGroupInformation.getCurrentUser()));
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;p&gt;As for how to make Spark connect to HBase using Kerberos in YARN cluster 
mode, please refer to SPARK-6918, SPARK-12279, and HBASE-17040. That solution 
may work, but it is not elegant, so I tried the second solution.&lt;/p&gt;
 
@@ -1026,7 +1077,7 @@ This following picture shows the content
 
 &lt;p&gt;The following is the Spark configuration I used in our environment. It 
enables Spark dynamic resource allocation; the goal is to let our users set 
fewer Spark configurations.&lt;/p&gt;
 
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;//running in yarn-cluster mode
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;//running in yarn-cluster mode
 kylin.engine.spark-conf.spark.master=yarn
 kylin.engine.spark-conf.spark.submit.deployMode=cluster 
 
@@ -1051,7 +1102,8 @@ kylin.engine.spark-conf.spark.network.ti
 kylin.engine.spark-conf.spark.yarn.queue=root.hadoop.test
 
 kylin.engine.spark.rdd-partition-cut-mb=100
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;h3 id=&quot;performance-test-of-spark-cubing&quot;&gt;Performance test of 
Spark Cubing&lt;/h3&gt;
 
@@ -1107,7 +1159,7 @@ kylin.engine.spark.rdd-partition-cut-mb=
 &lt;p&gt;Spark Cubing is a great feature for Kylin 2.0; thanks to the Kylin 
community. We will apply Spark Cubing to real scenarios in our company, and I 
believe Spark Cubing will become more robust and efficient in future 
releases.&lt;/p&gt;
 
 </description>
-        <pubDate>Fri, 21 Jul 2017 22:22:22 +0000</pubDate>
+        <pubDate>Fri, 21 Jul 2017 15:22:22 -0700</pubDate>
         
<link>http://kylin.apache.org/blog/2017/07/21/Improving-Spark-Cubing/</link>
         <guid 
isPermaLink="true">http://kylin.apache.org/blog/2017/07/21/Improving-Spark-Cubing/</guid>
         
@@ -1127,10 +1179,11 @@ kylin.engine.spark.rdd-partition-cut-mb=
 
 &lt;p&gt;In Apache Kylin, we support SQL syntax similar to Apache Hive&#39;s, 
with an aggregation function called &lt;strong&gt;percentile(&amp;lt;Number 
Column&amp;gt;, &amp;lt;Double&amp;gt;)&lt;/strong&gt;:&lt;/p&gt;
 
-&lt;div class=&quot;language-sql highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;&lt;span 
class=&quot;k&quot;&gt;SELECT&lt;/span&gt; &lt;span 
class=&quot;n&quot;&gt;seller_id&lt;/span&gt;&lt;span 
class=&quot;p&quot;&gt;,&lt;/span&gt; &lt;span 
class=&quot;n&quot;&gt;percentile&lt;/span&gt;&lt;span 
class=&quot;p&quot;&gt;(&lt;/span&gt;&lt;span 
class=&quot;n&quot;&gt;price&lt;/span&gt;&lt;span 
class=&quot;p&quot;&gt;,&lt;/span&gt; &lt;span 
class=&quot;mi&quot;&gt;0&lt;/span&gt;&lt;span 
class=&quot;p&quot;&gt;.&lt;/span&gt;&lt;span 
class=&quot;mi&quot;&gt;5&lt;/span&gt;&lt;span 
class=&quot;p&quot;&gt;)&lt;/span&gt;
+&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;&lt;span 
class=&quot;k&quot;&gt;SELECT&lt;/span&gt; &lt;span 
class=&quot;n&quot;&gt;seller_id&lt;/span&gt;&lt;span 
class=&quot;p&quot;&gt;,&lt;/span&gt; &lt;span 
class=&quot;n&quot;&gt;percentile&lt;/span&gt;&lt;span 
class=&quot;p&quot;&gt;(&lt;/span&gt;&lt;span 
class=&quot;n&quot;&gt;price&lt;/span&gt;&lt;span 
class=&quot;p&quot;&gt;,&lt;/span&gt; &lt;span 
class=&quot;mi&quot;&gt;0&lt;/span&gt;&lt;span 
class=&quot;p&quot;&gt;.&lt;/span&gt;&lt;span 
class=&quot;mi&quot;&gt;5&lt;/span&gt;&lt;span 
class=&quot;p&quot;&gt;)&lt;/span&gt;
 &lt;span class=&quot;k&quot;&gt;FROM&lt;/span&gt; &lt;span 
class=&quot;n&quot;&gt;test_kylin_fact&lt;/span&gt;
 &lt;span class=&quot;k&quot;&gt;GROUP&lt;/span&gt; &lt;span 
class=&quot;k&quot;&gt;BY&lt;/span&gt; &lt;span 
class=&quot;n&quot;&gt;seller_id&lt;/span&gt;
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
+&lt;/code&gt;&lt;/pre&gt;
+&lt;/div&gt;
 
 &lt;h3 id=&quot;how-to-use&quot;&gt;How to use&lt;/h3&gt;
 &lt;p&gt;If you know little about &lt;em&gt;Cubes&lt;/em&gt;, please go to 
&lt;a 
href=&quot;http://kylin.apache.org/docs20/tutorial/kylin_sample.html&quot;&gt;QuickStart&lt;/a&gt;
 first to learn the basics.&lt;/p&gt;
@@ -1147,67 +1200,12 @@ kylin.engine.spark.rdd-partition-cut-mb=
 
 &lt;p&gt;&lt;img src=&quot;/images/blog/percentile_3.png&quot; 
alt=&quot;&quot; /&gt;&lt;/p&gt;
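
&lt;p&gt;To try the measure end to end, here is a minimal sketch (not from the 
original post) that runs the percentile query above through the kylin:// SQLAlchemy 
dialect provided by the kylinpy package; ADMIN/KYLIN, localhost:7070 and learn_kylin 
are placeholders for your own deployment:&lt;/p&gt;

&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;# Minimal sketch; assumes `pip install kylinpy` and a built Cube containing
# the percentile measure. Credentials, host and project are placeholders.
from sqlalchemy import create_engine

engine = create_engine(&#39;kylin://ADMIN:KYLIN@localhost:7070/learn_kylin&#39;)
sql = (&quot;SELECT seller_id, percentile(price, 0.5) &quot;
       &quot;FROM test_kylin_fact GROUP BY seller_id&quot;)
for seller_id, median_price in engine.execute(sql):
    # percentile(price, 0.5) approximates the median price per seller
    print(seller_id, median_price)
&lt;/code&gt;&lt;/pre&gt;
&lt;/div&gt;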
 </description>
-        <pubDate>Sat, 01 Apr 2017 22:22:22 +0000</pubDate>
+        <pubDate>Sat, 01 Apr 2017 15:22:22 -0700</pubDate>
         
<link>http://kylin.apache.org/blog/2017/04/01/percentile-measure/</link>
         <guid 
isPermaLink="true">http://kylin.apache.org/blog/2017/04/01/percentile-measure/</guid>
         
         
         <category>blog</category>
-        
-      </item>
-    
-      <item>
-        <title>Apache Kylin v2.0.0 beta Released</title>
-        <description>&lt;p&gt;The Apache Kylin community is very pleased to announce that the &lt;a 
href=&quot;http://kylin.apache.org/cn/download/&quot;&gt;v2.0.0 beta 
package&lt;/a&gt; is now available for download and testing.&lt;/p&gt;
-
-&lt;ul&gt;
-  &lt;li&gt;Download link: &lt;a 
href=&quot;http://kylin.apache.org/cn/download/&quot;&gt;http://kylin.apache.org/cn/download/&lt;/a&gt;&lt;/li&gt;
-  &lt;li&gt;Source code: 
https://github.com/apache/kylin/tree/kylin-2.0.0-beta&lt;/li&gt;
-&lt;/ul&gt;
-
-&lt;p&gt;More than two months have passed since the v1.6.0 release. During this 
time, the whole community has worked together to deliver a series of major 
features that we hope will lift Apache Kylin to a new level.&lt;/p&gt;
-
-&lt;ul&gt;
-  &lt;li&gt;Snowflake schema support (&lt;a 
href=&quot;https://issues.apache.org/jira/browse/KYLIN-1875&quot;&gt;KYLIN-1875&lt;/a&gt;)&lt;/li&gt;
-  &lt;li&gt;TPC-H query support (&lt;a 
href=&quot;https://issues.apache.org/jira/browse/KYLIN-2467&quot;&gt;KYLIN-2467&lt;/a&gt;)&lt;/li&gt;
-  &lt;li&gt;Spark build engine (&lt;a 
href=&quot;https://issues.apache.org/jira/browse/KYLIN-2331&quot;&gt;KYLIN-2331&lt;/a&gt;)&lt;/li&gt;
-  &lt;li&gt;Job Engine high availability (&lt;a 
href=&quot;https://issues.apache.org/jira/browse/KYLIN-2006&quot;&gt;KYLIN-2006&lt;/a&gt;)&lt;/li&gt;
-  &lt;li&gt;Percentile measure (&lt;a 
href=&quot;https://issues.apache.org/jira/browse/KYLIN-2396&quot;&gt;KYLIN-2396&lt;/a&gt;)&lt;/li&gt;
-  &lt;li&gt;Verified on the Cloud (&lt;a 
href=&quot;https://issues.apache.org/jira/browse/KYLIN-2351&quot;&gt;KYLIN-2351&lt;/a&gt;)&lt;/li&gt;
-&lt;/ul&gt;
-
-&lt;p&gt;You are very welcome to download and test v2.0.0 beta. Your feedback is 
very important to us; please send email to &lt;a 
href=&quot;mailto:d...@kylin.apache.org&quot;&gt;d...@kylin.apache.org&lt;/a&gt;.&lt;/p&gt;
-
-&lt;hr /&gt;
-
-&lt;h2 id=&quot;安装&quot;&gt;Installation&lt;/h2&gt;
-
-&lt;p&gt;For now, v2.0.0 beta cannot be upgraded directly from v1.6.0; a fresh 
installation is required, because the new version&#39;s metadata is not backward 
compatible. Fortunately, the Cube data is compatible, so once a metadata 
conversion tool is developed, a smooth upgrade will be possible in the near 
future. We are working on it.&lt;/p&gt;
-
-&lt;hr /&gt;
-
-&lt;h2 id=&quot;运行-tpc-h-基准测试&quot;&gt;Run the TPC-H 
Benchmark&lt;/h2&gt;
-
-&lt;p&gt;The detailed steps for running TPC-H on Apache Kylin: &lt;a 
href=&quot;https://github.com/Kyligence/kylin-tpch&quot;&gt;https://github.com/Kyligence/kylin-tpch&lt;/a&gt;&lt;/p&gt;
-
-&lt;hr /&gt;
-
-&lt;h2 id=&quot;spark-构建引擎&quot;&gt;Spark Build Engine&lt;/h2&gt;
-
-&lt;p&gt;Apache Kylin v2.0.0 introduces a brand-new build engine based on Apache 
Spark, which can replace the original MapReduce build engine. Initial tests show 
that Cube build time is typically reduced to about 50% of what it was.&lt;/p&gt;
-
-&lt;p&gt;To enable the Spark build engine, please refer to &lt;a 
href=&quot;/docs16/tutorial/cube_spark.html&quot;&gt;this document&lt;/a&gt;.&lt;/p&gt;
-
-&lt;hr /&gt;
-
-&lt;p&gt;&lt;em&gt;Thanks to everyone for participating and contributing!&lt;/em&gt;&lt;/p&gt;
-</description>
-        <pubDate>Sat, 25 Feb 2017 20:00:00 +0000</pubDate>
-        
<link>http://kylin.apache.org/cn/blog/2017/02/25/v2.0.0-beta-ready/</link>
-        <guid 
isPermaLink="true">http://kylin.apache.org/cn/blog/2017/02/25/v2.0.0-beta-ready/</guid>
-        
-        
-        <category>blog</category>
         
       </item>
     

Added: kylin/site/images/blog/data-source-sdk.png
URL: 
http://svn.apache.org/viewvc/kylin/site/images/blog/data-source-sdk.png?rev=1851595&view=auto
==============================================================================
Binary file - no diff available.

Propchange: kylin/site/images/blog/data-source-sdk.png
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

