This is an automated email from the ASF dual-hosted git repository.

morrysnow pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 163a38a527 [opt](Nereids) support sql cache (#22144)
163a38a527 is described below

commit 163a38a527ba4c616c22a4d4c2c2125eca3140a0
Author: Xinyi Zou <zouxiny...@gmail.com>
AuthorDate: Thu Jul 27 09:57:31 2023 +0800

    [opt](Nereids) support sql cache (#22144)
    
    1. let Nereids support sql cache
    2. let legacy planner's sql cache support union all
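    
    As a usage sketch (the statements and table name below are taken from the
    sql_cache.groovy regression test added in this commit, not a new API):
    
        -- enable the SQL cache and the Nereids planner, then run the same
        -- query twice; once cache_last_version_interval_second has elapsed
        -- since the last data load, the repeated run can be served from cache
        SET enable_sql_cache = true;
        SET enable_nereids_planner = true;
        SET enable_fallback_to_original_planner = false;
        SELECT k1, sum(k2) AS total_pv
        FROM test_sql_cache
        WHERE k1 BETWEEN '2022-05-28' AND '2022-06-30'
        GROUP BY k1
        ORDER BY k1;
    
    Note that the cached path is skipped when sql_select_limit or
    default_order_by_limit is set to a non-negative value (see the StmtExecutor
    change below).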
---
 .../nereids/rules/rewrite/AddDefaultLimit.java     |   8 +-
 .../apache/doris/planner/SingleNodePlanner.java    |   8 +-
 .../java/org/apache/doris/qe/SessionVariable.java  |  11 +-
 .../java/org/apache/doris/qe/StmtExecutor.java     |  26 +--
 .../main/java/org/apache/doris/qe/cache/Cache.java |   6 +
 .../org/apache/doris/qe/cache/CacheAnalyzer.java   | 129 ++++++++++++-
 .../java/org/apache/doris/qe/cache/SqlCache.java   |  10 +-
 regression-test/data/query_p0/cache/sql_cache.out  |  50 +++++
 .../suites/query_p0/cache/partition_cache.groovy   |   3 +
 .../suites/query_p0/cache/sql_cache.groovy         | 215 +++++++++++++++++++++
 10 files changed, 441 insertions(+), 25 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/AddDefaultLimit.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/AddDefaultLimit.java
index 6853f1e9fb..93cbf2f794 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/AddDefaultLimit.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/AddDefaultLimit.java
@@ -44,8 +44,8 @@ public class AddDefaultLimit extends DefaultPlanRewriter<StatementContext> imple
         // check if children contain logical sort and add limit.
         ConnectContext ctx = context.getConnectContext();
         if (ctx != null) {
-            long defaultLimit = ctx.getSessionVariable().sqlSelectLimit;
-            if (defaultLimit >= 0 && defaultLimit < Long.MAX_VALUE) {
+            long defaultLimit = ctx.getSessionVariable().getSqlSelectLimit();
+            if (defaultLimit >= 0) {
                 return new LogicalLimit<>(defaultLimit, 0, LimitPhase.ORIGIN, plan);
             }
         }
@@ -76,8 +76,8 @@ public class AddDefaultLimit extends DefaultPlanRewriter<StatementContext> imple
     public Plan visitLogicalSort(LogicalSort<? extends Plan> sort, StatementContext context) {
         ConnectContext ctx = context.getConnectContext();
         if (ctx != null) {
-            long defaultLimit = ctx.getSessionVariable().defaultOrderByLimit;
-            long sqlLimit = ctx.getSessionVariable().sqlSelectLimit;
+            long defaultLimit = ctx.getSessionVariable().getDefaultOrderByLimit();
+            long sqlLimit = ctx.getSessionVariable().getSqlSelectLimit();
             if (defaultLimit >= 0 || sqlLimit >= 0) {
                 if (defaultLimit < 0) {
                     defaultLimit = Long.MAX_VALUE;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
index 6365f5ae97..709bd67b9f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
@@ -185,7 +185,7 @@ public class SingleNodePlanner {
         }
         long sqlSelectLimit = -1;
         if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable() != null) {
-            sqlSelectLimit = ConnectContext.get().getSessionVariable().sqlSelectLimit;
+            sqlSelectLimit = ConnectContext.get().getSessionVariable().getSqlSelectLimit();
         }
         PlanNode singleNodePlan = createQueryPlan(queryStmt, analyzer,
                 ctx.getQueryOptions().getDefaultOrderByLimit(), sqlSelectLimit);
@@ -251,7 +251,7 @@ public class SingleNodePlanner {
     private PlanNode createQueryPlan(QueryStmt stmt, Analyzer analyzer, long defaultOrderByLimit, long sqlSelectLimit)
             throws UserException {
         long newDefaultOrderByLimit = defaultOrderByLimit;
-        long defaultLimit = analyzer.getContext().getSessionVariable().defaultOrderByLimit;
+        long defaultLimit = analyzer.getContext().getSessionVariable().getDefaultOrderByLimit();
         if (newDefaultOrderByLimit == -1) {
             if (defaultLimit <= -1) {
                 newDefaultOrderByLimit = Long.MAX_VALUE;
@@ -320,7 +320,7 @@ public class SingleNodePlanner {
             ((SortNode) root).setDefaultLimit(limit == -1);
             root.setOffset(stmt.getOffset());
             if (useTopN) {
-                if (sqlSelectLimit >= 0 && sqlSelectLimit < Long.MAX_VALUE) {
+                if (sqlSelectLimit >= 0) {
                     newDefaultOrderByLimit = Math.min(newDefaultOrderByLimit, sqlSelectLimit);
                 }
                 if (newDefaultOrderByLimit == Long.MAX_VALUE) {
@@ -337,7 +337,7 @@ public class SingleNodePlanner {
             // from SelectStmt outside
             root = addUnassignedConjuncts(analyzer, root);
         } else {
-            if (!stmt.hasLimit() && sqlSelectLimit >= 0 && sqlSelectLimit < Long.MAX_VALUE) {
+            if (!stmt.hasLimit() && sqlSelectLimit >= 0) {
                 root.setLimitAndOffset(sqlSelectLimit, stmt.getOffset());
             } else {
                 root.setLimitAndOffset(stmt.getLimit(), stmt.getOffset());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
index 6bb3e35592..0594161a25 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
@@ -396,7 +396,7 @@ public class SessionVariable implements Serializable, Writable {
     // By default, the number of Limit items after OrderBy is changed from 65535 items
     // before v1.2.0 (not included), to return all items by default
     @VariableMgr.VarAttr(name = DEFAULT_ORDER_BY_LIMIT)
-    public long defaultOrderByLimit = -1;
+    private long defaultOrderByLimit = -1;
 
     // query timeout in second.
     @VariableMgr.VarAttr(name = QUERY_TIMEOUT)
@@ -475,7 +475,7 @@ public class SessionVariable implements Serializable, Writable {
     public boolean sqlAutoIsNull = false;
 
     @VariableMgr.VarAttr(name = SQL_SELECT_LIMIT)
-    public long sqlSelectLimit = 9223372036854775807L;
+    private long sqlSelectLimit = Long.MAX_VALUE;
 
     // this is used to make c3p0 library happy
     @VariableMgr.VarAttr(name = MAX_ALLOWED_PACKET)
@@ -1309,9 +1309,16 @@ public class SessionVariable implements Serializable, Writable {
     }
 
     public long getSqlSelectLimit() {
+        if (sqlSelectLimit < 0 || sqlSelectLimit >= Long.MAX_VALUE) {
+            return -1;
+        }
         return sqlSelectLimit;
     }
 
+    public long getDefaultOrderByLimit() {
+        return defaultOrderByLimit;
+    }
+
     public int getMaxAllowedPacket() {
         return maxAllowedPacket;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java
index c9b72bad32..9f39a82ad9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java
@@ -1199,7 +1199,7 @@ public class StmtExecutor {
     // the meta fields must be sent right before the first batch of data(or eos flag).
     // so if it has data(or eos is true), this method must return true.
     private boolean sendCachedValues(MysqlChannel channel, List<InternalService.PCacheValue> cacheValues,
-            SelectStmt selectStmt, boolean isSendFields, boolean isEos)
+            Queriable selectStmt, boolean isSendFields, boolean isEos)
             throws Exception {
         RowBatch batch = null;
         boolean isSend = isSendFields;
@@ -1241,25 +1241,25 @@ public class StmtExecutor {
     /**
      * Handle the SelectStmt via Cache.
      */
-    private void handleCacheStmt(CacheAnalyzer cacheAnalyzer, MysqlChannel channel, SelectStmt selectStmt)
+    private void handleCacheStmt(CacheAnalyzer cacheAnalyzer, MysqlChannel channel)
             throws Exception {
         InternalService.PFetchCacheResult cacheResult = cacheAnalyzer.getCacheData();
         CacheMode mode = cacheAnalyzer.getCacheMode();
-        SelectStmt newSelectStmt = selectStmt;
+        Queriable queryStmt = (Queriable) parsedStmt;
         boolean isSendFields = false;
         if (cacheResult != null) {
             isCached = true;
             if (cacheAnalyzer.getHitRange() == Cache.HitRange.Full) {
-                sendCachedValues(channel, cacheResult.getValuesList(), newSelectStmt, isSendFields, true);
+                sendCachedValues(channel, cacheResult.getValuesList(), queryStmt, isSendFields, true);
                 return;
             }
             // rewrite sql
             if (mode == CacheMode.Partition) {
                 if (cacheAnalyzer.getHitRange() == Cache.HitRange.Left) {
                     isSendFields = sendCachedValues(channel, cacheResult.getValuesList(),
-                            newSelectStmt, isSendFields, false);
+                            queryStmt, isSendFields, false);
                 }
-                newSelectStmt = cacheAnalyzer.getRewriteStmt();
+                StatementBase newSelectStmt = cacheAnalyzer.getRewriteStmt();
                 newSelectStmt.reset();
                 analyzer = new Analyzer(context.getEnv(), context);
                 newSelectStmt.analyze(analyzer);
@@ -1271,7 +1271,7 @@ public class StmtExecutor {
                 planner.plan(newSelectStmt, context.getSessionVariable().toThrift());
             }
         }
-        sendResult(false, isSendFields, newSelectStmt, channel, cacheAnalyzer, cacheResult);
+        sendResult(false, isSendFields, queryStmt, channel, cacheAnalyzer, cacheResult);
     }
 
     private boolean handleSelectRequestInFe(SelectStmt parsedSelectStmt) throws IOException {
@@ -1341,9 +1341,13 @@ public class StmtExecutor {
 
         // Sql and PartitionCache
         CacheAnalyzer cacheAnalyzer = new CacheAnalyzer(context, parsedStmt, planner);
-        if (cacheAnalyzer.enableCache() && !isOutfileQuery && queryStmt instanceof SelectStmt) {
-            handleCacheStmt(cacheAnalyzer, channel, (SelectStmt) queryStmt);
-            return;
+        if (cacheAnalyzer.enableCache() && !isOutfileQuery
+                && context.getSessionVariable().getSqlSelectLimit() < 0
+                && context.getSessionVariable().getDefaultOrderByLimit() < 0) {
+            if (queryStmt instanceof QueryStmt || queryStmt instanceof LogicalPlanAdapter) {
+                handleCacheStmt(cacheAnalyzer, channel);
+                return;
+            }
         }
 
         // handle select .. from xx  limit 0
@@ -1442,7 +1446,7 @@ public class StmtExecutor {
             if (cacheAnalyzer != null) {
                 if (cacheResult != null && cacheAnalyzer.getHitRange() == Cache.HitRange.Right) {
                     isSendFields =
-                            sendCachedValues(channel, cacheResult.getValuesList(), (SelectStmt) queryStmt, isSendFields,
+                            sendCachedValues(channel, cacheResult.getValuesList(), (Queriable) queryStmt, isSendFields,
                                     false);
                 }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/Cache.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/Cache.java
index 2efd805fa8..797e252621 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/Cache.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/Cache.java
@@ -53,6 +53,12 @@ public abstract class Cache {
         hitRange = HitRange.None;
     }
 
+    protected Cache(TUniqueId queryId) {
+        this.queryId = queryId;
+        proxy = CacheProxy.getCacheProxy(CacheProxy.CacheProxyType.BE);
+        hitRange = HitRange.None;
+    }
+
     public abstract InternalService.PFetchCacheResult getCacheData(Status status);
 
     public HitRange getHitRange() {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java
index b29fef8f30..8fcf8811a8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java
@@ -39,6 +39,7 @@ import org.apache.doris.common.Config;
 import org.apache.doris.common.Status;
 import org.apache.doris.common.util.DebugUtil;
 import org.apache.doris.metric.MetricRepo;
+import org.apache.doris.nereids.glue.LogicalPlanAdapter;
 import org.apache.doris.planner.OlapScanNode;
 import org.apache.doris.planner.Planner;
 import org.apache.doris.planner.ScanNode;
@@ -230,8 +231,10 @@ public class CacheAnalyzer {
         }
         if (enableSqlCache()
                 && (now - latestTable.latestTime) >= Config.cache_last_version_interval_second * 1000L) {
-            LOG.debug("TIME:{},{},{}", now, latestTable.latestTime,
-                    Config.cache_last_version_interval_second * 1000);
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("TIME:{},{},{}", now, latestTable.latestTime,
+                        Config.cache_last_version_interval_second * 1000);
+            }
             cache = new SqlCache(this.queryId, this.selectStmt);
             ((SqlCache) cache).setCacheInfo(this.latestTable, allViewExpandStmtListStr);
             MetricRepo.COUNTER_CACHE_ADDED_SQL.increase(1L);
@@ -288,8 +291,128 @@ public class CacheAnalyzer {
         return CacheMode.Partition;
     }
 
+    private CacheMode innerCheckCacheModeSetOperation(long now) {
+        // only sql cache
+        if (!enableSqlCache()) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("sql cache is disabled. queryid {}", DebugUtil.printId(queryId));
+            }
+            return CacheMode.NoNeed;
+        }
+        if (!(parsedStmt instanceof SetOperationStmt) || scanNodes.size() == 0) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("not a set operation stmt or no scan node. queryid {}", DebugUtil.printId(queryId));
+            }
+            return CacheMode.NoNeed;
+        }
+        MetricRepo.COUNTER_QUERY_TABLE.increase(1L);
+
+        //Check the last version time of the table
+        List<CacheTable> tblTimeList = Lists.newArrayList();
+        for (int i = 0; i < scanNodes.size(); i++) {
+            ScanNode node = scanNodes.get(i);
+            if (!(node instanceof OlapScanNode)) {
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("query contains non-olap table. queryid {}", DebugUtil.printId(queryId));
+                }
+                return CacheMode.None;
+            }
+            CacheTable cTable = getSelectedPartitionLastUpdateTime((OlapScanNode) node);
+            tblTimeList.add(cTable);
+        }
+        MetricRepo.COUNTER_QUERY_OLAP_TABLE.increase(1L);
+        Collections.sort(tblTimeList);
+        latestTable = tblTimeList.get(0);
+        latestTable.debug();
+
+        addAllViewStmt((SetOperationStmt) parsedStmt);
+        String allViewExpandStmtListStr = parsedStmt.toSql() + "|" + StringUtils.join(allViewStmtSet, "|");
+
+        if (now == 0) {
+            now = nowtime();
+        }
+        if (enableSqlCache()
+                && (now - latestTable.latestTime) >= Config.cache_last_version_interval_second * 1000L) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("TIME:{},{},{}", now, latestTable.latestTime,
+                        Config.cache_last_version_interval_second * 1000);
+            }
+            cache = new SqlCache(this.queryId);
+            ((SqlCache) cache).setCacheInfo(this.latestTable, allViewExpandStmtListStr);
+            MetricRepo.COUNTER_CACHE_ADDED_SQL.increase(1L);
+            return CacheMode.Sql;
+        }
+        return CacheMode.None;
+    }
+
+    private CacheMode innerCheckCacheModeForNereids(long now) {
+        // only sql cache
+        if (!enableSqlCache()) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("sql cache is disabled. queryid {}", DebugUtil.printId(queryId));
+            }
+            return CacheMode.NoNeed;
+        }
+        if (!(parsedStmt instanceof LogicalPlanAdapter) || scanNodes.size() == 0) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("not a select stmt or no scan node. queryid {}", DebugUtil.printId(queryId));
+            }
+            return CacheMode.NoNeed;
+        }
+        MetricRepo.COUNTER_QUERY_TABLE.increase(1L);
+
+        //Check the last version time of the table
+        List<CacheTable> tblTimeList = Lists.newArrayList();
+        for (int i = 0; i < scanNodes.size(); i++) {
+            ScanNode node = scanNodes.get(i);
+            if (!(node instanceof OlapScanNode)) {
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("query contains non-olap table. queryid {}", DebugUtil.printId(queryId));
+                }
+                return CacheMode.None;
+            }
+            CacheTable cTable = getSelectedPartitionLastUpdateTime((OlapScanNode) node);
+            tblTimeList.add(cTable);
+        }
+        MetricRepo.COUNTER_QUERY_OLAP_TABLE.increase(1L);
+        Collections.sort(tblTimeList);
+        latestTable = tblTimeList.get(0);
+        latestTable.debug();
+
+        if (((LogicalPlanAdapter) parsedStmt).getStatementContext().getParsedStatement().isExplain()) {
+            return CacheMode.NoNeed;
+        }
+
+        String cacheKey = ((LogicalPlanAdapter) parsedStmt).getStatementContext()
+                .getOriginStatement().originStmt.toLowerCase();
+        if (now == 0) {
+            now = nowtime();
+        }
+        if (enableSqlCache()
+                && (now - latestTable.latestTime) >= Config.cache_last_version_interval_second * 1000L) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("TIME:{},{},{}", now, latestTable.latestTime,
+                        Config.cache_last_version_interval_second * 1000);
+            }
+            cache = new SqlCache(this.queryId);
+            ((SqlCache) cache).setCacheInfo(this.latestTable, cacheKey);
+            MetricRepo.COUNTER_CACHE_ADDED_SQL.increase(1L);
+            return CacheMode.Sql;
+        }
+        return CacheMode.None;
+    }
+
     public InternalService.PFetchCacheResult getCacheData() {
-        cacheMode = innerCheckCacheMode(0);
+        if (parsedStmt instanceof LogicalPlanAdapter) {
+            cacheMode = innerCheckCacheModeForNereids(0);
+        } else if (parsedStmt instanceof SelectStmt) {
+            cacheMode = innerCheckCacheMode(0);
+        } else if (parsedStmt instanceof SetOperationStmt) {
+            cacheMode = innerCheckCacheModeSetOperation(0);
+        } else {
+            return null;
+        }
+
         if (cacheMode == CacheMode.NoNeed) {
             return null;
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java
index e46ef82e0a..550c77ac2a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java
@@ -35,13 +35,21 @@ public class SqlCache extends Cache {
         super(queryId, selectStmt);
     }
 
+    public SqlCache(TUniqueId queryId) {
+        super(queryId);
+    }
+
     public void setCacheInfo(CacheAnalyzer.CacheTable latestTable, String allViewExpandStmtListStr) {
         this.latestTable = latestTable;
         this.allViewExpandStmtListStr = allViewExpandStmtListStr;
     }
 
     public String getSqlWithViewStmt() {
-        return selectStmt.toSql() + "|" + allViewExpandStmtListStr;
+        if (selectStmt != null)  {
+            return selectStmt.toSql() + "|" + allViewExpandStmtListStr;
+        } else {
+            return allViewExpandStmtListStr;
+        }
     }
 
     public InternalService.PFetchCacheResult getCacheData(Status status) {
diff --git a/regression-test/data/query_p0/cache/sql_cache.out b/regression-test/data/query_p0/cache/sql_cache.out
new file mode 100644
index 0000000000..1b3729294d
--- /dev/null
+++ b/regression-test/data/query_p0/cache/sql_cache.out
@@ -0,0 +1,50 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql_cache --
+2022-05-28     0
+2022-05-29     0
+2022-05-30     0
+2022-06-01     0
+2022-06-02     0
+
+-- !sql_cache --
+2022-05-28     0
+2022-05-29     0
+2022-05-30     0
+2022-06-01     0
+2022-06-02     0
+
+-- !sql_cache --
+2022-05-28     0
+2022-05-29     0
+2022-05-30     0
+2022-06-01     0
+2022-06-02     0
+
+-- !sql_cache --
+2022-05-28     0
+2022-05-28     0
+
+-- !sql_cache --
+2022-05-28     0
+2022-05-28     0
+
+-- !sql_cache --
+2022-05-28     0
+2022-05-29     0
+2022-05-30     0
+2022-06-01     0
+2022-06-02     0
+
+-- !sql_cache --
+2022-05-28     0
+2022-05-29     0
+2022-05-30     0
+2022-06-01     0
+2022-06-02     0
+
+-- !sql_cache --
+2022-05-28     0
+
+-- !sql_cache --
+2022-05-28     0
+
diff --git a/regression-test/suites/query_p0/cache/partition_cache.groovy b/regression-test/suites/query_p0/cache/partition_cache.groovy
index 435949fd27..9dde76066f 100644
--- a/regression-test/suites/query_p0/cache/partition_cache.groovy
+++ b/regression-test/suites/query_p0/cache/partition_cache.groovy
@@ -20,7 +20,10 @@
 // and modified by Doris.
 
 suite("partition_cache") {
+    // TODO: regression-test does not support check query profile,
+    // so this suite does not check whether cache is used, :)
     def tableName = "test_partition_cache"
+    sql  "ADMIN SET FRONTEND CONFIG ('cache_last_version_interval_second' = '0')"
 
     sql """ DROP TABLE IF EXISTS ${tableName} """
     sql """
diff --git a/regression-test/suites/query_p0/cache/sql_cache.groovy b/regression-test/suites/query_p0/cache/sql_cache.groovy
new file mode 100644
index 0000000000..cccfca4d96
--- /dev/null
+++ b/regression-test/suites/query_p0/cache/sql_cache.groovy
@@ -0,0 +1,215 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/aggregate
+// and modified by Doris.
+
+suite("sql_cache") {
+    // TODO: regression-test does not support check query profile,
+    // so this suite does not check whether cache is used, :)
+    def tableName = "test_sql_cache"
+    sql  "ADMIN SET FRONTEND CONFIG ('cache_last_version_interval_second' = '0')"
+
+    sql """ DROP TABLE IF EXISTS ${tableName} """
+    sql """
+            CREATE TABLE IF NOT EXISTS ${tableName} (
+              `k1` date NOT NULL COMMENT "",
+              `k2` int(11) NOT NULL COMMENT ""
+            ) ENGINE=OLAP
+            DUPLICATE KEY(`k1`, `k2`)
+            COMMENT "OLAP"
+            PARTITION BY RANGE(`k1`)
+            (PARTITION p202205 VALUES [('2022-05-01'), ('2022-06-01')),
+            PARTITION p202206 VALUES [('2022-06-01'), ('2022-07-01')))
+            DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 32
+            PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "in_memory" = "false",
+            "storage_format" = "V2"
+            )
+        """
+
+    sql "sync"
+
+    sql """ INSERT INTO ${tableName} VALUES 
+                    ("2022-05-27",0),
+                    ("2022-05-28",0),
+                    ("2022-05-29",0),
+                    ("2022-05-30",0),
+                    ("2022-06-01",0),
+                    ("2022-06-02",0)
+        """
+
+    qt_sql_cache """
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-06-30' 
+                    group by
+                        k1 
+                    order by
+                        k1;
+                """
+    
+    sql "set enable_sql_cache=true "
+
+    qt_sql_cache """
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-06-30' 
+                    group by
+                        k1 
+                    order by
+                        k1;
+                """
+    qt_sql_cache """
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-06-30' 
+                    group by
+                        k1 
+                    order by
+                        k1;
+                """
+
+    qt_sql_cache """
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-05-28'
+                    group by
+                        k1 
+                    order by
+                        k1
+                    union all
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-05-28'
+                    group by
+                        k1 
+                    order by
+                        k1;
+                """
+    
+    qt_sql_cache """
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-05-28'
+                    group by
+                        k1 
+                    order by
+                        k1
+                    union all
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-05-28'
+                    group by
+                        k1 
+                    order by
+                        k1;
+                """
+
+    sql "SET enable_nereids_planner=true"
+    sql "SET enable_fallback_to_original_planner=false"
+
+    qt_sql_cache """
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-06-30' 
+                    group by
+                        k1 
+                    order by
+                        k1;
+                """
+    qt_sql_cache """
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-06-30' 
+                    group by
+                        k1 
+                    order by
+                        k1;
+                """
+
+    sql 'set default_order_by_limit = 2'
+    sql 'set sql_select_limit = 1'
+
+    qt_sql_cache """
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-06-30' 
+                    group by
+                        k1 
+                    order by
+                        k1;
+                """
+    
+    qt_sql_cache """
+                    select
+                        k1,
+                        sum(k2) as total_pv 
+                    from
+                        ${tableName} 
+                    where
+                        k1 between '2022-05-28' and '2022-06-30' 
+                    group by
+                        k1 
+                    order by
+                        k1;
+                """
+
+    sql  "ADMIN SET FRONTEND CONFIG ('cache_last_version_interval_second' = '900')"
+}

