This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 9c1dcd9a34e8f425ef2b97ae47e7c5d17a292137
Author: Mingyu Chen <morning...@163.com>
AuthorDate: Fri Aug 18 18:31:01 2023 +0800

    [fix](catalog) fix hive partition prune bug on nereids (#23026)
---
 .../apache/doris/catalog/ListPartitionItem.java    |  11 ++
 .../org/apache/doris/catalog/PartitionKey.java     |  16 +++
 .../doris/catalog/external/HMSExternalTable.java   |  10 ++
 .../glue/translator/PhysicalPlanTranslator.java    |  11 +-
 .../rules/HiveDefaultPartitionEvaluator.java       |  63 ++++++++
 .../rules/expression/rules/PartitionPruner.java    |  46 ++++--
 .../LogicalFileScanToPhysicalFileScan.java         |   3 +-
 .../rules/rewrite/PruneFileScanPartition.java      |  70 ++++++++-
 .../rules/rewrite/PruneOlapScanPartition.java      |   4 +-
 .../trees/plans/logical/LogicalFileScan.java       |  68 +++++++--
 .../trees/plans/physical/PhysicalFileScan.java     |  33 +++--
 .../trees/plans/visitor/DefaultPlanRewriter.java   |   8 +-
 .../doris/planner/external/FileScanNode.java       |   1 +
 .../doris/planner/external/HiveScanNode.java       |  85 ++++++-----
 .../hive/test_hive_default_partition.out           |  40 ++++++
 .../hive/test_hive_default_partition.groovy        | 160 +++++++++++++++++++++
 .../hive/test_select_count_optimize.groovy         |   4 +-
 17 files changed, 552 insertions(+), 81 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionItem.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionItem.java
index 04577eb306..2c4371a755 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionItem.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionItem.java
@@ -150,4 +150,15 @@ public class ListPartitionItem extends PartitionItem {
 
         return sb.toString();
     }
+
+    // If any partition key is the Hive default partition, return true.
+    // Only used for Hive tables.
+    public boolean isHiveDefaultPartition() {
+        for (PartitionKey partitionKey : partitionKeys) {
+            if (partitionKey.isHiveDefaultPartition()) {
+                return true;
+            }
+        }
+        return false;
+    }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java
index 70d667a519..f9920701e3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java
@@ -28,6 +28,7 @@ import org.apache.doris.analysis.StringLiteral;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
+import org.apache.doris.datasource.hive.HiveMetaStoreCache;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -511,4 +512,19 @@ public class PartitionKey implements Comparable<PartitionKey>, Writable {
             return result;
         }
     }
+
+    // If any of the partition values is HIVE_DEFAULT_PARTITION,
+    // return true to indicate that this is a Hive default partition.
+    public boolean isHiveDefaultPartition() {
+        for (LiteralExpr literalExpr : keys) {
+            if (!(literalExpr instanceof StringLiteral)) {
+                continue;
+            }
+            StringLiteral key = (StringLiteral) literalExpr;
+            if (key.getValue().equals(HiveMetaStoreCache.HIVE_DEFAULT_PARTITION)) {
+                return true;
+            }
+        }
+        return false;
+    }
 }
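
Background for the two helpers above: Hive stores rows whose partition value is NULL under a sentinel partition name, which Doris exposes as HiveMetaStoreCache.HIVE_DEFAULT_PARTITION. A minimal, self-contained sketch of the same detection pattern (toy code, not Doris classes; the sentinel string is the value Hive conventionally uses):

    import java.util.Arrays;
    import java.util.List;

    // Toy sketch mirroring PartitionKey.isHiveDefaultPartition(): any value
    // equal to the sentinel marks the whole key as the Hive default partition.
    public class HiveDefaultPartitionCheck {
        // Assumed sentinel; Doris reads it from HiveMetaStoreCache.HIVE_DEFAULT_PARTITION.
        static final String HIVE_DEFAULT_PARTITION = "__HIVE_DEFAULT_PARTITION__";

        static boolean isHiveDefaultPartition(List<String> partitionValues) {
            return partitionValues.stream().anyMatch(HIVE_DEFAULT_PARTITION::equals);
        }

        public static void main(String[] args) {
            System.out.println(isHiveDefaultPartition(Arrays.asList("2023-08-17")));                        // false
            System.out.println(isHiveDefaultPartition(Arrays.asList("__HIVE_DEFAULT_PARTITION__", "one"))); // true
        }
    }
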
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java
index beb1917b05..0224280366 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java
@@ -21,6 +21,8 @@ import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.Env;
 import org.apache.doris.catalog.HiveMetaStoreClientHelper;
 import org.apache.doris.catalog.HudiUtils;
+import org.apache.doris.catalog.PrimitiveType;
+import org.apache.doris.catalog.ScalarType;
 import org.apache.doris.catalog.Type;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.datasource.HMSExternalCatalog;
@@ -455,6 +457,13 @@ public class HMSExternalTable extends ExternalTable {
             // Do not use "getColumn()", which will cause dead loop
             for (Column column : schema) {
                 if (partitionKey.equals(column.getName())) {
+                    // For partition columns, if the column is of string type, change it to varchar(65535)
+                    // to be the same as Doris managed tables.
+                    // This is to avoid unexpected behavior such as different partition pruning results
+                    // between Doris managed tables and external tables.
+                    if (column.getType().getPrimitiveType() == PrimitiveType.STRING) {
+                        column.setType(ScalarType.createVarcharType(ScalarType.MAX_VARCHAR_LENGTH));
+                    }
                     partitionColumns.add(column);
                     break;
                 }
@@ -601,3 +610,4 @@ public class HMSExternalTable extends ExternalTable {
     }
 }
 
+
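
The type change above matters because partition pruning compares predicate literals against the partition column's type; a Hive string partition column must present the same varchar type a Doris managed table would, or the two can prune differently. A toy sketch of the normalization (illustrative types, not Doris's Column/ScalarType API; 65535 is the length named in the patch comment):

    // Toy sketch: rewrite string-typed partition columns to a bounded varchar
    // so external and managed tables prune with identical column types.
    final class ToyColumn {
        String name;
        String type; // e.g. "string" or "varchar(65535)"
        ToyColumn(String name, String type) { this.name = name; this.type = type; }
    }

    final class PartitionColumnNormalizer {
        static final int MAX_VARCHAR_LENGTH = 65535; // assumed bound, per the patch comment

        static void normalize(ToyColumn column) {
            if ("string".equals(column.type)) {
                column.type = "varchar(" + MAX_VARCHAR_LENGTH + ")";
            }
        }
    }
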
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java
index ed9b30f93e..95698d430a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java
@@ -408,6 +408,7 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla
                     break;
                 case HIVE:
                     scanNode = new HiveScanNode(context.nextPlanNodeId(), tupleDescriptor, false);
+                    ((HiveScanNode) scanNode).setSelectedPartitions(fileScan.getSelectedPartitions());
                     break;
                 default:
                     throw new RuntimeException("do not support DLA type " + 
((HMSExternalTable) table).getDlaType());
@@ -422,6 +423,7 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla
             throw new RuntimeException("do not support table type " + 
table.getType());
         }
         
scanNode.addConjuncts(translateToLegacyConjuncts(fileScan.getConjuncts()));
+
         TableName tableName = new TableName(null, "", "");
         TableRef ref = new TableRef(tableName, null, null);
         BaseTableRef tableRef = new BaseTableRef(ref, table, tableName);
@@ -776,12 +778,13 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla
     @Override
     public PlanFragment visitPhysicalStorageLayerAggregate(
             PhysicalStorageLayerAggregate storageLayerAggregate, PlanTranslatorContext context) {
-        Preconditions.checkState(storageLayerAggregate.getRelation() instanceof PhysicalOlapScan,
-                "PhysicalStorageLayerAggregate only support PhysicalOlapScan: "
+        Preconditions.checkState((storageLayerAggregate.getRelation() instanceof PhysicalOlapScan
+                        || storageLayerAggregate.getRelation() instanceof PhysicalFileScan),
+                "PhysicalStorageLayerAggregate only support PhysicalOlapScan and PhysicalFileScan: "
                         + storageLayerAggregate.getRelation().getClass().getName());
         PlanFragment planFragment = storageLayerAggregate.getRelation().accept(this, context);
 
-        OlapScanNode olapScanNode = (OlapScanNode) planFragment.getPlanRoot();
+        ScanNode scanNode = (ScanNode) planFragment.getPlanRoot();
         TPushAggOp pushAggOp;
         switch (storageLayerAggregate.getAggOp()) {
             case COUNT:
@@ -800,7 +803,7 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla
                 throw new AnalysisException("Unsupported storage layer aggregate: "
                         + storageLayerAggregate.getAggOp());
         }
-        olapScanNode.setPushDownAggNoGrouping(pushAggOp);
+        scanNode.setPushDownAggNoGrouping(pushAggOp);
         updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), storageLayerAggregate);
         return planFragment;
     }
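
The translator change above widens storage-layer aggregate pushdown from OLAP scans to file scans by dispatching through the shared scan base class instead of downcasting to OlapScanNode. A self-contained sketch of the pattern (toy class names, not Doris's planner API):

    // Toy sketch: push the aggregate hint through a common base class so any
    // scan subclass (OLAP or file) can receive it.
    abstract class ToyScanNode {
        String pushDownAgg = "NONE";
        void setPushDownAggNoGrouping(String op) { this.pushDownAgg = op; }
    }

    class ToyOlapScanNode extends ToyScanNode { }
    class ToyFileScanNode extends ToyScanNode { }

    public class PushAggDemo {
        public static void main(String[] args) {
            ToyScanNode scan = new ToyFileScanNode(); // previously only the OLAP subclass was accepted
            scan.setPushDownAggNoGrouping("COUNT");
            System.out.println(scan.pushDownAgg);     // COUNT
        }
    }
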
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/HiveDefaultPartitionEvaluator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/HiveDefaultPartitionEvaluator.java
new file mode 100644
index 0000000000..1249dd0577
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/HiveDefaultPartitionEvaluator.java
@@ -0,0 +1,63 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.nereids.rules.expression.rules;
+
+import org.apache.doris.nereids.trees.expressions.Expression;
+import org.apache.doris.nereids.trees.expressions.Slot;
+import org.apache.doris.nereids.trees.expressions.literal.BooleanLiteral;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Represents a hive default partition.
+ * For any partition predicate, the evaluate() will always return true.
+ */
+public class HiveDefaultPartitionEvaluator implements OnePartitionEvaluator {
+    private long id;
+    private List<Slot> partitionSlots;
+
+    public HiveDefaultPartitionEvaluator(long id, List<Slot> partitionSlots) {
+        this.id = id;
+        this.partitionSlots = partitionSlots;
+    }
+
+    @Override
+    public long getPartitionId() {
+        return id;
+    }
+
+    @Override
+    public List<Map<Slot, PartitionSlotInput>> getOnePartitionInputs() {
+        // this is mocked result.
+        PartitionSlotInput partitionSlotInput = new PartitionSlotInput(BooleanLiteral.TRUE, Maps.newHashMap());
+        Map<Slot, PartitionSlotInput> map = Maps.newHashMap();
+        map.put(partitionSlots.get(0), partitionSlotInput);
+        List<Map<Slot, PartitionSlotInput>> list = Lists.newArrayList();
+        list.add(map);
+        return list;
+    }
+
+    @Override
+    public Expression evaluate(Expression expression, Map<Slot, PartitionSlotInput> currentInputs) {
+        return BooleanLiteral.TRUE;
+    }
+}
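
Why evaluate() always returns TRUE: the default partition holds the rows whose partition value is NULL, so no partition predicate can safely rule it out; the evaluator conservatively keeps it, which is why the regression output below still shows partition=3/3 even for a predicate like "part1 is not null". A toy model of that contract (not the real OnePartitionEvaluator API):

    import java.util.function.Predicate;

    // Toy sketch: normal partitions are tested against the predicate; the
    // default partition conservatively survives every predicate.
    interface ToyPartitionEvaluator {
        boolean mayMatch(Predicate<String> predicate, String partitionValue);
    }

    class ToyListPartitionEvaluator implements ToyPartitionEvaluator {
        public boolean mayMatch(Predicate<String> predicate, String partitionValue) {
            return predicate.test(partitionValue); // normal evaluation
        }
    }

    class ToyDefaultPartitionEvaluator implements ToyPartitionEvaluator {
        public boolean mayMatch(Predicate<String> predicate, String partitionValue) {
            return true; // partition value is NULL: never prune
        }
    }
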
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/PartitionPruner.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/PartitionPruner.java
index 7a53427f62..7e89a97915 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/PartitionPruner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/PartitionPruner.java
@@ -33,11 +33,19 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 
-/** PartitionPruner */
+/**
+ * PartitionPruner
+ */
 public class PartitionPruner {
     private List<OnePartitionEvaluator> partitions;
     private Expression partitionPredicate;
 
+    /** Different types of tables may have different partition prune behavior. */
+    public enum PartitionTableType {
+        OLAP,
+        HIVE
+    }
+
     private PartitionPruner(List<OnePartitionEvaluator> partitions, Expression partitionPredicate) {
         this.partitions = Objects.requireNonNull(partitions, "partitions cannot be null");
         this.partitionPredicate = Objects.requireNonNull(partitionPredicate, "partitionPredicate cannot be null");
@@ -50,29 +58,47 @@ public class PartitionPruner {
                 .collect(ImmutableList.toImmutableList());
     }
 
-    /** prune partition */
+    /**
+     * prune partition with `partitionInfo` as parameter.
+     */
     public static List<Long> prune(List<Slot> partitionSlots, Expression partitionPredicate,
-            PartitionInfo partitionInfo, CascadesContext cascadesContext) {
+            PartitionInfo partitionInfo, CascadesContext cascadesContext, PartitionTableType partitionTableType) {
+        return prune(partitionSlots, partitionPredicate, partitionInfo.getIdToItem(false), cascadesContext,
+                partitionTableType);
+    }
+
+    /**
+     * prune partition with `idToPartitions` as parameter.
+     */
+    public static List<Long> prune(List<Slot> partitionSlots, Expression partitionPredicate,
+            Map<Long, PartitionItem> idToPartitions, CascadesContext cascadesContext,
+            PartitionTableType partitionTableType) {
         partitionPredicate = TryEliminateUninterestedPredicates.rewrite(
                 partitionPredicate, ImmutableSet.copyOf(partitionSlots), cascadesContext);
 
-        Map<Long, PartitionItem> idToPartitions = partitionInfo.getIdToItem(false);
-
         List<OnePartitionEvaluator> evaluators = idToPartitions.entrySet()
                 .stream()
-                .map(kv -> toPartitionEvaluator(kv.getKey(), kv.getValue(), partitionSlots, cascadesContext))
+                .map(kv -> toPartitionEvaluator(kv.getKey(), kv.getValue(), partitionSlots, cascadesContext,
+                        partitionTableType))
                 .collect(ImmutableList.toImmutableList());
 
         PartitionPruner partitionPruner = new PartitionPruner(evaluators, partitionPredicate);
         return partitionPruner.prune();
     }
 
-    /** convert partition item to partition evaluator */
+    /**
+     * convert partition item to partition evaluator
+     */
     public static final OnePartitionEvaluator toPartitionEvaluator(long id, PartitionItem partitionItem,
-            List<Slot> partitionSlots, CascadesContext cascadesContext) {
+            List<Slot> partitionSlots, CascadesContext cascadesContext, PartitionTableType partitionTableType) {
         if (partitionItem instanceof ListPartitionItem) {
-            return new OneListPartitionEvaluator(
-                    id, partitionSlots, (ListPartitionItem) partitionItem, cascadesContext);
+            if (partitionTableType == PartitionTableType.HIVE
+                    && ((ListPartitionItem) partitionItem).isHiveDefaultPartition()) {
+                return new HiveDefaultPartitionEvaluator(id, partitionSlots);
+            } else {
+                return new OneListPartitionEvaluator(
+                        id, partitionSlots, (ListPartitionItem) partitionItem, cascadesContext);
+            }
         } else if (partitionItem instanceof RangePartitionItem) {
             return new OneRangePartitionEvaluator(
                     id, partitionSlots, (RangePartitionItem) partitionItem, cascadesContext);
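
The dispatch added to toPartitionEvaluator() keys on both the new PartitionTableType and the default-partition check, so only Hive list partitions containing the sentinel get the always-true evaluator. Condensed into a runnable toy (strings stand in for the evaluator classes):

    // Toy sketch of the evaluator dispatch above.
    public class EvaluatorDispatchDemo {
        enum PartitionTableType { OLAP, HIVE }

        static String toPartitionEvaluator(PartitionTableType type, boolean isHiveDefaultPartition) {
            if (type == PartitionTableType.HIVE && isHiveDefaultPartition) {
                return "HiveDefaultPartitionEvaluator"; // always-true evaluator
            }
            return "OneListPartitionEvaluator";         // normal list evaluation
        }

        public static void main(String[] args) {
            System.out.println(toPartitionEvaluator(PartitionTableType.HIVE, true));  // HiveDefaultPartitionEvaluator
            System.out.println(toPartitionEvaluator(PartitionTableType.OLAP, true));  // OneListPartitionEvaluator
        }
    }
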
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/LogicalFileScanToPhysicalFileScan.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/LogicalFileScanToPhysicalFileScan.java
index c78d8174ac..f53ce1553a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/LogicalFileScanToPhysicalFileScan.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/LogicalFileScanToPhysicalFileScan.java
@@ -38,7 +38,8 @@ public class LogicalFileScanToPhysicalFileScan extends OneImplementationRuleFact
                     DistributionSpecAny.INSTANCE,
                     Optional.empty(),
                     fileScan.getLogicalProperties(),
-                    fileScan.getConjuncts())
+                    fileScan.getConjuncts(),
+                    fileScan.getSelectedPartitions())
         ).toRule(RuleType.LOGICAL_FILE_SCAN_TO_PHYSICAL_FILE_SCAN_RULE);
     }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneFileScanPartition.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneFileScanPartition.java
index 85c943f12a..4e35b8d82b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneFileScanPartition.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneFileScanPartition.java
@@ -17,26 +17,90 @@
 
 package org.apache.doris.nereids.rules.rewrite;
 
+import org.apache.doris.catalog.Env;
+import org.apache.doris.catalog.PartitionItem;
+import org.apache.doris.catalog.external.ExternalTable;
+import org.apache.doris.catalog.external.HMSExternalTable;
+import org.apache.doris.catalog.external.HMSExternalTable.DLAType;
+import org.apache.doris.datasource.HMSExternalCatalog;
+import org.apache.doris.datasource.hive.HiveMetaStoreCache;
+import org.apache.doris.nereids.CascadesContext;
 import org.apache.doris.nereids.rules.Rule;
 import org.apache.doris.nereids.rules.RuleType;
+import org.apache.doris.nereids.rules.expression.rules.PartitionPruner;
+import org.apache.doris.nereids.rules.expression.rules.PartitionPruner.PartitionTableType;
+import org.apache.doris.nereids.trees.expressions.Slot;
 import org.apache.doris.nereids.trees.plans.logical.LogicalFileScan;
+import org.apache.doris.nereids.trees.plans.logical.LogicalFileScan.SelectedPartitions;
 import org.apache.doris.nereids.trees.plans.logical.LogicalFilter;
 
+import com.google.common.collect.Maps;
+import org.apache.commons.collections.CollectionUtils;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
 /**
  * Used to prune partition of file scan. For different external tables, there is no unified partition prune method.
  * For example, Hive is using hive meta store api to get partitions. Iceberg is using Iceberg api to get FileScanTask,
- * which doesn't return a partition list. So, here we simply pass the conjucts to LogicalFileScan, so that different
+ * which doesn't return a partition list.
+ * So here we only support partition pruning for Hive tables.
+ * For other external tables, simply pass the conjuncts to LogicalFileScan, so that different
  * external file ScanNode could do the partition filter by themselves.
  */
 public class PruneFileScanPartition extends OneRewriteRuleFactory {
 
     @Override
     public Rule build() {
-        return logicalFilter(logicalFileScan()).thenApply(ctx -> {
+        return logicalFilter(logicalFileScan()).when(p -> p.child().getSelectedPartitions() == null).thenApply(ctx -> {
             LogicalFilter<LogicalFileScan> filter = ctx.root;
             LogicalFileScan scan = filter.child();
-            LogicalFileScan rewrittenScan = scan.withConjuncts(filter.getConjuncts());
+            ExternalTable tbl = scan.getTable();
+            SelectedPartitions selectedPartitions = new SelectedPartitions(0, Maps.newHashMap(), false);
+
+            // TODO(cmy): support other external table
+            if (tbl instanceof HMSExternalTable && ((HMSExternalTable) tbl).getDlaType() == DLAType.HIVE) {
+                HMSExternalTable hiveTbl = (HMSExternalTable) tbl;
+                selectedPartitions = pruneHivePartitions(hiveTbl, filter, scan, ctx.cascadesContext);
+            }
+
+            LogicalFileScan rewrittenScan = scan.withConjuncts(filter.getConjuncts())
+                    .withSelectedPartitions(selectedPartitions);
             return new LogicalFilter<>(filter.getConjuncts(), rewrittenScan);
         }).toRule(RuleType.FILE_SCAN_PARTITION_PRUNE);
     }
+
+    private SelectedPartitions pruneHivePartitions(HMSExternalTable hiveTbl,
+            LogicalFilter<LogicalFileScan> filter, LogicalFileScan scan, CascadesContext ctx) {
+        Map<Long, PartitionItem> selectedPartitionItems = Maps.newHashMap();
+        if (CollectionUtils.isEmpty(hiveTbl.getPartitionColumns())) {
+            // non-partitioned table: return a dummy SelectedPartitions,
+            // which will be handled in HiveScanNode
+            return new SelectedPartitions(1, Maps.newHashMap(), false);
+        }
+        Map<String, Slot> scanOutput = scan.getOutput()
+                .stream()
+                .collect(Collectors.toMap(slot -> slot.getName().toLowerCase(), Function.identity()));
+
+        List<Slot> partitionSlots = hiveTbl.getPartitionColumns()
+                .stream()
+                .map(column -> scanOutput.get(column.getName().toLowerCase()))
+                .collect(Collectors.toList());
+
+        HiveMetaStoreCache cache = Env.getCurrentEnv().getExtMetaCacheMgr()
+                .getMetaStoreCache((HMSExternalCatalog) hiveTbl.getCatalog());
+        HiveMetaStoreCache.HivePartitionValues hivePartitionValues = cache.getPartitionValues(
+                hiveTbl.getDbName(), hiveTbl.getName(), hiveTbl.getPartitionColumnTypes());
+        Map<Long, PartitionItem> idToPartitionItem = hivePartitionValues.getIdToPartitionItem();
+        List<Long> prunedPartitions = new ArrayList<>(PartitionPruner.prune(
+                partitionSlots, filter.getPredicate(), idToPartitionItem, ctx, PartitionTableType.HIVE));
+
+        for (Long id : prunedPartitions) {
+            selectedPartitionItems.put(id, idToPartitionItem.get(id));
+        }
+        return new SelectedPartitions(idToPartitionItem.size(), selectedPartitionItems, true);
+    }
 }
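
Note how SelectedPartitions doubles as a rule guard: LogicalFileScan starts with a null SelectedPartitions, the .when(...) condition fires the rule only once, and the result distinguishes "pruning not applicable" from "pruned". A toy restatement of the three states this rule produces (field names follow the patch; not Doris's actual class):

    import java.util.Collections;
    import java.util.Map;

    // Toy sketch of the SelectedPartitions states built in PruneFileScanPartition.
    final class ToySelectedPartitions {
        final long totalPartitionNum;
        final Map<Long, Object> selectedPartitions;
        final boolean isPartitionPruned;

        ToySelectedPartitions(long total, Map<Long, Object> selected, boolean pruned) {
            this.totalPartitionNum = total;
            this.selectedPartitions = selected;
            this.isPartitionPruned = pruned;
        }

        static ToySelectedPartitions notHive() {        // non-Hive external table: no Nereids pruning
            return new ToySelectedPartitions(0, Collections.emptyMap(), false);
        }

        static ToySelectedPartitions nonPartitioned() { // handled later in HiveScanNode
            return new ToySelectedPartitions(1, Collections.emptyMap(), false);
        }

        static ToySelectedPartitions pruned(long total, Map<Long, Object> selected) {
            return new ToySelectedPartitions(total, selected, true);
        }
    }
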
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanPartition.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanPartition.java
index 7c3297a1b7..4c10f339bf 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanPartition.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanPartition.java
@@ -22,6 +22,7 @@ import org.apache.doris.catalog.PartitionInfo;
 import org.apache.doris.nereids.rules.Rule;
 import org.apache.doris.nereids.rules.RuleType;
 import org.apache.doris.nereids.rules.expression.rules.PartitionPruner;
+import org.apache.doris.nereids.rules.expression.rules.PartitionPruner.PartitionTableType;
 import org.apache.doris.nereids.trees.expressions.Slot;
 import org.apache.doris.nereids.trees.plans.logical.LogicalFilter;
 import org.apache.doris.nereids.trees.plans.logical.LogicalOlapScan;
@@ -65,7 +66,8 @@ public class PruneOlapScanPartition extends OneRewriteRuleFactory {
                     .collect(Collectors.toList());
 
             List<Long> prunedPartitions = new ArrayList<>(PartitionPruner.prune(
-                    partitionSlots, filter.getPredicate(), partitionInfo, ctx.cascadesContext));
+                    partitionSlots, filter.getPredicate(), partitionInfo, ctx.cascadesContext,
+                    PartitionTableType.OLAP));
             List<Long> manuallySpecifiedPartitions = scan.getManuallySpecifiedPartitions();
             if (!CollectionUtils.isEmpty(manuallySpecifiedPartitions)) {
                 prunedPartitions.retainAll(manuallySpecifiedPartitions);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalFileScan.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalFileScan.java
index 3d4da53a89..e821eedbc0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalFileScan.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalFileScan.java
@@ -17,6 +17,7 @@
 
 package org.apache.doris.nereids.trees.plans.logical;
 
+import org.apache.doris.catalog.PartitionItem;
 import org.apache.doris.catalog.external.ExternalTable;
 import org.apache.doris.nereids.memo.GroupExpression;
 import org.apache.doris.nereids.properties.LogicalProperties;
@@ -28,9 +29,12 @@ import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor;
 import org.apache.doris.nereids.util.Utils;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
+import lombok.Getter;
 
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
@@ -40,22 +44,27 @@ import java.util.Set;
  */
 public class LogicalFileScan extends LogicalCatalogRelation {
 
-    // TODO remove this conjuncts.
+    // TODO: remove these conjuncts when the old planner is removed
+    @Getter
     private final Set<Expression> conjuncts;
+    @Getter
+    private final SelectedPartitions selectedPartitions;
 
     /**
      * Constructor for LogicalFileScan.
      */
     public LogicalFileScan(RelationId id, ExternalTable table, List<String> qualifier,
             Optional<GroupExpression> groupExpression, Optional<LogicalProperties> logicalProperties,
-            Set<Expression> conjuncts) {
+            Set<Expression> conjuncts, SelectedPartitions selectedPartitions) {
         super(id, PlanType.LOGICAL_FILE_SCAN, table, qualifier,
                 groupExpression, logicalProperties);
         this.conjuncts = conjuncts;
+        this.selectedPartitions = selectedPartitions;
     }
 
     public LogicalFileScan(RelationId id, ExternalTable table, List<String> qualifier) {
-        this(id, table, qualifier, Optional.empty(), Optional.empty(), Sets.newHashSet());
+        this(id, table, qualifier, Optional.empty(), Optional.empty(),
+                Sets.newHashSet(), null);
     }
 
     @Override
@@ -76,19 +85,24 @@ public class LogicalFileScan extends LogicalCatalogRelation {
     @Override
     public LogicalFileScan withGroupExpression(Optional<GroupExpression> groupExpression) {
         return new LogicalFileScan(relationId, (ExternalTable) table, qualifier, groupExpression,
-                Optional.of(getLogicalProperties()), conjuncts);
+                Optional.of(getLogicalProperties()), conjuncts, selectedPartitions);
     }
 
     @Override
     public Plan withGroupExprLogicalPropChildren(Optional<GroupExpression> groupExpression,
             Optional<LogicalProperties> logicalProperties, List<Plan> children) {
         return new LogicalFileScan(relationId, (ExternalTable) table, qualifier,
-                groupExpression, logicalProperties, conjuncts);
+                groupExpression, logicalProperties, conjuncts, selectedPartitions);
     }
 
     public LogicalFileScan withConjuncts(Set<Expression> conjuncts) {
         return new LogicalFileScan(relationId, (ExternalTable) table, qualifier, groupExpression,
-                Optional.of(getLogicalProperties()), conjuncts);
+                Optional.of(getLogicalProperties()), conjuncts, selectedPartitions);
+    }
+
+    public LogicalFileScan withSelectedPartitions(SelectedPartitions selectedPartitions) {
+        return new LogicalFileScan(relationId, (ExternalTable) table, qualifier, groupExpression,
+                Optional.of(getLogicalProperties()), conjuncts, selectedPartitions);
     }
 
     @Override
@@ -96,12 +110,44 @@ public class LogicalFileScan extends LogicalCatalogRelation {
         return visitor.visitLogicalFileScan(this, context);
     }
 
-    public Set<Expression> getConjuncts() {
-        return this.conjuncts;
-    }
-
     @Override
     public boolean equals(Object o) {
-        return super.equals(o) && Objects.equals(conjuncts, ((LogicalFileScan) o).conjuncts);
+        return super.equals(o) && Objects.equals(conjuncts, ((LogicalFileScan) o).conjuncts)
+                && Objects.equals(selectedPartitions, ((LogicalFileScan) o).selectedPartitions);
+    }
+
+    /**
+     * SelectedPartitions contains the selected partitions and the total partition number.
+     * Mainly for hive table partition pruning.
+     */
+    public static class SelectedPartitions {
+        /** total partition number */
+        public long totalPartitionNum = 0;
+        /** partition id -> partition item */
+        public Map<Long, PartitionItem> selectedPartitions;
+        /**
+         * true means the result is after partition pruning
+         * false means the partition pruning is not processed.
+         */
+        public boolean isPartitionPruned;
+
+        /**
+         * Constructor for SelectedPartitions.
+         */
+        public SelectedPartitions(long totalPartitionNum, Map<Long, PartitionItem> selectedPartitions,
+                boolean isPartitionPruned) {
+            this.totalPartitionNum = totalPartitionNum;
+            this.selectedPartitions = selectedPartitions;
+            this.isPartitionPruned = isPartitionPruned;
+            if (this.selectedPartitions == null) {
+                this.selectedPartitions = Maps.newHashMap();
+            }
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return isPartitionPruned == ((SelectedPartitions) o).isPartitionPruned && Objects.equals(
+                    selectedPartitions.keySet(), ((SelectedPartitions) o).selectedPartitions.keySet());
+        }
     }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalFileScan.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalFileScan.java
index b5af33d107..9088217cad 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalFileScan.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalFileScan.java
@@ -26,10 +26,13 @@ import org.apache.doris.nereids.trees.expressions.Expression;
 import org.apache.doris.nereids.trees.plans.Plan;
 import org.apache.doris.nereids.trees.plans.PlanType;
 import org.apache.doris.nereids.trees.plans.RelationId;
+import org.apache.doris.nereids.trees.plans.logical.LogicalFileScan.SelectedPartitions;
 import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor;
 import org.apache.doris.nereids.util.Utils;
 import org.apache.doris.statistics.Statistics;
 
+import lombok.Getter;
+
 import java.util.List;
 import java.util.Optional;
 import java.util.Set;
@@ -40,17 +43,22 @@ import java.util.Set;
 public class PhysicalFileScan extends PhysicalCatalogRelation {
 
     private final DistributionSpec distributionSpec;
+    @Getter
     private final Set<Expression> conjuncts;
+    @Getter
+    private final SelectedPartitions selectedPartitions;
 
     /**
      * Constructor for PhysicalFileScan.
      */
     public PhysicalFileScan(RelationId id, ExternalTable table, List<String> qualifier,
             DistributionSpec distributionSpec, Optional<GroupExpression> groupExpression,
-            LogicalProperties logicalProperties, Set<Expression> conjuncts) {
+            LogicalProperties logicalProperties, Set<Expression> conjuncts,
+            SelectedPartitions selectedPartitions) {
         super(id, PlanType.PHYSICAL_FILE_SCAN, table, qualifier, groupExpression, logicalProperties);
         this.distributionSpec = distributionSpec;
         this.conjuncts = conjuncts;
+        this.selectedPartitions = selectedPartitions;
     }
 
     /**
@@ -59,19 +67,23 @@ public class PhysicalFileScan extends PhysicalCatalogRelation {
     public PhysicalFileScan(RelationId id, ExternalTable table, List<String> qualifier,
             DistributionSpec distributionSpec, Optional<GroupExpression> groupExpression,
             LogicalProperties logicalProperties, PhysicalProperties physicalProperties,
-            Statistics statistics, Set<Expression> conjuncts) {
+            Statistics statistics, Set<Expression> conjuncts, SelectedPartitions selectedPartitions) {
         super(id, PlanType.PHYSICAL_FILE_SCAN, table, qualifier, groupExpression, logicalProperties,
                 physicalProperties, statistics);
         this.distributionSpec = distributionSpec;
         this.conjuncts = conjuncts;
+        this.selectedPartitions = selectedPartitions;
     }
 
     @Override
     public String toString() {
         return Utils.toSqlString("PhysicalFileScan",
-            "qualified", Utils.qualifiedName(qualifier, table.getName()),
-            "output", getOutput(),
-            "stats", statistics
+                "qualified", Utils.qualifiedName(qualifier, table.getName()),
+                "output", getOutput(),
+                "stats", statistics,
+                "conjuncts", conjuncts,
+                "selected partitions num",
+                selectedPartitions.isPartitionPruned ? selectedPartitions.selectedPartitions.size() : "unknown"
         );
     }
 
@@ -83,14 +95,14 @@ public class PhysicalFileScan extends PhysicalCatalogRelation {
     @Override
     public PhysicalFileScan withGroupExpression(Optional<GroupExpression> groupExpression) {
         return new PhysicalFileScan(relationId, getTable(), qualifier, distributionSpec,
-            groupExpression, getLogicalProperties(), conjuncts);
+                groupExpression, getLogicalProperties(), conjuncts, selectedPartitions);
     }
 
     @Override
     public Plan withGroupExprLogicalPropChildren(Optional<GroupExpression> groupExpression,
             Optional<LogicalProperties> logicalProperties, List<Plan> children) {
         return new PhysicalFileScan(relationId, getTable(), qualifier, distributionSpec,
-                groupExpression, logicalProperties.get(), conjuncts);
+                groupExpression, logicalProperties.get(), conjuncts, selectedPartitions);
     }
 
     @Override
@@ -102,10 +114,7 @@ public class PhysicalFileScan extends PhysicalCatalogRelation {
     public PhysicalFileScan withPhysicalPropertiesAndStats(PhysicalProperties physicalProperties,
                                                        Statistics statistics) {
         return new PhysicalFileScan(relationId, getTable(), qualifier, distributionSpec,
-                groupExpression, getLogicalProperties(), physicalProperties, statistics, conjuncts);
-    }
-
-    public Set<Expression> getConjuncts() {
-        return this.conjuncts;
+                groupExpression, getLogicalProperties(), physicalProperties, statistics, conjuncts,
+                selectedPartitions);
     }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/DefaultPlanRewriter.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/DefaultPlanRewriter.java
index 1dd90772f1..e73d279545 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/DefaultPlanRewriter.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/DefaultPlanRewriter.java
@@ -37,9 +37,11 @@ public abstract class DefaultPlanRewriter<C> extends PlanVisitor<Plan, C> {
 
     @Override
     public Plan visitPhysicalStorageLayerAggregate(PhysicalStorageLayerAggregate storageLayerAggregate, C context) {
-        PhysicalOlapScan olapScan = (PhysicalOlapScan) storageLayerAggregate.getRelation().accept(this, context);
-        if (olapScan != storageLayerAggregate.getRelation()) {
-            return storageLayerAggregate.withPhysicalOlapScan(olapScan);
+        if (storageLayerAggregate.getRelation() instanceof PhysicalOlapScan) {
+            PhysicalOlapScan olapScan = (PhysicalOlapScan) storageLayerAggregate.getRelation().accept(this, context);
+            if (olapScan != storageLayerAggregate.getRelation()) {
+                return storageLayerAggregate.withPhysicalOlapScan(olapScan);
+            }
         }
         return storageLayerAggregate;
     }
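
Without this guard, the unconditional downcast would throw a ClassCastException now that PhysicalStorageLayerAggregate may hold a PhysicalFileScan. A minimal sketch of the failure mode being avoided (toy class names):

    // Toy sketch: guard a subclass-specific rewrite with instanceof so other
    // relation types pass through untouched instead of failing a downcast.
    class ToyRelation { }
    class ToyOlapScan extends ToyRelation { }
    class ToyFileScan extends ToyRelation { }

    public class RewriteGuardDemo {
        static ToyRelation rewrite(ToyRelation relation) {
            if (relation instanceof ToyOlapScan) {
                return new ToyOlapScan(); // OLAP-specific rewrite path
            }
            return relation;              // leave other relations untouched
        }

        public static void main(String[] args) {
            System.out.println(rewrite(new ToyFileScan()).getClass().getSimpleName()); // ToyFileScan
        }
    }
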
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileScanNode.java
index 63dd6d452a..662aa939ee 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileScanNode.java
@@ -158,6 +158,7 @@ public abstract class FileScanNode extends ExternalScanNode {
             output.append(String.format("avgRowSize=%s, ", avgRowSize));
         }
         output.append(String.format("numNodes=%s", numNodes)).append("\n");
+        output.append(prefix).append(String.format("pushdown agg=%s", 
pushDownAggNoGroupingOp)).append("\n");
 
         return output.toString();
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java
index 61a571358a..7178a585ff 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java
@@ -43,6 +43,7 @@ import org.apache.doris.datasource.hive.HiveMetaStoreCache.FileCacheValue;
 import org.apache.doris.datasource.hive.HivePartition;
 import org.apache.doris.datasource.hive.HiveTransaction;
 import org.apache.doris.datasource.hive.HiveVersionUtil;
+import org.apache.doris.nereids.trees.plans.logical.LogicalFileScan.SelectedPartitions;
 import org.apache.doris.planner.ListPartitionPrunerV2;
 import org.apache.doris.planner.PlanNodeId;
 import org.apache.doris.planner.external.HiveSplit.HiveSplitCreator;
@@ -54,8 +55,10 @@ import org.apache.doris.thrift.TFileFormatType;
 import org.apache.doris.thrift.TFileTextScanRangeParams;
 import org.apache.doris.thrift.TFileType;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import lombok.Setter;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.logging.log4j.LogManager;
@@ -85,6 +88,9 @@ public class HiveScanNode extends FileQueryScanNode {
     protected final HMSExternalTable hmsTable;
     private HiveTransaction hiveTransaction = null;
 
+    @Setter
+    private SelectedPartitions selectedPartitions = null;
+
     /**
      * * External file scan node for Query Hive table
     * needCheckColumnPriv: Some of ExternalFileScanNode do not need to check column priv
@@ -162,56 +168,65 @@ public class HiveScanNode extends FileQueryScanNode {
     }
 
     protected List<HivePartition> getPartitions() throws AnalysisException {
+        List<HivePartition> resPartitions = Lists.newArrayList();
         long start = System.currentTimeMillis();
         HiveMetaStoreCache cache = Env.getCurrentEnv().getExtMetaCacheMgr()
                 .getMetaStoreCache((HMSExternalCatalog) hmsTable.getCatalog());
-        // 1. get ListPartitionItems from cache
-        HiveMetaStoreCache.HivePartitionValues hivePartitionValues = null;
         List<Type> partitionColumnTypes = hmsTable.getPartitionColumnTypes();
         if (!partitionColumnTypes.isEmpty()) {
-            hivePartitionValues = cache.getPartitionValues(hmsTable.getDbName(), hmsTable.getName(),
-                    partitionColumnTypes);
-        }
-        if (hivePartitionValues != null) {
-            // 2. prune partitions by expr
-            Map<Long, PartitionItem> idToPartitionItem = hivePartitionValues.getIdToPartitionItem();
-            this.totalPartitionNum = idToPartitionItem.size();
-            ListPartitionPrunerV2 pruner = new ListPartitionPrunerV2(idToPartitionItem,
-                    hmsTable.getPartitionColumns(), columnNameToRange,
-                    hivePartitionValues.getUidToPartitionRange(),
-                    hivePartitionValues.getRangeToId(),
-                    hivePartitionValues.getSingleColumnRangeMap(),
-                    true);
-            Collection<Long> filteredPartitionIds = pruner.prune();
-            this.readPartitionNum = filteredPartitionIds.size();
-            LOG.debug("hive partition fetch and prune for table {}.{} cost: {} 
ms",
-                    hmsTable.getDbName(), hmsTable.getName(), 
(System.currentTimeMillis() - start));
-
-            // 3. get partitions from cache
-            List<List<String>> partitionValuesList = Lists.newArrayListWithCapacity(filteredPartitionIds.size());
-            for (Long id : filteredPartitionIds) {
-                ListPartitionItem listPartitionItem = (ListPartitionItem) idToPartitionItem.get(id);
-                partitionValuesList.add(listPartitionItem.getItems().get(0).getPartitionValuesAsStringList());
+            // partitioned table
+            boolean isPartitionPruned = selectedPartitions == null ? false : selectedPartitions.isPartitionPruned;
+            Collection<PartitionItem> partitionItems;
+            if (!isPartitionPruned) {
+                // isPartitionPruned == false means the partitions were not pruned by Nereids,
+                // so we need to prune them here with the legacy ListPartitionPrunerV2.
+                HiveMetaStoreCache.HivePartitionValues hivePartitionValues = cache.getPartitionValues(
+                        hmsTable.getDbName(), hmsTable.getName(), partitionColumnTypes);
+                Map<Long, PartitionItem> idToPartitionItem = hivePartitionValues.getIdToPartitionItem();
+                this.totalPartitionNum = idToPartitionItem.size();
+                ListPartitionPrunerV2 pruner = new ListPartitionPrunerV2(idToPartitionItem,
+                        hmsTable.getPartitionColumns(), columnNameToRange,
+                        hivePartitionValues.getUidToPartitionRange(),
+                        hivePartitionValues.getRangeToId(),
+                        hivePartitionValues.getSingleColumnRangeMap(),
+                        true);
+                Collection<Long> filteredPartitionIds = pruner.prune();
+                LOG.debug("hive partition fetch and prune for table {}.{} 
cost: {} ms",
+                        hmsTable.getDbName(), hmsTable.getName(), 
(System.currentTimeMillis() - start));
+                partitionItems = 
Lists.newArrayListWithCapacity(filteredPartitionIds.size());
+                for (Long id : filteredPartitionIds) {
+                    partitionItems.add(idToPartitionItem.get(id));
+                }
+            } else {
+                // partitions have been pruned by Nereids, in PruneFileScanPartition,
+                // so just use the selected partitions.
+                this.totalPartitionNum = selectedPartitions.totalPartitionNum;
+                partitionItems = selectedPartitions.selectedPartitions.values();
             }
-            List<HivePartition> allPartitions =
-                    cache.getAllPartitionsWithCache(hmsTable.getDbName(), hmsTable.getName(), partitionValuesList);
-            if (ConnectContext.get().getExecutor() != null) {
-                ConnectContext.get().getExecutor().getSummaryProfile().setGetPartitionsFinishTime();
+            Preconditions.checkNotNull(partitionItems);
+            this.readPartitionNum = partitionItems.size();
+
+            // get partitions from cache
+            List<List<String>> partitionValuesList = Lists.newArrayListWithCapacity(partitionItems.size());
+            for (PartitionItem item : partitionItems) {
+                partitionValuesList.add(((ListPartitionItem) item).getItems().get(0).getPartitionValuesAsStringList());
             }
-            return allPartitions;
+            resPartitions = cache.getAllPartitionsWithCache(hmsTable.getDbName(), hmsTable.getName(),
+                    partitionValuesList);
         } else {
-            // unpartitioned table, create a dummy partition to save location and inputformat,
+            // non-partitioned table, create a dummy partition to save location and inputformat,
             // so that we can unify the interface.
             HivePartition dummyPartition = new HivePartition(hmsTable.getDbName(), hmsTable.getName(), true,
                     hmsTable.getRemoteTable().getSd().getInputFormat(),
                     hmsTable.getRemoteTable().getSd().getLocation(), null);
             this.totalPartitionNum = 1;
             this.readPartitionNum = 1;
-            if (ConnectContext.get().getExecutor() != null) {
-                ConnectContext.get().getExecutor().getSummaryProfile().setGetPartitionsFinishTime();
-            }
-            return Lists.newArrayList(dummyPartition);
+            resPartitions.add(dummyPartition);
+        }
+        if (ConnectContext.get().getExecutor() != null) {
+            ConnectContext.get().getExecutor().getSummaryProfile().setGetPartitionsFinishTime();
         }
+        return resPartitions;
     }
 
     @Override
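
getPartitions() now has two sources of truth: when Nereids has already pruned (isPartitionPruned is true) the selected items are used directly, otherwise the legacy ListPartitionPrunerV2 path runs so the old planner keeps working. The control flow, condensed into a runnable toy (the real method also loads partition values from HiveMetaStoreCache):

    import java.util.List;

    // Toy sketch of the dual pruning path in HiveScanNode.getPartitions().
    public class PartitionSourceDemo {
        static List<String> getPartitions(boolean isPartitionPruned, List<String> nereidsSelected,
                List<String> allPartitions) {
            if (isPartitionPruned) {
                return nereidsSelected; // pruned by Nereids in PruneFileScanPartition
            }
            return allPartitions;       // legacy path: prune here (ListPartitionPrunerV2)
        }

        public static void main(String[] args) {
            List<String> all = List.of("p1", "p2", "p3");
            System.out.println(getPartitions(true, List.of("p1"), all)); // [p1]
            System.out.println(getPartitions(false, null, all));        // [p1, p2, p3]
        }
    }
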
diff --git a/regression-test/data/external_table_p2/hive/test_hive_default_partition.out b/regression-test/data/external_table_p2/hive/test_hive_default_partition.out
index 3737fbc7f6..cace5bba2b 100644
--- a/regression-test/data/external_table_p2/hive/test_hive_default_partition.out
+++ b/regression-test/data/external_table_p2/hive/test_hive_default_partition.out
@@ -28,6 +28,12 @@
 5      \N
 6      \N
 
+-- !one_partition6 --
+3      2
+4      2
+5      \N
+6      \N
+
 -- !two_partition1 --
 1      \N      one
 2      \N      one
@@ -133,3 +139,37 @@
 1      \N      one
 2      \N      one
 
+-- !two_partition18 --
+5      3       three
+6      3       three
+
+-- !string_part_prune1 --
+3      2023-08-17      2023-08-17
+
+-- !string_part_prune2 --
+
+-- !string_part_prune3 --
+3      2023-08-17      2023-08-17
+
+-- !string_part_prune4 --
+1      2023-08-15      2023-08-15
+1      2023-8-15       2023-08-15
+2      2023-08-16      2023-08-16
+3      2023-08-17      2023-08-17
+
+-- !string_part_prune5 --
+3      2023-08-17      2023-08-17
+
+-- !string_part_prune6 --
+1      2023-08-15      2023-08-15
+2      2023-08-16      2023-08-16
+
+-- !string_part_prune7 --
+2      2023-08-16      2023-08-16
+
+-- !string_part_prune8 --
+2      2023-08-16      2023-08-16
+
+-- !string_part_prune9 --
+2      2023-08-16      2023-08-16
+
diff --git a/regression-test/suites/external_table_p2/hive/test_hive_default_partition.groovy b/regression-test/suites/external_table_p2/hive/test_hive_default_partition.groovy
index 2deddb9d2d..e1ae749321 100644
--- a/regression-test/suites/external_table_p2/hive/test_hive_default_partition.groovy
+++ b/regression-test/suites/external_table_p2/hive/test_hive_default_partition.groovy
@@ -21,6 +21,7 @@ suite("test_hive_default_partition", "p2") {
     def one_partition3 = """select id from one_partition where part1 is not null order by id;"""
     def one_partition4 = """select part1 from one_partition where part1>0 order by id;"""
     def one_partition5 = """select id, part1 from one_partition where part1 is null or id>3 order by id;"""
+    def one_partition6 = """select id, part1 from one_partition where part1 is null or part1>1 order by id;"""
 
     def two_partition1 = """select * from two_partition order by id;"""
     def two_partition2 = """select id, part1, part2 from two_partition where part1 is null order by id;"""
@@ -39,6 +40,17 @@ suite("test_hive_default_partition", "p2") {
     def two_partition15 = """select id, part1, part2 from two_partition where id > 5 order by id;"""
     def two_partition16 = """select id, part1, part2 from two_partition where part1>0 order by id;"""
     def two_partition17 = """select id, part1, part2 from two_partition where part2 = 'one' order by id;"""
+    def two_partition18 = """select id, part1, part2 from two_partition where part2 = 'three' order by id;"""
+
+    def string_part_prune1 = """select * from test_date_string_partition where cast(day1 as date) > cast("2023-08-16" as date);"""
+    def string_part_prune2 = """select * from test_date_string_partition where cast(day1 as date) > cast("2023-08-16" as date) and day2="2023-08-16";"""
+    def string_part_prune3 = """select * from test_date_string_partition where cast(day1 as date) > cast("2023-08-16" as date) and day2="2023-08-17";"""
+    def string_part_prune4 = """select * from test_date_string_partition where cast(day1 as date) > cast("2023-08-16" as date) or day2<"2023-08-17";"""
+    def string_part_prune5 = """select * from test_date_string_partition where cast(day1 as date) > cast("2023-08-16" as date) or cast(day2 as string) = "2023-08-17";"""
+    def string_part_prune6 = """select * from test_date_string_partition where day1 in ("2023-08-16", "2023-08-15");"""
+    def string_part_prune7 = """select * from test_date_string_partition where day1 in ("2023-08-16", "2023-08-18");"""
+    def string_part_prune8 = """select * from test_date_string_partition where cast(day1 as date) in ("2023-08-16", "2023-08-18");"""
+    def string_part_prune9 = """select * from test_date_string_partition where cast(day1 as date) in (cast("2023-08-16" as date), "2023-08-18");"""
 
     String enabled = context.config.otherConfigs.get("enableExternalHiveTest")
     if (enabled != null && enabled.equalsIgnoreCase("true")) {
@@ -56,11 +68,14 @@ suite("test_hive_default_partition", "p2") {
         sql """switch ${catalog_name};"""
         logger.info("switched to catalog " + catalog_name)
         sql """use multi_catalog;"""
+        sql """set experimental_enable_nereids_planner=true;"""
+        sql """set enable_fallback_to_original_planner=false;"""
         qt_one_partition1 one_partition1
         qt_one_partition2 one_partition2
         qt_one_partition3 one_partition3
         qt_one_partition4 one_partition4
         qt_one_partition5 one_partition5
+        qt_one_partition6 one_partition6
 
         qt_two_partition1 two_partition1
         qt_two_partition2 two_partition2
@@ -79,7 +94,152 @@ suite("test_hive_default_partition", "p2") {
         qt_two_partition15 two_partition15
         qt_two_partition16 two_partition16
         qt_two_partition17 two_partition17
+        qt_two_partition18 two_partition18
+
+        order_qt_string_part_prune1 string_part_prune1
+        order_qt_string_part_prune2 string_part_prune2
+        order_qt_string_part_prune3 string_part_prune3
+        order_qt_string_part_prune4 string_part_prune4
+        order_qt_string_part_prune5 string_part_prune5
+        order_qt_string_part_prune6 string_part_prune6
+        order_qt_string_part_prune7 string_part_prune7
+        order_qt_string_part_prune8 string_part_prune8
+        order_qt_string_part_prune9 string_part_prune9
+
+        explain {
+            sql("${one_partition1}")
+            contains "partition=3/3"
+        }
+        explain {
+            sql("${one_partition2}")
+            contains "partition=1/3"
+        }
+        explain {
+            sql("${one_partition3}")
+            contains "partition=3/3"
+        }
+        explain {
+            sql("${one_partition4}")
+            contains "partition=3/3"
+        }
+        explain {
+            sql("${one_partition5}")
+            contains "partition=3/3"
+        }
+        explain {
+            sql("${one_partition6}")
+            contains "partition=2/3"
+        }
+
+        explain {
+            sql("${two_partition1}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${two_partition2}")
+            contains "partition=3/4"
+        }
+        explain {
+            sql("${two_partition3}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${two_partition4}")
+            contains "partition=3/4"
+        }
+        explain {
+            sql("${two_partition5}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${two_partition6}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${two_partition7}")
+            contains "partition=3/4"
+        }
+        explain {
+            sql("${two_partition8}")
+            contains "partition=3/4"
+        }
+        explain {
+            sql("${two_partition9}")
+            contains "partition=3/4"
+        }
+        explain {
+            sql("${two_partition10}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${two_partition11}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${two_partition12}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${two_partition13}")
+            contains "partition=3/4"
+        }
+        explain {
+            sql("${two_partition14}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${two_partition15}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${two_partition16}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${two_partition17}")
+            contains "partition=3/4"
+        }
+        explain {
+            sql("${two_partition18}")
+            contains "partition=4/4"
+        }
 
+        explain {
+            sql("${string_part_prune1}")
+            contains "partition=2/4"
+        }
+        explain {
+            sql("${string_part_prune2}")
+            contains "partition=0/4"
+        }
+        explain {
+            sql("${string_part_prune3}")
+            contains "partition=1/4"
+        }
+        explain {
+            sql("${string_part_prune4}")
+            contains "partition=4/4"
+        }
+        explain {
+            sql("${string_part_prune5}")
+            contains "partition=2/4"
+        }
+        explain {
+            sql("${string_part_prune6}")
+            contains "partition=2/4"
+        }
+        explain {
+            sql("${string_part_prune7}")
+            contains "partition=1/4"
+        }
+        explain {
+            sql("${string_part_prune8}")
+            contains "partition=2/4"
+        }
+        explain {
+            sql("${string_part_prune9}")
+            contains "partition=2/4"
+        }
     }
 }
 
diff --git a/regression-test/suites/external_table_p2/hive/test_select_count_optimize.groovy b/regression-test/suites/external_table_p2/hive/test_select_count_optimize.groovy
index 2a95dc4294..88a9cfeb12 100644
--- a/regression-test/suites/external_table_p2/hive/test_select_count_optimize.groovy
+++ b/regression-test/suites/external_table_p2/hive/test_select_count_optimize.groovy
@@ -32,7 +32,9 @@ suite("test_select_count_optimize", "p2") {
         logger.info("catalog " + catalog_name + " created")
         sql """switch ${catalog_name};"""
         logger.info("switched to catalog " + catalog_name)
-        sql """ set query_timeout=3600; """ 
+
+        sql """set experimental_enable_nereids_planner=true;"""
+        sql """set enable_fallback_to_original_planner=false;"""
 
         //parquet 
         qt_sql """ select * from tpch_1000_parquet.nation order by n_name,n_regionkey,n_nationkey,n_comment ; """

