This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 36b7fcf055 [tmp](hive) support hive partition 00 (#23224)
36b7fcf055 is described below

commit 36b7fcf055a0e0ddd0d90d87f39d6d4289231e7f
Author: Mingyu Chen <morning...@163.com>
AuthorDate: Sat Aug 26 12:58:31 2023 +0800

    [tmp](hive) support hive partition 00 (#23224)
    
    In some cases, a Hive table with an int partition column may have partition values such as:
    hour=00, hour=01
    We need to support this.
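    
    As a minimal illustration (a standalone sketch, not part of this patch): parsing "00"
    through an int literal normalizes it to "0", so a partition path rebuilt from the typed
    value no longer matches the directory that actually exists on storage.
    
        // Hypothetical demo class, only to show the normalization problem.
        public class HivePartitionValueDemo {
            public static void main(String[] args) {
                String rawValue = "00";                      // value in the path: .../hour=00
                int parsed = Integer.parseInt(rawValue);     // int partition column parses it to 0
                String normalized = String.valueOf(parsed);  // back to string it becomes "0", not "00"
    
                System.out.println(".../hour=" + normalized); // wrong path: .../hour=0
                System.out.println(".../hour=" + rawValue);   // correct path: .../hour=00
            }
        }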
---
 .../org/apache/doris/catalog/PartitionKey.java     | 15 +++++++++--
 .../doris/datasource/hive/HiveMetaStoreCache.java  |  2 +-
 .../doris/planner/external/HiveScanNode.java       | 31 +++++++++++++---------
 .../planner/external/TablePartitionValues.java     |  3 +--
 4 files changed, 33 insertions(+), 18 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java
index f9920701e3..bf69a209e9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java
@@ -53,12 +53,14 @@ import java.util.zip.CRC32;
 public class PartitionKey implements Comparable<PartitionKey>, Writable {
     private static final Logger LOG = LogManager.getLogger(PartitionKey.class);
     private List<LiteralExpr> keys;
+    private List<String> originHiveKeys;
     private List<PrimitiveType> types;
     private boolean isDefaultListPartitionKey = false;
 
     // constructor for partition prune
     public PartitionKey() {
         keys = Lists.newArrayList();
+        originHiveKeys = Lists.newArrayList();
         types = Lists.newArrayList();
     }
 
@@ -101,7 +103,8 @@ public class PartitionKey implements Comparable<PartitionKey>, Writable {
         return partitionKey;
     }
 
-    public static PartitionKey createListPartitionKeyWithTypes(List<PartitionValue> values, List<Type> types)
+    public static PartitionKey createListPartitionKeyWithTypes(List<PartitionValue> values, List<Type> types,
+            boolean isHive)
             throws AnalysisException {
         // for multi list partition:
         //
@@ -134,6 +137,9 @@ public class PartitionKey implements Comparable<PartitionKey>, Writable {
         PartitionKey partitionKey = new PartitionKey();
         for (int i = 0; i < values.size(); i++) {
             partitionKey.keys.add(values.get(i).getValue(types.get(i)));
+            if (isHive) {
+                partitionKey.originHiveKeys.add(values.get(i).getStringValue());
+            }
             partitionKey.types.add(types.get(i).getPrimitiveType());
         }
         if (values.isEmpty()) {
@@ -151,7 +157,7 @@ public class PartitionKey implements Comparable<PartitionKey>, Writable {
     public static PartitionKey createListPartitionKey(List<PartitionValue> values, List<Column> columns)
             throws AnalysisException {
         List<Type> types = columns.stream().map(c -> c.getType()).collect(Collectors.toList());
-        return createListPartitionKeyWithTypes(values, types);
+        return createListPartitionKeyWithTypes(values, types, false);
     }
 
     public void pushColumn(LiteralExpr keyValue, PrimitiveType keyType) {
@@ -205,6 +211,11 @@ public class PartitionKey implements Comparable<PartitionKey>, Writable {
         return keys.stream().map(k -> k.getStringValue()).collect(Collectors.toList());
     }
 
+    public List<String> getPartitionValuesAsStringListForHive() {
+        Preconditions.checkState(originHiveKeys.size() == keys.size());
+        return originHiveKeys;
+    }
+
     public static int compareLiteralExpr(LiteralExpr key1, LiteralExpr key2) {
         int ret = 0;
         if (key1 instanceof MaxLiteral || key2 instanceof MaxLiteral) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
index e7e621948e..1ad1b12047 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java
@@ -310,7 +310,7 @@ public class HiveMetaStoreCache {
             values.add(new PartitionValue(partitionValue, HIVE_DEFAULT_PARTITION.equals(partitionValue)));
         }
         try {
-            PartitionKey key = PartitionKey.createListPartitionKeyWithTypes(values, types);
+            PartitionKey key = PartitionKey.createListPartitionKeyWithTypes(values, types, true);
             return new ListPartitionItem(Lists.newArrayList(key));
         } catch (AnalysisException e) {
             throw new CacheException("failed to convert hive partition %s to list partition in catalog %s",
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java
index 2be52b8f07..8e71e02fac 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java
@@ -184,18 +184,22 @@ public class HiveScanNode extends FileQueryScanNode {
                         hmsTable.getDbName(), hmsTable.getName(), partitionColumnTypes);
                 Map<Long, PartitionItem> idToPartitionItem = hivePartitionValues.getIdToPartitionItem();
                 this.totalPartitionNum = idToPartitionItem.size();
-                ListPartitionPrunerV2 pruner = new ListPartitionPrunerV2(idToPartitionItem,
-                        hmsTable.getPartitionColumns(), columnNameToRange,
-                        hivePartitionValues.getUidToPartitionRange(),
-                        hivePartitionValues.getRangeToId(),
-                        hivePartitionValues.getSingleColumnRangeMap(),
-                        true);
-                Collection<Long> filteredPartitionIds = pruner.prune();
-                LOG.debug("hive partition fetch and prune for table {}.{} cost: {} ms",
-                        hmsTable.getDbName(), hmsTable.getName(), (System.currentTimeMillis() - start));
-                partitionItems = Lists.newArrayListWithCapacity(filteredPartitionIds.size());
-                for (Long id : filteredPartitionIds) {
-                    partitionItems.add(idToPartitionItem.get(id));
+                if (!conjuncts.isEmpty()) {
+                    ListPartitionPrunerV2 pruner = new ListPartitionPrunerV2(idToPartitionItem,
+                            hmsTable.getPartitionColumns(), columnNameToRange,
+                            hivePartitionValues.getUidToPartitionRange(),
+                            hivePartitionValues.getRangeToId(),
+                            hivePartitionValues.getSingleColumnRangeMap(),
+                            true);
+                    Collection<Long> filteredPartitionIds = pruner.prune();
+                    LOG.debug("hive partition fetch and prune for table {}.{} cost: {} ms",
+                            hmsTable.getDbName(), hmsTable.getName(), (System.currentTimeMillis() - start));
+                    partitionItems = Lists.newArrayListWithCapacity(filteredPartitionIds.size());
+                    for (Long id : filteredPartitionIds) {
+                        partitionItems.add(idToPartitionItem.get(id));
+                    }
+                } else {
+                    partitionItems = idToPartitionItem.values();
                 }
             } else {
                 // partitions has benn pruned by Nereids, in PruneFileScanPartition,
@@ -209,7 +213,8 @@ public class HiveScanNode extends FileQueryScanNode {
             // get partitions from cache
             List<List<String>> partitionValuesList = Lists.newArrayListWithCapacity(partitionItems.size());
             for (PartitionItem item : partitionItems) {
-                partitionValuesList.add(((ListPartitionItem) item).getItems().get(0).getPartitionValuesAsStringList());
+                partitionValuesList.add(
+                        ((ListPartitionItem) item).getItems().get(0).getPartitionValuesAsStringListForHive());
             }
             resPartitions = cache.getAllPartitionsWithCache(hmsTable.getDbName(), hmsTable.getName(),
                     partitionValuesList);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/TablePartitionValues.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/TablePartitionValues.java
index 87f11e5863..a207f5f082 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/TablePartitionValues.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/TablePartitionValues.java
@@ -190,8 +190,7 @@ public class TablePartitionValues {
         try {
             PartitionKey key = PartitionKey.createListPartitionKeyWithTypes(
                     partitionValues.stream().map(p -> new PartitionValue(p, HIVE_DEFAULT_PARTITION.equals(p)))
-                            .collect(Collectors.toList()),
-                    types);
+                            .collect(Collectors.toList()), types, false);
             return new ListPartitionItem(Lists.newArrayList(key));
         } catch (AnalysisException e) {
             throw new CacheException("failed to convert partition %s to list partition",


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
