This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
     new 1900e91a78d [branch-2.0-fix](partial update) `enable_unique_key_partial_update` should not affect tables which are not merge-on-write unique tables (#40170)
1900e91a78d is described below

commit 1900e91a78d767e7961f47acac669879de71079c
Author: bobhan1 <bh2444151...@outlook.com>
AuthorDate: Mon Sep 2 10:17:38 2024 +0800

    [branch-2.0-fix](partial update) `enable_unique_key_partial_update` should not affect tables which are not merge-on-write unique tables (#40170)
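
    An illustrative SQL sketch of the resulting behavior (the table names t1,
    t_mow, t_mor, t_dup and t_agg are taken from the regression test added in
    this commit):

        SET enable_unique_key_partial_update = true;
        SET enable_insert_strict = false;
        -- t_mor / t_dup / t_agg are not merge-on-write unique tables, so the
        -- session variable is ignored and these run as ordinary full-row inserts.
        INSERT INTO t_mor SELECT * FROM t1;
        INSERT INTO t_dup SELECT * FROM t1;
        INSERT INTO t_agg SELECT * FROM t1;
        -- t_mow is a merge-on-write unique table, so partial-update checks still
        -- apply and an INSERT without an explicit column list is rejected.
        INSERT INTO t_mow SELECT * FROM t1;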
---
 .../apache/doris/analysis/NativeInsertStmt.java    |   2 +-
 .../glue/translator/PhysicalPlanTranslator.java    |   4 -
 .../doris/nereids/rules/analysis/BindSink.java     |  12 +-
 .../partial_update_columns_check1.csv              |   3 +
 .../test_partial_update_columns_check.out          |  41 +++++++
 .../test_partial_update_native_insert_stmt.out     |   8 ++
 .../test_partial_update_columns_check.groovy       | 128 +++++++++++++++++++++
 .../test_partial_update_native_insert_stmt.groovy  |  12 +-
 8 files changed, 195 insertions(+), 15 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java
index b9a745dad20..8b5f6f9aa3b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java
@@ -1109,7 +1109,7 @@ public class NativeInsertStmt extends InsertStmt {
             return;
         }
         if (!olapTable.getEnableUniqueKeyMergeOnWrite()) {
-            throw new UserException("Partial update is only allowed on unique table with merge-on-write enabled.");
+            return;
         }
         if (hasEmptyTargetColumns) {
             throw new AnalysisException("You must explicitly specify the columns to be updated when "
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java
index 31b01b4af22..2d0822fbcb9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java
@@ -377,10 +377,6 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla
         boolean isPartialUpdate = olapTableSink.isPartialUpdate();
         if (isPartialUpdate) {
             OlapTable olapTable = (OlapTable) olapTableSink.getTargetTable();
-            if (!olapTable.getEnableUniqueKeyMergeOnWrite()) {
-                throw new AnalysisException("Partial update is only allowed in"
-                        + "unique table with merge-on-write enabled.");
-            }
             for (Column col : olapTable.getFullSchema()) {
                 boolean exists = false;
                 for (Column insertCol : olapTableSink.getCols()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindSink.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindSink.java
index 0e12b8128e7..d5c4a50ab75 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindSink.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindSink.java
@@ -22,6 +22,7 @@ import org.apache.doris.analysis.SlotRef;
 import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.Database;
 import org.apache.doris.catalog.DatabaseIf;
+import org.apache.doris.catalog.KeysType;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Partition;
 import org.apache.doris.catalog.TableIf;
@@ -76,9 +77,12 @@ public class BindSink implements AnalysisRuleFactory {
                             OlapTable table = pair.second;
 
                             LogicalPlan child = ((LogicalPlan) sink.child());
+                            boolean isPartialUpdate = (sink.isPartialUpdate()
+                                    && table.getKeysType() == KeysType.UNIQUE_KEYS
+                                    && table.getEnableUniqueKeyMergeOnWrite());
 
                             if (sink.getColNames().isEmpty() && sink.isFromNativeInsertStmt()
-                                    && sink.isPartialUpdate()) {
+                                    && isPartialUpdate) {
                                 throw new AnalysisException("You must explicitly specify the columns to be updated "
                                         + "when updating partial columns using the INSERT statement.");
                             }
@@ -91,7 +95,7 @@ public class BindSink implements AnalysisRuleFactory {
                                     child.getOutput().stream()
                                             .map(NamedExpression.class::cast)
                                             .collect(ImmutableList.toImmutableList()),
-                                    sink.isPartialUpdate(),
+                                    isPartialUpdate,
                                     sink.isFromNativeInsertStmt(),
                                     sink.child());
 
@@ -142,7 +146,7 @@ public class BindSink implements AnalysisRuleFactory {
                                         }
                                     }
 
-                                    if (!haveInputSeqCol && !sink.isPartialUpdate()) {
+                                    if (!haveInputSeqCol && !isPartialUpdate) {
                                         if (!seqColInTable.isPresent() || seqColInTable.get().getDefaultValue() == null
                                                 || !seqColInTable.get().getDefaultValue()
                                                         .equals(DefaultValue.CURRENT_TIMESTAMP)) {
@@ -211,7 +215,7 @@ public class BindSink implements AnalysisRuleFactory {
                                                 }
                                                 columnToOutput.put(column.getName(), seqColumn);
                                             }
-                                        } else if (sink.isPartialUpdate()) {
+                                        } else if (isPartialUpdate) {
                                             // If the current load is a partial update, the values of unmentioned
                                             // columns will be filled in SegmentWriter. And the output of sink node
                                             // should not contain these unmentioned columns, so we just skip them.
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/partial_update_columns_check1.csv b/regression-test/data/unique_with_mow_p0/partial_update/partial_update_columns_check1.csv
new file mode 100644
index 00000000000..62bf8d15885
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/partial_update_columns_check1.csv
@@ -0,0 +1,3 @@
+1,1,1
+2,2,2
+3,3,3
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_columns_check.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_columns_check.out
new file mode 100644
index 00000000000..99d4a1bf24a
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_columns_check.out
@@ -0,0 +1,41 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      1       1
+2      2       2
+3      3       3
+
+-- !mor --
+1      1       1
+2      2       2
+3      3       3
+
+-- !dup --
+1      1       1
+2      2       2
+3      3       3
+
+-- !agg --
+1      1       1
+2      2       2
+3      3       3
+
+-- !sql --
+1      1       1
+2      2       2
+3      3       3
+
+-- !mor --
+1      1       1
+2      2       2
+3      3       3
+
+-- !dup --
+1      1       1
+2      2       2
+3      3       3
+
+-- !agg --
+1      1       1
+2      2       2
+3      3       3
+
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out
index 3ee9fbcb760..3c2ae8804c1 100644
--- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out
@@ -145,3 +145,11 @@
 10000  2017-10-01      2017-10-01T08:00:05     北京      20      0       2017-10-01T06:00        20      10      10
 10000  2017-10-01      2017-10-01T09:00:05     北京      20      0       2017-10-01T07:00        15      2       2
 
+-- !sql --
+10000  2017-10-01      2017-10-01T08:00:05     北京      20      0       2017-10-01T06:00        20      10      10
+10000  2017-10-01      2017-10-01T09:00:05     北京      20      0       2017-10-01T07:00        15      2       2
+
+-- !sql --
+10000  2017-10-01      2017-10-01T08:00:05     北京      20      0       2017-10-01T06:00        20      10      10
+10000  2017-10-01      2017-10-01T09:00:05     北京      20      0       2017-10-01T07:00        15      2       2
+
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_columns_check.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_columns_check.groovy
new file mode 100644
index 00000000000..3ecb2bfe539
--- /dev/null
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_columns_check.groovy
@@ -0,0 +1,128 @@
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_partial_update_columns_check", "p0") {
+    String db = context.config.getDbNameByFile(context.file)
+    sql "select 1;" // to create database
+
+    for (def use_nereids_planner : [true, false]) {
+        logger.info("current params: use_nereids_planner: ${use_nereids_planner}")
+        connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) {
+            sql "use ${db};"
+            if (use_nereids_planner) {
+                sql """ set enable_nereids_dml = true; """
+                sql """ set enable_nereids_planner=true; """
+                sql """ set enable_fallback_to_original_planner=false; """
+            } else {
+                sql """ set enable_nereids_dml = false; """
+                sql """ set enable_nereids_planner = false; """
+            }
+            sql "sync;"
+
+            sql """ DROP TABLE IF EXISTS t1 """
+            sql """  CREATE TABLE IF NOT EXISTS t1 (
+                `k` BIGINT NOT NULL,
+                `c1` int,
+                `c2` int
+                ) DUPLICATE KEY(`k`)
+                DISTRIBUTED BY HASH(`k`) BUCKETS 1
+                PROPERTIES (
+                "replication_allocation" = "tag.location.default: 1"
+                );
+                """
+            sql """ DROP TABLE IF EXISTS t_mow """
+            sql """  CREATE TABLE IF NOT EXISTS t_mow (
+                `k` BIGINT NOT NULL,
+                `c1` int,
+                `c2` int
+                ) UNIQUE KEY(`k`)
+                DISTRIBUTED BY HASH(`k`) BUCKETS 1
+                PROPERTIES (
+                "replication_allocation" = "tag.location.default: 1",
+                "enable_unique_key_merge_on_write" = "true"
+                );
+                """
+            sql """ DROP TABLE IF EXISTS t_mor """
+            sql """  CREATE TABLE IF NOT EXISTS t_mor (
+                `k` BIGINT NOT NULL,
+                `c1` int,
+                `c2` int
+                ) UNIQUE KEY(`k`)
+                DISTRIBUTED BY HASH(`k`) BUCKETS 1
+                PROPERTIES (
+                "replication_allocation" = "tag.location.default: 1",
+                "enable_unique_key_merge_on_write" = "false"
+                );
+                """
+            sql """ DROP TABLE IF EXISTS t_dup """
+            sql """  CREATE TABLE IF NOT EXISTS t_dup (
+                `k` BIGINT NOT NULL,
+                `c1` int,
+                `c2` int
+                ) DUPLICATE KEY(`k`)
+                DISTRIBUTED BY HASH(`k`) BUCKETS 1
+                PROPERTIES (
+                "replication_allocation" = "tag.location.default: 1"
+                );
+                """
+            sql """ DROP TABLE IF EXISTS t_agg """
+            sql """  CREATE TABLE IF NOT EXISTS t_agg (
+                `k` BIGINT NOT NULL,
+                `c1` int max,
+                `c2` int min
+                ) AGGREGATE KEY(`k`)
+                DISTRIBUTED BY HASH(`k`) BUCKETS 1
+                PROPERTIES (
+                "replication_allocation" = "tag.location.default: 1"
+                );
+                """
+            streamLoad {
+                table "t1"
+                set 'columns', 'k,c1,c2'
+                set 'column_separator', ','
+                set 'format', 'csv'
+                file 'partial_update_columns_check1.csv'
+                time 10000
+            }
+            order_qt_sql "select * from t1;"
+
+
+            sql "set enable_unique_key_partial_update = true;"
+            sql "set enable_insert_strict = false;"
+            sql "sync;"
+
+            test {
+                sql "insert into t_mow select * from t1;"
+                exception "You must explicitly specify the columns to be updated when updating partial columns using the INSERT statement."
+            }
+
+            sql "insert into t_mor select * from t1;"
+            order_qt_mor "select * from t_mor;"
+            sql "insert into t_dup select * from t1;"
+            order_qt_dup "select * from t_dup;"
+            sql "insert into t_agg select * from t1;"
+            order_qt_agg "select * from t_agg;"
+
+            sql """ DROP TABLE IF EXISTS t1;"""
+            sql """ DROP TABLE IF EXISTS t_mow;"""
+            sql """ DROP TABLE IF EXISTS t_mor;"""
+            sql """ DROP TABLE IF EXISTS t_dup;"""
+            sql """ DROP TABLE IF EXISTS t_agg;"""
+        }
+    }
+}
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy
index bf128ba26b4..5bd69030cc7 100644
--- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy
@@ -329,12 +329,12 @@ suite("test_partial_update_native_insert_stmt", "p0") {
         DISTRIBUTED BY HASH(`user_id`) BUCKETS 1
         PROPERTIES ("replication_allocation" = "tag.location.default: 1", "enable_unique_key_merge_on_write" = "false");"""
 
-        test {
-            sql """insert into ${tableName10} values
-            (10000,"2017-10-01","2017-10-01 08:00:05","北京",20,0,"2017-10-01 06:00:00",20,10,10),
-            (10000,"2017-10-01","2017-10-01 09:00:05","北京",20,0,"2017-10-01 07:00:00",15,2,2);  """
-            exception "Partial update is only allowed on unique table with merge-on-write enabled"
-        }
+
+        sql """insert into ${tableName10} values
+        (10000,"2017-10-01","2017-10-01 08:00:05","北京",20,0,"2017-10-01 06:00:00",20,10,10),
+        (10000,"2017-10-01","2017-10-01 09:00:05","北京",20,0,"2017-10-01 07:00:00",15,2,2);  """
+        order_qt_sql "select * from ${tableName10};"
+
 
         sql """ DROP TABLE IF EXISTS ${tableName8}; """
         sql """ DROP TABLE IF EXISTS ${tableName9}; """

