This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new 32529ecda27 [cherry-pick](branch-2.1) Pick "[Enhancement](partial update) Add partial update mix cases (#37113)" (#37384)
32529ecda27 is described below

commit 32529ecda2754780efcc8acf3252e9ba94341510
Author: abmdocrt <yukang.lian2...@gmail.com>
AuthorDate: Sun Jul 7 18:26:46 2024 +0800

    [cherry-pick](branch-2.1) Pick "[Enhancement](partial update) Add partial update mix cases (#37113)" (#37384)
    
    #37113
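    
    The cases mix the sequence column, the hidden __DORIS_DELETE_SIGN__
    column, and partial updates on merge-on-write unique tables, driven
    both by INSERT and by stream load. A condensed sketch of the pattern
    the suite exercises (statements taken from the suite below):
    
        set enable_unique_key_partial_update = true;
        set enable_insert_strict = false;
        -- update only k1/seq; untouched columns keep their current values
        insert into test_mix_partial_update (k1, seq) values (2, 1);
        -- a partial update that also sets the delete sign deletes the row
        insert into test_mix_partial_update (k1, seq, __DORIS_DELETE_SIGN__) values (3, 1, 1);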
---
 .../partial_update/test_mix_partial_update.out     | 365 ++++++++++++++++++++
 .../test_mix_partial_update_load1.csv              |   4 +
 .../test_mix_partial_update_load2.csv              |   1 +
 .../test_mix_partial_update_load3.csv              |   1 +
 .../test_mix_partial_update_load4.csv              |   1 +
 .../test_mix_partial_update_load5.csv              |   1 +
 .../test_mix_partial_update_load6.csv              |   1 +
 .../test_mix_partial_update_load7.csv              |   1 +
 .../test_mix_partial_update_load_A.csv             |   1 +
 .../test_mix_partial_update_load_B.csv             |   1 +
 .../test_mix_partial_update_load_C.csv             |   1 +
 .../test_mix_partial_update_load_D.csv             |   1 +
 .../partial_update/test_mix_partial_update.groovy  | 381 +++++++++++++++++++++
 13 files changed, 760 insertions(+)

diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update.out b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update.out
new file mode 100644
index 00000000000..13b699ae13c
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update.out
@@ -0,0 +1,365 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !select1 --
+1      1       1       1       10
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select2 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select3 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select4 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select5 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select6 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select7 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select8 --
+2      2       2       2       2
+3      3       3       3       3
+4      4       4       4       4
+
+-- !select9 --
+2      2       2       2       2
+3      3       3       3       3
+4      4       4       4       4
+
+-- !select1 --
+1      1       1       1       10
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select2 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select3 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select4 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select5 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select6 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select7 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select8 --
+2      2       2       2       2
+3      3       3       3       3
+4      4       4       4       4
+
+-- !select9 --
+2      2       2       2       2
+3      3       3       3       3
+4      4       4       4       4
+
+-- !select_A --
+1      1       1       {"a":100, "b":100}
+
+-- !select_AA --
+1
+
+-- !select_AAA --
+1
+
+-- !select_B --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+
+-- !select_BB --
+2
+
+-- !select_BBB --
+2
+
+-- !select_C --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+
+-- !select_CC --
+1
+
+-- !select_CCC --
+2
+
+-- !select_D --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+3      3       3       {"a":300, "b":300}
+
+-- !select_DD --
+2
+
+-- !select_DDD --
+1
+
+-- !select_E --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+3      3       3       {"a":300, "b":300}
+4      4       4       {"a":400, "b":400}
+
+-- !select_EE --
+3
+
+-- !select_EEE --
+2
+
+-- !select_A --
+1      1       1       {"a":100, "b":100}
+
+-- !select_AA --
+1
+
+-- !select_AAA --
+1
+
+-- !select_B --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+
+-- !select_BB --
+2
+
+-- !select_BBB --
+2
+
+-- !select_C --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+
+-- !select_CC --
+1
+
+-- !select_CCC --
+2
+
+-- !select_D --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+3      3       3       {"a":300, "b":300}
+
+-- !select_DD --
+2
+
+-- !select_DDD --
+1
+
+-- !select_E --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+3      3       3       {"a":300, "b":300}
+4      4       4       {"a":400, "b":400}
+
+-- !select_EE --
+3
+
+-- !select_EEE --
+2
+
+-- !select1 --
+1      1       1       1       10
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select2 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select3 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select4 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select5 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select6 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select7 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select8 --
+2      2       2       2       2
+3      3       3       3       3
+4      4       4       4       4
+
+-- !select9 --
+2      2       2       2       2
+3      3       3       3       3
+4      4       4       4       4
+
+-- !select1 --
+1      1       1       1       10
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select2 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select3 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select4 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select5 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select6 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select7 --
+2      2       2       2       2
+3      3       3       3       3
+
+-- !select8 --
+2      2       2       2       2
+3      3       3       3       3
+4      4       4       4       4
+
+-- !select9 --
+2      2       2       2       2
+3      3       3       3       3
+4      4       4       4       4
+
+-- !select_A --
+1      1       1       {"a":100, "b":100}
+
+-- !select_AA --
+1
+
+-- !select_AAA --
+1
+
+-- !select_B --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+
+-- !select_BB --
+2
+
+-- !select_BBB --
+2
+
+-- !select_C --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+
+-- !select_CC --
+1
+
+-- !select_CCC --
+2
+
+-- !select_D --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+3      3       3       {"a":300, "b":300}
+
+-- !select_DD --
+2
+
+-- !select_DDD --
+1
+
+-- !select_E --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+3      3       3       {"a":300, "b":300}
+4      4       4       {"a":400, "b":400}
+
+-- !select_EE --
+3
+
+-- !select_EEE --
+2
+
+-- !select_A --
+1      1       1       {"a":100, "b":100}
+
+-- !select_AA --
+1
+
+-- !select_AAA --
+1
+
+-- !select_B --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+
+-- !select_BB --
+2
+
+-- !select_BBB --
+2
+
+-- !select_C --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+
+-- !select_CC --
+1
+
+-- !select_CCC --
+2
+
+-- !select_D --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+3      3       3       {"a":300, "b":300}
+
+-- !select_DD --
+2
+
+-- !select_DDD --
+1
+
+-- !select_E --
+1      1       1       {"a":100, "b":100}
+2      2       2       {"a":200, "b":200}
+3      3       3       {"a":300, "b":300}
+4      4       4       {"a":400, "b":400}
+
+-- !select_EE --
+3
+
+-- !select_EEE --
+2
+
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load1.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load1.csv
new file mode 100644
index 00000000000..c6d4a781493
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load1.csv
@@ -0,0 +1,4 @@
+1,1,1,1,1
+1,1,1,1,10
+2,2,2,2,2
+3,3,3,3,3
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load2.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load2.csv
new file mode 100644
index 00000000000..358bc6ea41e
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load2.csv
@@ -0,0 +1 @@
+1,10,null,10,10,1
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load3.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load3.csv
new file mode 100644
index 00000000000..7d5943d7378
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load3.csv
@@ -0,0 +1 @@
+1,10,null,10,20,1
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load4.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load4.csv
new file mode 100644
index 00000000000..fa5a617f2ad
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load4.csv
@@ -0,0 +1 @@
+1,10,null,10,1,0
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load5.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load5.csv
new file mode 100644
index 00000000000..8bb28fd1159
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load5.csv
@@ -0,0 +1 @@
+2,1
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load6.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load6.csv
new file mode 100644
index 00000000000..08ecb109529
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load6.csv
@@ -0,0 +1 @@
+3,1,1
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load7.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load7.csv
new file mode 100644
index 00000000000..82413299cf3
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load7.csv
@@ -0,0 +1 @@
+4,4,4,4,4
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_A.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_A.csv
new file mode 100644
index 00000000000..4784f6c8c11
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_A.csv
@@ -0,0 +1 @@
+1      1       1       {"a":100,"b":100}
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_B.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_B.csv
new file mode 100644
index 00000000000..0e2f1484994
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_B.csv
@@ -0,0 +1 @@
+2      2       2       {"a":200,"b":200}
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_C.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_C.csv
new file mode 100644
index 00000000000..8b75516f5d9
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_C.csv
@@ -0,0 +1 @@
+3      3       3       {"a":300,"b":300}
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_D.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_D.csv
new file mode 100644
index 00000000000..687b4ad147c
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_mix_partial_update_load_D.csv
@@ -0,0 +1 @@
+4      4       4       4       {"a":400,"b":400}
\ No newline at end of file
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_mix_partial_update.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_mix_partial_update.groovy
new file mode 100644
index 00000000000..0a2322b9d0b
--- /dev/null
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_mix_partial_update.groovy
@@ -0,0 +1,381 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite('test_mix_partial_update') {
+
+    String db = context.config.getDbNameByFile(context.file)
+    sql "select 1;" // to create database
+
+    for (def use_row_store : [false, true]) {
+        logger.info("current params: use_row_store: ${use_row_store}")
+
+        connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) {
+            sql "use ${db};"
+
+            sql "set enable_nereids_planner=true"
+            sql "set enable_fallback_to_original_planner=false"
+
+            def tableInsertName1 = "test_mix_partial_update"
+            sql "DROP TABLE IF EXISTS ${tableInsertName1};"
+            sql """ CREATE TABLE IF NOT EXISTS ${tableInsertName1} (
+                    `k1` int NOT NULL,
+                    `c1` int,
+                    `c2` int,
+                    `c3` int,
+                    `seq` int
+                    )UNIQUE KEY(k1)
+                DISTRIBUTED BY HASH(k1) BUCKETS 1
+                PROPERTIES (
+                    "disable_auto_compaction" = "true",
+                    "replication_num" = "1",
+                    "function_column.sequence_col" = "seq",
+                    "store_row_column" = "${use_row_store}"); """
+            sql "insert into ${tableInsertName1} 
values(1,1,1,1,1),(1,1,1,1,10),(2,2,2,2,2),(3,3,3,3,3)"
+            // 1,1,1,1,10
+            // 2,2,2,2,2
+            // 3,3,3,3,3
+            qt_select1 "select * from ${tableInsertName1} order by k1"
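+            // __DORIS_DELETE_SIGN__ is the hidden delete-sign column on unique
+            // tables; writing 1 to it marks the key for deletion on merge.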
+            sql "insert into ${tableInsertName1} 
(k1,c1,c2,c3,seq,__DORIS_DELETE_SIGN__) values(1,10,null,10,10,1)"
+            // 2,2,2,2,2
+            // 3,3,3,3,3
+            qt_select2 "select * from ${tableInsertName1} order by k1"
+            sql "insert into ${tableInsertName1} 
(k1,c1,c2,c3,seq,__DORIS_DELETE_SIGN__) values(1,10,null,10,20,1)"
+            // 2,2,2,2,2
+            // 3,3,3,3,3
+            qt_select3 "select * from ${tableInsertName1} order by k1"
+            sql "insert into ${tableInsertName1} 
(k1,c1,c2,c3,seq,__DORIS_DELETE_SIGN__) values(1,10,null,10,1,0)"
+            // error
+            // 1,10,null,10,1
+            // 2,2,2,2,2
+            // 3,3,3,3,3
+            qt_select4 "select * from ${tableInsertName1} order by k1"
+            sql "update ${tableInsertName1} set seq = 30 where k1 = 1"
+            // 1,10,null,30,1
+            // 2,2,2,2,2
+            // 3,3,3,3,3
+            qt_select5 "select * from ${tableInsertName1} order by k1"
+
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+
+            sql "insert into ${tableInsertName1} (k1,seq) values(2,1)"
+            // 2,2,2,2,1
+            // 3,3,3,3,3
+            qt_select6 "select * from ${tableInsertName1} order by k1"
+
+            sql "insert into ${tableInsertName1} 
(k1,seq,__DORIS_DELETE_SIGN__) values(3,1,1)"
+            // 2,2,2,2,1
+            // 3,3,3,3,3
+            qt_select7 "select * from ${tableInsertName1} order by k1"
+
+            sql "set enable_unique_key_partial_update=false;"
+            sql "set enable_insert_strict=true;"
+            sql "insert into ${tableInsertName1} values(4,4,4,4,4)"
+            // 2,2,2,2,1
+            // 3,3,3,3,3
+            // 4,4,4,4,4
+            qt_select8 "select * from ${tableInsertName1} order by k1"
+
+            sql "update ${tableInsertName1} set seq = 1 where k1 = 4"
+            // 2,2,2,2,1
+            // 3,3,3,3,3
+            // 4,4,4,4,4
+            qt_select9 "select * from ${tableInsertName1} order by k1"
+
+            def tableStreamName1 = "test_mix_partial_update"
+            sql "DROP TABLE IF EXISTS ${tableStreamName1};"
+            sql """ CREATE TABLE IF NOT EXISTS ${tableStreamName1} (
+                    `k1` int NOT NULL,
+                    `c1` int,
+                    `c2` int,
+                    `c3` int,
+                    `seq` int
+                    )UNIQUE KEY(k1)
+                DISTRIBUTED BY HASH(k1) BUCKETS 1
+                PROPERTIES (
+                    "disable_auto_compaction" = "true",
+                    "replication_num" = "1",
+                    "function_column.sequence_col" = "seq",
+                    "store_row_column" = "${use_row_store}"); """
+
+            streamLoad {
+                table "${tableStreamName1}"
+                set 'column_separator', ','
+                set 'format', 'csv'
+                file "test_mix_partial_update_load1.csv"
+                time 10000 // limit inflight 10s
+            }
+            // sql "insert into ${tableInsertName1} 
values(1,1,1,1,1),(1,1,1,1,10),(2,2,2,2,2),(3,3,3,3,3)"
+            // 1,1,1,1,10
+            // 2,2,2,2,2
+            // 3,3,3,3,3
+            qt_select1 "select * from ${tableStreamName1} order by k1"
+
+            streamLoad {
+                table "${tableStreamName1}"
+                set 'column_separator', ','
+                set 'format', 'csv'
+                set 'columns', 'k1,c1,c2,c3,seq,__DORIS_DELETE_SIGN__'
+                //set 'partial_columns', 'true'
+                file "test_mix_partial_update_load2.csv"
+                time 10000 // limit inflight 10s
+            }
+            //sql "insert into ${tableStreamName1} 
(k1,c1,c2,c3,seq,__DORIS_DELETE_SIGN__) values(1,10,null,10,10,1)"
+            // 2,2,2,2,2
+            // 3,3,3,3,3
+            qt_select2 "select * from ${tableStreamName1} order by k1"
+
+
+            streamLoad {
+                table "${tableStreamName1}"
+                set 'column_separator', ','
+                set 'format', 'csv'
+                set 'columns', 'k1,c1,c2,c3,seq,__DORIS_DELETE_SIGN__'
+                //set 'partial_columns', 'true'
+                file "test_mix_partial_update_load3.csv"
+                time 10000 // limit inflight 10s
+            }
+            //sql "insert into ${tableStreamName1} 
(k1,c1,c2,c3,seq,__DORIS_DELETE_SIGN__) values(1,10,null,10,20,1)"
+            // 2,2,2,2,2
+            // 3,3,3,3,3
+            qt_select3 "select * from ${tableStreamName1} order by k1"
+
+
+            streamLoad {
+                table "${tableStreamName1}"
+                set 'column_separator', ','
+                set 'format', 'csv'
+                set 'columns', 'k1,c1,c2,c3,seq,__DORIS_DELETE_SIGN__'
+                //set 'partial_columns', 'true'
+                file "test_mix_partial_update_load4.csv"
+                time 10000 // limit inflight 10s
+            }
+            //sql "insert into ${tableStreamName1} 
(k1,c1,c2,c3,seq,__DORIS_DELETE_SIGN__) values(1,10,null,10,1,0)"
+            // 1,10,null,10,1
+            // 2,2,2,2,2
+            // 3,3,3,3,3
+            qt_select4 "select * from ${tableStreamName1} order by k1"
+            sql "update ${tableInsertName1} set seq = 30 where k1 = 1"
+            // 1,10,null,30,1
+            // 2,2,2,2,2
+            // 3,3,3,3,3
+            qt_select5 "select * from ${tableStreamName1} order by k1"
+
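+            // The 'partial_columns:true' stream load header is the load-side
+            // counterpart of the enable_unique_key_partial_update session variable.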
+            streamLoad {
+                table "${tableStreamName1}"
+                set 'column_separator', ','
+                set 'format', 'csv'
+                set 'columns', 'k1,seq'
+                set 'partial_columns', 'true'
+                file "test_mix_partial_update_load5.csv"
+                time 10000 // limit inflight 10s
+            }
+            //sql "set enable_unique_key_partial_update=true;"
+            //sql "set enable_insert_strict=false;"
+            //sql "insert into ${tableStreamName1} (k1,seq) values(2,1)"
+            // 2,2,2,2,1
+            // 3,3,3,3,3
+            qt_select6 "select * from ${tableStreamName1} order by k1"
+
+            streamLoad {
+                table "${tableStreamName1}"
+                set 'column_separator', ','
+                set 'format', 'csv'
+                set 'columns', 'k1,seq,__DORIS_DELETE_SIGN__'
+                set 'partial_columns', 'true'
+                file "test_mix_partial_update_load6.csv"
+                time 10000 // limit inflight 10s
+            }
+            //sql "set enable_unique_key_partial_update=true;"
+            //sql "set enable_insert_strict=false;"
+            // sql "insert into ${tableStreamName1} 
(k1,seq,__DORIS_DELETE_SIGN__) values(3,1,1)"
+            // 2,2,2,2,1
+            // 3,3,3,3,3
+            qt_select7 "select * from ${tableStreamName1} order by k1"
+
+            streamLoad {
+                table "${tableStreamName1}"
+                set 'column_separator', ','
+                set 'format', 'csv'
+                file "test_mix_partial_update_load7.csv"
+                time 10000 // limit inflight 10s
+            }
+            // sql "insert into ${tableStreamName1} values(4,4,4,4,4)"
+            // 2,2,2,2,1
+            // 3,3,3,3,3
+            // 4,4,4,4,4
+            qt_select8 "select * from ${tableStreamName1} order by k1"
+
+            sql "update ${tableStreamName1} set seq = 1 where k1 = 4"
+            // 2,2,2,2,1
+            // 3,3,3,3,3
+            // 4,4,4,4,4
+            qt_select9 "select * from ${tableStreamName1} order by k1"
+
+
+            sql "set enable_nereids_planner=true"
+            sql "set enable_fallback_to_original_planner=false"
+
+            def tableInsertName2 = "test_mix_partial_update2"
+            sql "DROP TABLE IF EXISTS ${tableInsertName2};"
+            sql """ CREATE TABLE IF NOT EXISTS ${tableInsertName2} (
+                    `k1` int NOT NULL,
+                    `c1` bigint NOT NULL auto_increment(100),
+                    `c2` int,
+                    `c3` int,
+                    `c4` map<string, int>,
+                    `c5` datetimev2(3) DEFAULT CURRENT_TIMESTAMP,
+                    `c6` datetimev2(3) DEFAULT CURRENT_TIMESTAMP
+                    )UNIQUE KEY(k1)
+                DISTRIBUTED BY HASH(k1) BUCKETS 1
+                PROPERTIES (
+                    "disable_auto_compaction" = "true",
+                    "replication_num" = "1",
+                    "store_row_column" = "${use_row_store}"); """
+            
+            sql "insert into ${tableInsertName2} (k1,c2,c3,c4) 
values(1,1,1,{'a':100,'b':100})"
+            qt_select_A "select k1,c2,c3,c4 from ${tableInsertName2}"
+            qt_select_AA "select count(distinct c1) from ${tableInsertName2}"
+            qt_select_AAA"select count(*) from ${tableInsertName2} where c5 = 
c6"
+
+
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "insert into ${tableInsertName2} (k1,c2,c3,c4) 
values(2,2,2,{'a':200,'b':200})"
+            qt_select_B "select k1,c2,c3,c4 from ${tableInsertName2}"
+            qt_select_BB "select count(distinct c1) from ${tableInsertName2}"
+            qt_select_BBB "select count(*) from ${tableInsertName2} where c5 = c6"
+            sql "set enable_unique_key_partial_update=false;"
+            sql "set enable_insert_strict=true;"
+
+            sql "update ${tableInsertName2} set c1 = 100"
+            qt_select_C "select k1,c2,c3,c4 from ${tableInsertName2}"
+            qt_select_CC "select count(distinct c1) from ${tableInsertName2}"
+            qt_select_CCC "select count(*) from ${tableInsertName2} where c5 = c6"
+
+            // do light weight schema change
+            sql """ ALTER TABLE ${tableInsertName2} add column `c7` 
datetimev2(3) DEFAULT CURRENT_TIMESTAMP after c6; """
+            waitForSchemaChangeDone {
+                sql """ SHOW ALTER TABLE COLUMN WHERE 
TableName='${tableInsertName2}' ORDER BY createtime DESC LIMIT 1 """
+                time 60
+            }
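+            // Adding a value column at the end is a light-weight schema change
+            // (metadata only); adding key column `k2` below is a heavy-weight
+            // change that rewrites the table data.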
+
+            sql "insert into ${tableInsertName2} (k1,c2,c3,c4) 
values(3,3,3,{'a':300,'b':300})"
+            qt_select_D "select k1,c2,c3,c4 from ${tableInsertName2}"
+            qt_select_DD "select count(distinct c1) from ${tableInsertName2}"
+            qt_select_DDD "select count(*) from ${tableInsertName2} where c5 = c7"
+
+            // do heavy weight schema change
+            sql """ ALTER TABLE ${tableInsertName2} add column `k2` int after 
k1; """
+            waitForSchemaChangeDone {
+                sql """ SHOW ALTER TABLE COLUMN WHERE 
TableName='${tableInsertName2}' ORDER BY createtime DESC LIMIT 1 """
+                time 60
+            }
+
+            sql "insert into ${tableInsertName2} (k1,k2,c2,c3,c4) 
values(4,4,4,4,{'a':400,'b':400})"
+            qt_select_E "select k1,c2,c3,c4 from ${tableInsertName2}"
+            qt_select_EE "select count(distinct c1) from ${tableInsertName2}"
+            qt_select_EEE "select count(*) from ${tableInsertName2} where c5 = c7"
+
+            def tableStreamName2 = "test_mix_partial_update2"
+            sql "DROP TABLE IF EXISTS ${tableStreamName2};"
+            sql """ CREATE TABLE IF NOT EXISTS ${tableStreamName2} (
+                    `k1` int NOT NULL,
+                    `c1` bigint NOT NULL auto_increment(100),
+                    `c2` int,
+                    `c3` int,
+                    `c4` map<string, int>,
+                    `c5` datetimev2(3) DEFAULT CURRENT_TIMESTAMP,
+                    `c6` datetimev2(3) DEFAULT CURRENT_TIMESTAMP
+                    )UNIQUE KEY(k1)
+                DISTRIBUTED BY HASH(k1) BUCKETS 1
+                PROPERTIES (
+                    "disable_auto_compaction" = "true",
+                    "replication_num" = "1",
+                    "store_row_column" = "${use_row_store}"); """
+
+            //sql "insert into ${tableInsertName2} (k1,c2,c3,c4) 
values(1,1,1,{'a':100,'b':100})"
+            streamLoad {
+                table "${tableStreamName2}"
+                set 'columns', 'k1,c2,c3,c4'
+                set 'format', 'csv'
+                file "test_mix_partial_update_load_A.csv"
+                time 10000 // limit inflight 10s
+            }
+            qt_select_A "select k1,c2,c3,c4 from ${tableStreamName2}"
+            qt_select_AA "select count(distinct c1) from ${tableStreamName2}"
+            qt_select_AAA "select count(*) from ${tableStreamName2} where c5 = 
c6"
+
+
+            // sql "insert into ${tableStreamName2} (k1,c2,c3,c4) 
values(2,2,2,{'a':200,'b':200})"
+            streamLoad {
+                table "${tableStreamName2}"
+                set 'columns', 'k1,c2,c3,c4'
+                set 'partial_columns', 'true'
+                set 'format', 'csv'
+                file "test_mix_partial_update_load_B.csv"
+                time 10000 // limit inflight 10s
+            }
+            qt_select_B "select k1,c2,c3,c4 from ${tableStreamName2}"
+            qt_select_BB "select count(distinct c1) from ${tableStreamName2}"
+            qt_select_BBB "select count(*) from ${tableStreamName2} where c5 = c6"
+
+            sql "update ${tableStreamName2} set c1 = 100"
+            qt_select_C "select k1,c2,c3,c4 from ${tableStreamName2}"
+            qt_select_CC "select count(distinct c1) from ${tableStreamName2}"
+            qt_select_CCC "select count(*) from ${tableStreamName2} where c5 = c6"
+
+            // do light weight schema change
+            sql """ ALTER TABLE ${tableStreamName2} add column `c7` 
datetimev2(3) DEFAULT CURRENT_TIMESTAMP after c6; """
+            waitForSchemaChangeDone {
+                sql """ SHOW ALTER TABLE COLUMN WHERE 
TableName='${tableStreamName2}' ORDER BY createtime DESC LIMIT 1 """
+                time 60
+            }
+
+            // sql "insert into ${tableInsertName2} (k1,c2,c3,c4) 
values(3,3,3,{'a':300,'b':300})"
+            streamLoad {
+                table "${tableStreamName2}"
+                set 'columns', 'k1,c2,c3,c4'
+                set 'format', 'csv'
+                file "test_mix_partial_update_load_C.csv"
+                time 10000 // limit inflight 10s
+            }
+            qt_select_D "select k1,c2,c3,c4 from ${tableStreamName2}"
+            qt_select_DD "select count(distinct c1) from ${tableStreamName2}"
+            qt_select_DDD "select count(*) from ${tableStreamName2} where c5 = c7"
+
+            // do heavy weight schema change
+            sql """ ALTER TABLE ${tableStreamName2} add column `k2` int after 
k1; """
+            waitForSchemaChangeDone {
+                sql """ SHOW ALTER TABLE COLUMN WHERE 
TableName='${tableInsertName2}' ORDER BY createtime DESC LIMIT 1 """
+                time 60
+            }
+
+            // sql "insert into ${tableInsertName2} (k1,k2,c2,c3,c4) 
values(4,4,4,4,{'a':400,'b':400})"
+            streamLoad {
+                table "${tableStreamName2}"
+                set 'columns', 'k1,k2,c2,c3,c4'
+                set 'format', 'csv'
+                file "test_mix_partial_update_load_D.csv"
+                time 10000 // limit inflight 10s
+            }
+            qt_select_E "select k1,c2,c3,c4 from ${tableStreamName2}"
+            qt_select_EE "select count(distinct c1) from ${tableStreamName2}"
+            qt_select_EEE "select count(*) from ${tableStreamName2} where c5 = c7"
+        }
+    }
+}
\ No newline at end of file

