This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new b5937f512d0 branch-3.0: [fix](group commit) group commit support generated column #48538 (#48582)
b5937f512d0 is described below

commit b5937f512d0de50b7206b6dc8805c1f029131321
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Tue Mar 4 17:22:09 2025 +0800

    branch-3.0: [fix](group commit) group commit support generated column #48538 (#48582)
    
    Cherry-picked from #48538
    
    Co-authored-by: meiyi <me...@selectdb.com>
---
 .../doris/nereids/parser/LogicalPlanBuilder.java   |   1 +
 .../trees/plans/commands/insert/InsertUtils.java   |   3 +-
 .../insert_group_commit_with_large_data.out        | Bin 0 -> 221 bytes
 .../insert_group_commit_with_large_data.groovy     |  50 ++++++++++++++++++---
 4 files changed, 46 insertions(+), 8 deletions(-)
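
For context, the user-visible scenario this fix enables is an INSERT into a table with a NOT NULL generated column while the session runs in group-commit mode. Below is an illustrative JDBC sketch, not part of the patch: the URL, credentials, and table name are placeholders, while the DDL and the group_commit session variable mirror the regression test further down.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class GroupCommitGeneratedColumnExample {
        public static void main(String[] args) throws Exception {
            // Placeholder connection details; Doris speaks the MySQL protocol.
            String url = "jdbc:mysql://127.0.0.1:9030/regression_test_insert_p0";
            try (Connection conn = DriverManager.getConnection(url, "root", "");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("SET group_commit = async_mode");
                stmt.execute("CREATE TABLE t_gen(a int, b int, "
                        + "c double GENERATED ALWAYS AS (abs(a + b)) NOT NULL) "
                        + "DISTRIBUTED BY HASH(a) PROPERTIES(\"replication_num\" = \"1\")");
                // Omitting the generated column is the statement shape that
                // previously failed on the group-commit path and now succeeds.
                stmt.execute("INSERT INTO t_gen(a, b) VALUES (1, 2)");
            }
        }
    }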

diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java
index 3e300722e90..a77a650a0a3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java
@@ -605,6 +605,7 @@ public class LogicalPlanBuilder extends DorisParserBaseVisitor<Object> {
                     .getTableNameByTableId(Long.valueOf(ctx.tableId.getText()));
             tableName.add(name.getDb());
             tableName.add(name.getTbl());
+            ConnectContext.get().setDatabase(name.getDb());
         } else {
             throw new ParseException("tableName and tableId cannot both be null");
         }
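
The hunk above covers the branch where the insert target arrives as a tableId rather than a name, which is how the group-commit path identifies the table. The added call records the resolved database on the session's ConnectContext; the consumer is outside this diff, but a plausible reading is that later analysis (for example of generated-column expressions) resolves unqualified names against the current database, which was previously left unset on this path. A stand-in sketch of the flow, with all Doris types replaced by simplified local ones, so only the placement of the new call is meaningful:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    class ResolveByIdSketch {
        // Simplified stand-in for Doris's TableName.
        record TableName(String db, String tbl) {}

        // Stands in for ConnectContext.get().setDatabase(...).
        static String sessionDatabase;

        static List<String> resolve(Long tableId, Map<Long, TableName> idToName) {
            if (tableId == null) {
                throw new IllegalArgumentException("tableName and tableId cannot both be null");
            }
            TableName name = idToName.get(tableId);
            List<String> parts = new ArrayList<>();
            parts.add(name.db());
            parts.add(name.tbl());
            sessionDatabase = name.db(); // the one-line fix: pin the db for later resolution
            return parts;
        }
    }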
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertUtils.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertUtils.java
index a30592e21d1..2e3f05947a6 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertUtils.java
@@ -507,7 +507,8 @@ public class InsertUtils {
     private static void checkGeneratedColumnForInsertIntoSelect(TableIf table,
            UnboundLogicalSink<? extends Plan> unboundLogicalSink, Optional<InsertCommandContext> insertCtx) {
        // should not check delete stmt, because deletestmt can transform to insert delete sign
-        if (unboundLogicalSink.getDMLCommandType() == DMLCommandType.DELETE) {
+        if (unboundLogicalSink.getDMLCommandType() == DMLCommandType.DELETE
+                || unboundLogicalSink.getDMLCommandType() == DMLCommandType.GROUP_COMMIT) {
             return;
         }
         // This is for the insert overwrite values(),()
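
The second hunk widens the early return in checkGeneratedColumnForInsertIntoSelect from DELETE alone to DELETE or GROUP_COMMIT. The diff itself does not say why the check misfires for group commit; presumably the rewritten group-commit plan does not carry the value shape this insert-into-select check expects, so it now bails out the same way the delete-to-insert rewrite does. A self-contained rendering of the patched guard, with DMLCommandType reduced to a stand-in enum:

    class GeneratedColumnGuardSketch {
        // Stand-in for Doris's DMLCommandType; only the relevant values shown.
        enum DMLCommandType { INSERT, DELETE, GROUP_COMMIT }

        static boolean skipGeneratedColumnCheck(DMLCommandType type) {
            // DELETE is rewritten into an insert carrying a delete sign, and
            // group commit is now skipped as well, mirroring the patched condition.
            return type == DMLCommandType.DELETE || type == DMLCommandType.GROUP_COMMIT;
        }
    }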
diff --git a/regression-test/data/insert_p0/insert_group_commit_with_large_data.out b/regression-test/data/insert_p0/insert_group_commit_with_large_data.out
new file mode 100644
index 00000000000..06acc23ec64
Binary files /dev/null and b/regression-test/data/insert_p0/insert_group_commit_with_large_data.out differ
diff --git a/regression-test/suites/insert_p0/insert_group_commit_with_large_data.groovy b/regression-test/suites/insert_p0/insert_group_commit_with_large_data.groovy
index 8fa975f56ed..39773f887cb 100644
--- a/regression-test/suites/insert_p0/insert_group_commit_with_large_data.groovy
+++ b/regression-test/suites/insert_p0/insert_group_commit_with_large_data.groovy
@@ -19,13 +19,13 @@ import com.mysql.cj.jdbc.StatementImpl
 
 suite("insert_group_commit_with_large_data") {
     def db = "regression_test_insert_p0"
-    def table = "insert_group_commit_with_large_data"
+    def testTable = "insert_group_commit_with_large_data"
 
     def getRowCount = { expectedRowCount ->
         def retry = 0
         while (retry < 30) {
             sleep(2000)
-            def rowCount = sql "select count(*) from ${table}"
+            def rowCount = sql "select count(*) from ${testTable}"
             logger.info("rowCount: " + rowCount + ", retry: " + retry)
             if (rowCount[0][0] >= expectedRowCount) {
                 break
@@ -50,10 +50,10 @@ suite("insert_group_commit_with_large_data") {
 
     try {
         // create table
-        sql """ drop table if exists ${table}; """
+        sql """ drop table if exists ${testTable}; """
 
         sql """
-            CREATE TABLE `${table}` (
+            CREATE TABLE `${testTable}` (
                 `id` int(11) NOT NULL,
                 `name` varchar(1100) NULL,
                 `score` int(11) NULL default "-1"
@@ -71,7 +71,7 @@ suite("insert_group_commit_with_large_data") {
             sql """ use ${db}; """
 
             // insert into 5000 rows
-            def insert_sql = """ insert into ${table} values(1, 'a', 10)  """
+            def insert_sql = """ insert into ${testTable} values(1, 'a', 10)  """
             for (def i in 2..5000) {
                 insert_sql += """, (${i}, 'a', 10) """
             }
@@ -83,7 +83,7 @@ suite("insert_group_commit_with_large_data") {
             for (def i in 0..1024) {
                 name_value += 'a'
             }
-            insert_sql = """ insert into ${table} values(1, '${name_value}', 10)  """
+            insert_sql = """ insert into ${testTable} values(1, '${name_value}', 10)  """
             for (def i in 2..5000) {
                 insert_sql += """, (${i}, '${name_value}', 10) """
             }
@@ -93,6 +93,42 @@ suite("insert_group_commit_with_large_data") {
             */
         }
     } finally {
-        // try_sql("DROP TABLE ${table}")
+        // try_sql("DROP TABLE ${testTable}")
     }
+
+    // test generated column
+    testTable = "test_group_commit_generated_column"
+    sql """ drop table if exists ${testTable}; """
+    sql """create table ${testTable}(a int,b int,c double generated always as 
(abs(a+b)) not null)
+    DISTRIBUTED BY HASH(a) PROPERTIES("replication_num" = "1", 
"group_commit_interval_ms" = "40");"""
+    sql "INSERT INTO ${testTable} values(6,7,default);"
+    sql "INSERT INTO ${testTable}(a,b) values(1,2);"
+    sql "INSERT INTO ${testTable} values(3,5,default);"
+    getRowCount(3)
+    qt_select1  "select * from ${testTable} order by 1,2,3;"
+
+    streamLoad {
+        table "${testTable}"
+
+        set 'column_separator', ','
+        set 'columns', 'a,dummy,b'
+        file "test_group_commit_1.csv"
+        unset 'label'
+        set 'group_commit', 'async_mode'
+        set 'strict_mode', 'true'
+
+        time 10000 // limit inflight 10s
+
+        check { result, exception, startTime, endTime ->
+            if (exception != null) {
+                throw exception
+            }
+            log.info("Stream load result: ${result}".toString())
+            def json = parseJson(result)
+            assertEquals("success", json.Status.toLowerCase())
+            assertEquals(4, json.NumberTotalRows)
+        }
+    }
+    getRowCount(7)
+    qt_select2  "select * from ${testTable} order by 1,2,3;"
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
