This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 1593b4dd70b [opt](autoinc) Remove some restrictions on schema change 
on table that has auto-increment column (#40280)
1593b4dd70b is described below

commit 1593b4dd70b8e901ca212bcfc8cb9ed4fb1bc5de
Author: bobhan1 <bh2444151...@outlook.com>
AuthorDate: Tue Sep 24 23:17:11 2024 +0800

    [opt](autoinc) Remove some restrictions on schema change on table that has 
auto-increment column (#40280)
    
    ## Proposed changes
    
    - #37186 forbids some schema changes on tables with an auto-increment
    column, one of which is any schema change that involves double write on
    tables which have an auto-increment column. However, the process of
    generating auto-increment column values happens before distributing the
    data to indexes, so this restriction can be removed.
    
    branch-2.1-pick: https://github.com/apache/doris/pull/41096
---
 be/src/cloud/cloud_schema_change_job.cpp           |   2 +
 be/src/olap/schema_change.cpp                      |   2 +
 .../apache/doris/alter/SchemaChangeHandler.java    |   6 -
 .../test_auto_inc_schema_change_double_write.out   |   9 ++
 ...test_auto_inc_schema_change_double_write.groovy | 129 +++++++++++++++++++++
 .../test_schema_change_auto_inc.groovy             |   6 -
 6 files changed, 142 insertions(+), 12 deletions(-)

diff --git a/be/src/cloud/cloud_schema_change_job.cpp 
b/be/src/cloud/cloud_schema_change_job.cpp
index 614202b0258..b7e3be93e85 100644
--- a/be/src/cloud/cloud_schema_change_job.cpp
+++ b/be/src/cloud/cloud_schema_change_job.cpp
@@ -252,6 +252,8 @@ Status 
CloudSchemaChangeJob::_convert_historical_rowsets(const SchemaChangeParam
             changer, sc_sorting,
             
_cloud_storage_engine.memory_limitation_bytes_per_thread_for_schema_change());
 
+    DBUG_EXECUTE_IF("CloudSchemaChangeJob::_convert_historical_rowsets.block", 
DBUG_BLOCK);
+
     // 3. Convert historical data
     bool already_exist_any_version = false;
     for (const auto& rs_reader : sc_params.ref_rowset_readers) {
diff --git a/be/src/olap/schema_change.cpp b/be/src/olap/schema_change.cpp
index f6cfdf3dbde..d40a44fd710 100644
--- a/be/src/olap/schema_change.cpp
+++ b/be/src/olap/schema_change.cpp
@@ -1134,6 +1134,8 @@ Status SchemaChangeJob::_convert_historical_rowsets(const 
SchemaChangeParams& sc
             changer, sc_sorting, sc_directly,
             
_local_storage_engine.memory_limitation_bytes_per_thread_for_schema_change());
 
+    DBUG_EXECUTE_IF("SchemaChangeJob::_convert_historical_rowsets.block", 
DBUG_BLOCK);
+
     // c.Convert historical data
     bool have_failure_rowset = false;
     for (const auto& rs_reader : sc_params.ref_rowset_readers) {
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java 
b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
index 0ee38f28676..91bc4c13695 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
@@ -741,12 +741,6 @@ public class SchemaChangeHandler extends AlterHandler {
         } // end for handling other indices
 
         if (typeChanged && !lightSchemaChange) {
-            Optional<Column> autoIncCol = 
olapTable.getBaseSchema(true).stream()
-                    .filter(col -> col.isAutoInc()).findFirst();
-            if (autoIncCol.isPresent()) {
-                throw new DdlException("Can not modify column " + 
modColumn.getName() + " becasue table "
-                        + olapTable.getName() + " has auto-increment column " 
+ autoIncCol.get().getName());
-            }
             /*
              * In new alter table process (AlterJobV2), any modified columns 
are treated as new columns.
              * But the modified columns' name does not changed. So in order to 
distinguish this, we will add
diff --git 
a/regression-test/data/fault_injection_p0/test_auto_inc_schema_change_double_write.out
 
b/regression-test/data/fault_injection_p0/test_auto_inc_schema_change_double_write.out
new file mode 100644
index 00000000000..d0405083b99
--- /dev/null
+++ 
b/regression-test/data/fault_injection_p0/test_auto_inc_schema_change_double_write.out
@@ -0,0 +1,9 @@
+-- This file is automatically generated. You should know what you did if you 
want to edit this
+-- !sql --
+
+-- !sql --
+
+-- !sql --
+
+-- !sql --
+
diff --git 
a/regression-test/suites/fault_injection_p0/test_auto_inc_schema_change_double_write.groovy
 
b/regression-test/suites/fault_injection_p0/test_auto_inc_schema_change_double_write.groovy
new file mode 100644
index 00000000000..98a99f4fdf9
--- /dev/null
+++ 
b/regression-test/suites/fault_injection_p0/test_auto_inc_schema_change_double_write.groovy
@@ -0,0 +1,129 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.junit.Assert
+import java.util.concurrent.TimeUnit
+import java.util.concurrent.atomic.AtomicBoolean
+
+suite("test_auto_inc_schema_change_double_write", "nonConcurrent") {
+    def backends = sql_return_maparray('show backends')
+    def replicaNum = 0
+    for (def be : backends) {
+        def alive = be.Alive.toBoolean()
+        def decommissioned = be.SystemDecommissioned.toBoolean()
+        if (alive && !decommissioned) {
+            replicaNum++
+        }
+    }
+    assertTrue(replicaNum > 0)
+    if (isCloudMode()) {
+        replicaNum = 1
+    }
+
+    def block_convert_historical_rowsets = {
+        if (isCloudMode()) {
+            
GetDebugPoint().enableDebugPointForAllBEs("CloudSchemaChangeJob::_convert_historical_rowsets.block")
+        } else {
+            
GetDebugPoint().enableDebugPointForAllBEs("SchemaChangeJob::_convert_historical_rowsets.block")
+        }
+    }
+
+    def unblock = {
+        if (isCloudMode()) {
+            
GetDebugPoint().disableDebugPointForAllBEs("CloudSchemaChangeJob::_convert_historical_rowsets.block")
+        } else {
+            
GetDebugPoint().disableDebugPointForAllBEs("SchemaChangeJob::_convert_historical_rowsets.block")
+        }
+    }
+
+    for (def model : ["UNIQUE", "DUPLICATE"]) {
+        try {
+            GetDebugPoint().clearDebugPointsForAllFEs()
+            GetDebugPoint().clearDebugPointsForAllBEs()
+            def tableName = "test_auto_inc_schema_change_double_write"
+            def table1 = "${tableName}_${model}"
+            sql "DROP TABLE IF EXISTS ${table1} FORCE;"
+            sql """ CREATE TABLE IF NOT EXISTS ${table1} (
+                    `k1` BIGINT NOT NULL AUTO_INCREMENT,
+                    `c1` int,
+                    `c2` int,
+                    `c3` int,
+                    `c4` int
+                    )${model} KEY(k1)
+                DISTRIBUTED BY HASH(k1) BUCKETS 1
+                PROPERTIES (
+                    "disable_auto_compaction" = "true",
+                    "replication_num" = "${replicaNum}"); """
+
+            sql """insert into ${table1}(c1,c2,c3,c4) select 
number,number,number,number from numbers("number"="5000"); """
+            sql "sync;"
+            qt_sql "select count(*) from ${table1} group by k1 having count(*) 
> 1;"
+
+            block_convert_historical_rowsets()
+            
+            AtomicBoolean stopped = new AtomicBoolean(false)
+
+            def iters = 3
+            def rows = 500
+            def thread_num = 4
+            def t1 = Thread.start {
+                def threads = []
+                (1..thread_num).each { id1 -> 
+                    threads.add(Thread.start {
+                        while (!stopped.get()) {
+                            (1..iters).each { id2 -> 
+                                sql """insert into ${table1}(c1,c2,c3,c4) 
select number,number,number,number from numbers("number"="${rows}");"""
+                            }
+                            Thread.sleep(200)
+                        }
+                    })
+                }
+
+                threads.each { thread -> thread.join() }
+            }
+
+            Thread.sleep(3000)
+
+            sql "alter table ${table1} modify column c3 varchar(100) null;"
+
+            Thread.sleep(3000);
+
+            unblock()
+
+            def t2 = Thread.start {
+                waitForSchemaChangeDone {
+                    sql """SHOW ALTER TABLE COLUMN WHERE TableName='${table1}' 
ORDER BY createtime DESC LIMIT 1"""
+                    time 20000
+                }
+            }
+            
+            Thread.sleep(5000);
+            stopped.set(true)
+            t1.join()
+            t2.join()
+
+            qt_sql "select count(*) from ${table1} group by k1 having count(*) 
> 1;"
+
+        } catch(Exception e) {
+            logger.info(e.getMessage())
+            throw e
+        } finally {
+            GetDebugPoint().clearDebugPointsForAllFEs()
+            GetDebugPoint().clearDebugPointsForAllBEs()
+        }
+    }
+}
diff --git 
a/regression-test/suites/schema_change_p0/test_schema_change_auto_inc.groovy 
b/regression-test/suites/schema_change_p0/test_schema_change_auto_inc.groovy
index eeae07bd247..d249d3c5632 100644
--- a/regression-test/suites/schema_change_p0/test_schema_change_auto_inc.groovy
+++ b/regression-test/suites/schema_change_p0/test_schema_change_auto_inc.groovy
@@ -53,12 +53,6 @@ suite("test_schema_change_auto_inc") {
         exception "Can't modify the column[id2]'s auto-increment attribute."
     }
 
-    // schema change that invoke double write on a table which has 
auto-increment column is forbidden 
-    test {
-        sql "alter table ${table1} modify column value VARCHAR(20) NOT NULL"
-        exception "Can not modify column value becasue table ${table1} has 
auto-increment column id"
-    }
-
     sql """ insert into ${table1}(name, value, id2) values("A", 999, 1), ("B", 
888, 2), ("C", 777, 3);"""
     qt_sql "select count(distinct id) from ${table1};"
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org

Reply via email to