This is an automated email from the ASF dual-hosted git repository.

zhangchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
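
For context, the operation fixed by the commit below is a column-reorder schema change on a merge-on-write (MOW) unique key table or a duplicate key table. The SQL below is an illustrative sketch only and is not taken from the commit; the table and column names (example_mow, k1, v1, v2) are hypothetical.

    -- Hypothetical MOW unique key table; the same reorder applies to DUPLICATE KEY tables.
    CREATE TABLE example_mow (
        k1 INT,
        v1 INT NOT NULL,
        v2 INT NOT NULL
    ) UNIQUE KEY(k1)
    DISTRIBUTED BY HASH(k1) BUCKETS 1
    PROPERTIES (
        "replication_num" = "1",
        "enable_unique_key_merge_on_write" = "true"
    );

    -- Reorder a value column. Before this fix, such a statement could fail because
    -- SchemaChangeHandler::createJob did not recognize it as a column-reorder job:
    -- isAggregationTypeImplicit was set incorrectly for MOW/duplicate key value columns.
    ALTER TABLE example_mow MODIFY COLUMN v1 INT NOT NULL AFTER v2;
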
The following commit(s) were added to refs/heads/master by this push:
     new 4984222d8eb [Fix](schema change) Fix can't do reorder column schema change for MOW table and duplicate key table (#37067)
4984222d8eb is described below

commit 4984222d8eb55ebf6c7674ffa17e98331553f2e9
Author: bobhan1 <bh2444151...@outlook.com>
AuthorDate: Wed Jul 3 15:28:45 2024 +0800

    [Fix](schema change) Fix can't do reorder column schema change for MOW table and duplicate key table (#37067)

    ## Proposed changes

    1. Set the column field `isAggregationTypeImplicit` correctly for MOW tables and duplicate key
       tables so that `SchemaChangeHandler::createJob` can recognize the schema change as a column
       reorder case.
    2. Remove the obsolete test cases `test_schema_change_datev2_with_delete` and
       `test_dup_keys_schema_change_datev2`. These cases were meant to test schema changes from
       `datev1`/`datetimev1` to `datev2`/`datetimev2`, but columns created as `date`/`datetime`
       now default to `datev2`/`datetimev2`. The cases should have hit the error "Nothing is
       changed. please check your alter stmt." once Doris enabled `datev2`/`datetimev2` by default,
       but they did not, because `isAggregationTypeImplicit` was set incorrectly for duplicate key
       table columns.
---
 .../apache/doris/alter/SchemaChangeHandler.java    |  10 +-
 .../plans/commands/info/ColumnDefinition.java      |   2 +-
 .../test_schema_change_datev2_with_delete.out      |  81 ----------
 .../datev2/test_dup_keys_schema_change_datev2.out  |  70 ---------
 .../test_modify_reorder_column.out                 |  22 +++
 .../alter_column_test_generated_column.groovy      |   4 -
 .../test_schema_change_datev2_with_delete.groovy   | 121 ---------------
 .../test_dup_keys_schema_change_datev2.groovy      | 167 ---------------------
 .../test_modify_reorder_column.groovy              | 131 ++++++++++++++++
 9 files changed, 163 insertions(+), 445 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
index b3b04bce21a..b9fd4acca79 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
@@ -562,7 +562,7 @@ public class SchemaChangeHandler extends AlterHandler {
         }
         if (!modColumn.isKey()) {
             if (olapTable.getEnableUniqueKeyMergeOnWrite()) {
-                modColumn.setAggregationType(AggregateType.NONE, false);
+                modColumn.setAggregationType(AggregateType.NONE, true);
             } else {
                 modColumn.setAggregationType(AggregateType.REPLACE, true);
             }
@@ -1387,6 +1387,14 @@
                 if (!alterColumn.equals(originSchema.get(i))) {
                     needAlterColumns.add(alterColumn);
                     hasColumnChange = true;
+                } else {
+                    Column oriColumn = originSchema.get(i);
+                    if ((oriColumn.getGeneratedColumnInfo() != null
+                            || alterColumn.getGeneratedColumnInfo() != null)
+                            && !oriColumn.getGeneratedColumnInfo().getExprSql()
+                                    .equals(alterColumn.getGeneratedColumnInfo().getExprSql())) {
+                        throw new DdlException("Not supporting alter table modify generated columns.");
+                    }
                 }
             }
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/ColumnDefinition.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/ColumnDefinition.java
index 569151430cb..8b8a4e0e5e3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/ColumnDefinition.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/ColumnDefinition.java
@@ -288,7 +288,7 @@
public class ColumnDefinition { } if (isOlap) { - if (!isKey && keysType.equals(KeysType.UNIQUE_KEYS)) { + if (!isKey && (keysType.equals(KeysType.UNIQUE_KEYS) || keysType.equals(KeysType.DUP_KEYS))) { aggTypeImplicit = true; } diff --git a/regression-test/data/schema_change/test_schema_change_datev2_with_delete.out b/regression-test/data/schema_change/test_schema_change_datev2_with_delete.out deleted file mode 100644 index 2ef72df7c71..00000000000 --- a/regression-test/data/schema_change/test_schema_change_datev2_with_delete.out +++ /dev/null @@ -1,81 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 
2022-01-01T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - diff --git a/regression-test/data/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.out b/regression-test/data/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.out deleted file mode 100644 index de1946b723f..00000000000 --- a/regression-test/data/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.out +++ /dev/null @@ -1,70 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 
2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- -2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11 -2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11 -2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11 -2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11 -2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11 - --- !sql -- - diff --git a/regression-test/data/schema_change_p0/test_modify_reorder_column.out b/regression-test/data/schema_change_p0/test_modify_reorder_column.out new file mode 100644 index 00000000000..64e030f740e --- /dev/null +++ b/regression-test/data/schema_change_p0/test_modify_reorder_column.out @@ -0,0 +1,22 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !dup -- +1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} {"f1": "C", "f2": "D", "f3": 20, "f4": 8.343} {"a":1,"b":[1],"c":1.0} + +-- !dup -- +1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} {"a":1,"b":[1],"c":1.0} {"f1": "C", "f2": "D", "f3": 20, "f4": 8.343} +2 {"f1": "E", "f2": "F", "f3": 30, "f4": 484.3234} {"a":1,"b":[1],"c":1.0} \N + +-- !mor -- +1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} {"f1": "C", "f2": "D", "f3": 20, "f4": 8.343} {"a":1,"b":[1],"c":1.0} + +-- !mor -- +1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} {"a":1,"b":[1],"c":1.0} {"f1": "C", "f2": "D", "f3": 20, "f4": 8.343} +2 {"f1": "E", "f2": "F", "f3": 30, "f4": 484.3234} {"a":1,"b":[1],"c":1.0} \N + +-- !mow -- +1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} {"f1": "C", "f2": "D", "f3": 20, "f4": 8.343} {"a":1,"b":[1],"c":1.0} + +-- !mow -- +1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} {"a":1,"b":[1],"c":1.0} {"f1": "C", "f2": "D", "f3": 20, "f4": 8.343} +2 {"f1": "E", "f2": "F", "f3": 30, "f4": 484.3234} {"a":1,"b":[1],"c":1.0} \N + diff --git a/regression-test/suites/ddl_p0/test_create_table_generated_column/alter_column_test_generated_column.groovy b/regression-test/suites/ddl_p0/test_create_table_generated_column/alter_column_test_generated_column.groovy index d6a6695bce6..f2108a4c8f8 100644 --- a/regression-test/suites/ddl_p0/test_create_table_generated_column/alter_column_test_generated_column.groovy +++ b/regression-test/suites/ddl_p0/test_create_table_generated_column/alter_column_test_generated_column.groovy @@ -114,10 +114,6 @@ suite("alter_column_test_generated_column") { sql "alter table alter_column_gen_col modify column c double as (a+b) after e;" exception "Not supporting alter table modify generated columns." } - test { - sql "alter table alter_column_gen_col modify column c int as (a+b) after e;" - exception "Not supporting alter table modify generated columns." 
- } // reorder column qt_reorder "alter table alter_column_gen_col order by(a,c,b,d,e);" diff --git a/regression-test/suites/schema_change/test_schema_change_datev2_with_delete.groovy b/regression-test/suites/schema_change/test_schema_change_datev2_with_delete.groovy deleted file mode 100644 index b6a827ebca1..00000000000 --- a/regression-test/suites/schema_change/test_schema_change_datev2_with_delete.groovy +++ /dev/null @@ -1,121 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_schema_change_datev2_with_delete") { - sql """ SET enable_profile = true """ - def tbName = "test_schema_change_datev2_with_delete" - def getJobState = { tableName -> - def jobStateResult = sql """ SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ - return jobStateResult[0][9] - } - - sql """ DROP TABLE IF EXISTS ${tbName} FORCE""" - // Create table and disable light weight schema change - sql """ - CREATE TABLE IF NOT EXISTS ${tbName} - ( - `datek1` date DEFAULT '2022-01-01', - `datek2` datetime DEFAULT '2022-01-01 11:11:11', - `datev1` date DEFAULT '2022-01-01', - `datev2` datetime DEFAULT '2022-01-01 11:11:11' - ) - DUPLICATE KEY(`datek1`,`datek2`) - DISTRIBUTED BY HASH(`datek1`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "light_schema_change" = "false"); - """ - sql """ insert into ${tbName} values('2022-01-02', '2022-01-02 11:11:11', '2022-01-02', '2022-01-02 11:11:11');""" - sql """ insert into ${tbName} (`datek1`, `datek2`, `datev1`) values('2022-01-03', '2022-01-03 11:11:11', '2022-01-03');""" - sql """ insert into ${tbName} (`datek1`, `datek2`, `datev2`) values('2022-01-04', '2022-01-04 11:11:11', '2022-01-04 11:11:11');""" - sql """ insert into ${tbName} (`datek1`, `datev1`, `datev2`) values('2022-01-05', '2022-01-05', '2022-01-05 11:11:11');""" - sql """ insert into ${tbName} (`datek2`, `datev1`, `datev2`) values('2022-01-06 11:11:11', '2022-01-06', '2022-01-06 11:11:11');""" - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ alter table ${tbName} modify column `datev1` datev2 DEFAULT '2022-01-01' """ - int max_try_time = 1000 - while(max_try_time--){ - String result = getJobState(tbName) - if (result == "FINISHED") { - sleep(3000) - break - } else { - sleep(100) - if (max_try_time < 1){ - assertEquals(1,2) - } - } - } - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ alter table ${tbName} modify column `datev2` datetimev2 DEFAULT '2022-01-01 11:11:11' """ - max_try_time = 1000 - while(max_try_time--){ - String result = getJobState(tbName) - if (result == "FINISHED") { - sleep(3000) - break - } else { - sleep(100) - if (max_try_time < 1){ - assertEquals(1,2) - } - } - } - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ alter 
table ${tbName} modify column `datev2` datetimev2(3) DEFAULT '2022-01-01 11:11:11' """ - max_try_time = 1000 - while(max_try_time--){ - String result = getJobState(tbName) - if (result == "FINISHED") { - sleep(3000) - break - } else { - sleep(100) - if (max_try_time < 1){ - assertEquals(1,2) - } - } - } - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ delete from ${tbName} where `datev1`='2022-01-02';""" - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ delete from ${tbName} where `datev2`='2022-01-04 11:11:11.111';""" - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ delete from ${tbName} where `datev2`='2022-01-04 11:11:11';""" - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ insert into ${tbName} values('2022-01-02', '2022-01-02 11:11:11', '2022-01-02', '2022-01-02 11:11:11');""" - sql """ insert into ${tbName} (`datek1`, `datek2`, `datev1`) values('2022-01-03', '2022-01-03 11:11:11', '2022-01-03');""" - sql """ insert into ${tbName} (`datek1`, `datek2`, `datev2`) values('2022-01-04', '2022-01-04 11:11:11', '2022-01-04 11:11:11');""" - sql """ insert into ${tbName} (`datek1`, `datev1`, `datev2`) values('2022-01-05', '2022-01-05', '2022-01-05 11:11:11');""" - sql """ insert into ${tbName} (`datek2`, `datev1`, `datev2`) values('2022-01-06 11:11:11', '2022-01-06', '2022-01-06 11:11:11');""" - - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - sql """ delete from ${tbName} where `datev1`='2022-01-01';""" - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ delete from ${tbName} where `datev2`='2022-01-01 11:11:11.111';""" - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ delete from ${tbName} where `datev2`='2022-01-01 11:11:11';""" - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ DROP TABLE ${tbName} force""" -} diff --git a/regression-test/suites/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.groovy b/regression-test/suites/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.groovy deleted file mode 100644 index cb50fc85b09..00000000000 --- a/regression-test/suites/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.groovy +++ /dev/null @@ -1,167 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -import org.codehaus.groovy.runtime.IOGroovyMethods - -suite("test_dup_keys_schema_change_datev2") { - def tbName = "test_dup_keys_schema_change_datev2" - def getJobState = { tableName -> - def jobStateResult = sql """ SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ - return jobStateResult[0][9] - } - - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - def do_compact = { tableName -> - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - boolean running = true - do { - Thread.sleep(100) - String tablet_id = tablet[0] - backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - - sql """ DROP TABLE IF EXISTS ${tbName} FORCE""" - // Create table and disable light weight schema change - sql """ - CREATE TABLE IF NOT EXISTS ${tbName} - ( - `datek1` date DEFAULT '2022-01-01', - `datek2` datetime DEFAULT '2022-01-01 11:11:11', - `datev1` date DEFAULT '2022-01-01', - `datev2` datetime DEFAULT '2022-01-01 11:11:11' - ) - DUPLICATE KEY(`datek1`,`datek2`) - DISTRIBUTED BY HASH(`datek1`) BUCKETS 1 - PROPERTIES("replication_num" = "1", "light_schema_change" = "false"); - """ - // datev2 - sql """ insert into ${tbName} values('2022-01-02', '2022-01-02 11:11:11', '2022-01-02', '2022-01-02 11:11:11');""" - sql """ insert into ${tbName} (`datek1`, `datek2`, `datev1`) values('2022-01-03', '2022-01-03 11:11:11', '2022-01-03');""" - sql """ insert into ${tbName} (`datek1`, `datek2`, `datev2`) values('2022-01-04', '2022-01-04 11:11:11', '2022-01-04 11:11:11');""" - sql """ insert into ${tbName} (`datek1`, `datev1`, `datev2`) values('2022-01-05', '2022-01-05', '2022-01-05 11:11:11');""" - sql """ insert into ${tbName} (`datek2`, `datev1`, `datev2`) values('2022-01-06 11:11:11', '2022-01-06', '2022-01-06 11:11:11');""" - - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ alter table ${tbName} modify column `datev1` datev2 DEFAULT '2022-01-01' """ - int max_try_time = 1000 - while (max_try_time--){ - String result = getJobState(tbName) - if (result == "FINISHED") { - sleep(3000) - break - } else { - sleep(100) - if (max_try_time < 1){ - assertEquals(1,2) - } - } - } - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - 
do_compact(tbName) - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - sql """delete from ${tbName} where `datev1` <= '2022-01-06';""" - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - // datetimev2(0) - sql """ insert into ${tbName} values('2022-01-02', '2022-01-02 11:11:11', '2022-01-02', '2022-01-02 11:11:11');""" - sql """ insert into ${tbName} (`datek1`, `datek2`, `datev1`) values('2022-01-03', '2022-01-03 11:11:11', '2022-01-03');""" - sql """ insert into ${tbName} (`datek1`, `datek2`, `datev2`) values('2022-01-04', '2022-01-04 11:11:11', '2022-01-04 11:11:11');""" - sql """ insert into ${tbName} (`datek1`, `datev1`, `datev2`) values('2022-01-05', '2022-01-05', '2022-01-05 11:11:11');""" - sql """ insert into ${tbName} (`datek2`, `datev1`, `datev2`) values('2022-01-06 11:11:11', '2022-01-06', '2022-01-06 11:11:11');""" - - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - sql """ alter table ${tbName} modify column `datev2` datetimev2 DEFAULT '2022-01-01 11:11:11' """ - max_try_time = 1000 - while (max_try_time--){ - String result = getJobState(tbName) - if (result == "FINISHED") { - sleep(3000) - break - } else { - sleep(100) - if (max_try_time < 1){ - assertEquals(1,2) - } - } - } - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - do_compact(tbName) - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - sql """delete from ${tbName} where `datev2` <= '2022-01-06 11:11:11';""" - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - // datetimev2(3) - sql """ insert into ${tbName} values('2022-01-02', '2022-01-02 11:11:11', '2022-01-02', '2022-01-02 11:11:11');""" - sql """ insert into ${tbName} (`datek1`, `datek2`, `datev1`) values('2022-01-03', '2022-01-03 11:11:11', '2022-01-03');""" - sql """ insert into ${tbName} (`datek1`, `datek2`, `datev2`) values('2022-01-04', '2022-01-04 11:11:11', '2022-01-04 11:11:11');""" - sql """ insert into ${tbName} (`datek1`, `datev1`, `datev2`) values('2022-01-05', '2022-01-05', '2022-01-05 11:11:11');""" - sql """ insert into ${tbName} (`datek2`, `datev1`, `datev2`) values('2022-01-06 11:11:11', '2022-01-06', '2022-01-06 11:11:11');""" - - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - sql """ alter table ${tbName} modify column `datev2` datetimev2(3) DEFAULT '2022-01-01 11:11:11' """ - max_try_time = 1000 - while (max_try_time--){ - String result = getJobState(tbName) - if (result == "FINISHED") { - sleep(3000) - break - } else { - sleep(100) - if (max_try_time < 1){ - assertEquals(1,2) - } - } - } - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - do_compact(tbName) - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - sql """delete from ${tbName} where `datev2` < '2022-01-06 11:11:11.111';""" - qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - - sql """ DROP TABLE ${tbName} force""" -} diff --git a/regression-test/suites/schema_change_p0/test_modify_reorder_column.groovy b/regression-test/suites/schema_change_p0/test_modify_reorder_column.groovy new file mode 100644 index 00000000000..9262974e473 --- /dev/null +++ b/regression-test/suites/schema_change_p0/test_modify_reorder_column.groovy @@ -0,0 +1,131 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite ("test_modify_reorder_column") { + + def getJobState = { tableName -> + def jobStateResult = sql """ SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ + return jobStateResult[0][9] + } + + // test reorder column on duplicate table + def tbl1 = "test_modify_reorder_column_dup" + sql """ DROP TABLE IF EXISTS ${tbl1}; """ + sql """ create table ${tbl1} ( + k1 TINYINT, + colnotnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4: double> NOT NULL, + colnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4: double> NULL, + v VARIANT + ) duplicate key(k1) distributed by hash(k1) buckets 1 + properties( "replication_num" = "1" ); """ + + sql """insert into ${tbl1} values + (1, {"A", "B", 10, 3.14}, {"C", "D", 20, 8.343}, '{"a" : 1, "b" : [1], "c": 1.0}') """ + qt_dup """ select * from ${tbl1} order by k1;""" + + sql "ALTER TABLE ${tbl1} MODIFY COLUMN colnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4: double> NULL AFTER v" + + + int max_try_time = 100 + while (max_try_time--){ + String result = getJobState("${tbl1}") + if (result == "FINISHED") { + sleep(1000) + break + } else { + sleep(1000) + assertTrue(max_try_time>1) + } + } + + sql """insert into ${tbl1} values + (2, {"E", "F", 30, 484.3234}, '{"a" : 1, "b" : [1], "c": 1.0}', null) """ + qt_dup """ select * from ${tbl1} order by k1;""" + sql """DROP TABLE IF EXISTS ${tbl1} FORCE; """ + + + // test reorder column on MOR table + def tbl2 = "test_modify_reorder_column_mor" + sql """ DROP TABLE IF EXISTS ${tbl2}; """ + sql """ create table ${tbl2} ( + k1 TINYINT, + colnotnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4: double> NOT NULL, + colnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4: double> NULL, + v VARIANT + ) unique key(k1) distributed by hash(k1) buckets 1 + properties( "replication_num" = "1", "enable_unique_key_merge_on_write" = "false" ); """ + + sql """insert into ${tbl2} values + (1, {"A", "B", 10, 3.14}, {"C", "D", 20, 8.343}, '{"a" : 1, "b" : [1], "c": 1.0}') """ + qt_mor """ select * from ${tbl2} order by k1;""" + + sql "ALTER TABLE ${tbl2} MODIFY COLUMN colnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4: double> NULL AFTER v" + + max_try_time = 100 + while (max_try_time--){ + String result = getJobState("${tbl2}") + if (result == "FINISHED") { + sleep(1000) + break + } else { + sleep(1000) + assertTrue(max_try_time>1) + } + } + + sql """insert into ${tbl2} values + (2, {"E", "F", 30, 484.3234}, '{"a" : 1, "b" : [1], "c": 1.0}', null) """ + qt_mor """ select * from ${tbl2} order by k1;""" + sql """DROP TABLE IF EXISTS ${tbl2} FORCE; """ + + + // test reorder column on MOW table + def tbl3 = "test_modify_reorder_column_mow" + sql """ DROP TABLE IF EXISTS ${tbl3}; """ + sql """ create table ${tbl3} ( + k1 TINYINT, + colnotnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4: double> NOT NULL, 
+ colnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4: double> NULL, + v VARIANT + ) unique key(k1) distributed by hash(k1) buckets 1 + properties( "replication_num" = "1", "enable_unique_key_merge_on_write" = "false" ); """ + + sql """insert into ${tbl3} values + (1, {"A", "B", 10, 3.14}, {"C", "D", 20, 8.343}, '{"a" : 1, "b" : [1], "c": 1.0}') """ + qt_mow """ select * from ${tbl3} order by k1;""" + + sql "ALTER TABLE ${tbl3} MODIFY COLUMN colnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4: double> NULL AFTER v" + + max_try_time = 100 + while (max_try_time--){ + String result = getJobState("${tbl3}") + if (result == "FINISHED") { + sleep(1000) + break + } else { + sleep(1000) + assertTrue(max_try_time>1) + } + } + + sql """insert into ${tbl3} values + (2, {"E", "F", 30, 484.3234}, '{"a" : 1, "b" : [1], "c": 1.0}', null) """ + qt_mow """ select * from ${tbl3} order by k1;""" + sql """DROP TABLE IF EXISTS ${tbl3} FORCE; """ +} --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org For additional commands, e-mail: commits-h...@doris.apache.org