This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 9078ab4d24 [feature](FE): add new property to control whether use light schema change or not (#11169)
9078ab4d24 is described below

commit 9078ab4d2471517e165a720c09181e1a97c36c15
Author: AlexYue <yj976240...@qq.com>
AuthorDate: Thu Aug 4 15:49:05 2022 +0800

    [feature](FE): add new property to control whether use light schema change or not (#11169)
---
 .../Create/CREATE-TABLE.md                         |   6 +
 .../Create/CREATE-TABLE.md                         |   6 +
 .../org/apache/doris/alter/SchemaChangeJobV2.java  |   4 +-
 .../org/apache/doris/analysis/SchemaTableType.java |  20 +-
 .../org/apache/doris/analysis/ShowAlterStmt.java   |   6 +-
 .../main/java/org/apache/doris/catalog/Env.java    |  14 +-
 .../java/org/apache/doris/catalog/OlapTable.java   |  26 ++-
 .../org/apache/doris/catalog/TableProperty.java    |  27 ++-
 .../apache/doris/common/util/PropertyAnalyzer.java |  21 ++
 .../org/apache/doris/common/util/TimeUtils.java    |  16 +-
 .../doris/datasource/InternalDataSource.java       |  24 ++-
 .../doris/alter/SchemaChangeHandlerTest.java       |   6 +-
 .../schema_change/test_agg_keys_schema_change.out  |  24 +++
 .../schema_change/test_agg_mv_schema_change.out    |  14 ++
 .../test_agg_rollup_schema_change.out              |  14 ++
 .../schema_change/test_agg_vals_schema_change.out  |  32 +++
 .../test_compaction_schema_change.out              |  11 +
 .../schema_change/test_delete_schema_change.out    |  44 ++++
 .../schema_change/test_dup_keys_schema_change.out  |  28 +++
 .../schema_change/test_dup_mv_schema_change.out    |  28 +++
 .../test_dup_rollup_schema_change.out              |  27 +++
 .../schema_change/test_dup_vals_schema_change.out  |  28 +++
 .../schema_change/test_partition_schema_change.out |  33 +++
 .../test_agg_keys_schema_change.groovy             | 225 +++++++++++++++++++
 .../schema_change/test_agg_mv_schema_change.groovy | 217 +++++++++++++++++++
 .../test_agg_rollup_schema_change.groovy           | 215 ++++++++++++++++++
 .../test_agg_vals_schema_change.groovy             | 219 +++++++++++++++++++
 .../schema_change/test_alter_table_column.groovy   |   4 +-
 .../schema_change/test_delete_schema_change.sql    |  44 ++++
 .../test_dup_keys_schema_change.groovy             | 225 +++++++++++++++++++
 .../schema_change/test_dup_mv_schema_change.groovy | 239 +++++++++++++++++++++
 .../test_dup_rollup_schema_change.groovy           | 236 ++++++++++++++++++++
 .../test_dup_vals_schema_change.groovy             | 209 ++++++++++++++++++
 .../schema_change/test_partition_schema_change.sql |  44 ++++
 .../suites/schema_change/test_schema_change.groovy |   2 +-
 35 files changed, 2297 insertions(+), 41 deletions(-)
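
Before the diff: the new table property is set at CREATE TABLE time and defaults
to off. A minimal usage sketch, assuming a running Doris cluster (the database
and table names here are illustrative, not part of the patch):

    CREATE TABLE example_db.sc_demo (
        user_id LARGEINT NOT NULL,
        city VARCHAR(20)
    )
    DISTRIBUTED BY HASH(user_id) BUCKETS 1
    PROPERTIES ('replication_num' = '1', 'light_schema_change' = 'true');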

diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md
index f8d086d49c..0c38e29af9 100644
--- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md
+++ b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md
@@ -311,6 +311,12 @@ distribution_info
         The default compression method for Doris tables is LZ4. After version 1.1, it is supported to specify the compression method as ZSTD to obtain a higher compression ratio.
     
         `"compression"="zstd"`
+
+    * `light_schema_change`
+
+        Doris does not use the light schema change optimization by default. Set this property to true to enable it.
+    
+        `"light_schema_change"="true"`
     
     * Dynamic partition related
     
diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md
index 2a29932ce2..34df1e14a3 100644
--- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md
+++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md
@@ -313,6 +313,12 @@ distribution_info
     
         `"function_column.sequence_type" = 'Date'`
     
+    * `light_schema_change`
+
+        Doris does not use the light schema change optimization by default. Set this property to true to enable it.
+    
+        `"light_schema_change" = 'true'`
+
     * Dynamic partition related
     
         The dynamic partition related parameters are as follows:
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java
index d31769dbec..c0190fcd40 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java
@@ -857,8 +857,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 {
             List<Comparable> info = Lists.newArrayList();
             info.add(jobId);
             info.add(tableName);
-            info.add(TimeUtils.longToTimeString(createTimeMs));
-            info.add(TimeUtils.longToTimeString(finishedTimeMs));
+            info.add(TimeUtils.longToTimeStringWithms(createTimeMs));
+            info.add(TimeUtils.longToTimeStringWithms(finishedTimeMs));
             // only show the origin index name
             info.add(indexIdToName.get(shadowIndexId).substring(SchemaChangeHandler.SHADOW_NAME_PRFIX.length()));
             info.add(shadowIndexId);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java
index d4f8e68f54..43dc65ed13 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java
@@ -25,41 +25,41 @@ public enum SchemaTableType {
     SCH_CHARSETS("CHARSETS", "CHARACTER_SETS", TSchemaTableType.SCH_CHARSETS),
     SCH_COLLATIONS("COLLATIONS", "COLLATIONS", TSchemaTableType.SCH_COLLATIONS),
     SCH_COLLATION_CHARACTER_SET_APPLICABILITY("COLLATION_CHARACTER_SET_APPLICABILITY",
-      "COLLATION_CHARACTER_SET_APPLICABILITY",
-      TSchemaTableType.SCH_COLLATION_CHARACTER_SET_APPLICABILITY),
+            "COLLATION_CHARACTER_SET_APPLICABILITY",
+            TSchemaTableType.SCH_COLLATION_CHARACTER_SET_APPLICABILITY),
     SCH_COLUMNS("COLUMNS", "COLUMNS", TSchemaTableType.SCH_COLUMNS),
     SCH_COLUMN_PRIVILEGES("COLUMN_PRIVILEGES", "COLUMN_PRIVILEGES",
-      TSchemaTableType.SCH_COLUMN_PRIVILEGES),
+            TSchemaTableType.SCH_COLUMN_PRIVILEGES),
     SCH_ENGINES("ENGINES", "ENGINES", TSchemaTableType.SCH_ENGINES),
     SCH_EVENTS("EVENTS", "EVENTS", TSchemaTableType.SCH_EVENTS),
     SCH_FILES("FILES", "FILES", TSchemaTableType.SCH_FILES),
     SCH_GLOBAL_STATUS("GLOBAL_STATUS", "GLOBAL_STATUS", TSchemaTableType.SCH_GLOBAL_STATUS),
     SCH_GLOBAL_VARIABLES("GLOBAL_VARIABLES", "GLOBAL_VARIABLES",
-      TSchemaTableType.SCH_GLOBAL_VARIABLES),
+            TSchemaTableType.SCH_GLOBAL_VARIABLES),
     SCH_KEY_COLUMN_USAGE("KEY_COLUMN_USAGE", "KEY_COLUMN_USAGE",
-      TSchemaTableType.SCH_KEY_COLUMN_USAGE),
+            TSchemaTableType.SCH_KEY_COLUMN_USAGE),
     SCH_OPEN_TABLES("OPEN_TABLES", "OPEN_TABLES", TSchemaTableType.SCH_OPEN_TABLES),
     SCH_PARTITIONS("PARTITIONS", "PARTITIONS", TSchemaTableType.SCH_PARTITIONS),
     SCH_PLUGINS("PLUGINS", "PLUGINS", TSchemaTableType.SCH_PLUGINS),
     SCH_PROCESSLIST("PROCESSLIST", "PROCESSLIST", TSchemaTableType.SCH_PROCESSLIST),
     SCH_PROFILES("PROFILES", "PROFILES", TSchemaTableType.SCH_PROFILES),
     SCH_REFERENTIAL_CONSTRAINTS("REFERENTIAL_CONSTRAINTS", "REFERENTIAL_CONSTRAINTS",
-      TSchemaTableType.SCH_REFERENTIAL_CONSTRAINTS),
+            TSchemaTableType.SCH_REFERENTIAL_CONSTRAINTS),
     SCH_PROCEDURES("ROUTINES", "ROUTINES", TSchemaTableType.SCH_PROCEDURES),
     SCH_SCHEMATA("SCHEMATA", "SCHEMATA", TSchemaTableType.SCH_SCHEMATA),
     SCH_SCHEMA_PRIVILEGES("SCHEMA_PRIVILEGES", "SCHEMA_PRIVILEGES",
-      TSchemaTableType.SCH_SCHEMA_PRIVILEGES),
+            TSchemaTableType.SCH_SCHEMA_PRIVILEGES),
     SCH_SESSION_STATUS("SESSION_STATUS", "SESSION_STATUS", TSchemaTableType.SCH_SESSION_STATUS),
     SCH_SESSION_VARIABLES("SESSION_VARIABLES", "SESSION_VARIABLES",
-      TSchemaTableType.SCH_SESSION_VARIABLES),
+            TSchemaTableType.SCH_SESSION_VARIABLES),
     SCH_STATISTICS("STATISTICS", "STATISTICS", TSchemaTableType.SCH_STATISTICS),
     SCH_STATUS("STATUS", "STATUS", TSchemaTableType.SCH_STATUS),
     SCH_TABLES("TABLES", "TABLES", TSchemaTableType.SCH_TABLES),
     SCH_TABLE_CONSTRAINTS("TABLE_CONSTRAINTS", "TABLE_CONSTRAINTS",
-      TSchemaTableType.SCH_TABLE_CONSTRAINTS),
+            TSchemaTableType.SCH_TABLE_CONSTRAINTS),
     SCH_TABLE_NAMES("TABLE_NAMES", "TABLE_NAMES", TSchemaTableType.SCH_TABLE_NAMES),
     SCH_TABLE_PRIVILEGES("TABLE_PRIVILEGES", "TABLE_PRIVILEGES",
-      TSchemaTableType.SCH_TABLE_PRIVILEGES),
+            TSchemaTableType.SCH_TABLE_PRIVILEGES),
     SCH_TRIGGERS("TRIGGERS", "TRIGGERS", TSchemaTableType.SCH_TRIGGERS),
     SCH_USER_PRIVILEGES("USER_PRIVILEGES", "USER_PRIVILEGES", TSchemaTableType.SCH_USER_PRIVILEGES),
     SCH_VARIABLES("VARIABLES", "VARIABLES", TSchemaTableType.SCH_VARIABLES),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java
index 81a5c345f5..5551993b11 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java
@@ -104,12 +104,13 @@ public class ShowAlterStmt extends ShowStmt {
         if (!(subExpr instanceof BinaryPredicate)) {
             throw new AnalysisException("The operator =|>=|<=|>|<|!= are supported.");
         }
+
         BinaryPredicate binaryPredicate = (BinaryPredicate) subExpr;
         if (!(subExpr.getChild(0) instanceof SlotRef)) {
             throw new AnalysisException("Only support column = xxx syntax.");
         }
         String leftKey = ((SlotRef) subExpr.getChild(0)).getColumnName().toLowerCase();
-        if (leftKey.equals("tablename") || leftKey.equals("state")) {
+        if (leftKey.equals("tablename") || leftKey.equals("state") || leftKey.equals("indexname")) {
             if (!(subExpr.getChild(1) instanceof StringLiteral)
                     || binaryPredicate.getOp() != BinaryPredicate.Operator.EQ) {
                 throw new AnalysisException("Where clause : TableName = \"table1\" or "
@@ -123,7 +124,8 @@ public class ShowAlterStmt extends ShowStmt {
             subExpr.setChild(1, (subExpr.getChild(1)).castTo(
                     ScalarType.getDefaultDateType(Type.DATETIME)));
         } else {
-            throw new AnalysisException("The columns of TableName/CreateTime/FinishTime/State are supported.");
+            throw new AnalysisException(
+                    "The columns of TableName/IndexName/CreateTime/FinishTime/State are supported.");
         }
         filterMap.put(leftKey, subExpr);
     }
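
The ShowAlterStmt change above additionally accepts `indexname` as a filter
column. A sketch of the newly supported predicate, assuming a pending or
finished job (the index name is illustrative):

    SHOW ALTER TABLE COLUMN WHERE IndexName = "sc_demo";
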
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
index c6bd37d08a..42a118c528 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
@@ -1536,7 +1536,7 @@ public class Env {
                 MetaHelper.complete(filename, dir);
             } else {
                 LOG.warn("get an image with a lower version, localImageVersion: {}, got version: {}",
-                         localImageVersion, version);
+                        localImageVersion, version);
             }
         } catch (Exception e) {
             throw new IOException(e);
@@ -2890,6 +2890,12 @@ public class Env {
                 sb.append(olapTable.getCompressionType()).append("\"");
             }
 
+            // show lightSchemaChange only when it is set true
+            if (olapTable.getUseLightSchemaChange()) {
+                sb.append(",\n\"").append(PropertyAnalyzer.PROPERTIES_USE_LIGHT_SCHEMA_CHANGE).append("\" = \"");
+                sb.append(olapTable.getUseLightSchemaChange()).append("\"");
+            }
+
             // storage policy
             if (olapTable.getStoragePolicy() != null && !olapTable.getStoragePolicy().equals("")) {
                 sb.append(",\n\"").append(PropertyAnalyzer.PROPERTIES_STORAGE_POLICY).append("\" = \"");
@@ -2899,7 +2905,7 @@ public class Env {
             // sequence type
             if (olapTable.hasSequenceCol()) {
                 sb.append(",\n\"").append(PropertyAnalyzer.PROPERTIES_FUNCTION_COLUMN + "."
-                    + PropertyAnalyzer.PROPERTIES_SEQUENCE_TYPE).append("\" = \"");
+                        + PropertyAnalyzer.PROPERTIES_SEQUENCE_TYPE).append("\" = \"");
                 sb.append(olapTable.getSequenceType().toString()).append("\"");
             }
 
@@ -4118,7 +4124,7 @@ public class Env {
                     olapTable.getPartitionInfo().setIsInMemory(partition.getId(), tableProperty.isInMemory());
                     // storage policy re-use modify in memory
                     Optional.ofNullable(tableProperty.getStoragePolicy()).filter(p -> !p.isEmpty())
-                        .ifPresent(p -> olapTable.getPartitionInfo().setStoragePolicy(partition.getId(), p));
+                            .ifPresent(p -> olapTable.getPartitionInfo().setStoragePolicy(partition.getId(), p));
                 }
             }
         } finally {
@@ -5003,7 +5009,7 @@ public class Env {
             Optional<Table> table = db.getTable(tableId);
             if (table.isPresent()) {
                 return new TableName(InternalDataSource.INTERNAL_DS_NAME,
-                                    db.getFullName(), table.get().getName());
+                        db.getFullName(), table.get().getName());
             }
         }
         return null;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
index 96c1db9756..47440673e1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -161,12 +161,12 @@ public class OlapTable extends Table {
     }
 
     public OlapTable(long id, String tableName, List<Column> baseSchema, KeysType keysType,
-                     PartitionInfo partitionInfo, DistributionInfo defaultDistributionInfo) {
+            PartitionInfo partitionInfo, DistributionInfo defaultDistributionInfo) {
         this(id, tableName, baseSchema, keysType, partitionInfo, defaultDistributionInfo, null);
     }
 
     public OlapTable(long id, String tableName, List<Column> baseSchema, KeysType keysType,
-                     PartitionInfo partitionInfo, DistributionInfo defaultDistributionInfo, TableIndexes indexes) {
+            PartitionInfo partitionInfo, DistributionInfo defaultDistributionInfo, TableIndexes indexes) {
         super(id, tableName, TableType.OLAP, baseSchema);
 
         this.state = OlapTableState.NORMAL;
@@ -519,7 +519,7 @@ public class OlapTable extends Table {
                     try {
                        Map<Tag, List<Long>> tag2beIds =
                                Env.getCurrentSystemInfo().selectBackendIdsForReplicaCreation(
-                                replicaAlloc, db.getClusterName(), null);
+                                        replicaAlloc, db.getClusterName(), null);
                         for (Map.Entry<Tag, List<Long>> entry3 : tag2beIds.entrySet()) {
                             for (Long beId : entry3.getValue()) {
                                 long newReplicaId = env.getNextId();
@@ -1468,6 +1468,7 @@ public class OlapTable extends Table {
 
     /**
      * Get the proximate row count of this table, if you need accurate row count should select count(*) from table.
+     *
      * @return proximate row count
      */
     public long proximateRowCount() {
@@ -1561,6 +1562,23 @@ public class OlapTable extends Table {
         tableProperty.buildInMemory();
     }
 
+    public Boolean getUseLightSchemaChange() {
+        if (tableProperty != null) {
+            return tableProperty.getUseSchemaLightChange();
+        }
+        // property is set false by default
+        return false;
+    }
+
+    public void setUseLightSchemaChange(boolean useLightSchemaChange) {
+        if (tableProperty == null) {
+            tableProperty = new TableProperty(new HashMap<>());
+        }
+        tableProperty.modifyTableProperties(PropertyAnalyzer.PROPERTIES_USE_LIGHT_SCHEMA_CHANGE,
+                Boolean.valueOf(useLightSchemaChange).toString());
+        tableProperty.buildUseLightSchemaChange();
+    }
+
     public void setStoragePolicy(String storagePolicy) {
         if (tableProperty == null) {
             tableProperty = new TableProperty(new HashMap<>());
@@ -1675,7 +1693,7 @@ public class OlapTable extends Table {
     }
 
     private void checkPartition(List<String> partitionNames, List<String> tempPartitionNames,
-                                boolean strictRange) throws DdlException {
+            boolean strictRange) throws DdlException {
         if (strictRange) {
             List<PartitionItem> list = Lists.newArrayList();
             List<PartitionItem> tempList = Lists.newArrayList();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java
index 5096e23891..12c14f1520 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java
@@ -40,10 +40,11 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
-/**  TableProperty contains additional information about OlapTable
- *  TableProperty includes properties to persistent the additional information
- *  Different properties is recognized by prefix such as dynamic_partition
- *  If there is different type properties is added, write a method such as buildDynamicProperty to build it.
+/**
+ * TableProperty contains additional information about an OlapTable.
+ * It holds the properties used to persist that additional information.
+ * Different property types are recognized by prefix, such as dynamic_partition.
+ * When a new property type is added, write a build method such as buildDynamicProperty for it.
  */
 public class TableProperty implements Writable {
     private static final Logger LOG = LogManager.getLogger(TableProperty.class);
@@ -72,6 +73,8 @@ public class TableProperty implements Writable {
 
     private TCompressionType compressionType = TCompressionType.LZ4F;
 
+    private Boolean useSchemaLightChange;
+
     private DataSortInfo dataSortInfo = new DataSortInfo();
 
     // remote storage policy, for cold data
@@ -110,6 +113,7 @@ public class TableProperty implements Writable {
 
     /**
      * Reset properties to correct values.
+     *
      * @return this for chained
      */
     public TableProperty resetPropertiesForRestore() {
@@ -142,6 +146,12 @@ public class TableProperty implements Writable {
         return this;
     }
 
+    public TableProperty buildUseLightSchemaChange() {
+        useSchemaLightChange = Boolean.parseBoolean(
+                properties.getOrDefault(PropertyAnalyzer.PROPERTIES_USE_LIGHT_SCHEMA_CHANGE, "false"));
+        return this;
+    }
+
     public TableProperty buildStoragePolicy() {
         storagePolicy = properties.getOrDefault(PropertyAnalyzer.PROPERTIES_STORAGE_POLICY, "");
         return this;
@@ -251,13 +261,17 @@ public class TableProperty implements Writable {
         return compressionType;
     }
 
+    public Boolean getUseSchemaLightChange() {
+        return useSchemaLightChange;
+    }
+
     public void setEnableUniqueKeyMergeOnWrite(boolean enable) {
         properties.put(PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE, Boolean.toString(enable));
     }
 
     public boolean getEnableUniqueKeyMergeOnWrite() {
         return Boolean.parseBoolean(properties.getOrDefault(
-                    PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE, "false"));
+                PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE, "false"));
     }
 
     public void buildReplicaAllocation() {
@@ -286,7 +300,8 @@ public class TableProperty implements Writable {
                 .buildDataSortInfo()
                 .buildRemoteStoragePolicy()
                 .buildCompressionType()
-                .buildStoragePolicy();
+                .buildStoragePolicy()
+                .buildUseLightSchemaChange();
         if (Env.getCurrentEnvJournalVersion() < FeMetaVersion.VERSION_105) {
             // get replica num from property map and create replica allocation
            String repNum = tableProperty.properties.remove(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
index e332a47e35..865e4a2c6c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
@@ -79,6 +79,8 @@ public class PropertyAnalyzer {
     public static final String PROPERTIES_TIMEOUT = "timeout";
     public static final String PROPERTIES_COMPRESSION = "compression";
 
+    public static final String PROPERTIES_USE_LIGHT_SCHEMA_CHANGE = "light_schema_change";
+
     public static final String PROPERTIES_DISTRIBUTION_TYPE = "distribution_type";
     public static final String PROPERTIES_SEND_CLEAR_ALTER_TASK = "send_clear_alter_tasks";
     /*
@@ -451,6 +453,25 @@ public class PropertyAnalyzer {
         return timeout;
     }
 
+    public static Boolean analyzeUseLightSchemaChange(Map<String, String> properties) throws AnalysisException {
+        if (properties == null || properties.isEmpty()) {
+            return false;
+        }
+        String value = properties.get(PROPERTIES_USE_LIGHT_SCHEMA_CHANGE);
+        // set light schema change false by default
+        if (null == value) {
+            return false;
+        }
+        properties.remove(PROPERTIES_USE_LIGHT_SCHEMA_CHANGE);
+        if (value.equalsIgnoreCase("true")) {
+            return true;
+        } else if (value.equalsIgnoreCase("false")) {
+            return false;
+        }
+        throw new AnalysisException(PROPERTIES_USE_LIGHT_SCHEMA_CHANGE
+                + " must be `true` or `false`");
+    }
+
     // analyzeCompressionType will parse the compression type from properties
     public static TCompressionType analyzeCompressionType(Map<String, String> properties) throws  AnalysisException {
         String compressionType = "";
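
analyzeUseLightSchemaChange above accepts only true/false (case-insensitive) and
falls back to false when the property is absent. A sketch of the rejected case,
with illustrative names:

    -- Fails analysis: light_schema_change must be `true` or `false`
    CREATE TABLE example_db.sc_bad (k INT)
    DISTRIBUTED BY HASH(k) BUCKETS 1
    PROPERTIES ('replication_num' = '1', 'light_schema_change' = 'yes');
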
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/TimeUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/TimeUtils.java
index 2a0d8efe0b..de63586f13 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/TimeUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/TimeUtils.java
@@ -82,6 +82,9 @@ public class TimeUtils {
     private static ThreadLocal<SimpleDateFormat> datetimeFormatThreadLocal =
             ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));
 
+    private static ThreadLocal<SimpleDateFormat> datetimeMSFormatThreadLocal =
+            ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:S"));
+
     static {
         TIME_ZONE = new SimpleTimeZone(8 * 3600 * 1000, "");
 
@@ -149,13 +152,22 @@ public class TimeUtils {
         return dateFormat.format(new Date(timeStamp));
     }
 
-    public static String longToTimeString(long timeStamp) {
-        SimpleDateFormat datetimeFormatTimeZone = datetimeFormatThreadLocal.get();
+    public static String longToTimeStringWithFormat(long timeStamp, SimpleDateFormat datetimeFormatTimeZone) {
         TimeZone timeZone = getTimeZone();
         datetimeFormatTimeZone.setTimeZone(timeZone);
         return longToTimeString(timeStamp, datetimeFormatTimeZone);
     }
 
+    public static String longToTimeString(long timeStamp) {
+        SimpleDateFormat datetimeFormatTimeZone = datetimeFormatThreadLocal.get();
+        return longToTimeStringWithFormat(timeStamp, datetimeFormatTimeZone);
+    }
+
+    public static String longToTimeStringWithms(long timeStamp) {
+        SimpleDateFormat datetimeFormatTimeZone = datetimeMSFormatThreadLocal.get();
+        return longToTimeStringWithFormat(timeStamp, datetimeFormatTimeZone);
+    }
+
     public static synchronized Date getTimeAsDate(String timeString) {
         try {
             Date date = TIME_FORMAT.parse(timeString);
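
Since SchemaChangeJobV2 now formats createTimeMs/finishedTimeMs with
longToTimeStringWithms (pattern "yyyy-MM-dd HH:mm:ss:S"), job listings gain
millisecond precision. A sketch of the visible effect (timestamp value
illustrative):

    SHOW ALTER TABLE COLUMN;
    -- CreateTime/FinishTime now render like: 2022-08-04 15:49:05:123
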
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java
index 027c7dd9c4..a490745d74 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java
@@ -1716,11 +1716,6 @@ public class InternalDataSource implements DataSourceIf<Database> {
         OlapTable olapTable = new OlapTable(tableId, tableName, baseSchema, keysType, partitionInfo,
                 defaultDistributionInfo, indexes);
 
-        for (Column column : baseSchema) {
-            column.setUniqueId(olapTable.incAndGetMaxColUniqueId());
-            LOG.debug("table: {}, newColumn: {}, uniqueId: {}", olapTable.getName(), column.getName(),
-                    column.getUniqueId());
-        }
         olapTable.setComment(stmt.getComment());
 
         // set base index id
@@ -1731,6 +1726,25 @@ public class InternalDataSource implements DataSourceIf<Database> {
         // this should be done before create partition.
         Map<String, String> properties = stmt.getProperties();
 
+        // get use light schema change
+        Boolean useLightSchemaChange = false;
+        try {
+            useLightSchemaChange = PropertyAnalyzer.analyzeUseLightSchemaChange(properties);
+        } catch (AnalysisException e) {
+            throw new DdlException(e.getMessage());
+        }
+        // use light schema change optimization
+        olapTable.setUseLightSchemaChange(useLightSchemaChange);
+        if (useLightSchemaChange) {
+            for (Column column : baseSchema) {
+                column.setUniqueId(olapTable.incAndGetMaxColUniqueId());
+                LOG.debug("table: {}, newColumn: {}, uniqueId: {}", olapTable.getName(), column.getName(),
+                        column.getUniqueId());
+            }
+        } else {
+            LOG.debug("table: {} doesn't use light schema change", olapTable.getName());
+        }
+
         // get storage format
         TStorageFormat storageFormat = TStorageFormat.V2; // default is segment v2
         try {
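
With the property enabled, InternalDataSource assigns each base-schema column a
unique id at creation, which the light schema change path relies on for later
column changes. A sketch against the earlier demo table (column name
illustrative):

    ALTER TABLE example_db.sc_demo ADD COLUMN new_col INT DEFAULT "0";
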
diff --git a/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeHandlerTest.java b/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeHandlerTest.java
index 11ef58facc..8a44fd7e96 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeHandlerTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeHandlerTest.java
@@ -58,20 +58,20 @@ public class SchemaChangeHandlerTest extends TestWithFeService {
                 + "last_visit_date DATETIME REPLACE DEFAULT '1970-01-01 00:00:00',\n" + "cost BIGINT SUM DEFAULT '0',\n"
                 + "max_dwell_time INT MAX DEFAULT '0',\n" + "min_dwell_time INT MIN DEFAULT '99999')\n"
                 + "AGGREGATE KEY(user_id, date, city, age, sex)\n" + "DISTRIBUTED BY HASH(user_id) BUCKETS 1\n"
-                + "PROPERTIES ('replication_num' = '1');";
+                + "PROPERTIES ('replication_num' = '1', 'light_schema_change' = 'true');";
         createTable(createAggTblStmtStr);
 
         String createUniqTblStmtStr = "CREATE TABLE IF NOT EXISTS test.sc_uniq (\n" + "user_id LARGEINT NOT NULL,\n"
                 + "username VARCHAR(50) NOT NULL,\n" + "city VARCHAR(20),\n" + "age SMALLINT,\n" + "sex TINYINT,\n"
                 + "phone LARGEINT,\n" + "address VARCHAR(500),\n" + "register_time DATETIME)\n"
                 + "UNIQUE  KEY(user_id, username)\n" + "DISTRIBUTED BY HASH(user_id) BUCKETS 1\n"
-                + "PROPERTIES ('replication_num' = '1');";
+                + "PROPERTIES ('replication_num' = '1', 'light_schema_change' = 'true');";
         createTable(createUniqTblStmtStr);
 
         String createDupTblStmtStr = "CREATE TABLE IF NOT EXISTS test.sc_dup (\n" + "timestamp DATETIME,\n"
                 + "type INT,\n" + "error_code INT,\n" + "error_msg VARCHAR(1024),\n" + "op_id BIGINT,\n"
                 + "op_time DATETIME)\n" + "DUPLICATE  KEY(timestamp, type)\n" + "DISTRIBUTED BY HASH(type) BUCKETS 1\n"
-                + "PROPERTIES ('replication_num' = '1');";
+                + "PROPERTIES ('replication_num' = '1', 'light_schema_change' = 'true');";
         createTable(createDupTblStmtStr);
     }
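
Because Env.java above appends the property to the generated DDL only when it is
true, tables created this way should surface it in SHOW CREATE TABLE. A sketch,
output abbreviated and illustrative:

    SHOW CREATE TABLE test.sc_agg;
    -- ... PROPERTIES ( ..., "light_schema_change" = "true", ... )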
 
diff --git a/regression-test/data/schema_change/test_agg_keys_schema_change.out b/regression-test/data/schema_change/test_agg_keys_schema_change.out
new file mode 100644
index 0000000000..1b97d971be
--- /dev/null
+++ b/regression-test/data/schema_change/test_agg_keys_schema_change.out
@@ -0,0 +1,24 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+1      2017-10-01      Beijing 10      1       2       31      19      \N      \N
+2      2017-10-01      Beijing 10      1       2       32      20      \N      \N
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2       100     32      20      \N      \N
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2       100     32      20      \N      \N
+3      2017-10-01      Beijing 10      1       3       110     32      20      \N      \N
+
+-- !sc --
+4
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       210     32      20      \N      \N
+
+-- !sc --
+4
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       2       32      20      \N      \N
+
diff --git a/regression-test/data/schema_change/test_agg_mv_schema_change.out b/regression-test/data/schema_change/test_agg_mv_schema_change.out
new file mode 100644
index 0000000000..7060c8e87f
--- /dev/null
+++ b/regression-test/data/schema_change/test_agg_mv_schema_change.out
@@ -0,0 +1,14 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+1      2017-10-01      Beijing 10      1       2       31      19      \N      \N
+2      2017-10-01      Beijing 10      1       2       32      20      \N      \N
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       32      20      \N      \N
+
+-- !sc --
+4
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       32      20      \N      \N
+
diff --git a/regression-test/data/schema_change/test_agg_rollup_schema_change.out b/regression-test/data/schema_change/test_agg_rollup_schema_change.out
new file mode 100644
index 0000000000..7060c8e87f
--- /dev/null
+++ b/regression-test/data/schema_change/test_agg_rollup_schema_change.out
@@ -0,0 +1,14 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+1      2017-10-01      Beijing 10      1       2       31      19      \N      \N
+2      2017-10-01      Beijing 10      1       2       32      20      \N      \N
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       32      20      \N      \N
+
+-- !sc --
+4
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       32      20      \N      \N
+
diff --git a/regression-test/data/schema_change/test_agg_vals_schema_change.out b/regression-test/data/schema_change/test_agg_vals_schema_change.out
new file mode 100644
index 0000000000..be4fe651e5
--- /dev/null
+++ b/regression-test/data/schema_change/test_agg_vals_schema_change.out
@@ -0,0 +1,32 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+1      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        2       31      19      \N      \N
+2      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        2       32      20      \N      \N
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        2       32      20      \N      \N      1
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        3       32      20      \N      \N      2
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      \N      \N      1
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        2       32      20      \N      \N      2
+
+-- !sc --
+3
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2       32      20      \N      \N      2
+
+-- !sc --
+4      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        1       32      20      \N      \N      2
+
+-- !sc --
+5
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        3       32      20      \N      \N      2
+
diff --git a/regression-test/data/schema_change/test_compaction_schema_change.out b/regression-test/data/schema_change/test_compaction_schema_change.out
new file mode 100644
index 0000000000..c7f14cd658
--- /dev/null
+++ b/regression-test/data/schema_change/test_compaction_schema_change.out
@@ -0,0 +1,11 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !compaction --
+100
+
+-- !compaction --
+100
+
+-- !compaction --
+1
+2
+
diff --git a/regression-test/data/schema_change/test_delete_schema_change.out b/regression-test/data/schema_change/test_delete_schema_change.out
new file mode 100644
index 0000000000..ff8a11901d
--- /dev/null
+++ b/regression-test/data/schema_change/test_delete_schema_change.out
@@ -0,0 +1,44 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !test_delete_schema_change --
+0
+
+-- !test_delete_schema_change_2 --
+0
+
+-- !test_delete_schema_change_3 --
+1
+
+-- !test_delete_schema_change_4 --
+1
+
+-- !test_delete_schema_change_5 --
+1      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20
+2      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21
+
+-- !test_delete_schema_change_6 --
+0
+
+-- !test_delete_schema_change_7 --
+2      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21      1
+1      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20      1
+
+-- !test_delete_schema_change_8 --
+1
+
+-- !test_delete_schema_change_9 --
+1
+
+-- !test_delete_schema_change_10 --
+1
+
+-- !test_delete_schema_change_11 --
+1
+
+-- !test_delete_schema_change_12 --
+0
+
+-- !test_delete_schema_change_13 --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+2      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+1      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      19      2
+
diff --git a/regression-test/data/schema_change/test_dup_keys_schema_change.out b/regression-test/data/schema_change/test_dup_keys_schema_change.out
new file mode 100644
index 0000000000..b3c7185e3d
--- /dev/null
+++ b/regression-test/data/schema_change/test_dup_keys_schema_change.out
@@ -0,0 +1,28 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+4
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+6
+
+-- !sc --
+3      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+3      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+4      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+13
+
+-- !sc --
+2      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+2      2017-10-01      Beijing 10      2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21      1
+
diff --git a/regression-test/data/schema_change/test_dup_mv_schema_change.out b/regression-test/data/schema_change/test_dup_mv_schema_change.out
new file mode 100644
index 0000000000..b3c7185e3d
--- /dev/null
+++ b/regression-test/data/schema_change/test_dup_mv_schema_change.out
@@ -0,0 +1,28 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+4
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+6
+
+-- !sc --
+3      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+3      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+4      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+13
+
+-- !sc --
+2      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+2      2017-10-01      Beijing 10      2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21      1
+
diff --git a/regression-test/data/schema_change/test_dup_rollup_schema_change.out b/regression-test/data/schema_change/test_dup_rollup_schema_change.out
new file mode 100644
index 0000000000..5fb46c0cad
--- /dev/null
+++ b/regression-test/data/schema_change/test_dup_rollup_schema_change.out
@@ -0,0 +1,27 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+4
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+6
+
+-- !sc --
+3      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+3      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+4      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+13
+
+-- !sc --
+2      2017-10-01      Beijing 10      2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+2      2017-10-01      Beijing 10      2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21      1
\ No newline at end of file
diff --git a/regression-test/data/schema_change/test_dup_vals_schema_change.out b/regression-test/data/schema_change/test_dup_vals_schema_change.out
new file mode 100644
index 0000000000..003c3cb65f
--- /dev/null
+++ b/regression-test/data/schema_change/test_dup_vals_schema_change.out
@@ -0,0 +1,28 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+4
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+6
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+4      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+13
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+2      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        1       31      21      1
+
diff --git a/regression-test/data/schema_change/test_partition_schema_change.out b/regression-test/data/schema_change/test_partition_schema_change.out
new file mode 100644
index 0000000000..5c568d5ecd
--- /dev/null
+++ b/regression-test/data/schema_change/test_partition_schema_change.out
@@ -0,0 +1,33 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !test_partition_schema_change --
+0
+
+-- !test_partition_schema_change_2 --
+0
+
+-- !test_partition_schema_change_3 --
+1
+
+-- !test_partition_schema_change_4 --
+1
+
+-- !test_partition_schema_change_5 --
+1
+
+-- !test_partition_schema_change_6 --
+1      2017-01-02      Beijing 10      1       2017-01-02T00:00        1       30      20
+1      2017-02-02      Beijing 10      1       2017-02-02T00:00        1       30      20
+1      2017-03-02      Beijing 10      1       2017-03-02T00:00        1       30      20
+
+-- !test_partition_schema_change_7 --
+0
+
+-- !test_partition_schema_change_8 --
+1
+
+-- !test_partition_schema_change_9 --
+1      2017-01-02      Beijing 10      1       2017-01-02T00:00        1       30      20      1
+1      2017-02-02      Beijing 10      1       2017-02-02T00:00        1       30      20      1
+2      2017-02-03      Beijing 10      1       2017-02-02T00:00        1       30      20      2
+1      2017-03-02      Beijing 10      1       2017-03-02T00:00        1       30      20      1
+
diff --git a/regression-test/suites/schema_change/test_agg_keys_schema_change.groovy b/regression-test/suites/schema_change/test_agg_keys_schema_change.groovy
new file mode 100644
index 0000000000..6e944e654c
--- /dev/null
+++ b/regression-test/suites/schema_change/test_agg_keys_schema_change.groovy
@@ -0,0 +1,225 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_agg_keys_schema_change") {
+    def tableName = "schema_change_agg_keys_regression_test"
+
+    try {
+
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://")
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS schema_change_agg_keys_regression_test """
+        sql """
+                CREATE TABLE schema_change_agg_keys_regression_test (
+                    `user_id` LARGEINT NOT NULL COMMENT "user id",
+                    `date` DATE NOT NULL COMMENT "data import date and time",
+                    `city` VARCHAR(20) COMMENT "user city",
+                    `age` SMALLINT COMMENT "user age",
+                    `sex` TINYINT COMMENT "user gender",
+
+                    `cost` BIGINT SUM DEFAULT "0" COMMENT "user total cost",
+                    `max_dwell_time` INT MAX DEFAULT "0" COMMENT "user max dwell time",
+                    `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "user min dwell time",
+                    `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL column",
+                    `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap column")
+                AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+                BUCKETS 1
+                PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+            """
+
+        sql """ INSERT INTO schema_change_agg_keys_regression_test VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, 1, 30, 20, hll_hash(1), to_bitmap(1))
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, 1, 31, 19, hll_hash(2), to_bitmap(2))
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, 1, 31, 21, hll_hash(2), to_bitmap(2))
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, 1, 32, 20, hll_hash(3), to_bitmap(3))
+            """
+
+        qt_sc """ select * from schema_change_agg_keys_regression_test order by user_id"""
+
+        // add key column case 1, not light schema change
+        sql """
+            ALTER table ${tableName} ADD COLUMN new_key_column INT default "2" 
+            """
+
+        result = "null"
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")){
+                return
+            }
+            Thread.sleep(100)
+        }
+
+        sql """ INSERT INTO ${tableName} (`user_id`,`date`,`city`,`age`,`sex`,`cost`,`max_dwell_time`,`min_dwell_time`, `hll_col`, `bitmap_col`)
+                VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, 100, 32, 20, hll_hash(4), to_bitmap(4))
+            """
+        qt_sc """SELECT * FROM ${tableName} WHERE user_id = 3"""
+
+        // add key column case 2
+        sql """ INSERT INTO ${tableName} VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, 3, 110, 32, 20, hll_hash(4), to_bitmap(4))
+            """
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """
+
+
+        qt_sc """ select count(*) from ${tableName} """
+
+        // drop key column, not light schema change
+        sql """
+            ALTER TABLE ${tableName} DROP COLUMN new_key_column
+            """
+        result = "null"
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")){
+                return
+            }
+            Thread.sleep(100)
+        }
+
+        qt_sc """ select * from ${tableName} where user_id = 3 """
+
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 1, 32, 20, hll_hash(5), to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 1, 32, 20, hll_hash(5), to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 1, 32, 20, hll_hash(5), to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 1, 32, 20, hll_hash(5), to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 1, 32, 20, hll_hash(5), to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 1, 32, 20, hll_hash(5), to_bitmap(5))
+            """
+
+        // compaction
+        String[][] tablets = sql """ show tablets from ${tableName}; """
+        for (String[] tablet in tablets) {
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                logger.info("run compaction:" + tablet_id)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X POST http://")
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run?tablet_id=")
+                sb.append(tablet_id)
+                sb.append("&compact_type=cumulative")
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+        }
+
+        // wait for all compactions done
+        for (String[] tablet in tablets) {
+                boolean running = true
+                do {
+                    Thread.sleep(100)
+                    String tablet_id = tablet[0]
+                    backend_id = tablet[2]
+                    StringBuilder sb = new StringBuilder();
+                    sb.append("curl -X GET http://")
+                    sb.append(backendId_to_backendIP.get(backend_id))
+                    sb.append(":")
+                    sb.append(backendId_to_backendHttpPort.get(backend_id))
+                    sb.append("/api/compaction/run_status?tablet_id=")
+                    sb.append(tablet_id)
+
+                    String command = sb.toString()
+                    process = command.execute()
+                    code = process.waitFor()
+                    err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                    out = process.getText()
+                    logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                    assertEquals(code, 0)
+                    def compactionStatus = parseJson(out.trim())
+                    assertEquals("success", compactionStatus.status.toLowerCase())
+                    running = compactionStatus.run_status
+                } while (running)
+        }
+         
+        qt_sc """ select count(*) from ${tableName} """
+
+        qt_sc """  SELECT * FROM schema_change_agg_keys_regression_test WHERE user_id=2 """
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+}
\ No newline at end of file
diff --git a/regression-test/suites/schema_change/test_agg_mv_schema_change.groovy b/regression-test/suites/schema_change/test_agg_mv_schema_change.groovy
new file mode 100644
index 0000000000..169f07240d
--- /dev/null
+++ b/regression-test/suites/schema_change/test_agg_mv_schema_change.groovy
@@ -0,0 +1,217 @@
+      
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_agg_mv_schema_change") {
+    def tableName = "schema_change_agg_mv_regression_test"
+
+    try {
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://";)
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) 
ele)[2])
+            }
+        }
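+        // /api/show_config is expected to return a JSON array of config tuples with
+        // the value at index 2; disable_auto_compaction is read from it here.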
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
+                CREATE TABLE ${tableName} (
+                    `user_id` LARGEINT NOT NULL COMMENT "user id",
+                    `date` DATE NOT NULL COMMENT "data import date",
+                    `city` VARCHAR(20) COMMENT "user city",
+                    `age` SMALLINT COMMENT "user age",
+                    `sex` TINYINT COMMENT "user gender",
+
+                    `cost` BIGINT SUM DEFAULT "0" COMMENT "user total cost",
+                    `max_dwell_time` INT MAX DEFAULT "0" COMMENT "user max dwell time",
+                    `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "user min dwell time",
+                    `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL column",
+                    `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap column")
+                AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+                BUCKETS 1
+                PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+            """
+
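+        // "light_schema_change" = "true" is the table property introduced by this
+        // change; the steps below exercise it on a table with a materialized view.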
+        //add materialized view
+        def result = "null"
+        def mvName = "mv1"
+        sql "create materialized view ${mvName} as select user_id, date, city, 
age, sex, sum(cost) from ${tableName} group by user_id, date, city, age, sex, 
cost;"
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE MATERIALIZED VIEW WHERE 
TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")){
+                return
+            }
+            Thread.sleep(100)
+        }
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, 1, 30, 20, hll_hash(1), 
to_bitmap(1))
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, 1, 31, 19, hll_hash(2), 
to_bitmap(2))
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, 1, 31, 21, hll_hash(2), 
to_bitmap(2))
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, 1, 32, 20, hll_hash(3), 
to_bitmap(3))
+            """
+
+        qt_sc """ select * from ${tableName} order by user_id"""
+
+        // drop value column with mv, not light schema change
+        sql """
+            ALTER TABLE ${tableName} DROP COLUMN cost
+            """
+
+        result = "null"
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE COLUMN WHERE 
TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")) {
+                log.info("rollup job is cancelled, result: 
${result}".toString())
+                return
+            }
+            Thread.sleep(100)
+        }
+
+        sql """ INSERT INTO ${tableName} (`user_id`, `date`, `city`, `age`, 
`sex`, `max_dwell_time`,`min_dwell_time`, `hll_col`, `bitmap_col`)
+                VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(4), 
to_bitmap(4))
+            """
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        // compaction
+        String[][] tablets = sql """ show tablets from ${tableName}; """
+        for (String[] tablet in tablets) {
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                logger.info("run compaction:" + tablet_id)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X POST http://";)
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run?tablet_id=")
+                sb.append(tablet_id)
+                sb.append("&compact_type=cumulative")
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
+                //assertEquals(code, 0)
+        }
+
+        // wait for all compactions done
+        for (String[] tablet in tablets) {
+                boolean running = true
+                do {
+                    Thread.sleep(100)
+                    String tablet_id = tablet[0]
+                    backend_id = tablet[2]
+                    StringBuilder sb = new StringBuilder();
+                    sb.append("curl -X GET http://";)
+                    sb.append(backendId_to_backendIP.get(backend_id))
+                    sb.append(":")
+                    sb.append(backendId_to_backendHttpPort.get(backend_id))
+                    sb.append("/api/compaction/run_status?tablet_id=")
+                    sb.append(tablet_id)
+
+                    String command = sb.toString()
+                    process = command.execute()
+                    code = process.waitFor()
+                    err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                    out = process.getText()
+                    logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
+                    assertEquals(code, 0)
+                    def compactionStatus = parseJson(out.trim())
+                    assertEquals("success", 
compactionStatus.status.toLowerCase())
+                    running = compactionStatus.run_status
+                } while (running)
+        }
+         
+        qt_sc """ select count(*) from ${tableName} """
+
+        qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+
+}
+
+    
\ No newline at end of file
diff --git 
a/regression-test/suites/schema_change/test_agg_rollup_schema_change.groovy 
b/regression-test/suites/schema_change/test_agg_rollup_schema_change.groovy
new file mode 100644
index 0000000000..850a4d02da
--- /dev/null
+++ b/regression-test/suites/schema_change/test_agg_rollup_schema_change.groovy
@@ -0,0 +1,215 @@
+      
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_agg_rollup_schema_change") {
+    def tableName = "schema_change_agg_rollup_regression_test"
+
+    try {
+
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://";)
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) 
ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
+                CREATE TABLE ${tableName} (
+                    `user_id` LARGEINT NOT NULL COMMENT "user id",
+                    `date` DATE NOT NULL COMMENT "data import date",
+                    `city` VARCHAR(20) COMMENT "user city",
+                    `age` SMALLINT COMMENT "user age",
+                    `sex` TINYINT COMMENT "user gender",
+
+                    `cost` BIGINT SUM DEFAULT "0" COMMENT "user total cost",
+                    `max_dwell_time` INT MAX DEFAULT "0" COMMENT "user max dwell time",
+                    `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "user min dwell time",
+                    `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL column",
+                    `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap column")
+                AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+                BUCKETS 1
+                PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+            """
+
+        //add rollup
+        def result = "null"
+        def rollupName = "rollup_cost"
+        sql "ALTER TABLE ${tableName} ADD ROLLUP 
${rollupName}(`user_id`,`date`,`city`,`age`,`sex`, cost);"
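+        // rollup_cost materializes the cost column, so the DROP COLUMN cost below
+        // has to run as a full schema change job rather than a light one.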
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE ROLLUP WHERE 
TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")){
+                return
+            }
+            Thread.sleep(100)
+        }
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, 1, 30, 20, hll_hash(1), 
to_bitmap(1))
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, 1, 31, 19, hll_hash(2), 
to_bitmap(2))
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, 1, 31, 21, hll_hash(2), 
to_bitmap(2))
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, 1, 32, 20, hll_hash(3), 
to_bitmap(3))
+            """
+
+        qt_sc """ select * from ${tableName} order by user_id """
+
+        // drop value column with rollup, not light schema change
+        sql """
+            ALTER TABLE ${tableName} DROP COLUMN cost
+            """
+
+        result = "null"
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE COLUMN WHERE 
TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")) {
+                log.info("rollup job is cancelled, result: 
${result}".toString())
+                return
+            }
+            Thread.sleep(100)
+        }
+
+        sql """ INSERT INTO ${tableName} (`user_id`, `date`, `city`, `age`, 
`sex`, `max_dwell_time`,`min_dwell_time`, `hll_col`, `bitmap_col`)
+                VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(4), 
to_bitmap(4))
+            """
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, 32, 20, hll_hash(5), 
to_bitmap(5))
+            """
+
+        // compaction
+        String[][] tablets = sql """ show tablets from ${tableName}; """
+        for (String[] tablet in tablets) {
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                logger.info("run compaction:" + tablet_id)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X POST http://";)
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run?tablet_id=")
+                sb.append(tablet_id)
+                sb.append("&compact_type=cumulative")
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
+                //assertEquals(code, 0)
+        }
+
+        // wait for all compactions done
+        for (String[] tablet in tablets) {
+                boolean running = true
+                do {
+                    Thread.sleep(100)
+                    String tablet_id = tablet[0]
+                    backend_id = tablet[2]
+                    StringBuilder sb = new StringBuilder();
+                    sb.append("curl -X GET http://";)
+                    sb.append(backendId_to_backendIP.get(backend_id))
+                    sb.append(":")
+                    sb.append(backendId_to_backendHttpPort.get(backend_id))
+                    sb.append("/api/compaction/run_status?tablet_id=")
+                    sb.append(tablet_id)
+
+                    String command = sb.toString()
+                    process = command.execute()
+                    code = process.waitFor()
+                    err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                    out = process.getText()
+                    logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
+                    assertEquals(code, 0)
+                    def compactionStatus = parseJson(out.trim())
+                    assertEquals("success", 
compactionStatus.status.toLowerCase())
+                    running = compactionStatus.run_status
+                } while (running)
+        }
+         
+        qt_sc """ select count(*) from ${tableName} """
+
+        qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+}
diff --git 
a/regression-test/suites/schema_change/test_agg_vals_schema_change.groovy 
b/regression-test/suites/schema_change/test_agg_vals_schema_change.groovy
new file mode 100644
index 0000000000..a270c61563
--- /dev/null
+++ b/regression-test/suites/schema_change/test_agg_vals_schema_change.groovy
@@ -0,0 +1,219 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_agg_vals_schema_change") {
+    def tableName = "schema_change_agg_vals_regression_test"
+
+    try {
+
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://";)
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) 
ele)[2])
+            }
+        }
+
+    sql """ DROP TABLE IF EXISTS ${tableName} """
+
+    sql """
+            CREATE TABLE ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "user id",
+                `date` DATE NOT NULL COMMENT "data import date",
+                `city` VARCHAR(20) COMMENT "user city",
+                `age` SMALLINT COMMENT "user age",
+                `sex` TINYINT COMMENT "user gender",
+                `last_visit_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                `last_update_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "user last update time",
+                `last_visit_date_not_null` DATETIME REPLACE NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                `cost` BIGINT SUM DEFAULT "0" COMMENT "user total cost",
+                `max_dwell_time` INT MAX DEFAULT "0" COMMENT "user max dwell time",
+                `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "user min dwell time",
+                `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL column",
+                `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap column")
+            AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            BUCKETS 1
+            PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+        """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', 
'2020-01-01', 1, 30, 20, hll_hash(1), to_bitmap(1))
+        """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2020-01-02', 1, 31, 19, hll_hash(2), to_bitmap(2))
+        """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2020-01-02', 1, 31, 21, hll_hash(2), to_bitmap(2))
+        """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, hll_hash(3), to_bitmap(3))
+        """
+    qt_sc """
+                       select * from ${tableName} order by user_id
+                    """
+
+    // add column
+    sql """
+        ALTER table ${tableName} ADD COLUMN new_column INT MAX default "1" 
+        """
+
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2)
+        """
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """
+
+
+    sql """ INSERT INTO ${tableName} 
(`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
+                                      
`last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`, `hll_col`, 
`bitmap_col`)
+            VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4))
+        """
+
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2)
+        """
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """
+
+    qt_sc """ select count(*) from ${tableName} """
+
+    // drop column
+    sql """
+          ALTER TABLE ${tableName} DROP COLUMN last_visit_date
+          """
+    qt_sc """ select * from ${tableName} where user_id = 3 """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
1, 32, 20, hll_hash(4), to_bitmap(4), 2)
+        """
+
+    qt_sc """ select * from ${tableName} where user_id = 4 """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
1, 32, 20, hll_hash(5), to_bitmap(5), 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
1, 32, 20, hll_hash(5), to_bitmap(5), 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
1, 32, 20, hll_hash(5), to_bitmap(5), 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
1, 32, 20, hll_hash(5), to_bitmap(5), 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
1, 32, 20, hll_hash(5), to_bitmap(5), 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
1, 32, 20, hll_hash(5), to_bitmap(5), 2)
+        """
+
+    // compaction
+    String[][] tablets = sql """ show tablets from ${tableName}; """
+    for (String[] tablet in tablets) {
+            String tablet_id = tablet[0]
+            backend_id = tablet[2]
+            logger.info("run compaction:" + tablet_id)
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://";)
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=cumulative")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", 
err=" + err)
+            //assertEquals(code, 0)
+    }
+
+    // wait for all compactions done
+    for (String[] tablet in tablets) {
+            boolean running = true
+            do {
+                Thread.sleep(100)
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://";)
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + 
out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+    }
+    qt_sc """ select count(*) from ${tableName} """
+
+    qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+
+}
diff --git 
a/regression-test/suites/schema_change/test_alter_table_column.groovy 
b/regression-test/suites/schema_change/test_alter_table_column.groovy
index 446df2d231..84b4dc2c28 100644
--- a/regression-test/suites/schema_change/test_alter_table_column.groovy
+++ b/regression-test/suites/schema_change/test_alter_table_column.groovy
@@ -29,7 +29,7 @@ suite("test_alter_table_column", "schema_change") {
                 value1 INT
             )
             DUPLICATE KEY (k1)
-            DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = 
"1");
+            DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = 
"1", "light_schema_change" = "true");
         """
     sql """
             ALTER TABLE ${tbName1} 
@@ -86,7 +86,7 @@ suite("test_alter_table_column", "schema_change") {
                 value1 INT SUM
             )
             AGGREGATE KEY (k1)
-            DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = 
"1");
+            DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = 
"1", "light_schema_change" = "true");
         """
     sql """
             ALTER TABLE ${tbName2} 
diff --git a/regression-test/suites/schema_change/test_delete_schema_change.sql 
b/regression-test/suites/schema_change/test_delete_schema_change.sql
new file mode 100644
index 0000000000..b153d7d38b
--- /dev/null
+++ b/regression-test/suites/schema_change/test_delete_schema_change.sql
@@ -0,0 +1,44 @@
+DROP TABLE IF EXISTS schema_change_delete_regression_test;
+
+CREATE TABLE schema_change_delete_regression_test (
+                `user_id` LARGEINT NOT NULL COMMENT "user id",
+                `date` DATE NOT NULL COMMENT "data import date",
+                `city` VARCHAR(20) COMMENT "user city",
+                `age` SMALLINT COMMENT "user age",
+                `sex` TINYINT COMMENT "user gender",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last update time",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                `cost` BIGINT DEFAULT "0" COMMENT "user total cost",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "user max dwell time",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "user min dwell time")
+            DUPLICATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+
+INSERT INTO schema_change_delete_regression_test VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', 
'2020-01-01', 1, 30, 20);
+
+INSERT INTO schema_change_delete_regression_test VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2020-01-02', 1, 31, 21);
+
+SELECT * FROM schema_change_delete_regression_test order by user_id ASC, 
last_visit_date;
+
+ALTER table schema_change_delete_regression_test ADD COLUMN new_column INT 
default "1";
+
+SELECT * FROM schema_change_delete_regression_test order by user_id DESC, 
last_visit_date;
+
+INSERT INTO schema_change_delete_regression_test VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2020-01-02', 1, 31, 19, 2);
+
+INSERT INTO schema_change_delete_regression_test VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2);
+
+INSERT INTO schema_change_delete_regression_test VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 1);
+
+INSERT INTO schema_change_delete_regression_test VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2);
+             
+DELETE FROM schema_change_delete_regression_test where new_column = 1;
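+-- new_column was added after the first two rows were loaded, so those rows carry the
+-- column default "1" and are deleted here together with the row inserted with an
+-- explicit new_column = 1.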
+
+SELECT * FROM schema_change_delete_regression_test order by user_id DESC, 
last_visit_date;
\ No newline at end of file
diff --git 
a/regression-test/suites/schema_change/test_dup_keys_schema_change.groovy 
b/regression-test/suites/schema_change/test_dup_keys_schema_change.groovy
new file mode 100644
index 0000000000..e81be79de7
--- /dev/null
+++ b/regression-test/suites/schema_change/test_dup_keys_schema_change.groovy
@@ -0,0 +1,225 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_dup_keys_schema_change") {
+    def tableName = "schema_change_dup_keys_regression_test"
+
+    try {
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://";)
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) 
ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+
+        sql """
+                CREATE TABLE ${tableName} (
+                    `user_id` LARGEINT NOT NULL COMMENT "user id",
+                    `date` DATE NOT NULL COMMENT "data import date",
+                    `city` VARCHAR(20) COMMENT "user city",
+                    `age` SMALLINT COMMENT "user age",
+                    `sex` TINYINT COMMENT "user gender",
+                    `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                    `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last update time",
+                    `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                    `cost` BIGINT DEFAULT "0" COMMENT "user total cost",
+                    `max_dwell_time` INT DEFAULT "0" COMMENT "user max dwell time",
+                    `min_dwell_time` INT DEFAULT "99999" COMMENT "user min dwell time")
+                DUPLICATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+                BUCKETS 1
+                PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', 
'2020-01-01', '2020-01-01', 1, 30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', 
'2020-01-02', '2020-01-02', 1, 31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', 
'2020-01-02', '2020-01-02', 1, 31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20)
+            """
+        qt_sc """
+                        select count(*) from ${tableName}
+                        """
+
+        // add column
+        sql """
+            ALTER table ${tableName} ADD COLUMN new_column INT default "1" 
+            """
+
+        sql """ SELECT * FROM ${tableName} WHERE user_id=2 order by 
min_dwell_time """
+
+        sql """ INSERT INTO ${tableName} 
(`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
+                                        
`last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`)
+                VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20)
+            """
+
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """
+
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20, 2)
+            """
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 order by 
new_column """
+
+        qt_sc """ select count(*) from ${tableName} """
+
+        // drop column
+        sql """
+            ALTER TABLE ${tableName} DROP COLUMN sex
+            """
+        result = "null"
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE COLUMN WHERE 
TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")) {
+                log.info("rollup job is cancelled, result: 
${result}".toString())
+                return
+            }
+            Thread.sleep(100)
+        }
+        qt_sc """ select * from ${tableName} where user_id = 3 order by 
new_column """
+
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (4, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+
+        qt_sc """ select * from ${tableName} where user_id = 4 """
+
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+
+        // compaction
+        String[][] tablets = sql """ show tablets from ${tableName}; """
+        for (String[] tablet in tablets) {
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                logger.info("run compaction:" + tablet_id)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X POST http://";)
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run?tablet_id=")
+                sb.append(tablet_id)
+                sb.append("&compact_type=cumulative")
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
+                //assertEquals(code, 0)
+        }
+
+        // wait for all compactions done
+        for (String[] tablet in tablets) {
+                boolean running = true
+                do {
+                    Thread.sleep(100)
+                    String tablet_id = tablet[0]
+                    backend_id = tablet[2]
+                    StringBuilder sb = new StringBuilder();
+                    sb.append("curl -X GET http://";)
+                    sb.append(backendId_to_backendIP.get(backend_id))
+                    sb.append(":")
+                    sb.append(backendId_to_backendHttpPort.get(backend_id))
+                    sb.append("/api/compaction/run_status?tablet_id=")
+                    sb.append(tablet_id)
+
+                    String command = sb.toString()
+                    process = command.execute()
+                    code = process.waitFor()
+                    err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                    out = process.getText()
+                    logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
+                    assertEquals(code, 0)
+                    def compactionStatus = parseJson(out.trim())
+                    assertEquals("success", 
compactionStatus.status.toLowerCase())
+                    running = compactionStatus.run_status
+                } while (running)
+        }
+
+        qt_sc """ select count(*) from ${tableName} """
+
+        qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 order by 
min_dwell_time"""
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+
+}
diff --git 
a/regression-test/suites/schema_change/test_dup_mv_schema_change.groovy 
b/regression-test/suites/schema_change/test_dup_mv_schema_change.groovy
new file mode 100644
index 0000000000..9318aaddd3
--- /dev/null
+++ b/regression-test/suites/schema_change/test_dup_mv_schema_change.groovy
@@ -0,0 +1,239 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_dup_mv_schema_change") {
+    def tableName = "schema_change_dup_mv_regression_test"
+
+    try {
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://";)
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) 
ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+
+        sql """
+                CREATE TABLE ${tableName} (
+                    `user_id` LARGEINT NOT NULL COMMENT "user id",
+                    `date` DATE NOT NULL COMMENT "data import date",
+                    `city` VARCHAR(20) COMMENT "user city",
+                    `age` SMALLINT COMMENT "user age",
+                    `sex` TINYINT COMMENT "user gender",
+                    `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                    `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last update time",
+                    `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                    `cost` BIGINT DEFAULT "0" COMMENT "user total cost",
+                    `max_dwell_time` INT DEFAULT "0" COMMENT "user max dwell time",
+                    `min_dwell_time` INT DEFAULT "99999" COMMENT "user min dwell time")
+                DUPLICATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+                BUCKETS 1
+                PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+            """
+
+        //add materialized view
+        def result = "null"
+        def mvName = "mv1"
+        sql "create materialized view ${mvName} as select user_id, date, city, 
age,sex from ${tableName};"
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE MATERIALIZED VIEW WHERE 
TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")){
+                return 
+            }
+            Thread.sleep(100)
+        }
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', 
'2020-01-01', '2020-01-01', 1, 30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', 
'2020-01-02', '2020-01-02', 1, 31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', 
'2020-01-02', '2020-01-02', 1, 31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20)
+            """
+        qt_sc """
+                        select count(*) from ${tableName}
+                        """
+
+        // add column
+        sql """
+            ALTER table ${tableName} ADD COLUMN new_column INT default "1" 
+            """
+
+        sql """ SELECT * FROM ${tableName} WHERE user_id=2 order by 
min_dwell_time """
+
+        sql """ INSERT INTO ${tableName} 
(`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
+                                        
`last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`)
+                VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20)
+            """
+
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """
+
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20, 2)
+            """
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 order by 
new_column """
+
+        qt_sc """ select count(*) from ${tableName} """
+
+        // drop column
+        sql """
+            ALTER TABLE ${tableName} DROP COLUMN sex
+            """
+        result = "null"
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE COLUMN WHERE 
TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")) {
+                log.info("rollup job is cancelled, result: 
${result}".toString())
+                return
+            }
+            Thread.sleep(100)
+        }
+
+        qt_sc """ select * from ${tableName} where user_id = 3 order by 
new_column """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (4, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+
+        qt_sc """ select * from ${tableName} where user_id = 4 """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+
+        // compaction
+        String[][] tablets = sql """ show tablets from ${tableName}; """
+        for (String[] tablet in tablets) {
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                logger.info("run compaction:" + tablet_id)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X POST http://";)
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run?tablet_id=")
+                sb.append(tablet_id)
+                sb.append("&compact_type=cumulative")
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
+                //assertEquals(code, 0)
+        }
+
+        // wait for all compactions done
+        for (String[] tablet in tablets) {
+                boolean running = true
+                do {
+                    Thread.sleep(100)
+                    String tablet_id = tablet[0]
+                    backend_id = tablet[2]
+                    StringBuilder sb = new StringBuilder();
+                    sb.append("curl -X GET http://";)
+                    sb.append(backendId_to_backendIP.get(backend_id))
+                    sb.append(":")
+                    sb.append(backendId_to_backendHttpPort.get(backend_id))
+                    sb.append("/api/compaction/run_status?tablet_id=")
+                    sb.append(tablet_id)
+
+                    String command = sb.toString()
+                    process = command.execute()
+                    code = process.waitFor()
+                    err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                    out = process.getText()
+                    logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
+                    assertEquals(code, 0)
+                    def compactionStatus = parseJson(out.trim())
+                    assertEquals("success", 
compactionStatus.status.toLowerCase())
+                    running = compactionStatus.run_status
+                } while (running)
+        }
+
+        qt_sc """ select count(*) from ${tableName} """
+
+
+        qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 order by 
min_dwell_time"""
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+
+}
diff --git 
a/regression-test/suites/schema_change/test_dup_rollup_schema_change.groovy 
b/regression-test/suites/schema_change/test_dup_rollup_schema_change.groovy
new file mode 100644
index 0000000000..eddcdd411c
--- /dev/null
+++ b/regression-test/suites/schema_change/test_dup_rollup_schema_change.groovy
@@ -0,0 +1,236 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_dup_rollup_schema_change") {
+    def tableName = "schema_change_dup_rollup_regression_test"
+
+    try {
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://";)
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
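+        // each config entry is a list of [name, type, value, ...]; record whether auto compaction is disabled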
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+
+        sql """
+                CREATE TABLE ${tableName} (
+                    `user_id` LARGEINT NOT NULL COMMENT "user id",
+                    `date` DATE NOT NULL COMMENT "data import date",
+                    `city` VARCHAR(20) COMMENT "user's city",
+                    `age` SMALLINT COMMENT "user age",
+                    `sex` TINYINT COMMENT "user sex",
+                    `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                    `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last update time",
+                    `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                    `cost` BIGINT DEFAULT "0" COMMENT "user's total cost",
+                    `max_dwell_time` INT DEFAULT "0" COMMENT "user's maximum dwell time",
+                    `min_dwell_time` INT DEFAULT "99999" COMMENT "user's minimum dwell time")
+                DUPLICATE KEY(`user_id`, `date`, `city`, `age`, `sex`)
+                DISTRIBUTED BY HASH(`user_id`) BUCKETS 1
+                PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+            """
+
+        // add rollup
+        def result = "null"
+        def rollupName = "rollup_cost"
+        sql "ALTER TABLE ${tableName} ADD ROLLUP ${rollupName}(`user_id`,`date`,`city`,`age`,`sex`, cost);"
+        // poll the rollup job until it is FINISHED; exit the suite early if it was CANCELLED
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")){
+                return
+            }
+            Thread.sleep(100)
+        }
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', 
'2020-01-01', '2020-01-01', 1, 30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', 
'2020-01-02', '2020-01-02', 1, 31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', 
'2020-01-02', '2020-01-02', 1, 31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20)
+            """
+        qt_sc """
+                        select count(*) from ${tableName}
+                        """
+
+        // add column
+        sql """
+            ALTER table ${tableName} ADD COLUMN new_column INT default "1" 
+            """
+
+        sql """ SELECT * FROM ${tableName} WHERE user_id=2 order by 
min_dwell_time """
+
+        sql """ INSERT INTO ${tableName} 
(`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
+                                        
`last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`)
+                VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20)
+            """
+
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20, 2)
+            """
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 order by 
new_column """
+
+        qt_sc """ select count(*) from ${tableName} """
+        // drop column
+        sql """
+            ALTER TABLE ${tableName} DROP COLUMN sex
+            """
+        result = "null"
+        while (!result.contains("FINISHED")){
+            result = sql "SHOW ALTER TABLE COLUMN WHERE 
IndexName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = result.toString()
+            logger.info("result: ${result}")
+            if(result.contains("CANCELLED")) {
+                log.info("rollup job is cancelled, result: 
${result}".toString())
+                return
+            }
+            Thread.sleep(100)
+        }
+
+        qt_sc """ select * from ${tableName} where user_id = 3 order by 
new_column """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (4, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+
+        qt_sc """ select * from ${tableName} where user_id = 4 """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+
+        // compaction
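+        // trigger cumulative compaction on every tablet via POST /api/compaction/run on the owning BE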
+        String[][] tablets = sql """ show tablets from ${tableName}; """
+        for (String[] tablet in tablets) {
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                logger.info("run compaction:" + tablet_id)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X POST http://")
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run?tablet_id=")
+                sb.append(tablet_id)
+                sb.append("&compact_type=cumulative")
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+                //assertEquals(code, 0)
+        }
+
+        // wait for all compactions done
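+        // poll each tablet's compaction run_status endpoint on its BE until it reports not running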
+        for (String[] tablet in tablets) {
+                boolean running = true
+                do {
+                    Thread.sleep(100)
+                    String tablet_id = tablet[0]
+                    backend_id = tablet[2]
+                    StringBuilder sb = new StringBuilder();
+                    sb.append("curl -X GET http://")
+                    sb.append(backendId_to_backendIP.get(backend_id))
+                    sb.append(":")
+                    sb.append(backendId_to_backendHttpPort.get(backend_id))
+                    sb.append("/api/compaction/run_status?tablet_id=")
+                    sb.append(tablet_id)
+
+                    String command = sb.toString()
+                    process = command.execute()
+                    code = process.waitFor()
+                    err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                    out = process.getText()
+                    logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                    assertEquals(code, 0)
+                    def compactionStatus = parseJson(out.trim())
+                    assertEquals("success", compactionStatus.status.toLowerCase())
+                    running = compactionStatus.run_status
+                } while (running)
+        }
+
+        qt_sc """ select count(*) from ${tableName} """
+
+
+        qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 order by 
min_dwell_time """
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+
+}
diff --git a/regression-test/suites/schema_change/test_dup_vals_schema_change.groovy b/regression-test/suites/schema_change/test_dup_vals_schema_change.groovy
new file mode 100644
index 0000000000..5df6cd9ef7
--- /dev/null
+++ b/regression-test/suites/schema_change/test_dup_vals_schema_change.groovy
@@ -0,0 +1,209 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_dup_vals_schema_change") {
+    def tableName = "schema_change_dup_vals_regression_test"
+
+    try {
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
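+        // query the first BE's /api/show_config HTTP endpoint to read its configuration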
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://";)
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
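+        // each config entry is a list of [name, type, value, ...]; record whether auto compaction is disabled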
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+
+        sql """
+                CREATE TABLE ${tableName} (
+                    `user_id` LARGEINT NOT NULL COMMENT "user id",
+                    `date` DATE NOT NULL COMMENT "data import date",
+                    `city` VARCHAR(20) COMMENT "user's city",
+                    `age` SMALLINT COMMENT "user age",
+                    `sex` TINYINT COMMENT "user sex",
+                    `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                    `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last update time",
+                    `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                    `cost` BIGINT DEFAULT "0" COMMENT "user's total cost",
+                    `max_dwell_time` INT DEFAULT "0" COMMENT "user's maximum dwell time",
+                    `min_dwell_time` INT DEFAULT "99999" COMMENT "user's minimum dwell time")
+                DUPLICATE KEY(`user_id`, `date`, `city`, `age`, `sex`)
+                DISTRIBUTED BY HASH(`user_id`) BUCKETS 1
+                PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', 
'2020-01-01', '2020-01-01', 1, 30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', 
'2020-01-02', '2020-01-02', 1, 31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', 
'2020-01-02', '2020-01-02', 1, 31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20)
+            """
+        qt_sc """
+                        select count(*) from ${tableName}
+                        """
+
+        // add column
+        sql """
+            ALTER table ${tableName} ADD COLUMN new_column INT default "1" 
+            """
+
+        sql """ SELECT * FROM ${tableName} WHERE user_id=2 order by 
min_dwell_time """
+
+        sql """ INSERT INTO ${tableName} 
(`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
+                                        
`last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`)
+                VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20)
+            """
+
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """
+
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20, 2)
+            """
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 order by 
new_column """
+
+        qt_sc """ select count(*) from ${tableName} """
+
+        // drop column
+        sql """
+            ALTER TABLE ${tableName} DROP COLUMN last_visit_date
+            """
+        qt_sc """ select * from ${tableName} where user_id = 3 order by 
new_column """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+
+        qt_sc """ select * from ${tableName} where user_id = 4 """
+
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+        sql """ INSERT INTO ${tableName} VALUES
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+            """
+
+        // compaction
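+        // trigger cumulative compaction on every tablet via POST /api/compaction/run on the owning BE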
+        String[][] tablets = sql """ show tablets from ${tableName}; """
+        for (String[] tablet in tablets) {
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                logger.info("run compaction:" + tablet_id)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X POST http://")
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run?tablet_id=")
+                sb.append(tablet_id)
+                sb.append("&compact_type=cumulative")
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+                //assertEquals(code, 0)
+        }
+
+        // wait for all compactions done
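+        // poll each tablet's compaction run_status endpoint on its BE until it reports not running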
+        for (String[] tablet in tablets) {
+                boolean running = true
+                do {
+                    Thread.sleep(100)
+                    String tablet_id = tablet[0]
+                    backend_id = tablet[2]
+                    StringBuilder sb = new StringBuilder();
+                    sb.append("curl -X GET http://")
+                    sb.append(backendId_to_backendIP.get(backend_id))
+                    sb.append(":")
+                    sb.append(backendId_to_backendHttpPort.get(backend_id))
+                    sb.append("/api/compaction/run_status?tablet_id=")
+                    sb.append(tablet_id)
+
+                    String command = sb.toString()
+                    process = command.execute()
+                    code = process.waitFor()
+                    err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                    out = process.getText()
+                    logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                    assertEquals(code, 0)
+                    def compactionStatus = parseJson(out.trim())
+                    assertEquals("success", compactionStatus.status.toLowerCase())
+                    running = compactionStatus.run_status
+                } while (running)
+        }
+        qt_sc """ select count(*) from ${tableName} """
+
+        qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 order by 
min_dwell_time """
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+}
diff --git a/regression-test/suites/schema_change/test_partition_schema_change.sql b/regression-test/suites/schema_change/test_partition_schema_change.sql
new file mode 100644
index 0000000000..ca118ee90e
--- /dev/null
+++ b/regression-test/suites/schema_change/test_partition_schema_change.sql
@@ -0,0 +1,44 @@
+DROP TABLE IF EXISTS example_range_tbl;
+CREATE TABLE example_range_tbl
+(
+    `user_id` LARGEINT NOT NULL COMMENT "user id",
+    `date` DATE NOT NULL COMMENT "data import date",
+    `city` VARCHAR(20) COMMENT "user's city",
+    `age` SMALLINT COMMENT "user age",
+    `sex` TINYINT COMMENT "user sex",
+    `last_visit_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+    `cost` BIGINT SUM DEFAULT "0" COMMENT "user's total cost",
+    `max_dwell_time` INT MAX DEFAULT "0" COMMENT "user's maximum dwell time",
+    `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "user's minimum dwell time"
+)
+ENGINE=OLAP
+AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`)
+PARTITION BY RANGE(`date`)
+(
+    PARTITION `p201701` VALUES LESS THAN ("2017-02-01"),
+    PARTITION `p201702` VALUES LESS THAN ("2017-03-01"),
+    PARTITION `p201703` VALUES LESS THAN ("2017-04-01")
+)
+DISTRIBUTED BY HASH(`user_id`) BUCKETS 1
+PROPERTIES
+(
+    "replication_num" = "1", "light_schema_change" = "true"
+);
+
+INSERT INTO example_range_tbl VALUES
+    (1, '2017-01-02', 'Beijing', 10, 1, "2017-01-02 00:00:00", 1, 30, 20);
+
+INSERT INTO example_range_tbl VALUES
+    (1, '2017-02-02', 'Beijing', 10, 1, "2017-02-02 00:00:00", 1, 30, 20);
+
+INSERT INTO example_range_tbl VALUES
+    (1, '2017-03-02', 'Beijing', 10, 1, "2017-03-02 00:00:00", 1, 30, 20);
+
+select * from example_range_tbl order by `date`;
+
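+-- with light_schema_change enabled, this ADD COLUMN is expected to complete as a metadata-only change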
+ALTER TABLE example_range_tbl ADD COLUMN new_column INT MAX DEFAULT "1";
+
+INSERT INTO example_range_tbl VALUES
+    (2, '2017-02-03', 'Beijing', 10, 1, "2017-02-02 00:00:00", 1, 30, 20, 2);
+
+select * from example_range_tbl order by `date`;
\ No newline at end of file
diff --git a/regression-test/suites/schema_change/test_schema_change.groovy b/regression-test/suites/schema_change/test_schema_change.groovy
index a9be3a270c..843e33f9b6 100644
--- a/regression-test/suites/schema_change/test_schema_change.groovy
+++ b/regression-test/suites/schema_change/test_schema_change.groovy
@@ -41,7 +41,7 @@ suite("test_schema_change", "schema_change") {
                 PARTITION p201707 VALUES LESS THAN ('2021-12-01')
             )
             DISTRIBUTED BY HASH(siteid) BUCKETS 5
-            PROPERTIES("replication_num" = "1"); 
+            PROPERTIES("replication_num" = "1", "light_schema_change" = 
"true"); 
          """
      sql """ insert into ${tbName} 
values('2021-11-01',1,1,'用户A',1),('2021-11-01',1,1,'用户B',1),('2021-11-01',1,1,'用户A',3),('2021-11-02',1,1,'用户A',1),('2021-11-02',1,1,'用户B',1),('2021-11-02',101,112332121,'用户B',112312),('2021-11-02',103,112332211,'用户B',112312);
 """
      sql """ alter  table ${tbName} modify column citycode string """


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
