This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new b5531c5caf [BugFix](BE) fix condition index doesn't match (#11474)
b5531c5caf is described below

commit b5531c5cafec76501617705c7395d31225474897
Author: Lightman <31928846+lchangli...@users.noreply.github.com>
AuthorDate: Fri Aug 5 07:57:18 2022 +0800

    [BugFix](BE) fix condition index doesn't match (#11474)
    
    * [BugFix](Be) fix condition index doesn't match
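
    Background for the fix: the BE stored per-column predicates and index
    iterators in vectors indexed by positional column id (cid). With light
    schema change enabled, column positions can shift, so a condition could
    be evaluated against the wrong column. This patch keys those containers
    by the column's stable unique_id instead. A minimal sketch of the idea,
    using hypothetical stand-in types rather than the real BE classes:

        #include <cstdint>
        #include <map>

        struct ColumnIterator {};             // stand-in for the BE iterator type
        struct Column { int32_t unique_id; }; // stand-in for TabletColumn

        // Look up per-column state by unique_id rather than by position;
        // unique_id stays the same when columns are added, dropped, or
        // reordered by a light schema change.
        ColumnIterator* find_iter(const std::map<int32_t, ColumnIterator*>& iters,
                                  const Column& col) {
            auto it = iters.find(col.unique_id);
            return it == iters.end() ? nullptr : it->second;
        }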
---
 be/src/olap/bloom_filter_predicate.h               |   4 +-
 be/src/olap/column_predicate.h                     |   3 +-
 be/src/olap/comparison_predicate.h                 |   5 +-
 be/src/olap/field.h                                |   6 +-
 be/src/olap/in_list_predicate.h                    |   5 +-
 be/src/olap/like_column_predicate.h                |   4 +-
 be/src/olap/null_predicate.cpp                     |   9 +-
 be/src/olap/null_predicate.h                       |   4 +-
 be/src/olap/olap_cond.cpp                          |   8 +-
 be/src/olap/reader.cpp                             |   5 +-
 be/src/olap/rowset/beta_rowset_reader.cpp          |   1 -
 be/src/olap/rowset/segment_v2/segment.cpp          |   2 +-
 be/src/olap/rowset/segment_v2/segment_iterator.cpp |  67 +++---
 be/src/olap/rowset/segment_v2/segment_iterator.h   |   8 +-
 be/src/olap/schema.h                               |  11 +
 be/src/olap/tablet_schema.cpp                      |   1 +
 .../schema_change/test_uniq_keys_schema_change.out |  22 ++
 .../schema_change/test_uniq_mv_schema_change.out   |  25 +++
 .../test_uniq_rollup_schema_change.out             |  24 +++
 .../schema_change/test_uniq_vals_schema_change.out |  22 ++
 .../schema_change/test_update_schema_change.out    |  64 ++++++
 .../test_agg_keys_schema_change.groovy             |   4 +-
 .../schema_change/test_agg_mv_schema_change.groovy |   2 +-
 .../test_agg_rollup_schema_change.groovy           |   2 +-
 .../schema_change/test_alter_table_column.groovy   |   2 +-
 .../test_alter_table_column_with_delete.groovy     |   2 +-
 .../test_dup_keys_schema_change.groovy             |   2 +-
 .../schema_change/test_dup_mv_schema_change.groovy |   2 +-
 .../suites/schema_change/test_schema_change.groovy |   2 +-
 .../test_uniq_keys_schema_change.groovy            | 203 ++++++++++++++++++
 .../test_uniq_mv_schema_change.groovy              | 226 ++++++++++++++++++++
 .../test_uniq_rollup_schema_change.groovy          | 237 +++++++++++++++++++++
 ....groovy => test_uniq_vals_schema_change.groovy} |  51 ++---
 .../schema_change/test_update_schema_change.sql    |  50 +++++
 34 files changed, 980 insertions(+), 105 deletions(-)

diff --git a/be/src/olap/bloom_filter_predicate.h b/be/src/olap/bloom_filter_predicate.h
index e5833729b4..b1b4217ace 100644
--- a/be/src/olap/bloom_filter_predicate.h
+++ b/be/src/olap/bloom_filter_predicate.h
@@ -52,8 +52,8 @@ public:
     void evaluate_and(ColumnBlock* block, uint16_t* sel, uint16_t size,
                       bool* flags) const override {};
 
-    Status evaluate(const Schema& schema, const vector<BitmapIndexIterator*>& iterators,
-                    uint32_t num_rows, roaring::Roaring* roaring) const override {
+    Status evaluate(BitmapIndexIterator* iterators, uint32_t num_rows,
+                    roaring::Roaring* roaring) const override {
         return Status::OK();
     }
 
diff --git a/be/src/olap/column_predicate.h b/be/src/olap/column_predicate.h
index 2edf19d6da..d88b3c825f 100644
--- a/be/src/olap/column_predicate.h
+++ b/be/src/olap/column_predicate.h
@@ -82,8 +82,7 @@ public:
                               bool* flags) const = 0;
 
     //evaluate predicate on Bitmap
-    virtual Status evaluate(const Schema& schema,
-                            const std::vector<BitmapIndexIterator*>& iterators, uint32_t num_rows,
+    virtual Status evaluate(BitmapIndexIterator* iterator, uint32_t num_rows,
                             roaring::Roaring* roaring) const = 0;
 
     // evaluate predicate on IColumn
diff --git a/be/src/olap/comparison_predicate.h b/be/src/olap/comparison_predicate.h
index ec8bcce1f5..1ba25e5db2 100644
--- a/be/src/olap/comparison_predicate.h
+++ b/be/src/olap/comparison_predicate.h
@@ -110,9 +110,8 @@ public:
         }
     }
 
-    Status evaluate(const Schema& schema, const std::vector<BitmapIndexIterator*>& iterators,
-                    uint32_t num_rows, roaring::Roaring* bitmap) const override {
-        BitmapIndexIterator* iterator = iterators[_column_id];
+    Status evaluate(BitmapIndexIterator* iterator, uint32_t num_rows,
+                    roaring::Roaring* bitmap) const override {
         if (iterator == nullptr) {
             return Status::OK();
         }
diff --git a/be/src/olap/field.h b/be/src/olap/field.h
index 99b9bc0665..5f49bb9b2c 100644
--- a/be/src/olap/field.h
+++ b/be/src/olap/field.h
@@ -47,7 +47,8 @@ public:
               _key_coder(get_key_coder(column.type())),
               _name(column.name()),
               _index_size(column.index_length()),
-              _is_nullable(column.is_nullable()) {
+              _is_nullable(column.is_nullable()),
+              _unique_id(column.unique_id()) {
         if (column.type() == OLAP_FIELD_TYPE_ARRAY) {
             _agg_info = get_aggregate_info(column.aggregation(), column.type(),
                                            column.get_sub_column(0).type());
@@ -62,6 +63,7 @@ public:
     int32_t length() const { return _length; }
     size_t field_size() const { return size() + 1; }
     size_t index_size() const { return _index_size; }
+    int32_t unique_id() const { return _unique_id; }
     const std::string& name() const { return _name; }
 
     virtual void set_to_max(char* buf) const { return _type_info->set_to_max(buf); }
@@ -334,6 +336,7 @@ protected:
         other->_sub_fields.clear();
         other->_precision = this->_precision;
         other->_scale = this->_scale;
+        other->_unique_id = this->_unique_id;
         for (const auto& f : _sub_fields) {
             Field* item = f->clone();
             other->add_sub_field(std::unique_ptr<Field>(item));
@@ -350,6 +353,7 @@ private:
     std::vector<std::unique_ptr<Field>> _sub_fields;
     int32_t _precision;
     int32_t _scale;
+    int32_t _unique_id;
 };
 
 template <typename LhsCellType, typename RhsCellType>
diff --git a/be/src/olap/in_list_predicate.h b/be/src/olap/in_list_predicate.h
index 3d22ecb83f..35b4c4162e 100644
--- a/be/src/olap/in_list_predicate.h
+++ b/be/src/olap/in_list_predicate.h
@@ -110,9 +110,8 @@ public:
         }
     }
 
-    Status evaluate(const Schema& schema, const std::vector<BitmapIndexIterator*>& iterators,
-                    uint32_t num_rows, roaring::Roaring* result) const override {
-        BitmapIndexIterator* iterator = iterators[_column_id];
+    Status evaluate(BitmapIndexIterator* iterator, uint32_t num_rows,
+                    roaring::Roaring* result) const override {
         if (iterator == nullptr) {
             return Status::OK();
         }
diff --git a/be/src/olap/like_column_predicate.h b/be/src/olap/like_column_predicate.h
index b72846ff5f..fb00f7b146 100644
--- a/be/src/olap/like_column_predicate.h
+++ b/be/src/olap/like_column_predicate.h
@@ -40,8 +40,8 @@ public:
     }
     void evaluate_and(ColumnBlock* block, uint16_t* sel, uint16_t size,
                       bool* flags) const override {}
-    Status evaluate(const Schema& schema, const std::vector<BitmapIndexIterator*>& iterators,
-                    uint32_t num_rows, roaring::Roaring* roaring) const override {
+    Status evaluate(BitmapIndexIterator* iterator, uint32_t num_rows,
+                    roaring::Roaring* roaring) const override {
         return Status::OK();
     }
 
diff --git a/be/src/olap/null_predicate.cpp b/be/src/olap/null_predicate.cpp
index d755fd34eb..ade037374e 100644
--- a/be/src/olap/null_predicate.cpp
+++ b/be/src/olap/null_predicate.cpp
@@ -72,12 +72,11 @@ void NullPredicate::evaluate_and(ColumnBlock* block, uint16_t* sel, uint16_t siz
     }
 }
 
-Status NullPredicate::evaluate(const Schema& schema,
-                               const std::vector<BitmapIndexIterator*>& iterators,
-                               uint32_t num_rows, roaring::Roaring* roaring) const {
-    if (iterators[_column_id] != nullptr) {
+Status NullPredicate::evaluate(BitmapIndexIterator* iterator, uint32_t num_rows,
+                               roaring::Roaring* roaring) const {
+    if (iterator != nullptr) {
         roaring::Roaring null_bitmap;
-        RETURN_IF_ERROR(iterators[_column_id]->read_null_bitmap(&null_bitmap));
+        RETURN_IF_ERROR(iterator->read_null_bitmap(&null_bitmap));
         if (_is_null) {
             *roaring &= null_bitmap;
         } else {
diff --git a/be/src/olap/null_predicate.h b/be/src/olap/null_predicate.h
index caf05af16d..a42e576caf 100644
--- a/be/src/olap/null_predicate.h
+++ b/be/src/olap/null_predicate.h
@@ -37,8 +37,8 @@ public:
 
     void evaluate_and(ColumnBlock* block, uint16_t* sel, uint16_t size, bool* flags) const override;
 
-    Status evaluate(const Schema& schema, const vector<BitmapIndexIterator*>& iterators,
-                    uint32_t num_rows, roaring::Roaring* roaring) const override;
+    Status evaluate(BitmapIndexIterator* iterator, uint32_t num_rows,
+                    roaring::Roaring* roaring) const override;
 
     uint16_t evaluate(const vectorized::IColumn& column, uint16_t* sel,
                       uint16_t size) const override;
diff --git a/be/src/olap/olap_cond.cpp b/be/src/olap/olap_cond.cpp
index 627f846092..02e818331b 100644
--- a/be/src/olap/olap_cond.cpp
+++ b/be/src/olap/olap_cond.cpp
@@ -550,10 +550,10 @@ Status Conditions::append_condition(const TCondition& tcond) {
     }
 
     CondColumn* cond_col = nullptr;
-    auto it = _columns.find(index);
+    auto it = _columns.find(column.unique_id());
     if (it == _columns.end()) {
         cond_col = new CondColumn(*_schema, index);
-        _columns[index] = cond_col;
+        _columns[column.unique_id()] = cond_col;
     } else {
         cond_col = it->second;
     }
@@ -561,8 +561,8 @@ Status Conditions::append_condition(const TCondition& tcond) {
     return cond_col->add_cond(tcond, column);
 }
 
-CondColumn* Conditions::get_column(int32_t cid) const {
-    auto iter = _columns.find(cid);
+CondColumn* Conditions::get_column(int32_t uid) const {
+    auto iter = _columns.find(uid);
     if (iter != _columns.end()) {
         return iter->second;
     }
diff --git a/be/src/olap/reader.cpp b/be/src/olap/reader.cpp
index 49c1e11d60..50f4e7178a 100644
--- a/be/src/olap/reader.cpp
+++ b/be/src/olap/reader.cpp
@@ -549,13 +549,14 @@ void TabletReader::_init_load_bf_columns(const ReaderParams& read_params, Condit
                                          std::set<uint32_t>* load_bf_columns) {
     // add all columns with condition to load_bf_columns
     for (const auto& cond_column : conditions->columns()) {
-        if (!_tablet_schema->column(cond_column.first).is_bf_column()) {
+        int32_t column_id = _tablet_schema->field_index(cond_column.first);
+        if (!_tablet_schema->column(column_id).is_bf_column()) {
             continue;
         }
         for (const auto& cond : cond_column.second->conds()) {
             if (cond->op == OP_EQ ||
                 (cond->op == OP_IN && cond->operand_set.size() < MAX_OP_IN_FIELD_NUM)) {
-                load_bf_columns->insert(cond_column.first);
+                load_bf_columns->insert(column_id);
             }
         }
     }
diff --git a/be/src/olap/rowset/beta_rowset_reader.cpp b/be/src/olap/rowset/beta_rowset_reader.cpp
index accd4b9859..b96255d16f 100644
--- a/be/src/olap/rowset/beta_rowset_reader.cpp
+++ b/be/src/olap/rowset/beta_rowset_reader.cpp
@@ -72,7 +72,6 @@ Status BetaRowsetReader::init(RowsetReaderContext* read_context) {
                                               read_context->predicates->begin(),
                                               read_context->predicates->end());
     }
-
     // Take a delete-bitmap for each segment, the bitmap contains all deletes
     // until the max read version, which is read_context->version.second
     if (read_context->delete_bitmap != nullptr) {
diff --git a/be/src/olap/rowset/segment_v2/segment.cpp b/be/src/olap/rowset/segment_v2/segment.cpp
index 735ae3c4e9..d042e4f7ef 100644
--- a/be/src/olap/rowset/segment_v2/segment.cpp
+++ b/be/src/olap/rowset/segment_v2/segment.cpp
@@ -80,7 +80,7 @@ Status Segment::new_iterator(const Schema& schema, const StorageReadOptions& rea
     // trying to prune the current segment by segment-level zone map
     if (read_options.conditions != nullptr) {
         for (auto& column_condition : read_options.conditions->columns()) {
-            int32_t column_unique_id = _tablet_schema->column(column_condition.first).unique_id();
+            int32_t column_unique_id = column_condition.first;
             if (_column_readers.count(column_unique_id) < 1 ||
                 !_column_readers.at(column_unique_id)->has_zone_map()) {
                 continue;
diff --git a/be/src/olap/rowset/segment_v2/segment_iterator.cpp b/be/src/olap/rowset/segment_v2/segment_iterator.cpp
index 1b39703863..0566e29b15 100644
--- a/be/src/olap/rowset/segment_v2/segment_iterator.cpp
+++ b/be/src/olap/rowset/segment_v2/segment_iterator.cpp
@@ -107,8 +107,6 @@ private:
 SegmentIterator::SegmentIterator(std::shared_ptr<Segment> segment, const Schema& schema)
         : _segment(std::move(segment)),
           _schema(schema),
-          _column_iterators(_schema.num_columns(), nullptr),
-          _bitmap_index_iterators(_schema.num_columns(), nullptr),
           _cur_rowid(0),
           _lazy_materialization_read(false),
           _inited(false),
@@ -116,10 +114,10 @@ SegmentIterator::SegmentIterator(std::shared_ptr<Segment> segment, const Schema&
 
 SegmentIterator::~SegmentIterator() {
     for (auto iter : _column_iterators) {
-        delete iter;
+        delete iter.second;
     }
     for (auto iter : _bitmap_index_iterators) {
-        delete iter;
+        delete iter.second;
     }
 }
 
@@ -222,13 +220,14 @@ Status SegmentIterator::_prepare_seek(const StorageReadOptions::KeyRange& key_ra
 
     // create used column iterator
     for (auto cid : _seek_schema->column_ids()) {
-        if (_column_iterators[cid] == nullptr) {
+        int32_t unique_id = _opts.tablet_schema->column(cid).unique_id();
+        if (_column_iterators.count(unique_id) < 1) {
             RETURN_IF_ERROR(_segment->new_column_iterator(_opts.tablet_schema->column(cid),
-                                                          &_column_iterators[cid]));
+                                                          &_column_iterators[unique_id]));
             ColumnIteratorOptions iter_opts;
             iter_opts.stats = _opts.stats;
             iter_opts.file_reader = _file_reader.get();
-            RETURN_IF_ERROR(_column_iterators[cid]->init(iter_opts));
+            RETURN_IF_ERROR(_column_iterators[unique_id]->init(iter_opts));
         }
     }
 
@@ -256,21 +255,21 @@ Status SegmentIterator::_get_row_ranges_by_column_conditions() {
 }
 
 Status SegmentIterator::_get_row_ranges_from_conditions(RowRanges* condition_row_ranges) {
-    std::set<int32_t> cids;
+    std::set<int32_t> uids;
     if (_opts.conditions != nullptr) {
         for (auto& column_condition : _opts.conditions->columns()) {
-            cids.insert(column_condition.first);
+            uids.insert(column_condition.first);
         }
     }
 
     // first filter data by bloom filter index
     // bloom filter index only use CondColumn
     RowRanges bf_row_ranges = RowRanges::create_single(num_rows());
-    for (auto& cid : cids) {
+    for (auto& uid : uids) {
         // get row ranges by bf index of this column,
         RowRanges column_bf_row_ranges = RowRanges::create_single(num_rows());
-        CondColumn* column_cond = _opts.conditions->get_column(cid);
-        RETURN_IF_ERROR(_column_iterators[cid]->get_row_ranges_by_bloom_filter(
+        CondColumn* column_cond = _opts.conditions->get_column(uid);
+        RETURN_IF_ERROR(_column_iterators[uid]->get_row_ranges_by_bloom_filter(
                 column_cond, &column_bf_row_ranges));
         RowRanges::ranges_intersection(bf_row_ranges, column_bf_row_ranges, &bf_row_ranges);
     }
@@ -280,14 +279,14 @@ Status SegmentIterator::_get_row_ranges_from_conditions(RowRanges* condition_row
 
     RowRanges zone_map_row_ranges = RowRanges::create_single(num_rows());
     // second filter data by zone map
-    for (auto& cid : cids) {
+    for (auto& uid : uids) {
         // get row ranges by zone map of this column,
         RowRanges column_row_ranges = RowRanges::create_single(num_rows());
         CondColumn* column_cond = nullptr;
         if (_opts.conditions != nullptr) {
-            column_cond = _opts.conditions->get_column(cid);
+            column_cond = _opts.conditions->get_column(uid);
         }
-        RETURN_IF_ERROR(_column_iterators[cid]->get_row_ranges_by_zone_map(column_cond, nullptr,
+        RETURN_IF_ERROR(_column_iterators[uid]->get_row_ranges_by_zone_map(column_cond, nullptr,
                                                                            &column_row_ranges));
         // intersect different columns's row ranges to get final row ranges by zone map
         RowRanges::ranges_intersection(zone_map_row_ranges, column_row_ranges,
@@ -298,13 +297,13 @@ Status SegmentIterator::_get_row_ranges_from_conditions(RowRanges* condition_row
     for (auto& delete_condition : _opts.delete_conditions) {
         RowRanges delete_condition_row_ranges = RowRanges::create_single(0);
         for (auto& delete_column_condition : delete_condition->columns()) {
-            const int32_t cid = delete_column_condition.first;
+            const int32_t uid = delete_column_condition.first;
             CondColumn* column_cond = nullptr;
             if (_opts.conditions != nullptr) {
-                column_cond = _opts.conditions->get_column(cid);
+                column_cond = _opts.conditions->get_column(uid);
             }
             RowRanges single_delete_condition_row_ranges = RowRanges::create_single(num_rows());
-            RETURN_IF_ERROR(_column_iterators[cid]->get_row_ranges_by_zone_map(
+            RETURN_IF_ERROR(_column_iterators[uid]->get_row_ranges_by_zone_map(
                     column_cond, delete_column_condition.second,
                     &single_delete_condition_row_ranges));
             RowRanges::ranges_union(delete_condition_row_ranges, single_delete_condition_row_ranges,
@@ -329,11 +328,13 @@ Status SegmentIterator::_apply_bitmap_index() {
     std::vector<ColumnPredicate*> remaining_predicates;
 
     for (auto pred : _col_predicates) {
-        if (_bitmap_index_iterators[pred->column_id()] == nullptr) {
+        int32_t unique_id = _schema.unique_id(pred->column_id());
+        if (_bitmap_index_iterators.count(unique_id) < 1 ||
+            _bitmap_index_iterators[unique_id] == nullptr) {
             // no bitmap index for this column
             remaining_predicates.push_back(pred);
         } else {
-            RETURN_IF_ERROR(pred->evaluate(_schema, _bitmap_index_iterators, _segment->num_rows(),
+            RETURN_IF_ERROR(pred->evaluate(_bitmap_index_iterators[unique_id], _segment->num_rows(),
                                            &_row_bitmap));
             if (_row_bitmap.isEmpty()) {
                 break; // all rows have been pruned, no need to process further predicates
@@ -350,14 +351,15 @@ Status SegmentIterator::_init_return_column_iterators() {
         return Status::OK();
     }
     for (auto cid : _schema.column_ids()) {
-        if (_column_iterators[cid] == nullptr) {
+        int32_t unique_id = _opts.tablet_schema->column(cid).unique_id();
+        if (_column_iterators.count(unique_id) < 1) {
             RETURN_IF_ERROR(_segment->new_column_iterator(_opts.tablet_schema->column(cid),
-                                                          &_column_iterators[cid]));
+                                                          &_column_iterators[unique_id]));
             ColumnIteratorOptions iter_opts;
             iter_opts.stats = _opts.stats;
             iter_opts.use_page_cache = _opts.use_page_cache;
             iter_opts.file_reader = _file_reader.get();
-            RETURN_IF_ERROR(_column_iterators[cid]->init(iter_opts));
+            RETURN_IF_ERROR(_column_iterators[unique_id]->init(iter_opts));
         }
     }
     return Status::OK();
@@ -368,9 +370,10 @@ Status SegmentIterator::_init_bitmap_index_iterators() {
         return Status::OK();
     }
     for (auto cid : _schema.column_ids()) {
-        if (_bitmap_index_iterators[cid] == nullptr) {
-            RETURN_IF_ERROR(_segment->new_bitmap_index_iterator(_opts.tablet_schema->column(cid),
-                                                                &_bitmap_index_iterators[cid]));
+        int32_t unique_id = _opts.tablet_schema->column(cid).unique_id();
+        if (_bitmap_index_iterators.count(unique_id) < 1) {
+            RETURN_IF_ERROR(_segment->new_bitmap_index_iterator(
+                    _opts.tablet_schema->column(cid), &_bitmap_index_iterators[unique_id]));
         }
     }
     return Status::OK();
@@ -541,7 +544,7 @@ void SegmentIterator::_init_lazy_materialization() {
 
 Status SegmentIterator::_seek_columns(const std::vector<ColumnId>& column_ids, rowid_t pos) {
     for (auto cid : column_ids) {
-        RETURN_IF_ERROR(_column_iterators[cid]->seek_to_ordinal(pos));
+        RETURN_IF_ERROR(_column_iterators[_schema.unique_id(cid)]->seek_to_ordinal(pos));
     }
     return Status::OK();
 }
@@ -552,7 +555,7 @@ Status SegmentIterator::_read_columns(const std::vector<ColumnId>& column_ids, R
         auto column_block = block->column_block(cid);
         ColumnBlockView dst(&column_block, row_offset);
         size_t rows_read = nrows;
-        RETURN_IF_ERROR(_column_iterators[cid]->next_batch(&rows_read, &dst));
+        RETURN_IF_ERROR(_column_iterators[_schema.unique_id(cid)]->next_batch(&rows_read, &dst));
         DCHECK_EQ(nrows, rows_read);
     }
     return Status::OK();
@@ -809,7 +812,7 @@ bool SegmentIterator::_can_evaluated_by_vectorized(ColumnPredicate* predicate) {
         if (field_type == OLAP_FIELD_TYPE_VARCHAR || field_type == OLAP_FIELD_TYPE_CHAR ||
             field_type == OLAP_FIELD_TYPE_STRING) {
             return config::enable_low_cardinality_optimize &&
-                   _column_iterators[cid]->is_all_dict_encoding();
+                   _column_iterators[_schema.unique_id(cid)]->is_all_dict_encoding();
         } else if (field_type == OLAP_FIELD_TYPE_DECIMAL) {
             return false;
         }
@@ -836,7 +839,7 @@ Status SegmentIterator::_read_columns(const std::vector<ColumnId>& column_ids,
     for (auto cid : column_ids) {
         auto& column = column_block[cid];
         size_t rows_read = nrows;
-        RETURN_IF_ERROR(_column_iterators[cid]->next_batch(&rows_read, column));
+        RETURN_IF_ERROR(_column_iterators[_schema.unique_id(cid)]->next_batch(&rows_read, column));
         DCHECK_EQ(nrows, rows_read);
     }
     return Status::OK();
@@ -1008,8 +1011,8 @@ Status SegmentIterator::_read_columns_by_rowids(std::vector<ColumnId>& read_colu
         rowids[i] = rowid_vector[sel_rowid_idx[i]];
     }
     for (auto cid : read_column_ids) {
-        RETURN_IF_ERROR(_column_iterators[cid]->read_by_rowids(rowids.data(), select_size,
-                                                               _current_return_columns[cid]));
+        RETURN_IF_ERROR(_column_iterators[_schema.unique_id(cid)]->read_by_rowids(
+                rowids.data(), select_size, _current_return_columns[cid]));
     }
 
     return Status::OK();
diff --git a/be/src/olap/rowset/segment_v2/segment_iterator.h b/be/src/olap/rowset/segment_v2/segment_iterator.h
index 7170d60108..76ad8f3ff5 100644
--- a/be/src/olap/rowset/segment_v2/segment_iterator.h
+++ b/be/src/olap/rowset/segment_v2/segment_iterator.h
@@ -164,10 +164,10 @@ private:
     std::shared_ptr<Segment> _segment;
     const Schema& _schema;
     // _column_iterators.size() == _schema.num_columns()
-    // _column_iterators[cid] == nullptr if cid is not in _schema
-    std::vector<ColumnIterator*> _column_iterators;
-    // FIXME prefer vector<unique_ptr<BitmapIndexIterator>>
-    std::vector<BitmapIndexIterator*> _bitmap_index_iterators;
+    // map<unique_id, ColumnIterator*> _column_iterators/_bitmap_index_iterators;
+    // use _schema to look up a column's unique_id by its cid
+    std::map<int32_t, ColumnIterator*> _column_iterators;
+    std::map<int32_t, BitmapIndexIterator*> _bitmap_index_iterators;
     // after init(), `_row_bitmap` contains all rowid to scan
     roaring::Roaring _row_bitmap;
     // an iterator for `_row_bitmap` that can be used to extract row range to scan
diff --git a/be/src/olap/schema.h b/be/src/olap/schema.h
index a36547e475..a93a12432f 100644
--- a/be/src/olap/schema.h
+++ b/be/src/olap/schema.h
@@ -40,6 +40,7 @@ public:
     Schema(const TabletSchema& tablet_schema) {
         size_t num_columns = tablet_schema.num_columns();
         std::vector<ColumnId> col_ids(num_columns);
+        _unique_ids.resize(num_columns);
         std::vector<TabletColumn> columns;
         columns.reserve(num_columns);
 
@@ -47,6 +48,7 @@ public:
         for (uint32_t cid = 0; cid < num_columns; ++cid) {
             col_ids[cid] = cid;
             const TabletColumn& column = tablet_schema.column(cid);
+            _unique_ids[cid] = column.unique_id();
             if (column.is_key()) {
                 ++num_key_columns;
             }
@@ -62,6 +64,7 @@ public:
     // All the columns of one table may exist in the columns param, but col_ids is only a subset.
     Schema(const std::vector<TabletColumn>& columns, const std::vector<ColumnId>& col_ids) {
         size_t num_key_columns = 0;
+        _unique_ids.resize(columns.size());
         for (size_t i = 0; i < columns.size(); ++i) {
             if (columns[i].is_key()) {
                 ++num_key_columns;
@@ -69,6 +72,7 @@ public:
             if (columns[i].name() == DELETE_SIGN) {
                 _delete_sign_idx = i;
             }
+            _unique_ids[i] = columns[i].unique_id();
         }
         _init(columns, col_ids, num_key_columns);
     }
@@ -76,8 +80,10 @@ public:
     // Only for UT
     Schema(const std::vector<TabletColumn>& columns, size_t num_key_columns) {
         std::vector<ColumnId> col_ids(columns.size());
+        _unique_ids.resize(columns.size());
         for (uint32_t cid = 0; cid < columns.size(); ++cid) {
             col_ids[cid] = cid;
+            _unique_ids[cid] = columns[cid].unique_id();
         }
 
         _init(columns, col_ids, num_key_columns);
@@ -85,11 +91,13 @@ public:
 
     Schema(const std::vector<const Field*>& cols, size_t num_key_columns) {
         std::vector<ColumnId> col_ids(cols.size());
+        _unique_ids.resize(cols.size());
         for (uint32_t cid = 0; cid < cols.size(); ++cid) {
             col_ids[cid] = cid;
             if (cols.at(cid)->name() == DELETE_SIGN) {
                 _delete_sign_idx = cid;
             }
+            _unique_ids[cid] = cols[cid]->unique_id();
         }
 
         _init(cols, col_ids, num_key_columns);
@@ -133,7 +141,9 @@ public:
     size_t num_columns() const { return _cols.size(); }
     size_t num_column_ids() const { return _col_ids.size(); }
     const std::vector<ColumnId>& column_ids() const { return _col_ids; }
+    const std::vector<int32_t>& unique_ids() const { return _unique_ids; }
     ColumnId column_id(size_t index) const { return _col_ids[index]; }
+    int32_t unique_id(size_t index) const { return _unique_ids[index]; }
     int32_t delete_sign_idx() const { return _delete_sign_idx; }
     bool has_sequence_col() const { return _has_sequence_col; }
 
@@ -148,6 +158,7 @@ private:
     // NOTE: The ColumnId here represents the sequential index number (starting from 0) of
     // a column in current row, not the unique id-identifier of each column
     std::vector<ColumnId> _col_ids;
+    std::vector<int32_t> _unique_ids;
     // NOTE: Both _cols[cid] and _col_offsets[cid] can only be accessed when the cid is
     // contained in _col_ids
     std::vector<Field*> _cols;
diff --git a/be/src/olap/tablet_schema.cpp b/be/src/olap/tablet_schema.cpp
index 597c5e60f2..b2040f5849 100644
--- a/be/src/olap/tablet_schema.cpp
+++ b/be/src/olap/tablet_schema.cpp
@@ -497,6 +497,7 @@ void TabletSchema::init_from_pb(const TabletSchemaPB& schema) {
     _num_null_columns = 0;
     _cols.clear();
     _field_name_to_index.clear();
+    _field_id_to_index.clear();
     for (auto& column_pb : schema.column()) {
         TabletColumn column;
         column.init_from_pb(column_pb);
diff --git a/regression-test/data/schema_change/test_uniq_keys_schema_change.out b/regression-test/data/schema_change/test_uniq_keys_schema_change.out
new file mode 100644
index 0000000000..ef963eca86
--- /dev/null
+++ b/regression-test/data/schema_change/test_uniq_keys_schema_change.out
@@ -0,0 +1,22 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+2
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+3
+
+-- !sc --
+4      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+5
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+
diff --git a/regression-test/data/schema_change/test_uniq_mv_schema_change.out b/regression-test/data/schema_change/test_uniq_mv_schema_change.out
new file mode 100644
index 0000000000..2835e5c76e
--- /dev/null
+++ b/regression-test/data/schema_change/test_uniq_mv_schema_change.out
@@ -0,0 +1,25 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+2
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+3
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        32      20      2
+
+-- !sc --
+4      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        32      20      2
+
+-- !sc --
+5
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        32      20      1
+
diff --git a/regression-test/data/schema_change/test_uniq_rollup_schema_change.out b/regression-test/data/schema_change/test_uniq_rollup_schema_change.out
new file mode 100644
index 0000000000..b8d58e78c0
--- /dev/null
+++ b/regression-test/data/schema_change/test_uniq_rollup_schema_change.out
@@ -0,0 +1,24 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+2
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+3
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        32      20      2
+
+-- !sc --
+4      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        32      20      2
+
+-- !sc --
+5
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        32      20      1
\ No newline at end of file
diff --git a/regression-test/data/schema_change/test_uniq_vals_schema_change.out b/regression-test/data/schema_change/test_uniq_vals_schema_change.out
new file mode 100644
index 0000000000..de0526b3e7
--- /dev/null
+++ b/regression-test/data/schema_change/test_uniq_vals_schema_change.out
@@ -0,0 +1,22 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sc --
+2
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+3
+
+-- !sc --
+4      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
+-- !sc --
+5
+
+-- !sc --
+2      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        1       32      20      1
+
diff --git a/regression-test/data/schema_change/test_update_schema_change.out b/regression-test/data/schema_change/test_update_schema_change.out
new file mode 100644
index 0000000000..aeacfd28c6
--- /dev/null
+++ b/regression-test/data/schema_change/test_update_schema_change.out
@@ -0,0 +1,64 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !test_update_schema_change --
+0
+
+-- !test_update_schema_change_2 --
+0
+
+-- !test_update_schema_change_3 --
+1
+
+-- !test_update_schema_change_4 --
+1
+
+-- !test_update_schema_change_5 --
+1      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20
+2      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21
+
+-- !test_update_schema_change_6 --
+0
+
+-- !test_update_schema_change_7 --
+2      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21      1
+1      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20      1
+
+-- !test_update_schema_change_8 --
+1
+
+-- !test_update_schema_change_9 --
+1      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20      2
+2      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21      1
+
+-- !test_update_schema_change_10 --
+1
+
+-- !test_update_schema_change_11 --
+1
+
+-- !test_update_schema_change_12 --
+2
+
+-- !test_update_schema_change_13 --
+5      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21      20
+3      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20      20
+2      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21      1
+1      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20      20
+
+-- !test_update_schema_change_14 --
+0
+
+-- !test_update_schema_change_15 --
+5      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21
+3      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20
+2      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21
+1      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20
+
+-- !test_update_schema_change_16 --
+1
+
+-- !test_update_schema_change_17 --
+5      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        20      31      21
+3      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20
+2      2017-10-01      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2020-01-02T00:00        1       31      21
+1      2017-10-01      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2020-01-01T00:00        1       30      20
+
diff --git a/regression-test/suites/schema_change/test_agg_keys_schema_change.groovy b/regression-test/suites/schema_change/test_agg_keys_schema_change.groovy
index 6e944e654c..72e6dd6292 100644
--- a/regression-test/suites/schema_change/test_agg_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change/test_agg_keys_schema_change.groovy
@@ -98,7 +98,7 @@ suite ("test_agg_keys_schema_change") {
 
         result = "null"
         while (!result.contains("FINISHED")){
-            result = sql "SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = sql "SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
             result = result.toString()
             logger.info("result: ${result}")
             if(result.contains("CANCELLED")){
@@ -128,7 +128,7 @@ suite ("test_agg_keys_schema_change") {
             """
         result = "null"
         while (!result.contains("FINISHED")){
-            result = sql "SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = sql "SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
             result = result.toString()
             logger.info("result: ${result}")
             if(result.contains("CANCELLED")){
diff --git a/regression-test/suites/schema_change/test_agg_mv_schema_change.groovy b/regression-test/suites/schema_change/test_agg_mv_schema_change.groovy
index 169f07240d..1bd0457207 100644
--- a/regression-test/suites/schema_change/test_agg_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change/test_agg_mv_schema_change.groovy
@@ -112,7 +112,7 @@ suite ("test_agg_mv_schema_change") {
 
         result = "null"
         while (!result.contains("FINISHED")){
-            result = sql "SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = sql "SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
             result = result.toString()
             logger.info("result: ${result}")
             if(result.contains("CANCELLED")) {
diff --git a/regression-test/suites/schema_change/test_agg_rollup_schema_change.groovy b/regression-test/suites/schema_change/test_agg_rollup_schema_change.groovy
index 850a4d02da..6dfe0743e8 100644
--- a/regression-test/suites/schema_change/test_agg_rollup_schema_change.groovy
+++ b/regression-test/suites/schema_change/test_agg_rollup_schema_change.groovy
@@ -113,7 +113,7 @@ suite ("test_agg_rollup_schema_change") {
 
         result = "null"
         while (!result.contains("FINISHED")){
-            result = sql "SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = sql "SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
             result = result.toString()
             logger.info("result: ${result}")
             if(result.contains("CANCELLED")) {
diff --git a/regression-test/suites/schema_change/test_alter_table_column.groovy b/regression-test/suites/schema_change/test_alter_table_column.groovy
index 84b4dc2c28..333cbf5528 100644
--- a/regression-test/suites/schema_change/test_alter_table_column.groovy
+++ b/regression-test/suites/schema_change/test_alter_table_column.groovy
@@ -19,7 +19,7 @@ suite("test_alter_table_column", "schema_change") {
     def tbName1 = "alter_table_column_dup"
 
     def getJobState = { tableName ->
-        def jobStateResult = sql """  SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
+        def jobStateResult = sql """  SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
         return jobStateResult[0][9]
     }
     sql "DROP TABLE IF EXISTS ${tbName1}"
diff --git a/regression-test/suites/schema_change/test_alter_table_column_with_delete.groovy b/regression-test/suites/schema_change/test_alter_table_column_with_delete.groovy
index b26d82bfdf..8f034674af 100644
--- a/regression-test/suites/schema_change/test_alter_table_column_with_delete.groovy
+++ b/regression-test/suites/schema_change/test_alter_table_column_with_delete.groovy
@@ -18,7 +18,7 @@
 suite("test_alter_table_column_with_delete", "schema_change") {
     def tbName1 = "alter_table_column_dup_with_delete"
     def getJobState = { tableName ->
-        def jobStateResult = sql """  SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
+        def jobStateResult = sql """  SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
         return jobStateResult[0][9]
     }
     sql "DROP TABLE IF EXISTS ${tbName1}"
diff --git a/regression-test/suites/schema_change/test_dup_keys_schema_change.groovy b/regression-test/suites/schema_change/test_dup_keys_schema_change.groovy
index e81be79de7..d8b7dd82d8 100644
--- a/regression-test/suites/schema_change/test_dup_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change/test_dup_keys_schema_change.groovy
@@ -124,7 +124,7 @@ suite ("test_dup_keys_schema_change") {
             """
         result = "null"
         while (!result.contains("FINISHED")){
-            result = sql "SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = sql "SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
             result = result.toString()
             logger.info("result: ${result}")
             if(result.contains("CANCELLED")) {
diff --git a/regression-test/suites/schema_change/test_dup_mv_schema_change.groovy b/regression-test/suites/schema_change/test_dup_mv_schema_change.groovy
index 9318aaddd3..33401b0963 100644
--- a/regression-test/suites/schema_change/test_dup_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change/test_dup_mv_schema_change.groovy
@@ -138,7 +138,7 @@ suite ("test_dup_mv_schema_change") {
             """
         result = "null"
         while (!result.contains("FINISHED")){
-            result = sql "SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+            result = sql "SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
             result = result.toString()
             logger.info("result: ${result}")
             if(result.contains("CANCELLED")) {
diff --git a/regression-test/suites/schema_change/test_schema_change.groovy b/regression-test/suites/schema_change/test_schema_change.groovy
index 843e33f9b6..e68013da06 100644
--- a/regression-test/suites/schema_change/test_schema_change.groovy
+++ b/regression-test/suites/schema_change/test_schema_change.groovy
@@ -20,7 +20,7 @@ suite("test_schema_change", "schema_change") {
      def tbName = "alter_table_column_type"
 
      def getJobState = { tableName ->
-          def jobStateResult = sql """  SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
+          def jobStateResult = sql """  SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
           return jobStateResult[0][9]
      }
 
diff --git a/regression-test/suites/schema_change/test_uniq_keys_schema_change.groovy b/regression-test/suites/schema_change/test_uniq_keys_schema_change.groovy
new file mode 100644
index 0000000000..a179542d45
--- /dev/null
+++ b/regression-test/suites/schema_change/test_uniq_keys_schema_change.groovy
@@ -0,0 +1,203 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_uniq_keys_schema_change") {
+    def tableName = "schema_change_uniq_keys_regression_test"
+
+    try {
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://")
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+    sql """ DROP TABLE IF EXISTS ${tableName} """
+
+    sql """
+            CREATE TABLE schema_change_uniq_keys_regression_test (
+                `user_id` LARGEINT NOT NULL COMMENT "user id",
+                `date` DATE NOT NULL COMMENT "data load date and time",
+                `city` VARCHAR(20) COMMENT "user's city",
+                `age` SMALLINT COMMENT "user age",
+                `sex` TINYINT COMMENT "user gender",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last update time",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                `cost` BIGINT DEFAULT "0" COMMENT "user's total spending",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "user's maximum dwell time",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "user's minimum dwell time")
+            UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            BUCKETS 1
+            PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true");
+        """
+
+    sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20)
+        """
+
+    sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 19)
+        """
+
+    sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21)
+        """
+
+    sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20)
+        """
+    qt_sc """
+                       select count(*) from schema_change_uniq_keys_regression_test
+                    """
+
+    // add column
+    sql """
+        ALTER table ${tableName} ADD COLUMN new_column INT default "1" 
+        """
+
+    sql """ SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    sql """ INSERT INTO ${tableName} (`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
+                                      `last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`)
+            VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20)
+        """
+
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """
+
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """
+
+    qt_sc """ select count(*) from ${tableName} """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+
+    qt_sc """ select * from ${tableName} where user_id = 4 """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+
+    // compaction
+    String[][] tablets = sql """ show tablets from ${tableName}; """
+    for (String[] tablet in tablets) {
+            String tablet_id = tablet[0]
+            backend_id = tablet[2]
+            logger.info("run compaction:" + tablet_id)
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://")
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=cumulative")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+            //assertEquals(code, 0)
+    }
+
+    // wait for all compactions done
+    for (String[] tablet in tablets) {
+            boolean running = true
+            do {
+                Thread.sleep(100)
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://")
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+    }
+    qt_sc """ select count(*) from ${tableName} """
+
+    qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+}
\ No newline at end of file
diff --git a/regression-test/suites/schema_change/test_uniq_mv_schema_change.groovy b/regression-test/suites/schema_change/test_uniq_mv_schema_change.groovy
new file mode 100644
index 0000000000..8695b4db98
--- /dev/null
+++ b/regression-test/suites/schema_change/test_uniq_mv_schema_change.groovy
@@ -0,0 +1,226 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_uniq_mv_schema_change") {
+    def tableName = "schema_change_uniq_mv_regression_test"
+
+    try {
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://")
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+    sql """ DROP TABLE IF EXISTS ${tableName} """
+
+    sql """
+            CREATE TABLE ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "用户id",
+                `date` DATE NOT NULL COMMENT "数据灌入日期时间",
+                `city` VARCHAR(20) COMMENT "用户所在城市",
+                `age` SMALLINT COMMENT "用户年龄",
+                `sex` TINYINT COMMENT "用户性别",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `cost` BIGINT DEFAULT "0" COMMENT "用户总消费",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间")
+            UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            BUCKETS 1
+            PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true");
+        """
+
+    //add materialized view
+    def result = "null"
+    def mvName = "mv1"
+    sql "create materialized view ${mvName} as select user_id, date, city, 
age, sex from ${tableName} group by user_id, date, city, age, sex;"
+    while (!result.contains("FINISHED")){
+        result = sql "SHOW ALTER TABLE MATERIALIZED VIEW WHERE 
TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+        result = result.toString()
+        logger.info("result: ${result}")
+        if(result.contains("CANCELLED")){
+            return
+        }
+        Thread.sleep(100)
+    }
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', 
'2020-01-01', 1, 30, 20)
+        """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2020-01-02', 1, 31, 19)
+        """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2020-01-02', 1, 31, 21)
+        """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20)
+        """
+    qt_sc """
+                       select count(*) from ${tableName}
+                    """
+
+    // add column
+    sql """
+        ALTER table ${tableName} ADD COLUMN new_column INT default "1" 
+        """
+
+    sql """ SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    sql """ INSERT INTO ${tableName} 
(`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
+                                      
`last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`)
+            VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20)
+        """
+
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """
+
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """
+
+    qt_sc """ select count(*) from ${tableName} """
+
+
+    // drop column
+    sql """
+          ALTER TABLE ${tableName} DROP COLUMN cost
+          """
+
+    qt_sc """ select * from ${tableName} where user_id = 3 """
+
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 32, 20, 2)
+        """
+
+    qt_sc """ select * from ${tableName} where user_id = 4 """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+
+    // compaction
+    String[][] tablets = sql """ show tablets from ${tableName}; """
+    for (String[] tablet in tablets) {
+            String tablet_id = tablet[0]
+            backend_id = tablet[2]
+            logger.info("run compaction:" + tablet_id)
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://";)
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=cumulative")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+            //assertEquals(code, 0)
+    }
+
+    // wait for all compactions done
+    for (String[] tablet in tablets) {
+            boolean running = true
+            do {
+                Thread.sleep(100)
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://";)
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+    }
+    qt_sc """ select count(*) from ${tableName} """
+
+    qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+}
\ No newline at end of file
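Both new suites gate their assertions on an asynchronous ALTER job finishing, always with the same loop: re-run the relevant SHOW ALTER statement, stop when the newest job reads FINISHED, bail out if it reads CANCELLED. A condensed sketch of that shared loop, assuming the regression framework's `sql` and `logger` helpers are in scope:

    // Generic wait loop for an async ALTER job; the caller passes the SHOW
    // statement (SHOW ALTER TABLE MATERIALIZED VIEW / ROLLUP / COLUMN ...).
    def waitForAlterJob = { String showStmt ->
        String state = "null"
        while (!state.contains("FINISHED")) {
            state = sql(showStmt).toString()
            logger.info("alter job state: ${state}")
            if (state.contains("CANCELLED")) {
                return false    // job was cancelled; caller should abort the test
            }
            Thread.sleep(100)
        }
        return true
    }

Note the loops in the patch poll every 100 ms with no upper bound, so a stuck job is only caught by the harness-level timeout.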
diff --git a/regression-test/suites/schema_change/test_uniq_rollup_schema_change.groovy b/regression-test/suites/schema_change/test_uniq_rollup_schema_change.groovy
new file mode 100644
index 0000000000..d0b2cf64e1
--- /dev/null
+++ b/regression-test/suites/schema_change/test_uniq_rollup_schema_change.groovy
@@ -0,0 +1,237 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_uniq_rollup_schema_change") {
+    def tableName = "schema_change_uniq_rollup_regression_test"
+
+    try {
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        for (String[] backend in backends) {
+            backendId_to_backendIP.put(backend[0], backend[2])
+            backendId_to_backendHttpPort.put(backend[0], backend[5])
+        }
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://";)
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+    sql """ DROP TABLE IF EXISTS ${tableName} """
+
+    sql """
+            CREATE TABLE ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "用户id",
+                `date` DATE NOT NULL COMMENT "数据灌入日期时间",
+                `city` VARCHAR(20) COMMENT "用户所在城市",
+                `age` SMALLINT COMMENT "用户年龄",
+                `sex` TINYINT COMMENT "用户性别",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `cost` BIGINT DEFAULT "0" COMMENT "用户总消费",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间")
+            UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            BUCKETS 1
+            PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+        """
+
+    //add rollup
+    def result = "null"
+    def rollupName = "rollup_cost"
+    sql "ALTER TABLE ${tableName} ADD ROLLUP 
${rollupName}(`user_id`,`date`,`city`,`age`,`sex`, cost);"
+    while (!result.contains("FINISHED")){
+        result = sql "SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' 
ORDER BY CreateTime DESC LIMIT 1;"
+        result = result.toString()
+        logger.info("result: ${result}")
+        if(result.contains("CANCELLED")){
+            return
+        }
+        Thread.sleep(100)
+    }
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', 
'2020-01-01', 1, 30, 20)
+        """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2020-01-02', 1, 31, 19)
+        """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2020-01-02', 1, 31, 21)
+        """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20)
+        """
+    qt_sc """
+                       select count(*) from ${tableName}
+                    """
+
+    // add column
+    sql """
+        ALTER table ${tableName} ADD COLUMN new_column INT default "1" 
+        """
+
+    sql """ SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    sql """ INSERT INTO ${tableName} 
(`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
+                                      
`last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`)
+            VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20)
+        """
+
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """
+
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """
+
+
+    qt_sc """ select count(*) from ${tableName} """
+
+    // drop column
+    sql """
+          ALTER TABLE ${tableName} DROP COLUMN cost
+          """
+
+    result = "null"
+    while (!result.contains("FINISHED")){
+        result = sql "SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' 
ORDER BY CreateTime DESC LIMIT 1;"
+        result = result.toString()
+        logger.info("result: ${result}")
+        if(result.contains("CANCELLED")) {
+            log.info("rollup job is cancelled, result: ${result}".toString())
+            return
+        }
+        Thread.sleep(100)
+    }
+
+    qt_sc """ select * from ${tableName} where user_id = 3 """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 32, 20, 2)
+        """
+
+    qt_sc """ select * from ${tableName} where user_id = 4 """
+
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1,'2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 32, 20, 2)
+        """
+
+    // compaction
+    String[][] tablets = sql """ show tablets from ${tableName}; """
+    for (String[] tablet in tablets) {
+            String tablet_id = tablet[0]
+            backend_id = tablet[2]
+            logger.info("run compaction:" + tablet_id)
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://";)
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=cumulative")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+            //assertEquals(code, 0)
+    }
+
+    // wait for all compactions done
+    for (String[] tablet in tablets) {
+            boolean running = true
+            do {
+                Thread.sleep(100)
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://";)
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+    }
+    qt_sc """ select count(*) from ${tableName} """
+
+    qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    } finally {
+        //try_sql("DROP TABLE IF EXISTS ${tableName}")
+    }
+}
\ No newline at end of file
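Every suite's preamble queries one backend's /api/show_config endpoint to learn whether disable_auto_compaction is set; the response is a JSON list of rows, read here with index 0 as the config name and index 2 as its value. A trimmed sketch of that lookup, with a placeholder BE address:

    import groovy.json.JsonSlurper

    def be = "127.0.0.1:8040"   // assumed BE webserver address, not from the patch
    def proc = ("curl -X GET http://" + be + "/api/show_config").execute()
    proc.waitFor()

    // Scan the [name, type, value, ...] rows for disable_auto_compaction.
    boolean disableAutoCompaction = true
    new JsonSlurper().parseText(proc.text).each { row ->
        if (row[0] == "disable_auto_compaction") {
            disableAutoCompaction = Boolean.parseBoolean(row[2])
        }
    }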
diff --git a/regression-test/suites/schema_change/test_dup_keys_schema_change.groovy b/regression-test/suites/schema_change/test_uniq_vals_schema_change.groovy
similarity index 82%
copy from regression-test/suites/schema_change/test_dup_keys_schema_change.groovy
copy to regression-test/suites/schema_change/test_uniq_vals_schema_change.groovy
index e81be79de7..1cea755bee 100644
--- a/regression-test/suites/schema_change/test_dup_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change/test_uniq_vals_schema_change.groovy
@@ -17,8 +17,8 @@
 
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
-suite ("test_dup_keys_schema_change") {
-    def tableName = "schema_change_dup_keys_regression_test"
+suite ("test_uniq_vals_schema_change") {
+    def tableName = "schema_change_uniq_vals_regression_test"
 
     try {
         String[][] backends = sql """ show backends; """
@@ -55,7 +55,6 @@ suite ("test_dup_keys_schema_change") {
                 disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
             }
         }
-
         sql """ DROP TABLE IF EXISTS ${tableName} """
 
         sql """
@@ -71,7 +70,7 @@ suite ("test_dup_keys_schema_change") {
                     `cost` BIGINT DEFAULT "0" COMMENT "用户总消费",
                     `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间",
                     `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间")
-                DUPLICATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+                UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
                 BUCKETS 1
                 PROPERTIES ( "replication_num" = "1", "light_schema_change" = 
"true" );
             """
@@ -91,7 +90,8 @@ suite ("test_dup_keys_schema_change") {
         sql """ INSERT INTO ${tableName} VALUES
                 (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20)
             """
-        qt_sc """
+
+        qt_sc"""
                         select count(*) from ${tableName}
                         """
 
@@ -100,7 +100,7 @@ suite ("test_dup_keys_schema_change") {
             ALTER table ${tableName} ADD COLUMN new_column INT default "1" 
             """
 
-        sql """ SELECT * FROM ${tableName} WHERE user_id=2 order by 
min_dwell_time """
+        sql """ SELECT * FROM ${tableName} WHERE user_id=2 """
 
         sql """ INSERT INTO ${tableName} 
(`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
                                         
`last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`)
@@ -114,52 +114,40 @@ suite ("test_dup_keys_schema_change") {
         sql """ INSERT INTO ${tableName} VALUES
                 (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
             """
-        qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 order by 
new_column """
+        qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """
 
         qt_sc """ select count(*) from ${tableName} """
 
         // drop column
         sql """
-            ALTER TABLE ${tableName} DROP COLUMN sex
-            """
-        result = "null"
-        while (!result.contains("FINISHED")){
-            result = sql "SHOW ALTER TABLE COLUMN WHERE 
TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
-            result = result.toString()
-            logger.info("result: ${result}")
-            if(result.contains("CANCELLED")) {
-                log.info("rollup job is cancelled, result: 
${result}".toString())
-                return
-            }
-            Thread.sleep(100)
-        }
-        qt_sc """ select * from ${tableName} where user_id = 3 order by 
new_column """
-
+            ALTER TABLE ${tableName} DROP COLUMN last_visit_date
+            """
+        qt_sc = sql """ select * from ${tableName} where user_id = 3 """
 
         sql """ INSERT INTO ${tableName} VALUES
-                (4, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+                (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, 2)
             """
 
         qt_sc """ select * from ${tableName} where user_id = 4 """
 
 
         sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
             """
         sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
             """
         sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
             """
         sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
             """
         sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
             """
         sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
+                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
             """
 
         // compaction
@@ -213,13 +201,12 @@ suite ("test_dup_keys_schema_change") {
                     running = compactionStatus.run_status
                 } while (running)
         }
-
         qt_sc """ select count(*) from ${tableName} """
 
-        qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 order by 
min_dwell_time"""
+        qt_sc """  SELECT * FROM ${tableName} WHERE user_id=2 """
 
     } finally {
         //try_sql("DROP TABLE IF EXISTS ${tableName}")
     }
 
-}
+}
\ No newline at end of file
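The diff above copies the duplicate-key suite and switches the table to the unique-key model, which is why it can drop the `order by` clauses from its qt_sc checks: under UNIQUE KEY, the six identical user_id=5 inserts collapse into a single row instead of six. The DDL difference is only the key clause; schematically (abbreviated schema, illustrative table names):

    // Duplicate-key model: every inserted row is kept.
    sql """ CREATE TABLE dup_example (k1 INT, k2 INT, v INT)
            DUPLICATE KEY(k1, k2) DISTRIBUTED BY HASH(k1) BUCKETS 1
            PROPERTIES ("replication_num" = "1") """

    // Unique-key model: a later row with the same key replaces the earlier one.
    sql """ CREATE TABLE uniq_example (k1 INT, k2 INT, v INT)
            UNIQUE KEY(k1, k2) DISTRIBUTED BY HASH(k1) BUCKETS 1
            PROPERTIES ("replication_num" = "1") """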
diff --git a/regression-test/suites/schema_change/test_update_schema_change.sql b/regression-test/suites/schema_change/test_update_schema_change.sql
new file mode 100644
index 0000000000..bf0241a02b
--- /dev/null
+++ b/regression-test/suites/schema_change/test_update_schema_change.sql
@@ -0,0 +1,50 @@
+DROP TABLE IF EXISTS schema_change_update_regression_test;
+
+CREATE TABLE schema_change_update_regression_test (
+                `user_id` LARGEINT NOT NULL COMMENT "用户id",
+                `date` DATE NOT NULL COMMENT "数据灌入日期时间",
+                `city` VARCHAR(20) COMMENT "用户所在城市",
+                `age` SMALLINT COMMENT "用户年龄",
+                `sex` TINYINT COMMENT "用户性别",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `cost` BIGINT DEFAULT "0" COMMENT "用户总消费",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间")
+            UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            PROPERTIES ( "replication_num" = "1" , "light_schema_change" = "true");
+
+INSERT INTO schema_change_update_regression_test VALUES
+             (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20);
+
+INSERT INTO schema_change_update_regression_test VALUES
+             (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21);
+
+SELECT * FROM schema_change_update_regression_test order by user_id ASC, last_visit_date;
+
+ALTER table schema_change_update_regression_test ADD COLUMN new_column INT default "1";
+
+SELECT * FROM schema_change_update_regression_test order by user_id DESC, last_visit_date;
+
+UPDATE schema_change_update_regression_test set new_column = 2 where user_id = 1;
+
+SELECT * FROM schema_change_update_regression_test order by user_id ASC, last_visit_date;
+
+INSERT INTO schema_change_update_regression_test VALUES
+             (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20, 2);
+
+INSERT INTO schema_change_update_regression_test VALUES
+             (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21, 20);
+
+UPDATE schema_change_update_regression_test set new_column = 20 where new_column = 2;
+
+SELECT * FROM schema_change_update_regression_test order by user_id DESC, last_visit_date;
+
+ALTER TABLE schema_change_update_regression_test DROP COLUMN new_column;
+
+SELECT * FROM schema_change_update_regression_test order by user_id DESC, last_visit_date;
+
+UPDATE schema_change_update_regression_test set cost = 20 where user_id = 5;
+
+SELECT * FROM schema_change_update_regression_test order by user_id DESC, last_visit_date;
\ No newline at end of file

