This is an automated email from the ASF dual-hosted git repository.

zhangchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 3ee89aea35 [Feature](merge-on-write)Support ignore mode for 
merge-on-write unique table (#21773)
3ee89aea35 is described below

commit 3ee89aea35726197cb7e94bb4f2c36bc9d50da84
Author: bobhan1 <bh2444151...@outlook.com>
AuthorDate: Thu Sep 14 18:03:51 2023 +0800

    [Feature](merge-on-write)Support ignore mode for merge-on-write unique 
table (#21773)
---
 be/src/exec/tablet_info.cpp                        |   8 +-
 be/src/exec/tablet_info.h                          |   2 +
 be/src/http/action/stream_load.cpp                 |   7 ++
 be/src/http/http_common.h                          |   1 +
 be/src/olap/rowset/beta_rowset_writer.cpp          |   2 +-
 be/src/olap/rowset_builder.cpp                     |   1 +
 be/src/olap/tablet.cpp                             |  74 +++++++-----
 be/src/olap/tablet_schema.cpp                      |   2 +
 be/src/olap/tablet_schema.h                        |   9 +-
 .../Load/STREAM-LOAD.md                            |   2 +
 .../Manipulation/INSERT.md                         |   4 +-
 .../Load/STREAM-LOAD.md                            |   2 +
 .../Manipulation/INSERT.md                         |   5 +-
 fe/fe-core/src/main/cup/sql_parser.cup             |  21 +++-
 .../java/org/apache/doris/analysis/DeleteStmt.java |   3 +-
 .../apache/doris/analysis/NativeInsertStmt.java    |  28 ++++-
 .../java/org/apache/doris/analysis/UpdateStmt.java |   3 +-
 .../doris/load/loadv2/LoadingTaskPlanner.java      |   3 +-
 .../plans/commands/InsertIntoTableCommand.java     |   2 +-
 .../org/apache/doris/planner/OlapTableSink.java    |  18 ++-
 .../apache/doris/planner/StreamLoadPlanner.java    |   8 +-
 .../java/org/apache/doris/task/LoadTaskInfo.java   |   4 +
 .../java/org/apache/doris/task/StreamLoadTask.java |   8 ++
 .../apache/doris/planner/OlapTableSinkTest.java    |   8 +-
 gensrc/proto/descriptors.proto                     |   1 +
 gensrc/proto/olap_file.proto                       |   1 +
 gensrc/thrift/Descriptors.thrift                   |   1 +
 gensrc/thrift/FrontendService.thrift               |   1 +
 regression-test/data/insert_p0/insert_ignore.out   |  36 ++++++
 .../data/unique_with_mow_p0/ignore_mode.csv        |  10 ++
 .../data/unique_with_mow_p0/ignore_mode2.csv       |   2 +
 .../data/unique_with_mow_p0/test_ignore_mode.out   |  20 ++++
 .../suites/insert_p0/insert_ignore.groovy          | 132 +++++++++++++++++++++
 .../unique_with_mow_p0/test_ignore_mode.groovy     | 112 +++++++++++++++++
 34 files changed, 479 insertions(+), 62 deletions(-)

diff --git a/be/src/exec/tablet_info.cpp b/be/src/exec/tablet_info.cpp
index 71ca504d3a..64c80c41af 100644
--- a/be/src/exec/tablet_info.cpp
+++ b/be/src/exec/tablet_info.cpp
@@ -124,6 +124,7 @@ Status OlapTableSchemaParam::init(const 
POlapTableSchemaParam& pschema) {
     _version = pschema.version();
     _is_partial_update = pschema.partial_update();
     _is_strict_mode = pschema.is_strict_mode();
+    _is_unique_key_ignore_mode = pschema.is_unique_key_ignore_mode();
 
     for (auto& col : pschema.partial_update_input_columns()) {
         _partial_update_input_columns.insert(col);
@@ -176,9 +177,9 @@ Status OlapTableSchemaParam::init(const 
TOlapTableSchemaParam& tschema) {
     _table_id = tschema.table_id;
     _version = tschema.version;
     _is_partial_update = tschema.is_partial_update;
-    if (tschema.__isset.is_strict_mode) {
-        _is_strict_mode = tschema.is_strict_mode;
-    }
+    _is_strict_mode = tschema.__isset.is_strict_mode && tschema.is_strict_mode;
+    _is_unique_key_ignore_mode =
+            tschema.__isset.is_unique_key_ignore_mode && 
tschema.is_unique_key_ignore_mode;
 
     for (auto& tcolumn : tschema.partial_update_input_columns) {
         _partial_update_input_columns.insert(tcolumn);
@@ -246,6 +247,7 @@ void 
OlapTableSchemaParam::to_protobuf(POlapTableSchemaParam* pschema) const {
     pschema->set_version(_version);
     pschema->set_partial_update(_is_partial_update);
     pschema->set_is_strict_mode(_is_strict_mode);
+    pschema->set_is_unique_key_ignore_mode(_is_unique_key_ignore_mode);
     for (auto col : _partial_update_input_columns) {
         *pschema->add_partial_update_input_columns() = col;
     }
diff --git a/be/src/exec/tablet_info.h b/be/src/exec/tablet_info.h
index 3e6ab7b94b..42e6377284 100644
--- a/be/src/exec/tablet_info.h
+++ b/be/src/exec/tablet_info.h
@@ -90,6 +90,7 @@ public:
         return _partial_update_input_columns;
     }
     bool is_strict_mode() const { return _is_strict_mode; }
+    bool is_unique_key_ignore_mode() const { return 
_is_unique_key_ignore_mode; }
     std::string debug_string() const;
 
 private:
@@ -104,6 +105,7 @@ private:
     bool _is_partial_update = false;
     std::set<std::string> _partial_update_input_columns;
     bool _is_strict_mode = false;
+    bool _is_unique_key_ignore_mode = false;
 };
 
 using OlapTableIndexTablets = TOlapTableIndexTablets;
diff --git a/be/src/http/action/stream_load.cpp 
b/be/src/http/action/stream_load.cpp
index 4e7d32ccdb..dafbb8e558 100644
--- a/be/src/http/action/stream_load.cpp
+++ b/be/src/http/action/stream_load.cpp
@@ -555,6 +555,13 @@ Status StreamLoadAction::_process_put(HttpRequest* 
http_req,
         bool value = iequal(http_req->header(HTTP_MEMTABLE_ON_SINKNODE), 
"true");
         request.__set_memtable_on_sink_node(value);
     }
+    if (!http_req->header(HTTP_IGNORE_MODE).empty()) {
+        if (iequal(http_req->header(HTTP_IGNORE_MODE), "true")) {
+            request.__set_ignore_mode(true);
+        } else {
+            request.__set_ignore_mode(false);
+        }
+    }
 
 #ifndef BE_TEST
     // plan this load
diff --git a/be/src/http/http_common.h b/be/src/http/http_common.h
index bcbfa33e10..6ccaf60736 100644
--- a/be/src/http/http_common.h
+++ b/be/src/http/http_common.h
@@ -58,6 +58,7 @@ static const std::string HTTP_SKIP_LINES = "skip_lines";
 static const std::string HTTP_COMMENT = "comment";
 static const std::string HTTP_ENABLE_PROFILE = "enable_profile";
 static const std::string HTTP_PARTIAL_COLUMNS = "partial_columns";
+static const std::string HTTP_IGNORE_MODE = "ignore_mode";
 static const std::string HTTP_SQL = "sql";
 static const std::string HTTP_TWO_PHASE_COMMIT = "two_phase_commit";
 static const std::string HTTP_TXN_ID_KEY = "txn_id";
diff --git a/be/src/olap/rowset/beta_rowset_writer.cpp 
b/be/src/olap/rowset/beta_rowset_writer.cpp
index c79ce05cb9..7a7c2b9770 100644
--- a/be/src/olap/rowset/beta_rowset_writer.cpp
+++ b/be/src/olap/rowset/beta_rowset_writer.cpp
@@ -149,7 +149,7 @@ Status BetaRowsetWriter::_generate_delete_bitmap(int32_t 
segment_id) {
     OlapStopWatch watch;
     RETURN_IF_ERROR(_context.tablet->calc_delete_bitmap(
             rowset, segments, specified_rowsets, 
_context.mow_context->delete_bitmap,
-            _context.mow_context->max_version, nullptr));
+            _context.mow_context->max_version, nullptr, nullptr));
     size_t total_rows = std::accumulate(
             segments.begin(), segments.end(), 0,
             [](size_t sum, const segment_v2::SegmentSharedPtr& s) { return sum 
+= s->num_rows(); });
diff --git a/be/src/olap/rowset_builder.cpp b/be/src/olap/rowset_builder.cpp
index 7aafb8f83e..70ee539153 100644
--- a/be/src/olap/rowset_builder.cpp
+++ b/be/src/olap/rowset_builder.cpp
@@ -322,6 +322,7 @@ void RowsetBuilder::_build_current_tablet_schema(int64_t 
index_id,
     
_tablet_schema->set_partial_update_info(table_schema_param->is_partial_update(),
                                             
table_schema_param->partial_update_input_columns());
     _tablet_schema->set_is_strict_mode(table_schema_param->is_strict_mode());
+    
_tablet_schema->set_is_unique_key_ignore_mode(table_schema_param->is_unique_key_ignore_mode());
 }
 
 } // namespace doris
diff --git a/be/src/olap/tablet.cpp b/be/src/olap/tablet.cpp
index 6d806b1a94..6e083dbac0 100644
--- a/be/src/olap/tablet.cpp
+++ b/be/src/olap/tablet.cpp
@@ -107,6 +107,7 @@
 #include "olap/txn_manager.h"
 #include "olap/types.h"
 #include "olap/utils.h"
+#include "runtime/define_primitive_type.h"
 #include "segment_loader.h"
 #include "service/point_query_executor.h"
 #include "util/bvar_helper.h"
@@ -2884,6 +2885,7 @@ Status Tablet::calc_segment_delete_bitmap(RowsetSharedPtr 
rowset,
     Version dummy_version(end_version + 1, end_version + 1);
     auto rowset_schema = rowset->tablet_schema();
     bool is_partial_update = rowset_schema->is_partial_update();
+    bool is_unique_key_ignore_mode = 
rowset_schema->is_unique_key_ignore_mode();
     // use for partial update
     PartialUpdateReadPlan read_plan_ori;
     PartialUpdateReadPlan read_plan_update;
@@ -2951,42 +2953,50 @@ Status 
Tablet::calc_segment_delete_bitmap(RowsetSharedPtr rowset,
             if (st.is<KEY_NOT_FOUND>()) {
                 continue;
             }
-
-            // sequence id smaller than the previous one, so delete current row
-            if (st.is<KEY_ALREADY_EXISTS>()) {
-                delete_bitmap->add({rowset_id, seg->id(), 
DeleteBitmap::TEMP_VERSION_COMMON},
-                                   row_id);
-                continue;
-            } else if (is_partial_update && rowset_writer != nullptr) {
-                // In publish version, record rows to be deleted for 
concurrent update
-                // For example, if version 5 and 6 update a row, but version 6 
only see
-                // version 4 when write, and when publish version, version 5's 
value will
-                // be marked as deleted and it's update is losed.
-                // So here we should read version 5's columns and build a new 
row, which is
-                // consists of version 6's update columns and version 5's 
origin columns
-                // here we build 2 read plan for ori values and update values
-                prepare_to_read(loc, pos, &read_plan_ori);
-                prepare_to_read(RowLocation {rowset_id, seg->id(), row_id}, 
pos, &read_plan_update);
-                rsid_to_rowset[rowset_find->rowset_id()] = rowset_find;
-                ++pos;
-                // delete bitmap will be calculate when memtable flush and
-                // publish. The two stages may see different versions.
-                // When there is sequence column, the currently imported data
-                // of rowset may be marked for deletion at memtablet flush or
-                // publish because the seq column is smaller than the previous
-                // rowset.
-                // just set 0 as a unified temporary version number, and 
update to
-                // the real version number later.
+            if (UNLIKELY(is_unique_key_ignore_mode)) {
+                if (st.is<OK>() || st.is<KEY_ALREADY_EXISTS>()) {
+                    delete_bitmap->add({rowset_id, seg->id(), 
DeleteBitmap::TEMP_VERSION_COMMON},
+                                       row_id);
+                }
+            } else {
+                // sequence id smaller than the previous one, so delete 
current row
+                if (st.is<KEY_ALREADY_EXISTS>()) {
+                    delete_bitmap->add({rowset_id, seg->id(), 
DeleteBitmap::TEMP_VERSION_COMMON},
+                                       row_id);
+                    continue;
+                } else if (is_partial_update && rowset_writer != nullptr) {
+                    // In publish version, record rows to be deleted for 
concurrent update
+                    // For example, if version 5 and 6 update a row, but 
version 6 only see
+                    // version 4 when write, and when publish version, version 
5's value will
+                    be marked as deleted and its update is lost.
+                    // So here we should read version 5's columns and build a 
new row, which is
+                    // consists of version 6's update columns and version 5's 
origin columns
+                    // here we build 2 read plan for ori values and update 
values
+                    prepare_to_read(loc, pos, &read_plan_ori);
+                    prepare_to_read(RowLocation {rowset_id, seg->id(), 
row_id}, pos,
+                                    &read_plan_update);
+                    rsid_to_rowset[rowset_find->rowset_id()] = rowset_find;
+                    ++pos;
+                    // delete bitmap will be calculated when memtable flush and
+                    // publish. The two stages may see different versions.
+                    // When there is sequence column, the currently imported 
data
+                    // of rowset may be marked for deletion at memtable flush 
or
+                    // publish because the seq column is smaller than the 
previous
+                    // rowset.
+                    // just set 0 as a unified temporary version number, and 
update to
+                    // the real version number later.
+                    delete_bitmap->add(
+                            {loc.rowset_id, loc.segment_id, 
DeleteBitmap::TEMP_VERSION_COMMON},
+                            loc.row_id);
+                    delete_bitmap->add({rowset_id, seg->id(), 
DeleteBitmap::TEMP_VERSION_COMMON},
+                                       row_id);
+                    continue;
+                }
+                // when st = ok
                 delete_bitmap->add(
                         {loc.rowset_id, loc.segment_id, 
DeleteBitmap::TEMP_VERSION_COMMON},
                         loc.row_id);
-                delete_bitmap->add({rowset_id, seg->id(), 
DeleteBitmap::TEMP_VERSION_COMMON},
-                                   row_id);
-                continue;
             }
-            // when st = ok
-            delete_bitmap->add({loc.rowset_id, loc.segment_id, 
DeleteBitmap::TEMP_VERSION_COMMON},
-                               loc.row_id);
         }
         remaining -= num_read;
     }
diff --git a/be/src/olap/tablet_schema.cpp b/be/src/olap/tablet_schema.cpp
index 6f25b20e65..a87b86ced7 100644
--- a/be/src/olap/tablet_schema.cpp
+++ b/be/src/olap/tablet_schema.cpp
@@ -759,6 +759,7 @@ void TabletSchema::init_from_pb(const TabletSchemaPB& 
schema) {
     _compression_type = schema.compression_type();
     _schema_version = schema.schema_version();
     _is_partial_update = schema.is_partial_update();
+    _is_unique_key_ignore_mode = schema.is_unique_key_ignore_mode();
     for (auto& col_name : schema.partial_update_input_columns()) {
         _partial_update_input_columns.emplace(col_name);
     }
@@ -917,6 +918,7 @@ void TabletSchema::to_schema_pb(TabletSchemaPB* 
tablet_schema_pb) const {
     tablet_schema_pb->set_compression_type(_compression_type);
     tablet_schema_pb->set_version_col_idx(_version_col_idx);
     tablet_schema_pb->set_is_partial_update(_is_partial_update);
+    
tablet_schema_pb->set_is_unique_key_ignore_mode(_is_unique_key_ignore_mode);
     for (auto& col : _partial_update_input_columns) {
         *tablet_schema_pb->add_partial_update_input_columns() = col;
     }
diff --git a/be/src/olap/tablet_schema.h b/be/src/olap/tablet_schema.h
index 72d0636f6e..57faee4d82 100644
--- a/be/src/olap/tablet_schema.h
+++ b/be/src/olap/tablet_schema.h
@@ -363,8 +363,12 @@ public:
     }
     void set_is_strict_mode(bool is_strict_mode) { _is_strict_mode = 
is_strict_mode; }
     bool is_strict_mode() const { return _is_strict_mode; }
-    std::vector<uint32_t> get_missing_cids() const { return _missing_cids; }
-    std::vector<uint32_t> get_update_cids() const { return _update_cids; }
+    void set_is_unique_key_ignore_mode(bool is_unique_key_ignore_mode) {
+        _is_unique_key_ignore_mode = is_unique_key_ignore_mode;
+    }
+    bool is_unique_key_ignore_mode() const { return 
_is_unique_key_ignore_mode; }
+    std::vector<uint32_t> get_missing_cids() { return _missing_cids; }
+    std::vector<uint32_t> get_update_cids() { return _update_cids; }
 
 private:
     friend bool operator==(const TabletSchema& a, const TabletSchema& b);
@@ -411,6 +415,7 @@ private:
     // to generate a new row, only available in non-strict mode
     bool _can_insert_new_rows_in_partial_update = true;
     bool _is_strict_mode = false;
+    bool _is_unique_key_ignore_mode = false;
 };
 
 bool operator==(const TabletSchema& a, const TabletSchema& b);
diff --git 
a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md
 
b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md
index 039462c219..cdc44ae086 100644
--- 
a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md
+++ 
b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md
@@ -156,6 +156,8 @@ separated by commas.
 
 28. comment: <version since="1.2.3" type="inline"> String type, the default 
value is "". </version>
 
+29. ignore_mode: <version since="dev" type="inline"> Ignore mode, only 
effective when the target table is a unique table with merge-on-write enabled. 
When insert ignore mode is enabled, for the inserted rows, if the key of the 
row does not exist in the table, the row will be inserted. If the key already 
exists in the table, the row will be discarded. When sequence columns exist in 
the target table, the ignore mode can't be enabled in stream load.</version>
+
 ### Example
 
 1. Import the data in the local file 'testData' into the table 'testTbl' in 
the database 'testDb', and use Label for deduplication. Specify a timeout of 
100 seconds
diff --git 
a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md
 
b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md
index 2ad5c6269c..2fa97eae01 100644
--- 
a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md
+++ 
b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md
@@ -35,7 +35,7 @@ INSERT
 The change statement is to complete the data insertion operation.
 
 ```sql
-INSERT INTO table_name
+INSERT [IGNORE] INTO table_name
     [ PARTITION (p1, ...) ]
     [ WITH LABEL label]
     [ (column [, ...]) ]
@@ -44,7 +44,7 @@ INSERT INTO table_name
 ````
 
  Parameters
-
+> IGNORE: insert ignore mode, only effective when the target table is a unique 
table with merge-on-write enabled. When insert ignore mode is enabled, for the 
inserted rows, if the key of the row does not exist in the table, the row will 
be inserted. If the key already exists in the table, the row will be discarded. 
When sequence column exists in the target table, the `insert ignore` statements 
are forbidden.
 > tablet_name: The destination table for importing data. Can be of the form 
 > `db_name.table_name`
 >
 > partitions: Specify the partitions to be imported, which must be partitions 
 > that exist in `table_name`. Multiple partition names are separated by commas
diff --git 
a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md
 
b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md
index 8ca18f74df..00c622cdd5 100644
--- 
a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md
+++ 
b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md
@@ -152,6 +152,8 @@ curl --location-trusted -u user:passwd [-H ""...] -T 
data.file -XPUT http://fe_h
 
 28. comment: <version since="1.2.3" type="inline"> 字符串类型, 默认值为空. 给任务增加额外的信息. 
</version>
 
+29. ignore_mode: <version since="dev" type="inline"> 
ignore模式,仅当目标表为开启merge-on-write的unique表时有效。开启后,对于插入的行,如果该行的key在表中不存在,则插入该行数据。如果key在表中已存在,则丢弃这行数据。当目标表中存在sequence列时stream无法开启ignore
 mode。</version>
+
 ### Example
 
 1. 将本地文件'testData'中的数据导入到数据库'testDb'中'testTbl'的表,使用Label用于去重。指定超时时间为 100 秒
diff --git 
a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md
 
b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md
index ef9552e8ab..d1ec9f0efe 100644
--- 
a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md
+++ 
b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md
@@ -35,7 +35,7 @@ INSERT
 该语句是完成数据插入操作。
 
 ```sql
-INSERT INTO table_name
+INSERT [IGNORE] INTO table_name
     [ PARTITION (p1, ...) ]
     [ WITH LABEL label]
     [ (column [, ...]) ]
@@ -44,7 +44,8 @@ INSERT INTO table_name
 ```
 
  Parameters
-
+> IGNORE: insert 
ignore模式,仅当目标表为开启merge-on-write的unique表时有效。开启后,对于插入的行,如果该行的key在表中不存在,则插入该行数据。如果key在表中已存在,则丢弃这行数据。当目标表中存在sequence列时无法通过insert
 ignore语句进行插入操作。
+>
 > tablet_name: 导入数据的目的表。可以是 `db_name.table_name` 形式
 >
 > partitions: 指定待导入的分区,必须是 `table_name` 中存在的分区,多个分区名称用逗号分隔
diff --git a/fe/fe-core/src/main/cup/sql_parser.cup 
b/fe/fe-core/src/main/cup/sql_parser.cup
index 8f9119308a..ac0b8208fa 100644
--- a/fe/fe-core/src/main/cup/sql_parser.cup
+++ b/fe/fe-core/src/main/cup/sql_parser.cup
@@ -887,7 +887,7 @@ nonterminal ParseNode load_property;
 nonterminal List<ParseNode> opt_load_property_list;
 
 // Boolean
-nonterminal Boolean opt_negative, opt_is_allow_null, opt_is_key, 
opt_read_only, opt_aggregate, opt_local, opt_is_auto_inc;
+nonterminal Boolean opt_negative, opt_is_allow_null, opt_is_key, 
opt_read_only, opt_aggregate, opt_local, opt_is_auto_inc, opt_is_insert_ignore;
 nonterminal String opt_from_rollup, opt_to_rollup;
 nonterminal ColumnPosition opt_col_pos;
 
@@ -3718,6 +3718,17 @@ opt_is_auto_inc ::=
         RESULT = true;
     :}
     ;
+
+opt_is_insert_ignore ::=
+    {:
+        RESULT = false;
+    :}
+    | KW_IGNORE
+    {:
+        RESULT = true;
+    :}
+    ;
+
 opt_comment ::=
     /* empty */
     {:
@@ -4805,16 +4816,16 @@ insert_overwrite_stmt ::=
 
 // Insert statement
 insert_stmt ::=
-    KW_INSERT KW_INTO insert_target:target opt_with_label:label 
opt_col_list:cols opt_plan_hints:hints insert_source:source
+    KW_INSERT opt_is_insert_ignore:is_insert_ignore KW_INTO 
insert_target:target opt_with_label:label opt_col_list:cols 
opt_plan_hints:hints insert_source:source
     {:
-        RESULT = new NativeInsertStmt(target, label, cols, source, hints);
+        RESULT = new NativeInsertStmt(target, label, cols, source, hints, 
is_insert_ignore);
     :}
     // TODO(zc) add default value for SQL-2003
     // | KW_INSERT KW_INTO insert_target:target KW_DEFAULT KW_VALUES
     | /* used for group commit */
-    KW_INSERT KW_INTO INTEGER_LITERAL:table_id opt_with_label:label 
opt_col_list:cols opt_plan_hints:hints insert_source:source
+    KW_INSERT opt_is_insert_ignore:is_insert_ignore KW_INTO 
INTEGER_LITERAL:table_id opt_with_label:label opt_col_list:cols 
opt_plan_hints:hints insert_source:source
     {:
-        RESULT = new NativeInsertStmt(table_id, label, cols, source, hints);
+        RESULT = new NativeInsertStmt(table_id, label, cols, source, hints, 
is_insert_ignore);
     :}
     ;
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java
index 872edf8aba..a43d339be8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java
@@ -201,7 +201,8 @@ public class DeleteStmt extends DdlStmt {
                 cols,
                 new InsertSource(selectStmt),
                 null,
-                isPartialUpdate);
+                isPartialUpdate,
+                false);
     }
 
     private void analyzeTargetTable(Analyzer analyzer) throws UserException {
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java
index 8c24c09819..96962c6dcc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java
@@ -166,6 +166,8 @@ public class NativeInsertStmt extends InsertStmt {
     // true if be generates an insert from group commit tvf stmt and executes 
to load data
     public boolean isInnerGroupCommit = false;
 
+    private boolean isInsertIgnore = false;
+
     public NativeInsertStmt(InsertTarget target, String label, List<String> 
cols, InsertSource source,
             List<String> hints) {
         super(new LabelName(null, label), null, null);
@@ -185,6 +187,26 @@ public class NativeInsertStmt extends InsertStmt {
         this.tableId = tableId;
     }
 
+    public NativeInsertStmt(long tableId, String label, List<String> cols, 
InsertSource source,
+            List<String> hints, boolean isInsertIgnore) {
+        this(new InsertTarget(new TableName(null, null, null), null), label, 
cols, source, hints, isInsertIgnore);
+        this.tableId = tableId;
+    }
+
+    public NativeInsertStmt(InsertTarget target, String label, List<String> 
cols, InsertSource source,
+            List<String> hints, boolean isInsertIgnore) {
+        super(new LabelName(null, label), null, null);
+        this.tblName = target.getTblName();
+        this.targetPartitionNames = target.getPartitionNames();
+        this.label = new LabelName(null, label);
+        this.queryStmt = source.getQueryStmt();
+        this.planHints = hints;
+        this.isInsertIgnore = isInsertIgnore;
+        this.targetColumnNames = cols;
+        this.isValuesOrConstantSelect = (queryStmt instanceof SelectStmt
+                && ((SelectStmt) queryStmt).getTableRefs().isEmpty());
+    }
+
     // Ctor for CreateTableAsSelectStmt and InsertOverwriteTableStmt
     public NativeInsertStmt(TableName name, PartitionNames 
targetPartitionNames, LabelName label,
             QueryStmt queryStmt, List<String> planHints, List<String> 
targetColumnNames) {
@@ -199,10 +221,11 @@ public class NativeInsertStmt extends InsertStmt {
     }
 
     public NativeInsertStmt(InsertTarget target, String label, List<String> 
cols, InsertSource source,
-             List<String> hints, boolean isPartialUpdate) {
+             List<String> hints, boolean isPartialUpdate, boolean 
isInsertIgnore) {
         this(target, label, cols, source, hints);
         this.isPartialUpdate = isPartialUpdate;
         this.partialUpdateCols.addAll(cols);
+        this.isInsertIgnore = isInsertIgnore;
     }
 
     public boolean isValuesOrConstantSelect() {
@@ -382,7 +405,8 @@ public class NativeInsertStmt extends InsertStmt {
             OlapTableSink sink = (OlapTableSink) dataSink;
             TUniqueId loadId = analyzer.getContext().queryId();
             int sendBatchParallelism = 
analyzer.getContext().getSessionVariable().getSendBatchParallelism();
-            sink.init(loadId, transactionId, db.getId(), timeoutSecond, 
sendBatchParallelism, false, false);
+            sink.init(loadId, transactionId, db.getId(), timeoutSecond,
+                    sendBatchParallelism, false, false, isInsertIgnore);
         }
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/UpdateStmt.java 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/UpdateStmt.java
index 0959970fa7..315c9a1fbb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/UpdateStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/UpdateStmt.java
@@ -125,7 +125,8 @@ public class UpdateStmt extends DdlStmt {
                 cols,
                 new InsertSource(selectStmt),
                 null,
-                isPartialUpdate);
+                isPartialUpdate,
+                false);
     }
 
     private void analyzeTargetTable(Analyzer analyzer) throws UserException {
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadingTaskPlanner.java 
b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadingTaskPlanner.java
index 17ee58bd63..a65ee96fab 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadingTaskPlanner.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadingTaskPlanner.java
@@ -193,8 +193,9 @@ public class LoadingTaskPlanner {
         List<Long> partitionIds = getAllPartitionIds();
         OlapTableSink olapTableSink = new OlapTableSink(table, destTupleDesc, 
partitionIds,
                 Config.enable_single_replica_load);
-        olapTableSink.init(loadId, txnId, dbId, timeoutS, 
sendBatchParallelism, false, strictMode);
+        olapTableSink.init(loadId, txnId, dbId, timeoutS, 
sendBatchParallelism, false, strictMode, false);
         olapTableSink.setPartialUpdateInputColumns(isPartialUpdate, 
partialUpdateInputColumns);
+
         olapTableSink.complete(analyzer);
 
         // 3. Plan fragment
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/InsertIntoTableCommand.java
 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/InsertIntoTableCommand.java
index 935d3867be..8f14f5efce 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/InsertIntoTableCommand.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/InsertIntoTableCommand.java
@@ -150,7 +150,7 @@ public class InsertIntoTableCommand extends Command 
implements ForwardWithSync,
         sink.init(ctx.queryId(), txn.getTxnId(),
                 physicalOlapTableSink.getDatabase().getId(),
                 ctx.getExecTimeout(),
-                ctx.getSessionVariable().getSendBatchParallelism(), false, 
false);
+                ctx.getSessionVariable().getSendBatchParallelism(), false, 
false, false);
 
         sink.complete(new Analyzer(Env.getCurrentEnv(), ctx));
         TransactionState state = 
Env.getCurrentGlobalTransactionMgr().getTransactionState(
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java 
b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java
index 5cff7011af..a91fdb28f1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java
@@ -26,6 +26,7 @@ import org.apache.doris.catalog.DistributionInfo;
 import org.apache.doris.catalog.Env;
 import org.apache.doris.catalog.HashDistributionInfo;
 import org.apache.doris.catalog.Index;
+import org.apache.doris.catalog.KeysType;
 import org.apache.doris.catalog.ListPartitionItem;
 import org.apache.doris.catalog.MaterializedIndex;
 import org.apache.doris.catalog.MaterializedIndex.IndexExtState;
@@ -104,6 +105,8 @@ public class OlapTableSink extends DataSink {
 
     private boolean isStrictMode = false;
 
+    private boolean isUniqueKeyIgnoreMode = false;
+
     public OlapTableSink(OlapTable dstTable, TupleDescriptor tupleDescriptor, 
List<Long> partitionIds,
             boolean singleReplicaLoad) {
         this.dstTable = dstTable;
@@ -113,7 +116,7 @@ public class OlapTableSink extends DataSink {
     }
 
     public void init(TUniqueId loadId, long txnId, long dbId, long 
loadChannelTimeoutS, int sendBatchParallelism,
-            boolean loadToSingleTablet, boolean isStrictMode) throws 
AnalysisException {
+            boolean loadToSingleTablet, boolean isStrictMode, boolean 
isUniqueKeyIgnoreMode) throws AnalysisException {
         TOlapTableSink tSink = new TOlapTableSink();
         tSink.setLoadId(loadId);
         tSink.setTxnId(txnId);
@@ -121,6 +124,7 @@ public class OlapTableSink extends DataSink {
         tSink.setLoadChannelTimeoutS(loadChannelTimeoutS);
         tSink.setSendBatchParallelism(sendBatchParallelism);
         this.isStrictMode = isStrictMode;
+        this.isUniqueKeyIgnoreMode = isUniqueKeyIgnoreMode;
         if (loadToSingleTablet && !(dstTable.getDefaultDistributionInfo() 
instanceof RandomDistributionInfo)) {
             throw new AnalysisException(
                     "if load_to_single_tablet set to true," + " the olap table 
must be with random distribution");
@@ -186,6 +190,17 @@ public class OlapTableSink extends DataSink {
         }
         tSink.setWriteSingleReplica(singleReplicaLoad);
         tSink.setNodesInfo(createPaloNodesInfo());
+        if (isUniqueKeyIgnoreMode) {
+            if (dstTable.getKeysType() != KeysType.UNIQUE_KEYS || 
!dstTable.getEnableUniqueKeyMergeOnWrite()) {
+                throw new UserException("ignore mode can only be enabled if 
the target table is "
+                        + "a unique table with merge-on-write enabled.");
+            } else if (isPartialUpdate) {
+                throw new UserException("ignore mode can't be used in partial 
update.");
+            } else if (dstTable.hasSequenceCol()) {
+                throw new UserException("ignore mode can't be used if the 
target table has sequence column, "
+                        + "but table[" + dstTable.getName() + "] has sequnce 
column.");
+            }
+        }
     }
 
     @Override
@@ -221,6 +236,7 @@ public class OlapTableSink extends DataSink {
         schemaParam.setTableId(table.getId());
         
schemaParam.setVersion(table.getIndexMetaByIndexId(table.getBaseIndexId()).getSchemaVersion());
         schemaParam.setIsStrictMode(isStrictMode);
+        schemaParam.setIsUniqueKeyIgnoreMode(isUniqueKeyIgnoreMode);
 
         schemaParam.tuple_desc = tupleDescriptor.toThrift();
         for (SlotDescriptor slotDesc : tupleDescriptor.getSlots()) {
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java 
b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java
index 50a5f68b8a..77fe4697f1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java
@@ -256,8 +256,8 @@ public class StreamLoadPlanner {
         List<Long> partitionIds = getAllPartitionIds();
         OlapTableSink olapTableSink = new OlapTableSink(destTable, tupleDesc, 
partitionIds,
                 Config.enable_single_replica_load);
-        olapTableSink.init(loadId, taskInfo.getTxnId(), db.getId(), timeout,
-                taskInfo.getSendBatchParallelism(), 
taskInfo.isLoadToSingleTablet(), taskInfo.isStrictMode());
+        olapTableSink.init(loadId, taskInfo.getTxnId(), db.getId(), timeout, 
taskInfo.getSendBatchParallelism(),
+                taskInfo.isLoadToSingleTablet(), taskInfo.isStrictMode(), 
taskInfo.isIgnoreMode());
         olapTableSink.setPartialUpdateInputColumns(isPartialUpdate, 
partialUpdateInputColumns);
         olapTableSink.complete(analyzer);
 
@@ -465,8 +465,8 @@ public class StreamLoadPlanner {
         List<Long> partitionIds = getAllPartitionIds();
         OlapTableSink olapTableSink = new OlapTableSink(destTable, tupleDesc, 
partitionIds,
                 Config.enable_single_replica_load);
-        olapTableSink.init(loadId, taskInfo.getTxnId(), db.getId(), timeout,
-                taskInfo.getSendBatchParallelism(), 
taskInfo.isLoadToSingleTablet(), taskInfo.isStrictMode());
+        olapTableSink.init(loadId, taskInfo.getTxnId(), db.getId(), timeout, 
taskInfo.getSendBatchParallelism(),
+                taskInfo.isLoadToSingleTablet(), taskInfo.isStrictMode(), 
taskInfo.isIgnoreMode());
         olapTableSink.setPartialUpdateInputColumns(isPartialUpdate, 
partialUpdateInputColumns);
         olapTableSink.complete(analyzer);
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java 
b/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java
index 3174e4d5c6..9f21de25b0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java
@@ -125,6 +125,10 @@ public interface LoadTaskInfo {
         return false;
     }
 
+    default boolean isIgnoreMode() {
+        return false;
+    }
+
     class ImportColumnDescs {
         public List<ImportColumnDesc> descs = Lists.newArrayList();
         public boolean isColumnDescsRewrited = false;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java 
b/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java
index c99c720ee0..4c95da2102 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java
@@ -84,6 +84,7 @@ public class StreamLoadTask implements LoadTaskInfo {
     private List<String> hiddenColumns;
     private boolean trimDoubleQuotes = false;
     private boolean isPartialUpdate = false;
+    private boolean isIgnoreMode = false;
 
     private int skipLines = 0;
     private boolean enableProfile = false;
@@ -308,6 +309,10 @@ public class StreamLoadTask implements LoadTaskInfo {
         this.memtableOnSinkNode = memtableOnSinkNode;
     }
 
+    public boolean isIgnoreMode() {
+        return isIgnoreMode;
+    }
+
     public static StreamLoadTask 
fromTStreamLoadPutRequest(TStreamLoadPutRequest request) throws UserException {
         StreamLoadTask streamLoadTask = new 
StreamLoadTask(request.getLoadId(), request.getTxnId(),
                 request.getFileType(), request.getFormatType(),
@@ -444,6 +449,9 @@ public class StreamLoadTask implements LoadTaskInfo {
         if (request.isSetMemtableOnSinkNode()) {
             this.memtableOnSinkNode = request.isMemtableOnSinkNode();
         }
+        if (request.isSetIgnoreMode()) {
+            isIgnoreMode = request.isIgnoreMode();
+        }
     }
 
     // used for stream load
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/planner/OlapTableSinkTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/planner/OlapTableSinkTest.java
index dc98026a00..44a0f02928 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/planner/OlapTableSinkTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/planner/OlapTableSinkTest.java
@@ -107,7 +107,7 @@ public class OlapTableSinkTest {
                 new DataProperty(DataProperty.DEFAULT_STORAGE_MEDIUM));
         dstTable.getPartitionInfo().setIsMutable(partition.getId(), true);
         OlapTableSink sink = new OlapTableSink(dstTable, tuple, 
Lists.newArrayList(2L), false);
-        sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false, false);
+        sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false, false, false);
         sink.complete(null);
         LOG.info("sink is {}", sink.toThrift());
         LOG.info("{}", sink.getExplainString("", TExplainLevel.NORMAL));
@@ -144,7 +144,7 @@ public class OlapTableSinkTest {
         };
 
         OlapTableSink sink = new OlapTableSink(dstTable, tuple, 
Lists.newArrayList(p1.getId()), false);
-        sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false, false);
+        sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false, false, false);
         try {
             sink.complete(null);
         } catch (UserException e) {
@@ -169,7 +169,7 @@ public class OlapTableSinkTest {
         };
 
         OlapTableSink sink = new OlapTableSink(dstTable, tuple, 
Lists.newArrayList(unknownPartId), false);
-        sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false, false);
+        sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false, false, false);
         sink.complete(null);
         LOG.info("sink is {}", sink.toThrift());
         LOG.info("{}", sink.getExplainString("", TExplainLevel.NORMAL));
@@ -206,7 +206,7 @@ public class OlapTableSinkTest {
         };
 
         OlapTableSink sink = new OlapTableSink(dstTable, tuple, 
Lists.newArrayList(p1.getId()), false);
-        sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false, false);
+        sink.init(new TUniqueId(1, 2), 3, 4, 1000, 1, false, false, false);
         try {
             sink.complete(null);
         } catch (UserException e) {
diff --git a/gensrc/proto/descriptors.proto b/gensrc/proto/descriptors.proto
index abebf8fde5..b2e76fc019 100644
--- a/gensrc/proto/descriptors.proto
+++ b/gensrc/proto/descriptors.proto
@@ -67,5 +67,6 @@ message POlapTableSchemaParam {
     optional bool partial_update = 7;
     repeated string partial_update_input_columns = 8;
     optional bool is_strict_mode = 9 [default = false];
+    optional bool is_unique_key_ignore_mode = 10 [default = false];
 };
 
diff --git a/gensrc/proto/olap_file.proto b/gensrc/proto/olap_file.proto
index fe5f76f6b4..bc968e87ee 100644
--- a/gensrc/proto/olap_file.proto
+++ b/gensrc/proto/olap_file.proto
@@ -254,6 +254,7 @@ message TabletSchemaPB {
     repeated string partial_update_input_columns = 21;
     optional bool enable_single_replica_compaction = 22 [default=false];
     optional bool skip_write_index_on_load = 23 [default=false];
+    optional bool is_unique_key_ignore_mode = 24 [default=false];
 }
 
 enum TabletStatePB {
diff --git a/gensrc/thrift/Descriptors.thrift b/gensrc/thrift/Descriptors.thrift
index fa391febda..10521af033 100644
--- a/gensrc/thrift/Descriptors.thrift
+++ b/gensrc/thrift/Descriptors.thrift
@@ -230,6 +230,7 @@ struct TOlapTableSchemaParam {
     8: optional bool is_partial_update
     9: optional list<string> partial_update_input_columns
     10: optional bool is_strict_mode = false;
+    11: optional bool is_unique_key_ignore_mode = false;
 }
 
 struct TTabletLocation {
diff --git a/gensrc/thrift/FrontendService.thrift 
b/gensrc/thrift/FrontendService.thrift
index c1d5f6b0c6..65bd2b7151 100644
--- a/gensrc/thrift/FrontendService.thrift
+++ b/gensrc/thrift/FrontendService.thrift
@@ -628,6 +628,7 @@ struct TStreamLoadPutRequest {
     // only valid when file type is CSV
     52: optional i8 escape
     53: optional bool memtable_on_sink_node;
+    54: optional bool ignore_mode = false
 }
 
 struct TStreamLoadPutResult {
diff --git a/regression-test/data/insert_p0/insert_ignore.out 
b/regression-test/data/insert_p0/insert_ignore.out
new file mode 100644
index 0000000000..bb38f60c9c
--- /dev/null
+++ b/regression-test/data/insert_p0/insert_ignore.out
@@ -0,0 +1,36 @@
+-- This file is automatically generated. You should know what you did if you 
want to edit this
+-- !origin_data --
+1      kevin   18      shenzhen        400
+2      bob     20      beijing 500
+3      alice   22      shanghai        600
+4      jack    24      hangzhou        700
+5      tom     26      guanzhou        800
+
+-- !after_insert_ignore --
+1      kevin   18      shenzhen        400
+2      bob     20      beijing 500
+3      alice   22      shanghai        600
+4      jack    24      hangzhou        700
+5      tom     26      guanzhou        800
+10     alex    28      shenzhen        1111
+20     leo     30      beijing 2222
+30     sam     32      shanghai        3333
+40     Ruth    34      hangzhou        4444
+50     cynthia 36      guanzhou        8000
+
+-- !origin_data --
+1      1
+
+-- !delete_a_row --
+
+-- !after_insert_ignore --
+1      3
+
+-- !sql --
+1      10
+
+-- !delete_a_row --
+
+-- !after_insert_ignore --
+1      1
+
diff --git a/regression-test/data/unique_with_mow_p0/ignore_mode.csv 
b/regression-test/data/unique_with_mow_p0/ignore_mode.csv
new file mode 100644
index 0000000000..693c484172
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/ignore_mode.csv
@@ -0,0 +1,10 @@
+1,"kevin",18,"shenzhen",4000
+10,"alex",28,"shenzhen",1111
+2,"bob",20,"beijing",5000
+20,"leo",30,"beijing",2222
+30,"sam",32,"shanghai",3333
+3,"alice",22,"shanghai",6000
+4,"jack",24,"hangzhou",7000
+40,"Ruth",34,"hangzhou",4444
+5,"tom",26,"guanzhou",8000
+50,"cynthia",36,"guanzhou",8000
\ No newline at end of file
diff --git a/regression-test/data/unique_with_mow_p0/ignore_mode2.csv 
b/regression-test/data/unique_with_mow_p0/ignore_mode2.csv
new file mode 100644
index 0000000000..5711f63304
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/ignore_mode2.csv
@@ -0,0 +1,2 @@
+1,4000
+2,5000
diff --git a/regression-test/data/unique_with_mow_p0/test_ignore_mode.out 
b/regression-test/data/unique_with_mow_p0/test_ignore_mode.out
new file mode 100644
index 0000000000..5c72f099d9
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/test_ignore_mode.out
@@ -0,0 +1,20 @@
+-- This file is automatically generated. You should know what you did if you 
want to edit this
+-- !origin_data --
+1      kevin   18      shenzhen        400
+2      bob     20      beijing 500
+3      alice   22      shanghai        600
+4      jack    24      hangzhou        700
+5      tom     26      guanzhou        800
+
+-- !after_ignore_mode_stream_load --
+1      kevin   18      shenzhen        400
+2      bob     20      beijing 500
+3      alice   22      shanghai        600
+4      jack    24      hangzhou        700
+5      tom     26      guanzhou        800
+10     "alex"  28      "shenzhen"      1111
+20     "leo"   30      "beijing"       2222
+30     "sam"   32      "shanghai"      3333
+40     "Ruth"  34      "hangzhou"      4444
+50     "cynthia"       36      "guanzhou"      8000
+
diff --git a/regression-test/suites/insert_p0/insert_ignore.groovy 
b/regression-test/suites/insert_p0/insert_ignore.groovy
new file mode 100644
index 0000000000..37cd7707f2
--- /dev/null
+++ b/regression-test/suites/insert_p0/insert_ignore.groovy
@@ -0,0 +1,132 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_insert_ignore") {
+
+    def tableName = "test_insert_ignore1"
+    sql """ DROP TABLE IF EXISTS ${tableName} FORCE;"""
+    sql """
+            CREATE TABLE ${tableName} ( 
+                `id` int(11) NULL, 
+                `name` varchar(10) NULL,
+                `age` int(11) NULL DEFAULT "20", 
+                `city` varchar(10) NOT NULL DEFAULT "beijing", 
+                `balance` decimalv3(9, 0) NULL
+            ) ENGINE = OLAP UNIQUE KEY(`id`) 
+            COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1
+            PROPERTIES ( 
+                "replication_allocation" = "tag.location.default: 1", 
+                "storage_format" = "V2", 
+                "enable_unique_key_merge_on_write" = "true", 
+                "light_schema_change" = "true", 
+                "disable_auto_compaction" = "false", 
+                "enable_single_replica_compaction" = "false" 
+            );
+    """
+    sql """insert into ${tableName} values
+        (1,"kevin",18,"shenzhen",400),
+        (2,"bob",20,"beijing",500),
+        (3,"alice",22,"shanghai",600),
+        (4,"jack",24,"hangzhou",700),
+        (5,"tom",26,"guanzhou",800);"""
+    qt_origin_data "select * from ${tableName} order by id;"
+    
+    // some rows are with existing keys, some are not
+    sql """insert ignore into ${tableName} values
+        (1,"kevin",18,"shenzhen",4000),
+        (10,"alex",28,"shenzhen",1111),
+        (2,"bob",20,"beijing",5000),
+        (20,"leo",30,"beijing",2222),
+        (30,"sam",32,"shanghai",3333),
+        (3,"alice",22,"shanghai",6000),
+        (4,"jack",24,"hangzhou",7000),
+        (40,"Ruth",34,"hangzhou",4444),
+        (5,"tom",26,"guanzhou",8000),
+        (50,"cynthia",36,"guanzhou",8000);"""
+
+    qt_after_insert_ignore "select * from ${tableName} order by id;"
+    sql """ DROP TABLE IF EXISTS ${tableName};"""
+
+    def tableName2 = "test_insert_ignore2"
+    sql """ DROP TABLE IF EXISTS ${tableName2} FORCE; """
+    sql """CREATE TABLE IF NOT EXISTS ${tableName2} (
+                `uid` BIGINT NULL,
+                `v1` BIGINT NULL 
+                )
+    UNIQUE KEY(uid)
+    DISTRIBUTED BY HASH(uid) BUCKETS 1
+    PROPERTIES (
+        "enable_unique_key_merge_on_write" = "true",
+        "replication_num" = "1"
+    );"""
+
+    sql "insert into ${tableName2} values(1,1);"
+    qt_origin_data "select * from ${tableName2} order by uid;"
+
+    sql "insert into ${tableName2}(uid, v1, __DORIS_DELETE_SIGN__) values(1, 
2, 1);"
+    qt_delete_a_row "select * from ${tableName2} order by uid;"
+
+    sql "insert ignore into ${tableName2} values(1,3);"
+    qt_after_insert_ignore "select * from ${tableName2} order by uid;"
+
+    sql "insert into ${tableName2} values(1,10);"
+    qt_sql "select * from ${tableName2} order by uid;"
+
+    sql "insert into ${tableName2}(uid, v1, __DORIS_DELETE_SIGN__) values(1, 
1, 1);"
+    qt_delete_a_row "select * from ${tableName2} order by uid;"
+
+    sql "insert ignore into ${tableName2} values(1,1);"
+    qt_after_insert_ignore "select * from ${tableName2} order by uid;"
+    sql """ DROP TABLE IF EXISTS ${tableName2}; """
+
+
+    // test illegal cases
+    def tableName3 = "test_insert_ignore3"
+    sql """ DROP TABLE IF EXISTS ${tableName3} FORCE; """
+    sql """CREATE TABLE IF NOT EXISTS ${tableName3} (
+                `uid` BIGINT NULL,
+                `v1` BIGINT NULL 
+            ) UNIQUE KEY(uid)
+    DISTRIBUTED BY HASH(uid) BUCKETS 1
+    PROPERTIES (
+        "enable_unique_key_merge_on_write" = "false",
+        "replication_num" = "1"
+    );"""
+    sql "insert into ${tableName3} values(1,1);"
+    test {
+        sql "insert ignore into ${tableName3} values(1,3);"
+        exception "ignore mode can only be enabled if the target table is a 
unique table with merge-on-write enabled."
+    }
+
+    def tableName4 = "test_insert_ignore4"
+    sql """ DROP TABLE IF EXISTS ${tableName4} FORCE; """
+    sql """CREATE TABLE IF NOT EXISTS ${tableName4} (
+                `uid` BIGINT NULL,
+                `v1` BIGINT NULL 
+            ) UNIQUE KEY(uid)
+    DISTRIBUTED BY HASH(uid) BUCKETS 1
+    PROPERTIES (
+        "enable_unique_key_merge_on_write" = "true",
+        "replication_num" = "1",
+        "function_column.sequence_col" = 'v1'
+    );"""
+    sql "insert into ${tableName4} values(1,1);"
+    test {
+        sql "insert ignore into ${tableName4} values(1,3);"
+        exception "ignore mode can't be used if the target table has sequence 
column, but table[${tableName4}] has sequnce column."
+    }
+}
diff --git a/regression-test/suites/unique_with_mow_p0/test_ignore_mode.groovy 
b/regression-test/suites/unique_with_mow_p0/test_ignore_mode.groovy
new file mode 100644
index 0000000000..94afd0be6b
--- /dev/null
+++ b/regression-test/suites/unique_with_mow_p0/test_ignore_mode.groovy
@@ -0,0 +1,112 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_mow_table_ignore_mode") {
+
+    def tableName = "test_mow_table_ignore_mode1"
+    sql """ DROP TABLE IF EXISTS ${tableName} FORCE;"""
+    sql """
+            CREATE TABLE ${tableName} ( 
+                `id` int(11) NULL, 
+                `name` varchar(10) NULL,
+                `age` int(11) NULL DEFAULT "20", 
+                `city` varchar(10) NOT NULL DEFAULT "beijing", 
+                `balance` decimalv3(9, 0) NULL
+            ) ENGINE = OLAP UNIQUE KEY(`id`) 
+            COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1
+            PROPERTIES ( 
+                "replication_allocation" = "tag.location.default: 1", 
+                "storage_format" = "V2", 
+                "enable_unique_key_merge_on_write" = "true", 
+                "light_schema_change" = "true", 
+                "disable_auto_compaction" = "false", 
+                "enable_single_replica_compaction" = "false" 
+            );
+    """
+    sql """insert into ${tableName} values
+        (1,"kevin",18,"shenzhen",400),
+        (2,"bob",20,"beijing",500),
+        (3,"alice",22,"shanghai",600),
+        (4,"jack",24,"hangzhou",700),
+        (5,"tom",26,"guanzhou",800);"""
+    qt_origin_data "select * from ${tableName} order by id;"
+
+    // some rows are with existing keys, some are not
+    streamLoad {
+        table "${tableName}"
+
+        set 'column_separator', ','
+        set 'format', 'csv'
+        set 'columns', 'id,name,age,city,balance'
+        set 'ignore_mode', 'true'
+
+        file 'ignore_mode.csv'
+        time 10000 // limit inflight 10s
+    }
+    sql "sync"
+
+    qt_after_ignore_mode_stream_load "select * from ${tableName} order by id;"
+    sql """ DROP TABLE IF EXISTS ${tableName};"""
+
+
+    // test illegal case
+    def tableName2 = "test_mow_table_ignore_mode2"
+    sql """ DROP TABLE IF EXISTS ${tableName2} FORCE;"""
+    sql """
+            CREATE TABLE ${tableName2} ( 
+                `id` int(11) NULL, 
+                `name` varchar(10) NULL,
+                `age` int(11) NULL DEFAULT "20", 
+                `city` varchar(10) NOT NULL DEFAULT "beijing", 
+                `balance` decimalv3(9, 0) NULL
+            ) ENGINE = OLAP UNIQUE KEY(`id`) 
+            COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1
+            PROPERTIES ( 
+                "replication_allocation" = "tag.location.default: 1", 
+                "storage_format" = "V2", 
+                "enable_unique_key_merge_on_write" = "true", 
+                "light_schema_change" = "true", 
+                "disable_auto_compaction" = "false", 
+                "enable_single_replica_compaction" = "false" 
+            );"""
+    sql """insert into ${tableName2} values
+        (1,"kevin",18,"shenzhen",400),
+        (2,"bob",20,"beijing",500),
+        (3,"alice",22,"shanghai",600),
+        (4,"jack",24,"hangzhou",700),
+        (5,"tom",26,"guanzhou",800);"""
+    // some rows are with existing keys, some are not
+    streamLoad {
+        table "${tableName2}"
+
+        set 'column_separator', ','
+        set 'format', 'csv'
+        set 'columns', 'id,balance'
+        set 'partial_columns', 'true'
+        set 'ignore_mode', 'true'
+
+        file 'ignore_mode.csv'
+        time 10000 // limit inflight 10s
+
+        check {result, exception, startTime, endTime ->
+            assertTrue(exception == null)
+            def json = parseJson(result)
+            assertEquals("Fail", json.Status)
+            assertTrue(json.Message.contains("ignore mode can't be used in 
partial update."))
+        }
+    }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org

Reply via email to