This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git
commit c89f57ae7e8e7fae4976362e05d52a5ce3db3849
Author: lihangyu <15605149...@163.com>
AuthorDate: Wed Oct 18 10:13:44 2023 +0800

    [Fix](row store) Real default value should be used instead of default… (#25230)

    Before this PR, the default value filled for missing row-store columns was not
    correct; the default value defined in the Frontend schema should be used instead.
---
 be/src/exec/rowid_fetcher.cpp                      |  5 +-
 be/src/olap/tablet.cpp                             |  6 ++-
 be/src/runtime/descriptors.cpp                     |  5 +-
 be/src/runtime/descriptors.h                       |  6 +++
 be/src/service/point_query_executor.cpp            | 10 ++--
 be/src/service/point_query_executor.h              |  3 ++
 be/src/vec/jsonb/serialize.cpp                     | 22 +++++++--
 be/src/vec/jsonb/serialize.h                       |  4 +-
 be/test/vec/jsonb/serialize_test.cpp               |  9 +++-
 .../org/apache/doris/analysis/SlotDescriptor.java  |  1 +
 gensrc/thrift/Descriptors.thrift                   |  4 ++
 .../test_compaction_uniq_keys_row_store.out        |  8 +--
 .../data/point_query_p0/test_point_query.out       | 57 +++++++++++++---------
 .../data/point_query_p0/test_rowstore.out          |  7 +++
 .../suites/point_query_p0/test_point_query.groovy  |  8 +--
 .../suites/point_query_p0/test_rowstore.groovy     | 49 +++++++++++++++++++
 16 files changed, 157 insertions(+), 47 deletions(-)

diff --git a/be/src/exec/rowid_fetcher.cpp b/be/src/exec/rowid_fetcher.cpp
index e1a48276891..94fcd814bea 100644
--- a/be/src/exec/rowid_fetcher.cpp
+++ b/be/src/exec/rowid_fetcher.cpp
@@ -131,6 +131,8 @@ Status RowIDFetcher::_merge_rpc_results(const PMultiGetRequest& request,
     }
     vectorized::DataTypeSerDeSPtrs serdes;
     std::unordered_map<uint32_t, uint32_t> col_uid_to_idx;
+    std::vector<std::string> default_values;
+    default_values.resize(_fetch_option.desc->slots().size());
     auto merge_function = [&](const PMultiGetResponse& resp) {
         Status st(Status::create(resp.status()));
         if (!st.ok()) {
@@ -150,12 +152,13 @@ Status RowIDFetcher::_merge_rpc_results(const PMultiGetRequest& request,
         serdes = vectorized::create_data_type_serdes(_fetch_option.desc->slots());
         for (int i = 0; i < _fetch_option.desc->slots().size(); ++i) {
             col_uid_to_idx[_fetch_option.desc->slots()[i]->col_unique_id()] = i;
+            default_values[i] = _fetch_option.desc->slots()[i]->col_default_value();
         }
     }
     for (int i = 0; i < resp.binary_row_data_size(); ++i) {
         vectorized::JsonbSerializeUtil::jsonb_to_block(
                 serdes, resp.binary_row_data(i).data(), resp.binary_row_data(i).size(),
-                col_uid_to_idx, *output_block);
+                col_uid_to_idx, *output_block, default_values);
     }
     return Status::OK();
 }
diff --git a/be/src/olap/tablet.cpp b/be/src/olap/tablet.cpp
index 2ed3571575b..391e9f2aa14 100644
--- a/be/src/olap/tablet.cpp
+++ b/be/src/olap/tablet.cpp
@@ -2723,14 +2723,18 @@ Status Tablet::fetch_value_through_row_column(RowsetSharedPtr input_rowset, uint
     vectorized::DataTypeSerDeSPtrs serdes;
     serdes.resize(cids.size());
     std::unordered_map<uint32_t, uint32_t> col_uid_to_idx;
+    std::vector<std::string> default_values;
+    default_values.resize(cids.size());
     for (int i = 0; i < cids.size(); ++i) {
         const TabletColumn& column = tablet_schema->column(cids[i]);
         vectorized::DataTypePtr type =
                 vectorized::DataTypeFactory::instance().create_data_type(column);
         col_uid_to_idx[column.unique_id()] = i;
+        default_values[i] = column.default_value();
         serdes[i] = type->get_serde();
     }
-    vectorized::JsonbSerializeUtil::jsonb_to_block(serdes, *string_column, col_uid_to_idx, block);
+    vectorized::JsonbSerializeUtil::jsonb_to_block(serdes, *string_column, col_uid_to_idx, block,
+                                                   default_values);
     return Status::OK();
 }
diff --git a/be/src/runtime/descriptors.cpp b/be/src/runtime/descriptors.cpp
index be582616522..36dde94ca2b 100644
--- 
a/be/src/runtime/descriptors.cpp +++ b/be/src/runtime/descriptors.cpp @@ -63,7 +63,10 @@ SlotDescriptor::SlotDescriptor(const TSlotDescriptor& tdesc) _field_idx(-1), _is_materialized(tdesc.isMaterialized), _is_key(tdesc.is_key), - _need_materialize(tdesc.need_materialize) {} + _need_materialize(tdesc.need_materialize), + _column_paths(tdesc.column_paths), + _is_auto_increment(tdesc.__isset.is_auto_increment ? tdesc.is_auto_increment : false), + _col_default_value(tdesc.__isset.col_default_value ? tdesc.col_default_value : "") {} SlotDescriptor::SlotDescriptor(const PSlotDescriptor& pdesc) : _id(pdesc.id()), diff --git a/be/src/runtime/descriptors.h b/be/src/runtime/descriptors.h index f3c3ea33c4d..1e8f4503f77 100644 --- a/be/src/runtime/descriptors.h +++ b/be/src/runtime/descriptors.h @@ -109,6 +109,8 @@ public: bool is_key() const { return _is_key; } bool need_materialize() const { return _need_materialize; } + const std::string& col_default_value() const { return _col_default_value; } + private: friend class DescriptorTbl; friend class TupleDescriptor; @@ -141,6 +143,10 @@ private: const bool _is_key; const bool _need_materialize; + const std::vector<std::string> _column_paths; + + const bool _is_auto_increment; + const std::string _col_default_value; SlotDescriptor(const TSlotDescriptor& tdesc); SlotDescriptor(const PSlotDescriptor& pdesc); diff --git a/be/src/service/point_query_executor.cpp b/be/src/service/point_query_executor.cpp index dbb77c4a99a..4e965197feb 100644 --- a/be/src/service/point_query_executor.cpp +++ b/be/src/service/point_query_executor.cpp @@ -67,8 +67,11 @@ Status Reusable::init(const TDescriptorTable& t_desc_tbl, const std::vector<TExp RETURN_IF_ERROR(vectorized::VExpr::prepare(_output_exprs_ctxs, _runtime_state.get(), row_desc)); _create_timestamp = butil::gettimeofday_ms(); _data_type_serdes = vectorized::create_data_type_serdes(tuple_desc()->slots()); + _col_default_values.resize(tuple_desc()->slots().size()); for (int i = 0; i < tuple_desc()->slots().size(); ++i) { - _col_uid_to_idx[tuple_desc()->slots()[i]->col_unique_id()] = i; + auto slot = tuple_desc()->slots()[i]; + _col_uid_to_idx[slot->col_unique_id()] = i; + _col_default_values[i] = slot->col_default_value(); } return Status::OK(); } @@ -310,7 +313,7 @@ Status PointQueryExecutor::_lookup_row_data() { _reusable->get_data_type_serdes(), _row_read_ctxs[i]._cached_row_data.data().data, _row_read_ctxs[i]._cached_row_data.data().size, _reusable->get_col_uid_to_idx(), - *_result_block); + *_result_block, _reusable->get_col_default_values()); continue; } if (!_row_read_ctxs[i]._row_location.has_value()) { @@ -325,7 +328,8 @@ Status PointQueryExecutor::_lookup_row_data() { // serilize value to block, currently only jsonb row formt vectorized::JsonbSerializeUtil::jsonb_to_block( _reusable->get_data_type_serdes(), value.data(), value.size(), - _reusable->get_col_uid_to_idx(), *_result_block); + _reusable->get_col_uid_to_idx(), *_result_block, + _reusable->get_col_default_values()); } return Status::OK(); } diff --git a/be/src/service/point_query_executor.h b/be/src/service/point_query_executor.h index 58ddba35c98..768176db20c 100644 --- a/be/src/service/point_query_executor.h +++ b/be/src/service/point_query_executor.h @@ -81,6 +81,8 @@ public: return _col_uid_to_idx; } + const std::vector<std::string>& get_col_default_values() const { return _col_default_values; } + // do not touch block after returned void return_block(std::unique_ptr<vectorized::Block>& block); @@ -101,6 +103,7 @@ private: int64_t 
_create_timestamp = 0; vectorized::DataTypeSerDeSPtrs _data_type_serdes; std::unordered_map<uint32_t, uint32_t> _col_uid_to_idx; + std::vector<std::string> _col_default_values; int64_t _mem_size = 0; }; diff --git a/be/src/vec/jsonb/serialize.cpp b/be/src/vec/jsonb/serialize.cpp index c7b98095bbb..0251dc12974 100644 --- a/be/src/vec/jsonb/serialize.cpp +++ b/be/src/vec/jsonb/serialize.cpp @@ -40,6 +40,7 @@ #include "vec/core/columns_with_type_and_name.h" #include "vec/data_types/data_type.h" #include "vec/data_types/serde/data_type_serde.h" +#include "vec/io/reader_buffer.h" namespace doris::vectorized { @@ -71,10 +72,12 @@ void JsonbSerializeUtil::block_to_jsonb(const TabletSchema& schema, const Block& void JsonbSerializeUtil::jsonb_to_block(const DataTypeSerDeSPtrs& serdes, const ColumnString& jsonb_column, const std::unordered_map<uint32_t, uint32_t>& col_id_to_idx, - Block& dst) { + Block& dst, + const std::vector<std::string>& default_values) { for (int i = 0; i < jsonb_column.size(); ++i) { StringRef jsonb_data = jsonb_column.get_data_at(i); - jsonb_to_block(serdes, jsonb_data.data, jsonb_data.size, col_id_to_idx, dst); + jsonb_to_block(serdes, jsonb_data.data, jsonb_data.size, col_id_to_idx, dst, + default_values); } } @@ -82,7 +85,8 @@ void JsonbSerializeUtil::jsonb_to_block(const DataTypeSerDeSPtrs& serdes, void JsonbSerializeUtil::jsonb_to_block(const DataTypeSerDeSPtrs& serdes, const char* data, size_t size, const std::unordered_map<uint32_t, uint32_t>& col_id_to_idx, - Block& dst) { + Block& dst, + const std::vector<std::string>& default_values) { auto pdoc = JsonbDocument::createDocument(data, size); JsonbDocument& doc = *pdoc; size_t num_rows = dst.rows(); @@ -98,11 +102,19 @@ void JsonbSerializeUtil::jsonb_to_block(const DataTypeSerDeSPtrs& serdes, const } if (filled_columns < dst.columns()) { // fill missing slot - for (auto& column_type_name : dst) { + for (int i = 0; i < dst.columns(); ++i) { + const auto& column_type_name = dst.get_by_position(i); MutableColumnPtr col = column_type_name.column->assume_mutable(); if (col->size() < num_rows + 1) { DCHECK(col->size() == num_rows); - col->insert_default(); + if (default_values[i].empty()) { + col->insert_default(); + } else { + Slice value(default_values[i].data(), default_values[i].size()); + DataTypeSerDe::FormatOptions opt; + opt.converted_from_string = true; + static_cast<void>(serdes[i]->deserialize_one_cell_from_json(*col, value, opt)); + } } DCHECK(col->size() == num_rows + 1); } diff --git a/be/src/vec/jsonb/serialize.h b/be/src/vec/jsonb/serialize.h index 725dbc07072..1442792ebf4 100644 --- a/be/src/vec/jsonb/serialize.h +++ b/be/src/vec/jsonb/serialize.h @@ -43,10 +43,10 @@ public: // batch rows static void jsonb_to_block(const DataTypeSerDeSPtrs& serdes, const ColumnString& jsonb_column, const std::unordered_map<uint32_t, uint32_t>& col_id_to_idx, - Block& dst); + Block& dst, const std::vector<std::string>& default_values); // single row static void jsonb_to_block(const DataTypeSerDeSPtrs& serdes, const char* data, size_t size, const std::unordered_map<uint32_t, uint32_t>& col_id_to_idx, - Block& dst); + Block& dst, const std::vector<std::string>& default_values); }; } // namespace doris::vectorized \ No newline at end of file diff --git a/be/test/vec/jsonb/serialize_test.cpp b/be/test/vec/jsonb/serialize_test.cpp index 5a43f58a0f2..bff79435ad4 100644 --- a/be/test/vec/jsonb/serialize_test.cpp +++ b/be/test/vec/jsonb/serialize_test.cpp @@ -160,15 +160,18 @@ TEST(BlockSerializeTest, Array) { Block new_block = 
block.clone_empty(); std::unordered_map<uint32_t, uint32_t> col_uid_to_idx; + std::vector<std::string> default_values; + default_values.resize(read_desc.slots().size()); for (int i = 0; i < read_desc.slots().size(); ++i) { col_uid_to_idx[read_desc.slots()[i]->col_unique_id()] = i; + default_values[i] = read_desc.slots()[i]->col_default_value(); std::cout << "uid " << read_desc.slots()[i]->col_unique_id() << ":" << i << std::endl; } std::cout << block.dump_data() << std::endl; std::cout << new_block.dump_data() << std::endl; JsonbSerializeUtil::jsonb_to_block(create_data_type_serdes(read_desc.slots()), static_cast<ColumnString&>(*col.get()), col_uid_to_idx, - new_block); + new_block, default_values); std::cout << block.dump_data() << std::endl; std::cout << new_block.dump_data() << std::endl; EXPECT_EQ(block.dump_data(), new_block.dump_data()); @@ -339,12 +342,14 @@ TEST(BlockSerializeTest, JsonbBlock) { } Block new_block = block.clone_empty(); std::unordered_map<uint32_t, uint32_t> col_uid_to_idx; + std::vector<std::string> default_values; + default_values.resize(read_desc.slots().size()); for (int i = 0; i < read_desc.slots().size(); ++i) { col_uid_to_idx[read_desc.slots()[i]->col_unique_id()] = i; } JsonbSerializeUtil::jsonb_to_block(create_data_type_serdes(block.get_data_types()), static_cast<const ColumnString&>(*col.get()), col_uid_to_idx, - new_block); + new_block, default_values); std::cout << block.dump_data() << std::endl; std::cout << new_block.dump_data() << std::endl; EXPECT_EQ(block.dump_data(), new_block.dump_data()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java index 67bbc2b18c2..f53f3106dec 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java @@ -294,6 +294,7 @@ public class SlotDescriptor { LOG.debug("column name:{}, column unique id:{}", column.getName(), column.getUniqueId()); tSlotDescriptor.setColUniqueId(column.getUniqueId()); tSlotDescriptor.setIsKey(column.isKey()); + tSlotDescriptor.setColDefaultValue(column.getDefaultValue()); } return tSlotDescriptor; } diff --git a/gensrc/thrift/Descriptors.thrift b/gensrc/thrift/Descriptors.thrift index 168e448dd86..af2860d4233 100644 --- a/gensrc/thrift/Descriptors.thrift +++ b/gensrc/thrift/Descriptors.thrift @@ -57,6 +57,10 @@ struct TSlotDescriptor { // If set to false, then such slots will be ignored during // materialize them.Used to optmize to read less data and less memory usage 13: optional bool need_materialize = true + 14: optional bool is_auto_increment = false; + // subcolumn path info list for semi structure column(variant) + 15: optional list<string> column_paths + 16: optional string col_default_value } struct TTupleDescriptor { diff --git a/regression-test/data/compaction/test_compaction_uniq_keys_row_store.out b/regression-test/data/compaction/test_compaction_uniq_keys_row_store.out index 633471fdecc..e49aea97d90 100644 --- a/regression-test/data/compaction/test_compaction_uniq_keys_row_store.out +++ b/regression-test/data/compaction/test_compaction_uniq_keys_row_store.out @@ -18,10 +18,10 @@ 3 2017-10-01 2017-10-01 2017-10-01 11:11:11.026 2017-10-01 11:11:11.016000 Beijing 10 1 2020-01-04 00:00:00 2020-01-04 00:00:00 2017-10-01 11:11:11.110 2017-10-01 11:11:11.150111 2020-01-04 00:00:00 1 33 21 -- !point_select -- -3 2017-10-01 2017-10-01 2017-10-01 11:11:11.027 2017-10-01 11:11:11.017000 
Beijing 10 1 \N \N \N \N 2020-01-05 00:00:00 1 34 20 +3 2017-10-01 2017-10-01 2017-10-01 11:11:11.027 2017-10-01 11:11:11.017000 Beijing 10 1 1970-01-01 00:00:00 1970-01-01 00:00:00 1970-01-01 00:00:00.111 1970-01-01 00:00:00.000000 2020-01-05 00:00:00 1 34 20 -- !point_select -- -4 2017-10-01 2017-10-01 2017-10-01 11:11:11.028 2017-10-01 11:11:11.018000 Beijing 10 1 \N \N \N \N 2020-01-05 00:00:00 1 34 20 +4 2017-10-01 2017-10-01 2017-10-01 11:11:11.028 2017-10-01 11:11:11.018000 Beijing 10 1 1970-01-01 00:00:00 1970-01-01 00:00:00 1970-01-01 00:00:00.111 1970-01-01 00:00:00.000000 2020-01-05 00:00:00 1 34 20 -- !point_select -- 1 2017-10-01 2017-10-01 2017-10-01 11:11:11.021 2017-10-01 11:11:11.011000 Beijing 10 1 2020-01-01 00:00:00 2020-01-01 00:00:00 2017-10-01 11:11:11.170 2017-10-01 11:11:11.110111 2020-01-01 00:00:00 1 30 20 @@ -42,8 +42,8 @@ 3 2017-10-01 2017-10-01 2017-10-01 11:11:11.026 2017-10-01 11:11:11.016000 Beijing 10 1 2020-01-04 00:00:00 2020-01-04 00:00:00 2017-10-01 11:11:11.110 2017-10-01 11:11:11.150111 2020-01-04 00:00:00 1 33 21 -- !point_select -- -3 2017-10-01 2017-10-01 2017-10-01 11:11:11.027 2017-10-01 11:11:11.017000 Beijing 10 1 \N \N \N \N 2020-01-05 00:00:00 1 34 20 +3 2017-10-01 2017-10-01 2017-10-01 11:11:11.027 2017-10-01 11:11:11.017000 Beijing 10 1 1970-01-01 00:00:00 1970-01-01 00:00:00 1970-01-01 00:00:00.111 1970-01-01 00:00:00.000000 2020-01-05 00:00:00 1 34 20 -- !point_select -- -4 2017-10-01 2017-10-01 2017-10-01 11:11:11.028 2017-10-01 11:11:11.018000 Beijing 10 1 \N \N \N \N 2020-01-05 00:00:00 1 34 20 +4 2017-10-01 2017-10-01 2017-10-01 11:11:11.028 2017-10-01 11:11:11.018000 Beijing 10 1 1970-01-01 00:00:00 1970-01-01 00:00:00 1970-01-01 00:00:00.111 1970-01-01 00:00:00.000000 2020-01-05 00:00:00 1 34 20 diff --git a/regression-test/data/point_query_p0/test_point_query.out b/regression-test/data/point_query_p0/test_point_query.out index b5a2aaa9cb6..71a6c480d4b 100644 --- a/regression-test/data/point_query_p0/test_point_query.out +++ b/regression-test/data/point_query_p0/test_point_query.out @@ -48,10 +48,10 @@ 1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 -- !point_select -- -1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] \N 5630 0 +1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 0 -- !point_select -- -1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] \N 5630 0 +1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 0 -- !point_select -- 1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 @@ -59,26 +59,29 @@ -- !point_select -- 1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 +-- !point_select -- +1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 0 + -- !sql -- -1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N \N +1231 119291.110000000 ddd laooq \N 
2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N 0 0 -- !sql -- -1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] \N +1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] 0 0 -- !sql -- 6120202020646464 6C616F6F71 32.92200050354004 -- !sql -- -1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N \N +1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N 0 0 -- !sql -- -1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] \N +1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] 0 0 -- !sql -- -1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N \N +1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N 0 0 -- !sql -- -1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] \N +1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] 0 0 -- !sql -- 0 1 2 3 @@ -132,10 +135,10 @@ 1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 -- !point_select -- -1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] \N 5630 0 +1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 0 -- !point_select -- -1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] \N 5630 0 +1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 0 -- !point_select -- 1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 @@ -143,26 +146,29 @@ -- !point_select -- 1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 +-- !point_select -- +1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 0 + -- !sql -- -1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N \N +1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N 0 0 -- !sql -- -1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 
3.300000000, 4.400000000, 5.500000000] [] \N +1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] 0 0 -- !sql -- 6120202020646464 6C616F6F71 32.92200050354004 -- !sql -- -1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N \N +1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N 0 0 -- !sql -- -1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] \N +1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] 0 0 -- !sql -- -1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N \N +1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N 0 0 -- !sql -- -1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] \N +1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] 0 0 -- !sql -- 0 1 2 3 @@ -216,10 +222,10 @@ 1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 -- !point_select -- -1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] \N 5630 0 +1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 0 -- !point_select -- -1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] \N 5630 0 +1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 0 -- !point_select -- 1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 @@ -227,26 +233,29 @@ -- !point_select -- 1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 +-- !point_select -- +1235 120939.111300000 a ddd xxxxxx 2030-01-02 2020-01-01 12:36:38 22.822 7022-01-01 false 1929111.111 [119291.192910000] ["111", "222", "333"] 2 0 + -- !sql -- -1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N \N +1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N 0 0 -- !sql -- -1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] \N +1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] 0 0 -- !sql -- 
6120202020646464 6C616F6F71 32.92200050354004 -- !sql -- -1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N \N +1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N 0 0 -- !sql -- -1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] \N +1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] 0 0 -- !sql -- -1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N \N +1231 119291.110000000 ddd laooq \N 2020-01-01T12:36:38 \N 1022-01-01 \N 1.111 [119181.111100000, 819019.119100000, NULL] \N 0 0 -- !sql -- -1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] \N +1237 120939.111300000 a ddd laooq 2030-01-02 2020-01-01T12:36:38 22.822 7022-01-01 false 90696620686827832.374 [1.100000000, 2.200000000, 3.300000000, 4.400000000, 5.500000000] [] 0 0 -- !sql -- 0 1 2 3 diff --git a/regression-test/data/point_query_p0/test_rowstore.out b/regression-test/data/point_query_p0/test_rowstore.out new file mode 100644 index 00000000000..b73b91ff8ca --- /dev/null +++ b/regression-test/data/point_query_p0/test_rowstore.out @@ -0,0 +1,7 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql -- +1 abc + +-- !sql -- +1 abc 123 + diff --git a/regression-test/suites/point_query_p0/test_point_query.groovy b/regression-test/suites/point_query_p0/test_point_query.groovy index 09825d5d6fd..1bc9fb8dcc8 100644 --- a/regression-test/suites/point_query_p0/test_point_query.groovy +++ b/regression-test/suites/point_query_p0/test_point_query.groovy @@ -201,10 +201,10 @@ suite("test_point_query") { qe_point_select stmt qe_point_select stmt - // sql """ - // ALTER table ${tableName} ADD COLUMN new_column1 INT default "0"; - // """ - // qe_point_select stmt + sql """ + ALTER table ${tableName} ADD COLUMN new_column1 INT default "0"; + """ + qe_point_select stmt } // disable useServerPrepStmts def result2 = connect(user=user, password=password, url=context.config.jdbcUrl) { diff --git a/regression-test/suites/point_query_p0/test_rowstore.groovy b/regression-test/suites/point_query_p0/test_rowstore.groovy new file mode 100644 index 00000000000..e1284931045 --- /dev/null +++ b/regression-test/suites/point_query_p0/test_rowstore.groovy @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_rowstore", "p0") {
+    def tableName = "rs_query"
+    sql """DROP TABLE IF EXISTS ${tableName}"""
+    sql """
+        CREATE TABLE IF NOT EXISTS ${tableName} (
+            `k1` int(11) NULL COMMENT "",
+            `k2` text NULL COMMENT "",
+        ) ENGINE=OLAP
+        UNIQUE KEY(`k1`)
+        DISTRIBUTED BY HASH(`k1`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "store_row_column" = "true",
+            "enable_unique_key_merge_on_write" = "true",
+            "light_schema_change" = "true",
+            "storage_format" = "V2"
+        )
+    """
+
+    sql "set experimental_enable_nereids_planner = false"
+    sql """insert into ${tableName} values (1, 'abc')"""
+    explain {
+        sql("select * from ${tableName}")
+        contains "OPT TWO PHASE"
+    }
+    qt_sql """select * from ${tableName}"""
+
+    sql """
+        ALTER table ${tableName} ADD COLUMN new_column1 INT default "123";
+    """
+    qt_sql """select * from ${tableName} where k1 = 1"""
+}
\ No newline at end of file

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
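
For readers skimming the patch, the core behavioral change lives in JsonbSerializeUtil::jsonb_to_block (be/src/vec/jsonb/serialize.cpp): a destination column that is absent from the serialized row-store value is now filled from the column's Frontend-declared default value when one exists, and only falls back to the type default otherwise. Below is a minimal, self-contained C++ sketch of that fallback logic; Column, fill_row, and the string-per-cell representation are hypothetical stand-ins, not Doris' real Block/serde API.

// Hypothetical stand-ins for Doris' Block/serde machinery: a "column" is just a
// vector of strings, and every cell is kept in its serialized (string) form.
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

struct Column {
    std::vector<std::string> data;
    void insert_default() { data.emplace_back(""); }            // type default (old behavior for every missing column)
    void insert_value(const std::string& v) { data.push_back(v); }
};

// Fill one decoded row into the destination columns. `row` maps column unique id
// -> serialized cell; a column absent from the row falls back to the Frontend
// default value when one is declared, otherwise to the type default.
void fill_row(const std::unordered_map<uint32_t, std::string>& row,
              const std::unordered_map<uint32_t, uint32_t>& col_uid_to_idx,
              const std::vector<std::string>& default_values,
              std::vector<Column>& dst) {
    std::vector<bool> filled(dst.size(), false);
    for (const auto& [uid, cell] : row) {
        auto it = col_uid_to_idx.find(uid);
        if (it == col_uid_to_idx.end()) {
            continue;  // column not in the read schema, skip it
        }
        dst[it->second].insert_value(cell);
        filled[it->second] = true;
    }
    for (size_t i = 0; i < dst.size(); ++i) {
        if (filled[i]) {
            continue;
        }
        if (default_values[i].empty()) {
            dst[i].insert_default();                 // no declared default: keep the old behavior
        } else {
            dst[i].insert_value(default_values[i]);  // real default from the Frontend schema
        }
    }
}

int main() {
    // A row written before `new_column1 INT DEFAULT "123"` existed: uid 2 is missing.
    std::unordered_map<uint32_t, std::string> row = {{0, "1"}, {1, "abc"}};
    std::unordered_map<uint32_t, uint32_t> col_uid_to_idx = {{0, 0}, {1, 1}, {2, 2}};
    std::vector<std::string> default_values = {"", "", "123"};
    std::vector<Column> dst(3);
    fill_row(row, col_uid_to_idx, default_values, dst);
    std::cout << dst[0].data[0] << " " << dst[1].data[0] << " " << dst[2].data[0] << "\n";  // prints: 1 abc 123
    return 0;
}

Run against a row written before the ALTER that adds new_column1, the sketch prints "1 abc 123", which matches the expected output added in regression-test/data/point_query_p0/test_rowstore.out.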