This is an automated email from the ASF dual-hosted git repository.

zhangstar333 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push: new e62d19d90d0 [improve](partition) support auto list partition with more columns (#27817) e62d19d90d0 is described below commit e62d19d90d05498c3ee7e1bdc139e46ce38fe972 Author: zhangstar333 <87313068+zhangstar...@users.noreply.github.com> AuthorDate: Mon Dec 4 11:33:18 2023 +0800 [improve](partition) support auto list partition with more columns (#27817) before the partition by column only have one column. now remove those limit, could have more columns. --- be/src/exec/tablet_info.cpp | 24 +++-- be/src/exec/tablet_info.h | 15 ++- be/src/vec/sink/vrow_distribution.cpp | 87 ++++++++++------ be/src/vec/sink/vrow_distribution.h | 27 ++--- .../org/apache/doris/analysis/PartitionDesc.java | 7 +- .../apache/doris/analysis/PartitionExprUtil.java | 34 ++++-- .../apache/doris/service/FrontendServiceImpl.java | 10 +- .../test_auto_partition_behavior.out | 115 +++++++++++---------- .../auto_partition/test_auto_list_partition.groovy | 40 +++++++ .../test_auto_partition_behavior.groovy | 14 +-- 10 files changed, 228 insertions(+), 145 deletions(-) diff --git a/be/src/exec/tablet_info.cpp b/be/src/exec/tablet_info.cpp index 90d43462581..5d89385a119 100644 --- a/be/src/exec/tablet_info.cpp +++ b/be/src/exec/tablet_info.cpp @@ -19,6 +19,7 @@ #include <gen_cpp/Descriptors_types.h> #include <gen_cpp/Exprs_types.h> +#include <gen_cpp/Partitions_types.h> #include <gen_cpp/Types_types.h> #include <gen_cpp/descriptors.pb.h> #include <glog/logging.h> @@ -286,14 +287,23 @@ VOlapTablePartitionParam::VOlapTablePartitionParam(std::shared_ptr<OlapTableSche } if (t_param.__isset.enable_automatic_partition && t_param.enable_automatic_partition) { - _is_auto_partiton = true; - Status st = vectorized::VExpr::create_expr_tree(t_param.partition_function_exprs[0], - _part_func_ctx); - if (!st.ok()) { - throw Exception(Status::InternalError("Partition function expr is not valid"), - "Partition function expr is not valid"); + _is_auto_partition = true; + auto size = t_param.partition_function_exprs.size(); + _part_func_ctx.resize(size); + _partition_function.resize(size); + DCHECK((t_param.partition_type == TPartitionType::RANGE_PARTITIONED && size == 1) || + (t_param.partition_type == TPartitionType::LIST_PARTITIONED && size >= 1)) + << "now support only 1 partition column for auto range partitions. " + << t_param.partition_type << " " << size; + for (int i = 0; i < size; ++i) { + Status st = vectorized::VExpr::create_expr_tree(t_param.partition_function_exprs[i], + _part_func_ctx[i]); + if (!st.ok()) { + throw Exception(Status::InternalError("Partition function expr is not valid"), + "Partition function expr is not valid"); + } + _partition_function[i] = _part_func_ctx[i]->root(); } - _partition_function = _part_func_ctx->root(); } } diff --git a/be/src/exec/tablet_info.h b/be/src/exec/tablet_info.h index 2e5f40bec35..7e4a764d6a1 100644 --- a/be/src/exec/tablet_info.h +++ b/be/src/exec/tablet_info.h @@ -241,16 +241,15 @@ public: const std::vector<VOlapTablePartition*>& get_partitions() const { return _partitions; } // it's same with auto now because we only support transformed partition in auto partition. 
may expand in future - bool is_projection_partition() const { return _is_auto_partiton; } - bool is_auto_partition() const { return _is_auto_partiton; } + bool is_projection_partition() const { return _is_auto_partition; } + bool is_auto_partition() const { return _is_auto_partition; } std::vector<uint16_t> get_partition_keys() const { return _partition_slot_locs; } Status add_partitions(const std::vector<TOlapTablePartition>& partitions); - //TODO: use vector when we support multi partition column for auto-partition - vectorized::VExprContextSPtr get_part_func_ctx() { return _part_func_ctx; } - vectorized::VExprSPtr get_partition_function() { return _partition_function; } + vectorized::VExprContextSPtrs get_part_func_ctx() { return _part_func_ctx; } + vectorized::VExprSPtrs get_partition_function() { return _partition_function; } // which will affect _partition_block Status generate_partition_from(const TOlapTablePartition& t_part, @@ -293,9 +292,9 @@ private: VOlapTablePartition* _default_partition = nullptr; // for auto partition, now only support 1 column. TODO: use vector to save them when we support multi column auto-partition. - bool _is_auto_partiton = false; - vectorized::VExprContextSPtr _part_func_ctx = nullptr; - vectorized::VExprSPtr _partition_function = nullptr; + bool _is_auto_partition = false; + vectorized::VExprContextSPtrs _part_func_ctx = {nullptr}; + vectorized::VExprSPtrs _partition_function = {nullptr}; TPartitionType::type _part_type; // support list or range }; diff --git a/be/src/vec/sink/vrow_distribution.cpp b/be/src/vec/sink/vrow_distribution.cpp index 74561594cfe..c7a9532b17f 100644 --- a/be/src/vec/sink/vrow_distribution.cpp +++ b/be/src/vec/sink/vrow_distribution.cpp @@ -19,6 +19,7 @@ #include <gen_cpp/FrontendService.h> #include <gen_cpp/FrontendService_types.h> +#include <glog/logging.h> #include "common/status.h" #include "runtime/client_cache.h" @@ -29,28 +30,31 @@ #include "vec/columns/column_const.h" #include "vec/columns/column_nullable.h" #include "vec/columns/column_vector.h" +#include "vec/data_types/data_type.h" #include "vec/sink/writer/vtablet_writer.h" namespace doris::vectorized { -std::pair<vectorized::VExprContextSPtr, vectorized::VExprSPtr> +std::pair<vectorized::VExprContextSPtrs, vectorized::VExprSPtrs> VRowDistribution::_get_partition_function() { return {_vpartition->get_part_func_ctx(), _vpartition->get_partition_function()}; } -Status VRowDistribution::_save_missing_values(vectorized::ColumnPtr col, - vectorized::DataTypePtr value_type, Block* block, +Status VRowDistribution::_save_missing_values(std::vector<std::vector<std::string>>& col_strs, + int col_size, Block* block, std::vector<int64_t> filter) { // de-duplication for new partitions but save all rows. 
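Stepping back to the tablet_info.cpp / tablet_info.h hunks above: the constructor now builds one expr context per partition function and the old single-expr members become parallel vectors, with a DCHECK that RANGE auto partition still carries exactly one function while LIST may carry one or more. A stand-alone sketch of that counting rule, with an illustrative enum in place of the real thrift TPartitionType:

    #include <cassert>
    #include <cstddef>

    // Illustrative stand-in for the thrift TPartitionType values used above.
    enum class PartitionType { RANGE_PARTITIONED, LIST_PARTITIONED };

    // RANGE auto partition: exactly one partition function.
    // LIST auto partition:  one or more partition functions.
    bool partition_expr_count_ok(PartitionType type, std::size_t expr_count) {
        return (type == PartitionType::RANGE_PARTITIONED && expr_count == 1) ||
               (type == PartitionType::LIST_PARTITIONED && expr_count >= 1);
    }

    int main() {
        assert(partition_expr_count_ok(PartitionType::LIST_PARTITIONED, 3));
        assert(partition_expr_count_ok(PartitionType::RANGE_PARTITIONED, 1));
        assert(!partition_expr_count_ok(PartitionType::RANGE_PARTITIONED, 2));
        return 0;
    }

The DCHECK message still reads "now support only 1 partition column for auto range partitions", which is exactly the RANGE side of this rule.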
_batching_block->add_rows(block, filter); - for (auto row : filter) { - auto val_str = value_type->to_string(*col, row); - if (!_deduper.contains(val_str)) { - _deduper.emplace(val_str); + std::vector<TStringLiteral> cur_row_values; + for (int row = 0; row < col_strs[0].size(); ++row) { + cur_row_values.clear(); + for (int col = 0; col < col_size; ++col) { TStringLiteral node; - node.value = std::move(val_str); - _partitions_need_create.emplace_back(std::vector {node}); // only 1 partition column now + node.value = std::move(col_strs[col][row]); + cur_row_values.push_back(node); } + //For duplicate cur_values, they will be filtered in FE + _partitions_need_create.emplace_back(cur_row_values); } // to avoid too large mem use @@ -63,7 +67,6 @@ Status VRowDistribution::_save_missing_values(vectorized::ColumnPtr col, void VRowDistribution::clear_batching_stats() { _partitions_need_create.clear(); - _deduper.clear(); _batching_rows = 0; _batching_bytes = 0; } @@ -192,7 +195,7 @@ Status VRowDistribution::_filter_block(vectorized::Block* block, return Status::OK(); } -Status VRowDistribution::_generate_rows_distribution_for_non_auto_parititon( +Status VRowDistribution::_generate_rows_distribution_for_non_auto_partition( vectorized::Block* block, bool has_filtered_rows, std::vector<RowPartTabletIds>& row_part_tablet_ids) { auto num_rows = block->rows(); @@ -209,14 +212,14 @@ Status VRowDistribution::_generate_rows_distribution_for_non_auto_parititon( return Status::OK(); } -Status VRowDistribution::_generate_rows_distribution_for_auto_parititon( - vectorized::Block* block, int partition_col_idx, bool has_filtered_rows, - std::vector<RowPartTabletIds>& row_part_tablet_ids, int64_t& rows_stat_val) { +Status VRowDistribution::_generate_rows_distribution_for_auto_partition( + vectorized::Block* block, const std::vector<uint16_t>& partition_cols_idx, + bool has_filtered_rows, std::vector<RowPartTabletIds>& row_part_tablet_ids, + int64_t& rows_stat_val) { auto num_rows = block->rows(); std::vector<uint16_t> partition_keys = _vpartition->get_partition_keys(); //TODO: use loop to create missing_vals for multi column. 
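In the _save_missing_values hunk above, the BE now receives the stringified partition values column-major (col_strs[col][row]) and pushes one tuple of TStringLiteral per missing row, dropping the old _deduper set because duplicate tuples are now filtered on the FE. A small stand-alone sketch of that column-major to row-tuple transposition, with plain std::string standing in for TStringLiteral:

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    // col_strs is column-major (col_strs[col][row]), as built by the caller;
    // the output is one value tuple per missing row. Duplicate tuples are
    // kept on purpose -- deduplication now happens on the FE.
    std::vector<std::vector<std::string>> to_row_tuples(
            const std::vector<std::vector<std::string>>& col_strs) {
        std::vector<std::vector<std::string>> rows;
        if (col_strs.empty()) return rows;
        for (std::size_t row = 0; row < col_strs[0].size(); ++row) {
            std::vector<std::string> tuple;
            for (const auto& col : col_strs) {
                tuple.push_back(col[row]);
            }
            rows.push_back(std::move(tuple));
        }
        return rows;
    }

    int main() {
        // Two partition columns, two missing rows: (1, "a") and (2, "b").
        auto rows = to_row_tuples({{"1", "2"}, {"a", "b"}});
        assert(rows.size() == 2 && rows[0][1] == "a" && rows[1][0] == "2");
        return 0;
    }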
- CHECK(partition_keys.size() == 1) << "now support only 1 partition column for auto partitions."; auto partition_col = block->get_by_position(partition_keys[0]); _missing_map.clear(); _missing_map.reserve(partition_col.column->size()); @@ -236,18 +239,29 @@ Status VRowDistribution::_generate_rows_distribution_for_auto_parititon( if (!_missing_map.empty()) { // for missing partition keys, calc the missing partition and save in _partitions_need_create - auto [part_ctx, part_func] = _get_partition_function(); - auto return_type = part_func->data_type(); - // expose the data column - vectorized::ColumnPtr range_left_col = block->get_by_position(partition_col_idx).column; - if (const auto* nullable = - check_and_get_column<vectorized::ColumnNullable>(*range_left_col)) { - range_left_col = nullable->get_nested_column_ptr(); - return_type = assert_cast<const vectorized::DataTypeNullable*>(return_type.get()) - ->get_nested_type(); + auto [part_ctxs, part_funcs] = _get_partition_function(); + auto funcs_size = part_funcs.size(); + std::vector<std::vector<std::string>> col_strs; + col_strs.resize(funcs_size); + + for (int i = 0; i < funcs_size; ++i) { + auto return_type = part_funcs[i]->data_type(); + // expose the data column + vectorized::ColumnPtr range_left_col = + block->get_by_position(partition_cols_idx[i]).column; + if (const auto* nullable = + check_and_get_column<vectorized::ColumnNullable>(*range_left_col)) { + range_left_col = nullable->get_nested_column_ptr(); + return_type = assert_cast<const vectorized::DataTypeNullable*>(return_type.get()) + ->get_nested_type(); + } + for (auto row : _missing_map) { + col_strs[i].push_back(return_type->to_string(*range_left_col, row)); + } } + // calc the end value and save them. in the end of sending, we will create partitions for them and deal them. - RETURN_IF_ERROR(_save_missing_values(range_left_col, return_type, block, _missing_map)); + RETURN_IF_ERROR(_save_missing_values(col_strs, funcs_size, block, _missing_map)); size_t new_bt_rows = _batching_block->rows(); size_t new_bt_bytes = _batching_block->bytes(); @@ -307,23 +321,28 @@ Status VRowDistribution::generate_rows_distribution( _tablet_indexes.assign(num_rows, 0); // if there's projection of partition calc, we need to calc it first. - auto [part_ctx, part_func] = _get_partition_function(); - int partition_col_idx = -1; + auto [part_ctxs, part_funcs] = _get_partition_function(); + std::vector<uint16_t> partition_cols_idx; if (_vpartition->is_projection_partition()) { // calc the start value of missing partition ranges. - // in VNodeChannel's add_block. the spare columns will be erased. - RETURN_IF_ERROR(part_func->execute(part_ctx.get(), block.get(), &partition_col_idx)); - VLOG_DEBUG << "Partition-calculated block:" << block->dump_data(); + auto func_size = part_funcs.size(); + for (int i = 0; i < func_size; ++i) { + int result_idx = -1; + RETURN_IF_ERROR(part_funcs[i]->execute(part_ctxs[i].get(), block.get(), &result_idx)); + VLOG_DEBUG << "Partition-calculated block:" << block->dump_data(); + partition_cols_idx.push_back(result_idx); + } + // change the column to compare to transformed. 
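The hunk above fills col_strs by executing each partition function, peeling a nullable wrapper off its result column when present, and stringifying the value at every missing row index. A rough stand-alone analogue, with std::optional standing in for ColumnNullable and a plain list of missing row indexes:

    #include <cassert>
    #include <cstddef>
    #include <optional>
    #include <string>
    #include <vector>

    // std::optional<int> stands in for a nullable column of ints; the real
    // code peels vectorized::ColumnNullable down to its nested column first.
    std::vector<std::string> stringify_missing(
            const std::vector<std::optional<int>>& col,
            const std::vector<std::size_t>& missing_rows) {
        std::vector<std::string> out;
        for (std::size_t row : missing_rows) {
            // How a null value should name a partition is out of scope here;
            // this sketch just prints "NULL" for it.
            out.push_back(col[row].has_value() ? std::to_string(*col[row]) : "NULL");
        }
        return out;
    }

    int main() {
        std::vector<std::optional<int>> col = {10, std::nullopt, 30};
        auto strs = stringify_missing(col, {0, 2});
        assert(strs.size() == 2 && strs[0] == "10" && strs[1] == "30");
        return 0;
    }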
- _vpartition->set_transformed_slots({(uint16_t)partition_col_idx}); + _vpartition->set_transformed_slots(partition_cols_idx); } if (_vpartition->is_auto_partition() && !_deal_batched) { - RETURN_IF_ERROR(_generate_rows_distribution_for_auto_parititon( - block.get(), partition_col_idx, has_filtered_rows, row_part_tablet_ids, + RETURN_IF_ERROR(_generate_rows_distribution_for_auto_partition( + block.get(), partition_cols_idx, has_filtered_rows, row_part_tablet_ids, rows_stat_val)); } else { // not auto partition - RETURN_IF_ERROR(_generate_rows_distribution_for_non_auto_parititon( + RETURN_IF_ERROR(_generate_rows_distribution_for_non_auto_partition( block.get(), has_filtered_rows, row_part_tablet_ids)); } _row_distribution_watch.stop(); diff --git a/be/src/vec/sink/vrow_distribution.h b/be/src/vec/sink/vrow_distribution.h index 158a13abaaa..c2c202f5933 100644 --- a/be/src/vec/sink/vrow_distribution.h +++ b/be/src/vec/sink/vrow_distribution.h @@ -96,11 +96,13 @@ public: Status open(RowDescriptor* output_row_desc) { if (_vpartition->is_auto_partition()) { - auto [part_ctx, part_func] = _get_partition_function(); - RETURN_IF_ERROR(part_ctx->prepare(_state, *output_row_desc)); - RETURN_IF_ERROR(part_ctx->open(_state)); + auto [part_ctxs, part_funcs] = _get_partition_function(); + for (auto part_ctx : part_ctxs) { + RETURN_IF_ERROR(part_ctx->prepare(_state, *output_row_desc)); + RETURN_IF_ERROR(part_ctx->open(_state)); + } } - for (auto& index : _schema->indexes()) { + for (const auto& index : _schema->indexes()) { auto& where_clause = index->where_clause; if (where_clause != nullptr) { RETURN_IF_ERROR(where_clause->prepare(_state, *output_row_desc)); @@ -125,9 +127,9 @@ public: void clear_batching_stats(); private: - std::pair<vectorized::VExprContextSPtr, vectorized::VExprSPtr> _get_partition_function(); + std::pair<vectorized::VExprContextSPtrs, vectorized::VExprSPtrs> _get_partition_function(); - Status _save_missing_values(vectorized::ColumnPtr col, vectorized::DataTypePtr value_type, + Status _save_missing_values(std::vector<std::vector<std::string>>& col_strs, int col_size, Block* block, std::vector<int64_t> filter); void _get_tablet_ids(vectorized::Block* block, int32_t index_idx, @@ -142,11 +144,12 @@ private: Status _filter_block(vectorized::Block* block, std::vector<RowPartTabletIds>& row_part_tablet_ids); - Status _generate_rows_distribution_for_auto_parititon( - vectorized::Block* block, int partition_col_idx, bool has_filtered_rows, - std::vector<RowPartTabletIds>& row_part_tablet_ids, int64_t& rows_stat_val); + Status _generate_rows_distribution_for_auto_partition( + vectorized::Block* block, const std::vector<uint16_t>& partition_col_idx, + bool has_filtered_rows, std::vector<RowPartTabletIds>& row_part_tablet_ids, + int64_t& rows_stat_val); - Status _generate_rows_distribution_for_non_auto_parititon( + Status _generate_rows_distribution_for_non_auto_partition( vectorized::Block* block, bool has_filtered_rows, std::vector<RowPartTabletIds>& row_part_tablet_ids); @@ -157,12 +160,10 @@ private: int _batch_size = 0; // for auto partitions - std::vector<std::vector<TStringLiteral>> - _partitions_need_create; // support only one partition column now + std::vector<std::vector<TStringLiteral>> _partitions_need_create; std::unique_ptr<MutableBlock> _batching_block; bool _deal_batched = false; // If true, send batched block before any block's append. 
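The vrow_distribution.h change to open() above prepares and opens every per-column expr context instead of a single one, short-circuiting on the first failing Status. A minimal sketch of that loop shape with stand-in Status and context types (not the real doris::Status or RETURN_IF_ERROR machinery):

    #include <string>
    #include <vector>

    // Stand-in Status/context types; the real code uses doris::Status and
    // vectorized::VExprContext with RETURN_IF_ERROR.
    struct Status {
        std::string msg; // empty means OK
        bool ok() const { return msg.empty(); }
    };

    struct FakePartCtx {
        Status prepare() { return {}; }
        Status open() { return {}; }
    };

    // Prepare and open every per-column expr context, stopping at the first
    // failure -- the shape of the new loop in open().
    Status open_all(std::vector<FakePartCtx>& ctxs) {
        for (auto& ctx : ctxs) {
            if (Status st = ctx.prepare(); !st.ok()) return st;
            if (Status st = ctx.open(); !st.ok()) return st;
        }
        return {};
    }

    int main() {
        std::vector<FakePartCtx> ctxs(3);
        return open_all(ctxs).ok() ? 0 : 1;
    }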
size_t _batching_rows = 0, _batching_bytes = 0; - std::set<std::string> _deduper; MonotonicStopWatch _row_distribution_watch; OlapTableBlockConvertor* _block_convertor = nullptr; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionDesc.java index 0d365d9c2b7..fc572cb443d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionDesc.java @@ -128,13 +128,12 @@ public class PartitionDesc { + expr.toSql()); } } else if (expr instanceof SlotRef) { - if (colNames.isEmpty()) { - colNames.add(((SlotRef) expr).getColumnName()); - } else { + if (!colNames.isEmpty() && !isListPartition) { throw new AnalysisException( - "auto create partition only support one slotRef in expr. " + "auto create partition only support one slotRef in expr of RANGE partition. " + expr.toSql()); } + colNames.add(((SlotRef) expr).getColumnName()); } else { if (!isListPartition) { throw new AnalysisException( diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionExprUtil.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionExprUtil.java index a0f59f3fcc3..0f7e1f92347 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionExprUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionExprUtil.java @@ -34,6 +34,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; public class PartitionExprUtil { @@ -114,7 +115,7 @@ public class PartitionExprUtil { } public static Map<String, AddPartitionClause> getAddPartitionClauseFromPartitionValues(OlapTable olapTable, - ArrayList<TStringLiteral> partitionValues, PartitionInfo partitionInfo) + ArrayList<List<TStringLiteral>> partitionValues, PartitionInfo partitionInfo) throws AnalysisException { Map<String, AddPartitionClause> result = Maps.newHashMap(); ArrayList<Expr> partitionExprs = partitionInfo.getPartitionExprs(); @@ -124,16 +125,22 @@ public class PartitionExprUtil { FunctionIntervalInfo intervalInfo = getFunctionIntervalInfo(partitionExprs, partitionType); Set<String> filterPartitionValues = new HashSet<String>(); - for (TStringLiteral partitionValue : partitionValues) { + for (List<TStringLiteral> partitionValueList : partitionValues) { PartitionKeyDesc partitionKeyDesc = null; String partitionName = "p"; - String value = partitionValue.value; - if (filterPartitionValues.contains(value)) { + ArrayList<String> curPartitionValues = new ArrayList<>(); + for (TStringLiteral tStringLiteral : partitionValueList) { + curPartitionValues.add(tStringLiteral.value); + } + String filterStr = curPartitionValues.stream() + .map(s -> s + s.length()) // Concatenate each string with its length + .reduce("", (s1, s2) -> s1 + s2); + if (filterPartitionValues.contains(filterStr)) { continue; } - filterPartitionValues.add(value); + filterPartitionValues.add(filterStr); if (partitionType == PartitionType.RANGE) { - String beginTime = value; + String beginTime = curPartitionValues.get(0); // have check range type size must be 1 DateLiteral beginDateTime = new DateLiteral(beginTime, partitionColumnType); partitionName += String.format(DATETIME_NAME_FORMATTER, beginDateTime.getYear(), beginDateTime.getMonth(), beginDateTime.getDay(), @@ -142,14 +149,19 @@ public class PartitionExprUtil { partitionKeyDesc = createPartitionKeyDescWithRange(beginDateTime, 
endDateTime, partitionColumnType); } else if (partitionType == PartitionType.LIST) { List<List<PartitionValue>> listValues = new ArrayList<>(); - String pointValue = value; - PartitionValue lowerValue = new PartitionValue(pointValue); - listValues.add(Collections.singletonList(lowerValue)); + List<PartitionValue> inValues = new ArrayList<>(); + for (String value : curPartitionValues) { + inValues.add(new PartitionValue(value)); + } + listValues.add(inValues); partitionKeyDesc = PartitionKeyDesc.createIn( listValues); - partitionName += getFormatPartitionValue(lowerValue.getStringValue()); + partitionName += getFormatPartitionValue(filterStr); if (partitionColumnType.isStringType()) { - partitionName += "_" + System.currentTimeMillis(); + if (partitionName.length() > 50) { + partitionName = partitionName.substring(40) + Objects.hash(partitionName) + + "_" + System.currentTimeMillis(); + } } } else { throw new AnalysisException("now only support range and list partition"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java index 6ff03f6acb2..c813ef8ca56 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java @@ -42,6 +42,7 @@ import org.apache.doris.catalog.MaterializedIndex; import org.apache.doris.catalog.OlapTable; import org.apache.doris.catalog.Partition; import org.apache.doris.catalog.PartitionInfo; +import org.apache.doris.catalog.PartitionType; import org.apache.doris.catalog.Replica; import org.apache.doris.catalog.Table; import org.apache.doris.catalog.TableIf; @@ -3152,15 +3153,16 @@ public class FrontendServiceImpl implements FrontendService.Iface { OlapTable olapTable = (OlapTable) table; PartitionInfo partitionInfo = olapTable.getPartitionInfo(); - ArrayList<TStringLiteral> partitionValues = new ArrayList<TStringLiteral>(); + ArrayList<List<TStringLiteral>> partitionValues = new ArrayList<>(); for (int i = 0; i < request.partitionValues.size(); i++) { - if (request.partitionValues.get(i).size() != 1) { + if (partitionInfo.getType() == PartitionType.RANGE && request.partitionValues.get(i).size() != 1) { errorStatus.setErrorMsgs( - Lists.newArrayList("Only support single partition, partitionValues size should equal 1.")); + Lists.newArrayList( + "Only support single partition of RANGE, partitionValues size should equal 1.")); result.setStatus(errorStatus); return result; } - partitionValues.add(request.partitionValues.get(i).get(0)); + partitionValues.add(request.partitionValues.get(i)); } Map<String, AddPartitionClause> addPartitionClauseMap; try { diff --git a/regression-test/data/partition_p0/auto_partition/test_auto_partition_behavior.out b/regression-test/data/partition_p0/auto_partition/test_auto_partition_behavior.out index 53cd65f5d8c..ac0ac36cee2 100644 --- a/regression-test/data/partition_p0/auto_partition/test_auto_partition_behavior.out +++ b/regression-test/data/partition_p0/auto_partition/test_auto_partition_behavior.out @@ -1,32 +1,33 @@ -- This file is automatically generated. You should know what you did if you want to edit this -- !sql1 -- - - - ! - ! -Xxx -xxX + 1 + 2 + ! 3 + ! 4 +Xxx 3 +xxX 3 -- !sql2 -- - - - ! - ! -Xxx -xxX + 1 + 2 + ! 3 + ! 4 +Xxx 3 +xxX 3 -- !sql3 -- - - ! - ! - - -- -- - --- -Xxx -xxX + 2 + ! 3 + ! 
4 + - 3 +- 1 +- - 3 +-- 2 +Xxx 3 +xxX 3 -- !sql4 -- + 2 -- !sql5 -- 1 @@ -49,46 +50,46 @@ Xxx xxX -- !sql2 -- - - - - - ! - ! - ! - ! -Xxx -Xxx -xxX -xxX + 1 + 1 + 2 + 2 + ! 3 + ! 3 + ! 4 + ! 4 +Xxx 3 +Xxx 3 +xxX 3 +xxX 3 -- !sql3 -- - - - ! - ! - ! - ! - - -- -- - --- -Xxx -Xxx -xxX -xxX + 2 + 2 + ! 3 + ! 3 + ! 4 + ! 4 + - 3 +- 1 +- - 3 +-- 2 +Xxx 3 +Xxx 3 +xxX 3 +xxX 3 -- !sql4 -- - ! - ! - - -- -- - --- -Xxx -Xxx -xxX -xxX + ! 4 + ! 4 + - 3 +- 1 +- - 3 +-- 2 +Xxx 3 +Xxx 3 +xxX 3 +xxX 3 -- !sql1 -- 2009-12-12T00:00 2020-12-12T00:00 diff --git a/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy b/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy index 6e420d6e9cc..9705e74ac90 100644 --- a/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy +++ b/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy @@ -230,4 +230,44 @@ suite("test_auto_list_partition") { def result10 = sql "show partitions from test_tinyint" logger.info("${result10}") assertEquals(result10.size(), 2) + sql "drop table if exists test_list_many_column" + sql """ + CREATE TABLE test_list_many_column ( + id int not null, + k largeint not null + ) + AUTO PARTITION BY LIST (`id`, `k`) + ( + ) + DISTRIBUTED BY HASH(`k`) BUCKETS 16 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + """ + sql " insert into test_list_many_column values (1,1), (-1,-1);" + sql " insert into test_list_many_column values (1,3), (-1,-7);" + result11 = sql "show partitions from test_list_many_column" + logger.info("${result11}") + assertEquals(result11.size(), 4) + + sql "drop table if exists test_list_many_column2" + sql """ + CREATE TABLE test_list_many_column2 ( + id int not null, + k largeint not null, + str varchar not null, + ) + AUTO PARTITION BY LIST (`id`, `k`, `str`) + ( + ) + DISTRIBUTED BY HASH(`k`) BUCKETS 16 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + """ + sql """ insert into test_list_many_column2 values (1,1,"asd"), (-1,-1,"vdf");""" + sql """ insert into test_list_many_column2 values (2,2,"xxx"), (-3,-3,"qwe");""" + result12 = sql "show partitions from test_list_many_column2" + logger.info("${result12}") + assertEquals(result12.size(), 4) } diff --git a/regression-test/suites/partition_p0/auto_partition/test_auto_partition_behavior.groovy b/regression-test/suites/partition_p0/auto_partition/test_auto_partition_behavior.groovy index 6407f9afeca..6875dd840c4 100644 --- a/regression-test/suites/partition_p0/auto_partition/test_auto_partition_behavior.groovy +++ b/regression-test/suites/partition_p0/auto_partition/test_auto_partition_behavior.groovy @@ -35,11 +35,11 @@ suite("test_auto_partition_behavior") { """ // special characters sql """ insert into unique_table values (" "), (" "), ("Xxx"), ("xxX"), (" ! "), (" ! ") """ - qt_sql1 """ select * from unique_table order by `str` """ + qt_sql1 """ select *,length(str) from unique_table order by `str` """ def result = sql "show partitions from unique_table" assertEquals(result.size(), 6) sql """ insert into unique_table values (" "), (" "), ("Xxx"), ("xxX"), (" ! "), (" ! 
") """ - qt_sql2 """ select * from unique_table order by `str` """ + qt_sql2 """ select *,length(str) from unique_table order by `str` """ result = sql "show partitions from unique_table" assertEquals(result.size(), 6) sql """ insert into unique_table values ("-"), ("--"), ("- -"), (" - ") """ @@ -51,10 +51,10 @@ suite("test_auto_partition_behavior") { sql """ alter table unique_table drop partition ${partition1_name} """ // partition ' ' result = sql "show partitions from unique_table" assertEquals(result.size(), 9) - qt_sql3 """ select * from unique_table order by `str` """ + qt_sql3 """ select *,length(str) from unique_table order by `str` """ // modify value sql """ update unique_table set str = "modified" where str in (" ", " ") """ // only " " - qt_sql4 """ select * from unique_table where str = ' ' order by `str` """ // modified + qt_sql4 """ select *,length(str) from unique_table where str = ' ' order by `str` """ // modified qt_sql5 """ select count() from unique_table where str = 'modified' """ // crop qt_sql6 """ select * from unique_table where ((str > ' ! ' || str = 'modified') && str != 'Xxx') order by str """ @@ -83,7 +83,7 @@ suite("test_auto_partition_behavior") { result = sql "show partitions from dup_table" assertEquals(result.size(), 6) sql """ insert into dup_table values (" "), (" "), ("Xxx"), ("xxX"), (" ! "), (" ! ") """ - qt_sql2 """ select * from dup_table order by `str` """ + qt_sql2 """ select *,length(str) from dup_table order by `str` """ result = sql "show partitions from dup_table" assertEquals(result.size(), 6) sql """ insert into dup_table values ("-"), ("--"), ("- -"), (" - ") """ @@ -95,9 +95,9 @@ suite("test_auto_partition_behavior") { sql """ alter table dup_table drop partition ${partition1_name} """ result = sql "show partitions from dup_table" assertEquals(result.size(), 9) - qt_sql3 """ select * from dup_table order by `str` """ + qt_sql3 """ select *,length(str) from dup_table order by `str` """ // crop - qt_sql4 """ select * from dup_table where str > ' ! ' order by str """ + qt_sql4 """ select * ,length(str) from dup_table where str > ' ! ' order by str """ /// agg key table sql "drop table if exists agg_dt6" --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org For additional commands, e-mail: commits-h...@doris.apache.org