github-actions[bot] commented on code in PR #34929:
URL: https://github.com/apache/doris/pull/34929#discussion_r1616536374
##########
be/src/vec/exec/format/table/iceberg/types.h:
##########
@@ -0,0 +1,403 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <iostream>
+#include <map>
+#include <optional>
+#include <regex>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "common/exception.h"
+
+namespace doris {
+namespace iceberg {

Review Comment:
warning: nested namespaces can be concatenated [modernize-concat-nested-namespaces]

```suggestion
namespace doris::iceberg {
```

be/src/vec/exec/format/table/iceberg/types.h:401:
```diff
- } // namespace iceberg
- } // namespace doris
+ } // namespace doris
```



##########
be/src/vec/exec/format/table/iceberg/types.cpp:
##########
@@ -0,0 +1,194 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "types.h"
+
+#include <optional>
+
+namespace doris {
+namespace iceberg {

Review Comment:
warning: nested namespaces can be concatenated [modernize-concat-nested-namespaces]

```suggestion
namespace doris::iceberg {
```

be/src/vec/exec/format/table/iceberg/types.cpp:192:
```diff
- } // namespace iceberg
- } // namespace doris
+ } // namespace doris
```
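Both comments above apply the same C++17 rewrite. As a minimal, self-contained sketch (placeholder names only, not Doris code), the two spellings below declare the same namespace, which is also why the fix can drop one closing brace at the end of each file:

```cpp
// nested_namespaces.cpp - toy illustration of the
// modernize-concat-nested-namespaces rewrite. Build with -std=c++17.

// Pre-C++17 spelling, the form clang-tidy flags:
namespace doris {
namespace iceberg {
inline constexpr int kToyFieldId = 1; // placeholder, not a real Doris symbol
} // namespace iceberg
} // namespace doris

// C++17 concatenated spelling, the form the suggestion produces:
namespace doris::iceberg {
inline constexpr int kToyFieldId2 = 1; // placeholder as well
} // namespace doris::iceberg

int main() {
    // Both blocks populate the same namespace, so the rewrite is purely syntactic.
    return doris::iceberg::kToyFieldId == doris::iceberg::kToyFieldId2 ? 0 : 1;
}
```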
##########
be/src/vec/sink/writer/iceberg/viceberg_table_writer.cpp:
##########
@@ -0,0 +1,450 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "viceberg_table_writer.h"
+
+#include "runtime/runtime_state.h"
+#include "vec/core/block.h"
+#include "vec/core/column_with_type_and_name.h"
+#include "vec/exec/format/table/iceberg/partition_spec_parser.h"
+#include "vec/exec/format/table/iceberg/schema_parser.h"
+#include "vec/exprs/vexpr.h"
+#include "vec/exprs/vexpr_context.h"
+#include "vec/exprs/vslot_ref.h"
+#include "vec/sink/writer/iceberg/partition_transformers.h"
+#include "vec/sink/writer/iceberg/viceberg_partition_writer.h"
+#include "vec/sink/writer/vhive_utils.h"
+
+namespace doris {
+namespace vectorized {
+
+VIcebergTableWriter::VIcebergTableWriter(const TDataSink& t_sink,
+                                         const VExprContextSPtrs& output_expr_ctxs)
+        : AsyncResultWriter(output_expr_ctxs), _t_sink(t_sink) {
+    DCHECK(_t_sink.__isset.iceberg_table_sink);
+}
+
+Status VIcebergTableWriter::init_properties(ObjectPool* pool) {
+    return Status::OK();
+}
+
+Status VIcebergTableWriter::open(RuntimeState* state, RuntimeProfile* profile) {
+    _state = state;
+    _profile = profile;
+
+    // add all counter
+    _written_rows_counter = ADD_COUNTER(_profile, "WrittenRows", TUnit::UNIT);
+    _send_data_timer = ADD_TIMER(_profile, "SendDataTime");
+    _partition_writers_dispatch_timer =
+            ADD_CHILD_TIMER(_profile, "PartitionsDispatchTime", "SendDataTime");
+    _partition_writers_write_timer =
+            ADD_CHILD_TIMER(_profile, "PartitionsWriteTime", "SendDataTime");
+    _partition_writers_count = ADD_COUNTER(_profile, "PartitionsWriteCount", TUnit::UNIT);
+    _open_timer = ADD_TIMER(_profile, "OpenTime");
+    _close_timer = ADD_TIMER(_profile, "CloseTime");
+    _write_file_counter = ADD_COUNTER(_profile, "WriteFileCount", TUnit::UNIT);
+
+    SCOPED_TIMER(_open_timer);
+    try {
+        _schema = iceberg::SchemaParser::from_json(_t_sink.iceberg_table_sink.schema_json);
+        std::string partition_spec_json =
+                _t_sink.iceberg_table_sink
+                        .partition_specs_json[_t_sink.iceberg_table_sink.partition_spec_id];
+        if (!partition_spec_json.empty()) {
+            _partition_spec = iceberg::PartitionSpecParser::from_json(_schema, partition_spec_json);
+            _iceberg_partition_columns = _to_iceberg_partition_columns();
+        }
+    } catch (doris::Exception& e) {
+        return e.to_status();
+    }
+
+    std::set<int> partition_idx_set;
+    for (const auto& iceberg_partition_column : _iceberg_partition_columns) {
+        partition_idx_set.insert(iceberg_partition_column.source_idx());
+    }
+
+    for (int i = 0; i < _schema->columns().size(); ++i) {
+        _write_output_vexpr_ctxs.emplace_back(_vec_output_expr_ctxs[i]);
+    }
+
+    return Status::OK();
+}
+
+std::vector<VIcebergTableWriter::IcebergPartitionColumn>
+VIcebergTableWriter::_to_iceberg_partition_columns() {
+    std::vector<IcebergPartitionColumn> partition_columns;
+
+    std::unordered_map<int, int> id_to_column_idx;
+    id_to_column_idx.reserve(_schema->columns().size());
+    for (int i = 0; i < _schema->columns().size(); i++) {
+        id_to_column_idx[_schema->columns()[i].field_id()] = i;
+    }
+    for (const auto& partition_field : _partition_spec->fields()) {
+        int column_idx = id_to_column_idx[partition_field.source_id()];
+        std::unique_ptr<PartitionColumnTransform> partition_column_transform =
+                PartitionColumnTransforms::create(
+                        partition_field, _vec_output_expr_ctxs[column_idx]->root()->type());
+        partition_columns.emplace_back(partition_field,
+                                       _vec_output_expr_ctxs[column_idx]->root()->type(),
+                                       column_idx, std::move(partition_column_transform));
+    }
+    return partition_columns;
+}
+
+Status VIcebergTableWriter::write(vectorized::Block& block) {
+    SCOPED_RAW_TIMER(&_send_data_ns);
+    std::unordered_map<std::shared_ptr<VIcebergPartitionWriter>, IColumn::Filter> writer_positions;
+    _row_count += block.rows();
+
+    if (_iceberg_partition_columns.empty()) {
+        std::shared_ptr<VIcebergPartitionWriter> writer;
+        {
+            SCOPED_RAW_TIMER(&_partition_writers_dispatch_ns);
+            auto writer_iter = _partitions_to_writers.find("");
+            if (writer_iter == _partitions_to_writers.end()) {
+                try {
+                    writer = _create_partition_writer(block, -1);
+                } catch (doris::Exception& e) {
+                    return e.to_status();
+                }
+                _partitions_to_writers.insert({"", writer});
+                RETURN_IF_ERROR(writer->open(_state, _profile));
+            } else {
+                if (writer_iter->second->written_len() > config::iceberg_sink_max_file_size) {
+                    std::string file_name(writer_iter->second->file_name());
+                    int file_name_index = writer_iter->second->file_name_index();
+                    {
+                        SCOPED_RAW_TIMER(&_close_ns);
+                        static_cast<void>(writer_iter->second->close(Status::OK()));
+                    }
+                    _partitions_to_writers.erase(writer_iter);
+                    try {
+                        writer = _create_partition_writer(block, -1, &file_name,
+                                                          file_name_index + 1);
+                    } catch (doris::Exception& e) {
+                        return e.to_status();
+                    }
+                    _partitions_to_writers.insert({"", writer});
+                    RETURN_IF_ERROR(writer->open(_state, _profile));
+                } else {
+                    writer = writer_iter->second;
+                }
+            }
+        }
+        SCOPED_RAW_TIMER(&_partition_writers_write_ns);
+        RETURN_IF_ERROR(writer->write(block));
+        return Status::OK();
+    }
+
+    {
+        SCOPED_RAW_TIMER(&_partition_writers_dispatch_ns);
+        _transformed_block.reserve(_iceberg_partition_columns.size());
+        for (auto& iceberg_partition_columns : _iceberg_partition_columns) {
+            DCHECK(_vec_output_expr_ctxs[iceberg_partition_columns.source_idx()]
+                           ->root()
+                           ->is_slot_ref());
+            int partition_column_pos_in_block =
+                    ((vectorized::VSlotRef*)
+                             _vec_output_expr_ctxs[iceberg_partition_columns.source_idx()]
+                                     ->root()
+                                     .get())
+                            ->column_id();
+            _transformed_block.insert(iceberg_partition_columns.partition_column_transform().apply(
+                    block, partition_column_pos_in_block));
+        }
+        for (int i = 0; i < block.rows(); ++i) {
+            std::optional<PartitionData> partition_data;
+            try {
+                partition_data = _get_partition_data(_transformed_block, i);
+            } catch (doris::Exception& e) {
+                return e.to_status();
+            }
+            std::string partition_name;
+            DCHECK(partition_data.has_value());
+            try {
+                partition_name = _partition_to_path(partition_data.value());
+            } catch (doris::Exception& e) {
+                return e.to_status();
+            }
+            auto create_and_open_writer =
+                    [&](const std::string& partition_name, int position,
+                        const std::string* file_name, int file_name_index,
+                        std::shared_ptr<VIcebergPartitionWriter>& writer_ptr) -> Status {
+                try {
+                    auto writer =
+                            _create_partition_writer(block, position, file_name, file_name_index);
+                    RETURN_IF_ERROR(writer->open(_state, _profile));
+                    IColumn::Filter filter(block.rows(), 0);
+                    filter[position] = 1;
+                    writer_positions.insert({writer, std::move(filter)});
+                    _partitions_to_writers.insert({partition_name, writer});
+                    writer_ptr = writer;
+                } catch (doris::Exception& e) {
+                    return e.to_status();
+                }
+                return Status::OK();
+            };
+
+            auto writer_iter = _partitions_to_writers.find(partition_name);
+            if (writer_iter == _partitions_to_writers.end()) {
+                std::shared_ptr<VIcebergPartitionWriter> writer;
+                if (_partitions_to_writers.size() + 1 >
+                    config::table_sink_partition_write_max_partition_nums_per_writer) {
+                    return Status::InternalError(
+                            "Too many open partitions {}",
+                            config::table_sink_partition_write_max_partition_nums_per_writer);
+                }
+                RETURN_IF_ERROR(create_and_open_writer(partition_name, i, nullptr, 0, writer));
+            } else {
+                std::shared_ptr<VIcebergPartitionWriter> writer;
+                if (writer_iter->second->written_len() > config::iceberg_sink_max_file_size) {
+                    std::string file_name(writer_iter->second->file_name());
+                    int file_name_index = writer_iter->second->file_name_index();
+                    {
+                        SCOPED_RAW_TIMER(&_close_ns);
+                        static_cast<void>(writer_iter->second->close(Status::OK()));
+                    }
+                    writer_positions.erase(writer_iter->second);
+                    _partitions_to_writers.erase(writer_iter);
+                    RETURN_IF_ERROR(create_and_open_writer(partition_name, i, &file_name,
+                                                           file_name_index + 1, writer));
+                } else {
+                    writer = writer_iter->second;
+                }
+                auto writer_pos_iter = writer_positions.find(writer);
+                if (writer_pos_iter == writer_positions.end()) {
+                    IColumn::Filter filter(block.rows(), 0);
+                    filter[i] = 1;
+                    writer_positions.insert({writer, std::move(filter)});
+                } else {
+                    writer_pos_iter->second[i] = 1;
+                }
+            }
+        }
+    }
+    SCOPED_RAW_TIMER(&_partition_writers_write_ns);
+    for (auto it = writer_positions.begin(); it != writer_positions.end(); ++it) {
+        RETURN_IF_ERROR(it->first->write(block, &it->second));
+    }
+    return Status::OK();
+}
+
+Status VIcebergTableWriter::close(Status status) {
+    int64_t partitions_to_writers_size = _partitions_to_writers.size();
+    {
+        SCOPED_RAW_TIMER(&_close_ns);
+        for (const auto& pair : _partitions_to_writers) {
+            Status st = pair.second->close(status);
+            if (st != Status::OK()) {
+                LOG(WARNING) << fmt::format("partition writer close failed for partition {}",
+                                            st.to_string());
+                continue;
+            }
+        }
+        _partitions_to_writers.clear();
+    }
+    if (status.ok()) {
+        SCOPED_TIMER(_profile->total_time_counter());
+
+        COUNTER_SET(_written_rows_counter, static_cast<int64_t>(_row_count));
+        COUNTER_SET(_send_data_timer, _send_data_ns);
+        COUNTER_SET(_partition_writers_dispatch_timer, _partition_writers_dispatch_ns);
+        COUNTER_SET(_partition_writers_write_timer, _partition_writers_write_ns);
+        COUNTER_SET(_partition_writers_count, partitions_to_writers_size);
+        COUNTER_SET(_close_timer, _close_ns);
+        COUNTER_SET(_write_file_counter, _write_file_count);
+    }
+    return Status::OK();
+}
+
+std::string VIcebergTableWriter::_partition_to_path(const doris::iceberg::StructLike& data) {
+    std::stringstream ss;
+    for (size_t i = 0; i < _iceberg_partition_columns.size(); i++) {
+        auto& iceberg_partition_column = _iceberg_partition_columns[i];
+        TypeDescriptor result_type =
+                iceberg_partition_column.partition_column_transform().get_result_type();
+        std::string value_string =
+                iceberg_partition_column.partition_column_transform().to_human_string(result_type,
+                                                                                      data.get(i));
+        if (i > 0) {
+            ss << "/";
+        }
+        ss << _escape(iceberg_partition_column.field().name()) << '=' << _escape(value_string);
+    }
+
+    return ss.str();
+}
+
+std::string VIcebergTableWriter::_escape(const std::string& path) {
+    return VHiveUtils::escape_path_name(path);
+}
+
+std::vector<std::string> VIcebergTableWriter::_partition_values(
+        const doris::iceberg::StructLike& data) {
+    std::vector<std::string> partition_values;
+    partition_values.reserve(_iceberg_partition_columns.size());
+    for (size_t i = 0; i < _iceberg_partition_columns.size(); i++) {
+        auto& iceberg_partition_column = _iceberg_partition_columns[i];
+        TypeDescriptor result_type =
+                iceberg_partition_column.partition_column_transform().get_result_type();
+        partition_values.emplace_back(
+                iceberg_partition_column.partition_column_transform().to_human_string(result_type,
+                                                                                      data.get(i)));
+    }
+
+    return partition_values;
+}
+
+std::shared_ptr<VIcebergPartitionWriter> VIcebergTableWriter::_create_partition_writer(
+        vectorized::Block& block, int position, const std::string* file_name, int file_name_index) {
+    auto& iceberg_table_sink = _t_sink.iceberg_table_sink;
+    std::optional<PartitionData> partition_data;
+    partition_data = _get_partition_data(_transformed_block, position);
+    std::string partition_path;
+    std::vector<std::string> partition_values;
+    if (partition_data.has_value()) {
+        partition_path = _partition_to_path(partition_data.value());
+        partition_values = _partition_values(partition_data.value());
+    }
+    const std::string& output_path = iceberg_table_sink.output_path;
+
+    auto write_path = fmt::format("{}/{}", output_path, partition_path);
+    auto original_write_path =
+            fmt::format("{}/{}", iceberg_table_sink.original_output_path, partition_path);
+    auto target_path = fmt::format("{}/{}", output_path, partition_path);
+
+    VIcebergPartitionWriter::WriteInfo write_info = {
+            std::move(write_path), std::move(original_write_path), std::move(target_path),
+            iceberg_table_sink.file_type};
+
+    _write_file_count++;
+    return std::make_shared<VIcebergPartitionWriter>(
+            _t_sink, std::move(partition_values), _vec_output_expr_ctxs, _write_output_vexpr_ctxs,
+            _non_write_columns_indices, *_schema, std::move(write_info),
+            (file_name == nullptr) ? _compute_file_name() : *file_name, file_name_index,
+            iceberg_table_sink.file_format, iceberg_table_sink.compression_type,
+            iceberg_table_sink.hadoop_config);
+}
+
+std::optional<PartitionData> VIcebergTableWriter::_get_partition_data(
+        vectorized::Block& transformed_block, int position) {
+    if (_iceberg_partition_columns.empty()) {
+        return std::nullopt;
+    }
+
+    std::vector<std::any> values;
+    values.reserve(_iceberg_partition_columns.size());
+    int column_idx = 0;
+    for (auto& iceberg_partition_column : _iceberg_partition_columns) {
+        const vectorized::ColumnWithTypeAndName& partition_column =
+                transformed_block.get_by_position(column_idx);
+        const TypeDescriptor& result_type =
+                iceberg_partition_column.partition_column_transform().get_result_type();
+        auto value = _get_iceberg_partition_value(result_type, partition_column, position);
+        values.emplace_back(value);
+        ++column_idx;
+    }
+    return PartitionData(std::move(values));
+}
+
+std::any VIcebergTableWriter::_get_iceberg_partition_value(
+        const TypeDescriptor& type_desc, const ColumnWithTypeAndName& partition_column,
+        int position) {
+    ColumnPtr column;
+    if (auto* nullable_column = check_and_get_column<ColumnNullable>(*partition_column.column)) {
+        auto* __restrict null_map_data = nullable_column->get_null_map_data().data();
+        if (null_map_data[position]) {
+            return std::any();

Review Comment:
warning: avoid repeating the return type from the declaration; use a braced initializer list instead [modernize-return-braced-init-list]

```suggestion
            return {};
```
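For context on the modernize-return-braced-init-list fix: when the return type is already named in the declaration, `return {};` value-initializes it, so repeating `std::any()` adds nothing. A small sketch with hypothetical names (not the PR's code):

```cpp
// braced_return.cpp - toy illustration of modernize-return-braced-init-list.
// Build with -std=c++17. value_or_null() is a made-up stand-in for the
// flagged pattern of returning an empty std::any for a NULL partition value.
#include <any>

std::any value_or_null(bool is_null, int value) {
    if (is_null) {
        return {}; // value-initializes the declared return type: an empty std::any
    }
    return value; // the int is converted to std::any on return
}

int main() {
    bool empty_is_empty = !value_or_null(true, 42).has_value();
    bool present_has_value = value_or_null(false, 42).has_value();
    return (empty_is_empty && present_has_value) ? 0 : 1;
}
```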
##########
be/src/vec/runtime/vorc_transformer.cpp:
##########
@@ -159,80 +169,115 @@ Status VOrcTransformer::open() {
     return Status::OK();
 }
 
-std::unique_ptr<orc::Type> VOrcTransformer::_build_orc_type(const TypeDescriptor& type_descriptor) {
-    std::pair<Status, std::unique_ptr<orc::Type>> result;
+std::unique_ptr<orc::Type> VOrcTransformer::_build_orc_type(

Review Comment:
warning: function '_build_orc_type' exceeds recommended size/complexity thresholds [readability-function-size]

```cpp
std::unique_ptr<orc::Type> VOrcTransformer::_build_orc_type(
^
```

<details>
<summary>Additional context</summary>

**be/src/vec/runtime/vorc_transformer.cpp:171:** 108 lines including whitespace and comments (threshold 80)

```cpp
std::unique_ptr<orc::Type> VOrcTransformer::_build_orc_type(
^
```

</details>
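readability-function-size only measures size and complexity, so the usual remedy is mechanical: carve the oversized type-mapping function into per-category helpers behind a small dispatcher. The sketch below shows that shape under invented types; none of these names are the real `_build_orc_type` or the ORC C++ API:

```cpp
// split_mapping.cpp - sketch of shrinking an oversized type-mapping function.
// ToyKind, ToyType, map_scalar, map_nested, and map_type are all hypothetical.
#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

enum class ToyKind { INT, STRING, ARRAY, STRUCT };

struct ToyType {
    ToyKind kind;
    std::vector<ToyType> children; // populated for ARRAY and STRUCT
};

std::string map_type(const ToyType& t); // dispatcher, defined last

// Helper for leaf types keeps each switch short.
std::string map_scalar(const ToyType& t) {
    switch (t.kind) {
    case ToyKind::INT:
        return "int";
    case ToyKind::STRING:
        return "string";
    default:
        throw std::invalid_argument("not a scalar type");
    }
}

// Helper for container types recurses through the dispatcher.
std::string map_nested(const ToyType& t) {
    if (t.kind == ToyKind::ARRAY) {
        return "array<" + map_type(t.children.at(0)) + ">";
    }
    std::string out = "struct<";
    for (std::size_t i = 0; i < t.children.size(); ++i) {
        out += (i ? "," : "") + map_type(t.children[i]);
    }
    return out + ">";
}

// Each function now stays well under the 80-line threshold.
std::string map_type(const ToyType& t) {
    switch (t.kind) {
    case ToyKind::INT:
    case ToyKind::STRING:
        return map_scalar(t);
    default:
        return map_nested(t);
    }
}

int main() {
    ToyType arr {ToyKind::ARRAY, {{ToyKind::INT, {}}}};
    return map_type(arr) == "array<int>" ? 0 : 1;
}
```

Splitting by category also keeps future additions (a new scalar kind, a new container) localized to a single helper.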
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org