This is an automated email from the ASF dual-hosted git repository.

mrhhsg pushed a commit to branch spill_and_reserve
in repository https://gitbox.apache.org/repos/asf/doris.git
commit 736dd5580ed71a6ff653ce35365973360fd92369
Author: Jerry Hu <mrh...@gmail.com>
AuthorDate: Wed Sep 18 11:45:37 2024 +0800

    fix build error
---
 be/src/clucene                                        |  2 +-
 be/src/pipeline/exec/operator.h                       | 14 +++++++-------
 .../exec/partitioned_hash_join_probe_operator.cpp     |  8 ++++----
 be/src/pipeline/exec/set_sink_operator.cpp            |  6 ------
 be/src/pipeline/pipeline_fragment_context.h           |  4 ++--
 be/src/vec/core/block.cpp                             | 17 ++++++++---------
 6 files changed, 22 insertions(+), 29 deletions(-)

diff --git a/be/src/clucene b/be/src/clucene
index fdbf2204031..c5d02a7e411 160000
--- a/be/src/clucene
+++ b/be/src/clucene
@@ -1 +1 @@
-Subproject commit fdbf2204031128b2bd8505fc73c06403b7c1a815
+Subproject commit c5d02a7e41194b02444be6d684e3aeb4ff1b5595
diff --git a/be/src/pipeline/exec/operator.h b/be/src/pipeline/exec/operator.h
index 6bda3ac0733..17edfc668f5 100644
--- a/be/src/pipeline/exec/operator.h
+++ b/be/src/pipeline/exec/operator.h
@@ -723,12 +723,12 @@ public:
     }
 
     size_t revocable_mem_size(RuntimeState* state) const override {
-        return (_child_x and !is_source()) ? _child_x->revocable_mem_size(state) : 0;
+        return (_child and !is_source()) ? _child->revocable_mem_size(state) : 0;
     }
 
     Status revoke_memory(RuntimeState* state) override {
-        if (_child_x and !is_source()) {
-            return _child_x->revoke_memory(state);
+        if (_child and !is_source()) {
+            return _child->revoke_memory(state);
         }
         return Status::OK();
     }
@@ -852,8 +852,8 @@ public:
         if (estimated_size < state->minimum_operator_memory_required_bytes()) {
             estimated_size = state->minimum_operator_memory_required_bytes();
         }
-        if (!is_source() && _child_x) {
-            auto child_reserve_size = _child_x->get_reserve_mem_size(state);
+        if (!is_source() && _child) {
+            auto child_reserve_size = _child->get_reserve_mem_size(state);
             estimated_size += std::max(state->minimum_operator_memory_required_bytes(),
                                        child_reserve_size);
         }
@@ -864,8 +864,8 @@ public:
         auto& local_state = get_local_state(state);
         local_state.reset_estimate_memory_usage();
 
-        if (!is_source() && _child_x) {
-            _child_x->reset_reserve_mem_size(state);
+        if (!is_source() && _child) {
+            _child->reset_reserve_mem_size(state);
         }
     }
 };
diff --git a/be/src/pipeline/exec/partitioned_hash_join_probe_operator.cpp b/be/src/pipeline/exec/partitioned_hash_join_probe_operator.cpp
index 49e170fe78f..c7be1c61d14 100644
--- a/be/src/pipeline/exec/partitioned_hash_join_probe_operator.cpp
+++ b/be/src/pipeline/exec/partitioned_hash_join_probe_operator.cpp
@@ -765,8 +765,8 @@ size_t PartitionedHashJoinProbeOperatorX::revocable_mem_size(RuntimeState* state
     }
 
     auto revocable_size = _revocable_mem_size(state, true);
-    if (_child_x) {
-        revocable_size += _child_x->revocable_mem_size(state);
+    if (_child) {
+        revocable_size += _child->revocable_mem_size(state);
     }
     return revocable_size;
 }
@@ -808,8 +808,8 @@ Status PartitionedHashJoinProbeOperatorX::revoke_memory(RuntimeState* state) {
 
     RETURN_IF_ERROR(local_state.spill_probe_blocks(state, true));
 
-    if (_child_x) {
-        return _child_x->revoke_memory(state);
+    if (_child) {
+        return _child->revoke_memory(state);
     }
     return Status::OK();
 }
diff --git a/be/src/pipeline/exec/set_sink_operator.cpp b/be/src/pipeline/exec/set_sink_operator.cpp
index e96340741e5..e15cecd22ed 100644
--- a/be/src/pipeline/exec/set_sink_operator.cpp
+++ b/be/src/pipeline/exec/set_sink_operator.cpp
@@ -227,12 +227,6 @@ size_t SetSinkOperatorX<is_intersect>::get_reserve_mem_size(RuntimeState* state)
     return size_to_reserve;
 }
 
-template <bool is_intersect>
-Status SetSinkOperatorX<is_intersect>::prepare(RuntimeState* state) {
-    RETURN_IF_ERROR(Base::prepare(state));
-    return vectorized::VExpr::prepare(_child_exprs, state, _child_x->row_desc());
-}
-
 template <bool is_intersect>
 Status SetSinkOperatorX<is_intersect>::open(RuntimeState* state) {
     RETURN_IF_ERROR(Base::open(state));
diff --git a/be/src/pipeline/pipeline_fragment_context.h b/be/src/pipeline/pipeline_fragment_context.h
index c0924be38b6..1050b3eea66 100644
--- a/be/src/pipeline/pipeline_fragment_context.h
+++ b/be/src/pipeline/pipeline_fragment_context.h
@@ -71,8 +71,8 @@ public:
 
     void print_profile(const std::string& extra_info);
 
-    std::vector<std::shared_ptr<TRuntimeProfileTree>> collect_realtime_profile_x() const;
-    std::shared_ptr<TRuntimeProfileTree> collect_realtime_load_channel_profile_x() const;
+    std::vector<std::shared_ptr<TRuntimeProfileTree>> collect_realtime_profile() const;
+    std::shared_ptr<TRuntimeProfileTree> collect_realtime_load_channel_profile() const;
 
     bool is_timeout(timespec now) const;
diff --git a/be/src/vec/core/block.cpp b/be/src/vec/core/block.cpp
index 6b6dfb483ce..d4644fca489 100644
--- a/be/src/vec/core/block.cpp
+++ b/be/src/vec/core/block.cpp
@@ -88,7 +88,7 @@ Status Block::deserialize(const PBlock& pblock) {
     RETURN_IF_ERROR(BeExecVersionManager::check_be_exec_version(be_exec_version));
 
     const char* buf = nullptr;
-    faststring compression_scratch;
+    std::string compression_scratch;
     if (pblock.compressed()) {
         // Decompress
         SCOPED_RAW_TIMER(&_decompress_time_ns);
@@ -111,11 +111,11 @@ Status Block::deserialize(const PBlock& pblock) {
             DCHECK(success) << "snappy::GetUncompressedLength failed";
             compression_scratch.resize(uncompressed_size);
             success = snappy::RawUncompress(compressed_data, compressed_size,
-                                            reinterpret_cast<char*>(compression_scratch.data()));
+                                            compression_scratch.data());
             DCHECK(success) << "snappy::RawUncompress failed";
         }
         _decompressed_bytes = uncompressed_size;
-        buf = reinterpret_cast<char*>(compression_scratch.data());
+        buf = compression_scratch.data();
     } else {
         buf = pblock.column_values().data();
     }
@@ -925,7 +925,7 @@ Status Block::serialize(int be_exec_version, PBlock* pblock,
 
     // serialize data values
     // when data type is HLL, content_uncompressed_size maybe larger than real size.
-    faststring column_values;
+    std::string column_values;
     try {
         // TODO: After support c++23, we should use resize_and_overwrite to replace resize
         column_values.resize(content_uncompressed_size);
@@ -935,14 +935,13 @@ Status Block::serialize(int be_exec_version, PBlock* pblock,
         LOG(WARNING) << msg;
         return Status::BufferAllocFailed(msg);
     }
-    char* buf = reinterpret_cast<char*>(column_values.data());
+    char* buf = column_values.data();
     for (const auto& c : *this) {
         buf = c.type->serialize(*(c.column), buf, pblock->be_exec_version());
     }
     *uncompressed_bytes = content_uncompressed_size;
-    const size_t serialize_bytes =
-            buf - reinterpret_cast<char*>(column_values.data()) + STREAMVBYTE_PADDING;
+    const size_t serialize_bytes = buf - column_values.data() + STREAMVBYTE_PADDING;
     *compressed_bytes = serialize_bytes;
     column_values.resize(serialize_bytes);
 
@@ -965,13 +964,13 @@ Status Block::serialize(int be_exec_version, PBlock* pblock,
             pblock->set_compressed(true);
             *compressed_bytes = compressed_size;
         } else {
-            pblock->set_column_values(column_values.data(), column_values.size());
+            pblock->set_column_values(std::move(column_values));
         }
         VLOG_ROW << "uncompressed size: " << content_uncompressed_size
                  << ", compressed size: " << compressed_size;
     } else {
-        pblock->set_column_values(column_values.data(), column_values.size());
+        pblock->set_column_values(std::move(column_values));
     }
     if (!allow_transfer_large_data && *compressed_bytes >= std::numeric_limits<int32_t>::max()) {
         return Status::InternalError("The block is large than 2GB({}), can not send by Protobuf.",

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org