This is an automated email from the ASF dual-hosted git repository.

liaoxin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new df14c2d765f [chore](compile) add compile check for load (#50354)
df14c2d765f is described below

commit df14c2d765fb985080b38319c3b02154a5f20393
Author: Xin Liao <liao...@selectdb.com>
AuthorDate: Thu Apr 24 14:22:43 2025 +0800

    [chore](compile) add compile check for load (#50354)
---
 be/src/cloud/cloud_rowset_builder.cpp              |  4 ++-
 be/src/cloud/cloud_tablets_channel.cpp             |  4 ++-
 be/src/olap/delta_writer.cpp                       |  4 ++-
 be/src/olap/delta_writer_v2.cpp                    |  7 ++--
 be/src/olap/memtable.cpp                           | 31 ++++++++----------
 be/src/olap/memtable.h                             |  4 +--
 be/src/olap/push_handler.cpp                       | 38 ++++++++++++----------
 be/src/olap/push_handler.h                         |  2 +-
 be/src/olap/rowset/segcompaction.cpp               |  4 ++-
 be/src/runtime/load_stream.cpp                     | 12 ++++---
 be/src/runtime/load_stream_writer.cpp              | 30 ++++++++++-------
 be/src/runtime/stream_load/stream_load_context.cpp | 21 +++++++-----
 12 files changed, 91 insertions(+), 70 deletions(-)
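
Every file in this change follows the same pattern: the code inside namespace doris is wrapped between "common/compile_check_begin.h" and "common/compile_check_end.h", and the implicit narrowing or sign conversions that the stricter checking flags are either made explicit with static_cast or avoided by widening the variable (e.g. int -> int64_t). The contents of the two headers are not shown in this diff; the sketch below is only an illustration, assuming a Clang-pragma based begin/end pair, of how such scoped checking forces the kind of casts seen throughout this patch.

    // Illustrative sketch only -- not the actual contents of common/compile_check_begin.h / _end.h.
    #include <cstdint>
    #include <vector>

    namespace doris {

    // What a compile_check_begin.h could expand to: promote narrowing conversions to errors.
    #pragma clang diagnostic push
    #pragma clang diagnostic error "-Wconversion"
    #pragma clang diagnostic error "-Wsign-conversion"

    // With the checks active, size_t -> uint32_t must be spelled out, mirroring the fixes below.
    inline uint32_t segment_count(const std::vector<int>& segments) {
        // return segments.size();                      // would now fail to compile
        return static_cast<uint32_t>(segments.size());  // explicit narrowing compiles cleanly
    }

    // What a compile_check_end.h could expand to: restore the previous diagnostic state.
    #pragma clang diagnostic pop

    } // namespace doris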

diff --git a/be/src/cloud/cloud_rowset_builder.cpp b/be/src/cloud/cloud_rowset_builder.cpp
index 9466dd10628..e9a5c3b879d 100644
--- a/be/src/cloud/cloud_rowset_builder.cpp
+++ b/be/src/cloud/cloud_rowset_builder.cpp
@@ -24,6 +24,7 @@
 #include "olap/storage_policy.h"
 
 namespace doris {
+#include "common/compile_check_begin.h"
 using namespace ErrorCode;
 
 CloudRowsetBuilder::CloudRowsetBuilder(CloudStorageEngine& engine, const WriteRequest& req,
@@ -87,7 +88,7 @@ Status CloudRowsetBuilder::init() {
 }
 
 Status CloudRowsetBuilder::check_tablet_version_count() {
-    int version_count = cloud_tablet()->fetch_add_approximate_num_rowsets(0);
+    int64_t version_count = cloud_tablet()->fetch_add_approximate_num_rowsets(0);
     // TODO(plat1ko): load backoff algorithm
     if (version_count > config::max_tablet_version_num) {
         return Status::Error<TOO_MANY_VERSION>(
@@ -140,4 +141,5 @@ Status CloudRowsetBuilder::set_txn_related_delete_bitmap() {
     }
     return Status::OK();
 }
+#include "common/compile_check_end.h"
 } // namespace doris
diff --git a/be/src/cloud/cloud_tablets_channel.cpp b/be/src/cloud/cloud_tablets_channel.cpp
index 7dddad90b95..cbc6a5d525d 100644
--- a/be/src/cloud/cloud_tablets_channel.cpp
+++ b/be/src/cloud/cloud_tablets_channel.cpp
@@ -26,6 +26,7 @@
 #include "runtime/tablets_channel.h"
 
 namespace doris {
+#include "common/compile_check_begin.h"
 
 CloudTabletsChannel::CloudTabletsChannel(CloudStorageEngine& engine, const TabletsChannelKey& key,
                                          const UniqueId& load_id, bool is_high_priority,
@@ -255,7 +256,7 @@ Status CloudTabletsChannel::close(LoadChannel* parent, const PTabletWriterAddBlo
         it++;
     }
 
-    tablet_vec->Reserve(writers_to_commit.size());
+    tablet_vec->Reserve(static_cast<int>(writers_to_commit.size()));
     for (auto* writer : writers_to_commit) {
         PTabletInfo* tablet_info = tablet_vec->Add();
         tablet_info->set_tablet_id(writer->tablet_id());
@@ -271,4 +272,5 @@ Status CloudTabletsChannel::close(LoadChannel* parent, const PTabletWriterAddBlo
     return Status::OK();
 }
 
+#include "common/compile_check_end.h"
 } // namespace doris
diff --git a/be/src/olap/delta_writer.cpp b/be/src/olap/delta_writer.cpp
index 096ee2df296..75defb647aa 100644
--- a/be/src/olap/delta_writer.cpp
+++ b/be/src/olap/delta_writer.cpp
@@ -57,6 +57,7 @@
 #include "vec/core/block.h"
 
 namespace doris {
+#include "common/compile_check_begin.h"
 using namespace ErrorCode;
 
 BaseDeltaWriter::BaseDeltaWriter(const WriteRequest& req, RuntimeProfile* profile,
@@ -277,7 +278,7 @@ void DeltaWriter::_request_slave_tablet_pull_rowset(const PNodeInfo& node_info)
     request->set_rowset_path(tablet_path);
     request->set_token(ExecEnv::GetInstance()->token());
     request->set_brpc_port(config::brpc_port);
-    request->set_node_id(node_info.id());
+    request->set_node_id(static_cast<int32_t>(node_info.id()));
     for (int segment_id = 0; segment_id < cur_rowset->rowset_meta()->num_segments(); segment_id++) {
         auto seg_path =
                 local_segment_path(tablet_path, cur_rowset->rowset_id().to_string(), segment_id);
@@ -364,4 +365,5 @@ int64_t BaseDeltaWriter::num_rows_filtered() const {
     return rowset_writer == nullptr ? 0 : rowset_writer->num_rows_filtered();
 }
 
+#include "common/compile_check_end.h"
 } // namespace doris
diff --git a/be/src/olap/delta_writer_v2.cpp b/be/src/olap/delta_writer_v2.cpp
index 5afbe59985f..2eb2bb91fb8 100644
--- a/be/src/olap/delta_writer_v2.cpp
+++ b/be/src/olap/delta_writer_v2.cpp
@@ -62,6 +62,7 @@
 #include "vec/sink/load_stream_stub.h"
 
 namespace doris {
+#include "common/compile_check_begin.h"
 using namespace ErrorCode;
 
 DeltaWriterV2::DeltaWriterV2(WriteRequest* req,
@@ -224,8 +225,9 @@ Status DeltaWriterV2::_build_current_tablet_schema(int64_t index_id,
 
     if (!indexes.empty() && !indexes[i]->columns.empty() &&
         indexes[i]->columns[0]->unique_id() >= 0) {
-        _tablet_schema->build_current_tablet_schema(index_id, table_schema_param->version(),
-                                                    indexes[i], ori_tablet_schema);
+        _tablet_schema->build_current_tablet_schema(
+                index_id, static_cast<int32_t>(table_schema_param->version()), indexes[i],
+                ori_tablet_schema);
     }
 
     _tablet_schema->set_table_id(table_schema_param->table_id());
@@ -245,4 +247,5 @@ Status DeltaWriterV2::_build_current_tablet_schema(int64_t index_id,
     return Status::OK();
 }
 
+#include "common/compile_check_end.h"
 } // namespace doris
diff --git a/be/src/olap/memtable.cpp b/be/src/olap/memtable.cpp
index 767e0fdca4c..e6f4fe3966d 100644
--- a/be/src/olap/memtable.cpp
+++ b/be/src/olap/memtable.cpp
@@ -42,6 +42,7 @@
 #include "vec/columns/column.h"
 
 namespace doris {
+#include "common/compile_check_begin.h"
 
 bvar::Adder<int64_t> g_memtable_cnt("memtable_cnt");
 
@@ -101,7 +102,7 @@ void MemTable::_init_columns_offset_by_slot_descs(const std::vector<SlotDescript
 }
 
 void MemTable::_init_agg_functions(const vectorized::Block* block) {
-    for (uint32_t cid = _tablet_schema->num_key_columns(); cid < _num_columns; ++cid) {
+    for (auto cid = _tablet_schema->num_key_columns(); cid < _num_columns; ++cid) {
         vectorized::AggregateFunctionPtr function;
         if (_keys_type == KeysType::UNIQUE_KEYS && _enable_unique_key_mow) {
             // In such table, non-key column's aggregation type is NONE, so we need to construct
@@ -129,7 +130,7 @@ void MemTable::_init_agg_functions(const vectorized::Block* block) {
         _agg_functions[cid] = function;
     }
 
-    for (uint32_t cid = _tablet_schema->num_key_columns(); cid < _num_columns; ++cid) {
+    for (auto cid = _tablet_schema->num_key_columns(); cid < _num_columns; ++cid) {
         _offsets_of_aggregate_states[cid] = _total_size_of_aggregate_states;
         _total_size_of_aggregate_states += _agg_functions[cid]->size_of_data();
 
@@ -208,7 +209,7 @@ Status MemTable::insert(const vectorized::Block* input_block,
             if (_partial_update_mode == UniqueKeyUpdateModePB::UPDATE_FIXED_COLUMNS) {
                 // for unique key fixed partial update, sequence column index in block
                 // may be different with the index in `_tablet_schema`
-                for (size_t i = 0; i < clone_block.columns(); i++) {
+                for (int32_t i = 0; i < clone_block.columns(); i++) {
                     if (clone_block.get_by_position(i).name == SEQUENCE_COL) {
                         _seq_col_idx_in_block = i;
                         break;
@@ -266,7 +267,7 @@ void MemTable::_aggregate_two_row_in_block(vectorized::MutableBlock& mutable_blo
     // dst is non-sequence row, or dst sequence is smaller
     if constexpr (!has_skip_bitmap_col) {
         DCHECK(_skip_bitmap_col_idx == -1);
-        for (uint32_t cid = _tablet_schema->num_key_columns(); cid < _num_columns; ++cid) {
+        for (size_t cid = _tablet_schema->num_key_columns(); cid < _num_columns; ++cid) {
             auto* col_ptr = mutable_block.mutable_columns()[cid].get();
             _agg_functions[cid]->add(dst_row->agg_places(cid),
                                      const_cast<const doris::vectorized::IColumn**>(&col_ptr),
@@ -279,7 +280,7 @@ void MemTable::_aggregate_two_row_in_block(vectorized::MutableBlock& mutable_blo
                 assert_cast<vectorized::ColumnBitmap*, TypeCheckOnRelease::DISABLE>(
                         mutable_block.mutable_columns()[_skip_bitmap_col_idx].get())
                         ->get_data()[src_row->_row_pos];
-        for (uint32_t cid = _tablet_schema->num_key_columns(); cid < _num_columns; ++cid) {
+        for (size_t cid = _tablet_schema->num_key_columns(); cid < _num_columns; ++cid) {
             const auto& col = _tablet_schema->column(cid);
             if (cid != _skip_bitmap_col_idx && skip_bitmap.contains(col.unique_id())) {
                 continue;
@@ -407,11 +408,11 @@ void MemTable::_sort_one_column(DorisVector<RowInBlock*>& row_in_blocks, Tie& ti
                                 std::function<int(const RowInBlock*, const RowInBlock*)> cmp) {
     auto iter = tie.iter();
     while (iter.next()) {
-        pdqsort(std::next(row_in_blocks.begin(), iter.left()),
-                std::next(row_in_blocks.begin(), iter.right()),
+        pdqsort(std::next(row_in_blocks.begin(), static_cast<int>(iter.left())),
+                std::next(row_in_blocks.begin(), static_cast<int>(iter.right())),
                 [&cmp](auto lhs, auto rhs) -> bool { return cmp(lhs, rhs) < 0; });
         tie[iter.left()] = 0;
-        for (int i = iter.left() + 1; i < iter.right(); i++) {
+        for (auto i = iter.left() + 1; i < iter.right(); i++) {
             tie[i] = (cmp(row_in_blocks[i - 1], row_in_blocks[i]) == 0);
         }
     }
@@ -623,17 +624,10 @@ bool MemTable::need_agg() const {
 }
 
 size_t MemTable::get_flush_reserve_memory_size() const {
-    size_t reserve_size = 0;
-    if (_keys_type == KeysType::DUP_KEYS) {
-        if (_tablet_schema->num_key_columns() == 0) {
-            // no need to reserve
-        } else {
-            reserve_size = static_cast<size_t>(_input_mutable_block.allocated_bytes() * 1.2);
-        }
-    } else {
-        reserve_size = static_cast<size_t>(_input_mutable_block.allocated_bytes() * 1.2);
+    if (_keys_type == KeysType::DUP_KEYS && _tablet_schema->num_key_columns() == 0) {
+        return 0; // no need to reserve
     }
-    return reserve_size;
+    return static_cast<size_t>(static_cast<double>(_input_mutable_block.allocated_bytes()) * 1.2);
 }
 
 Status MemTable::_to_block(std::unique_ptr<vectorized::Block>* res) {
@@ -666,4 +660,5 @@ Status MemTable::to_block(std::unique_ptr<vectorized::Block>* res) {
     return Status::OK();
 }
 
+#include "common/compile_check_end.h"
 } // namespace doris
diff --git a/be/src/olap/memtable.h b/be/src/olap/memtable.h
index 125e0682053..883696f8246 100644
--- a/be/src/olap/memtable.h
+++ b/be/src/olap/memtable.h
@@ -118,8 +118,8 @@ public:
     Tie(size_t begin, size_t end) : _begin(begin), _end(end) {
         _bits = std::vector<uint8_t>(_end - _begin, 1);
     }
-    uint8_t operator[](int i) const { return _bits[i - _begin]; }
-    uint8_t& operator[](int i) { return _bits[i - _begin]; }
+    uint8_t operator[](size_t i) const { return _bits[i - _begin]; }
+    uint8_t& operator[](size_t i) { return _bits[i - _begin]; }
     Iter iter() { return Iter(*this); }
 
 private:
diff --git a/be/src/olap/push_handler.cpp b/be/src/olap/push_handler.cpp
index 6f24e015fbb..09e5db89447 100644
--- a/be/src/olap/push_handler.cpp
+++ b/be/src/olap/push_handler.cpp
@@ -64,6 +64,7 @@
 #include "vec/functions/simple_function_factory.h"
 
 namespace doris {
+#include "common/compile_check_begin.h"
 using namespace ErrorCode;
 
 // Process push command, the main logical is as follows:
@@ -227,7 +228,7 @@ Status PushHandler::_do_streaming_ingestion(TabletSharedPtr tablet, const TPushR
 
 Status PushHandler::_convert_v2(TabletSharedPtr cur_tablet, RowsetSharedPtr* cur_rowset,
                                 TabletSchemaSPtr tablet_schema, PushType push_type) {
-    Status res = Status::OK();
+    Status st = Status::OK();
     uint32_t num_rows = 0;
     PUniqueId load_id;
     load_id.set_hi(0);
@@ -266,18 +267,18 @@ Status PushHandler::_convert_v2(TabletSharedPtr cur_tablet, RowsetSharedPtr* cur
             // init schema
             std::unique_ptr<Schema> schema(new (std::nothrow) Schema(tablet_schema));
             if (schema == nullptr) {
-                res = Status::Error<MEM_ALLOC_FAILED>("fail to create schema. tablet={}",
-                                                      cur_tablet->tablet_id());
+                st = Status::Error<MEM_ALLOC_FAILED>("fail to create schema. tablet={}",
+                                                     cur_tablet->tablet_id());
                 break;
             }
 
             // init Reader
             std::unique_ptr<PushBrokerReader> reader = PushBrokerReader::create_unique(
                     schema.get(), _request.broker_scan_range, _request.desc_tbl);
-            res = reader->init();
-            if (reader == nullptr || !res.ok()) {
-                res = Status::Error<PUSH_INIT_ERROR>("fail to init reader. res={}, tablet={}", res,
-                                                     cur_tablet->tablet_id());
+            st = reader->init();
+            if (reader == nullptr || !st.ok()) {
+                st = Status::Error<PUSH_INIT_ERROR>("fail to init reader. st={}, tablet={}", st,
+                                                    cur_tablet->tablet_id());
                 break;
             }
 
@@ -287,18 +288,18 @@ Status PushHandler::_convert_v2(TabletSharedPtr cur_tablet, RowsetSharedPtr* cur
             // 4. Read data from broker and write into cur_tablet
             VLOG_NOTICE << "start to convert etl file to delta.";
             while (!reader->eof()) {
-                res = reader->next(&block);
-                if (!res.ok()) {
+                st = reader->next(&block);
+                if (!st.ok()) {
                     LOG(WARNING) << "read next row failed."
-                                 << " res=" << res << " read_rows=" << num_rows;
+                                 << " st=" << st << " read_rows=" << num_rows;
                     break;
                 } else {
                     if (reader->eof()) {
                         break;
                     }
-                    if (!(res = rowset_writer->add_block(&block)).ok()) {
+                    if (!(st = rowset_writer->add_block(&block)).ok()) {
                         LOG(WARNING) << "fail to attach block to rowset_writer. "
-                                     << "res=" << res << ", tablet=" << cur_tablet->tablet_id()
+                                     << "st=" << st << ", tablet=" << cur_tablet->tablet_id()
                                      << ", read_rows=" << num_rows;
                         break;
                     }
@@ -310,16 +311,16 @@ Status PushHandler::_convert_v2(TabletSharedPtr cur_tablet, RowsetSharedPtr* cur
             RETURN_IF_ERROR(reader->close());
         }
 
-        if (!res.ok()) {
+        if (!st.ok()) {
             break;
         }
 
-        if (!(res = rowset_writer->flush()).ok()) {
+        if (!(st = rowset_writer->flush()).ok()) {
             LOG(WARNING) << "failed to finalize writer";
             break;
         }
 
-        if (!(res = rowset_writer->build(*cur_rowset)).ok()) {
+        if (!(st = rowset_writer->build(*cur_rowset)).ok()) {
             LOG(WARNING) << "failed to build rowset";
             break;
         }
@@ -328,9 +329,9 @@ Status PushHandler::_convert_v2(TabletSharedPtr cur_tablet, RowsetSharedPtr* cur
         _write_rows += (*cur_rowset)->num_rows();
     } while (false);
 
-    VLOG_TRACE << "convert delta file end. res=" << res << ", tablet=" << cur_tablet->tablet_id()
+    VLOG_TRACE << "convert delta file end. st=" << st << ", tablet=" << cur_tablet->tablet_id()
                << ", processed_rows" << num_rows;
-    return res;
+    return st;
 }
 
 PushBrokerReader::PushBrokerReader(const Schema* schema, const TBrokerScanRange& t_scan_range,
@@ -585,7 +586,7 @@ Status PushBrokerReader::_init_expr_ctxes() {
 
     std::map<SlotId, SlotDescriptor*> src_slot_desc_map;
     std::unordered_map<SlotDescriptor*, int> src_slot_desc_to_index {};
-    for (int i = 0, len = src_tuple_desc->slots().size(); i < len; ++i) {
+    for (size_t i = 0, len = src_tuple_desc->slots().size(); i < len; ++i) {
         auto* slot_desc = src_tuple_desc->slots()[i];
         src_slot_desc_to_index.emplace(slot_desc, i);
         src_slot_desc_map.emplace(slot_desc->id(), slot_desc);
@@ -692,4 +693,5 @@ Status PushBrokerReader::_get_next_reader() {
     return Status::OK();
 }
 
+#include "common/compile_check_end.h"
 } // namespace doris
diff --git a/be/src/olap/push_handler.h b/be/src/olap/push_handler.h
index b932dcacae8..d604a2c9dbf 100644
--- a/be/src/olap/push_handler.h
+++ b/be/src/olap/push_handler.h
@@ -116,7 +116,7 @@ private:
     const TDescriptorTable& _t_desc_tbl;
     std::unordered_map<std::string, TypeDescriptor> _name_to_col_type;
     std::unordered_set<std::string> _missing_cols;
-    std::unordered_map<std::string, size_t> _src_block_name_to_idx;
+    std::unordered_map<std::string, uint32_t> _src_block_name_to_idx;
     vectorized::VExprContextSPtrs _dest_expr_ctxs;
     vectorized::VExprContextSPtr _pre_filter_ctx_ptr;
     std::vector<SlotDescriptor*> _src_slot_descs_order_by_dest;
diff --git a/be/src/olap/rowset/segcompaction.cpp b/be/src/olap/rowset/segcompaction.cpp
index a16bb16d490..1584cfefd67 100644
--- a/be/src/olap/rowset/segcompaction.cpp
+++ b/be/src/olap/rowset/segcompaction.cpp
@@ -65,6 +65,7 @@
 #include "vec/olap/vertical_merge_iterator.h"
 
 namespace doris {
+#include "common/compile_check_begin.h"
 using namespace ErrorCode;
 
 SegcompactionWorker::SegcompactionWorker(BetaRowsetWriter* writer) : _writer(writer) {}
@@ -380,7 +381,7 @@ void SegcompactionWorker::compact_segments(SegCompactionCandidatesSharedPtr segm
         return;
     }
     if (!status.ok()) {
-        int16_t errcode = status.code();
+        int errcode = status.code();
         switch (errcode) {
         case FETCH_MEMORY_EXCEEDED:
         case SEGCOMPACTION_INIT_READER:
@@ -464,4 +465,5 @@ bool SegcompactionWorker::cancel() {
     return _is_compacting_state_mutable.exchange(false);
 }
 
+#include "common/compile_check_end.h"
 } // namespace doris
diff --git a/be/src/runtime/load_stream.cpp b/be/src/runtime/load_stream.cpp
index 3d643f09c94..317387167b2 100644
--- a/be/src/runtime/load_stream.cpp
+++ b/be/src/runtime/load_stream.cpp
@@ -52,6 +52,7 @@
 #define UNKNOWN_ID_FOR_TEST 0x7c00
 
 namespace doris {
+#include "common/compile_check_begin.h"
 
 bvar::Adder<int64_t> g_load_stream_cnt("load_stream_count");
 bvar::LatencyRecorder g_load_stream_flush_wait_ms("load_stream_flush_wait_ms");
@@ -241,7 +242,7 @@ Status TabletStream::add_segment(const PStreamHeader& header, butil::IOBuf* data
             return _status.status();
         }
         DBUG_EXECUTE_IF("TabletStream.add_segment.segid_never_written",
-                        { segid = _segids_mapping[src_id]->size(); });
+                        { segid = static_cast<uint32_t>(_segids_mapping[src_id]->size()); });
         if (segid >= _segids_mapping[src_id]->size()) {
             _status.update(Status::InternalError(
                     "add segment failed, segment is never written, src_id={}, segment_id={}",
@@ -452,7 +453,7 @@ LoadStream::~LoadStream() {
 
 Status LoadStream::init(const POpenLoadStreamRequest* request) {
     _txn_id = request->txn_id();
-    _total_streams = request->total_streams();
+    _total_streams = static_cast<int32_t>(request->total_streams());
     _is_incremental = (_total_streams == 0);
 
     _schema = std::make_shared<OlapTableSchemaParam>();
@@ -515,14 +516,14 @@ void LoadStream::_report_result(StreamId stream, const Status& status,
     if (_enable_profile && _close_load_cnt == _total_streams) {
         TRuntimeProfileTree tprofile;
         ThriftSerializer ser(false, 4096);
-        uint8_t* buf = nullptr;
+        uint8_t* profile_buf = nullptr;
         uint32_t len = 0;
         std::unique_lock<bthread::Mutex> l(_lock);
 
         _profile->to_thrift(&tprofile);
-        auto st = ser.serialize(&tprofile, &len, &buf);
+        auto st = ser.serialize(&tprofile, &len, &profile_buf);
         if (st.ok()) {
-            response.set_load_stream_profile(buf, len);
+            response.set_load_stream_profile(profile_buf, len);
         } else {
             LOG(WARNING) << "TRuntimeProfileTree serialize failed, errmsg=" << st << ", " << *this;
         }
@@ -717,4 +718,5 @@ inline std::ostream& operator<<(std::ostream& ostr, const LoadStream& load_strea
     return ostr;
 }
 
+#include "common/compile_check_end.h"
 } // namespace doris
diff --git a/be/src/runtime/load_stream_writer.cpp b/be/src/runtime/load_stream_writer.cpp
index 8e3958249db..be355884fb3 100644
--- a/be/src/runtime/load_stream_writer.cpp
+++ b/be/src/runtime/load_stream_writer.cpp
@@ -68,6 +68,7 @@
 #include "vec/core/block.h"
 
 namespace doris {
+#include "common/compile_check_begin.h"
 using namespace ErrorCode;
 
 bvar::Adder<int64_t> g_load_stream_writer_cnt("load_stream_writer_count");
@@ -109,15 +110,16 @@ Status LoadStreamWriter::append_data(uint32_t segid, uint64_t offset, butil::IOB
         if (segid >= file_writers.size()) {
             for (size_t i = file_writers.size(); i <= segid; i++) {
                 Status st;
-                io::FileWriterPtr file_writer;
-                st = _rowset_writer->create_file_writer(i, file_writer, file_type);
+                io::FileWriterPtr seg_file_writer;
+                st = _rowset_writer->create_file_writer(static_cast<uint32_t>(i), seg_file_writer,
+                                                        file_type);
                 DBUG_EXECUTE_IF("LoadStreamWriter.append_data.create_file_writer_failed",
                                 { st = Status::InternalError("fault injection"); });
                 if (!st.ok()) {
                     _is_canceled = true;
                     return st;
                 }
-                file_writers.push_back(std::move(file_writer));
+                file_writers.push_back(std::move(seg_file_writer));
                 g_load_stream_file_writer_cnt << 1;
             }
         }
@@ -150,7 +152,7 @@ Status LoadStreamWriter::close_writer(uint32_t segid, FileType file_type) {
             return Status::Corruption("close_writer failed, LoadStreamWriter is not inited");
         }
         DBUG_EXECUTE_IF("LoadStreamWriter.close_writer.bad_segid",
-                        { segid = file_writers.size(); });
+                        { segid = static_cast<uint32_t>(file_writers.size()); });
         if (segid >= file_writers.size()) {
             return Status::Corruption(
                     "close_writer failed, file {} is never opened, file type is {}", segid,
@@ -191,7 +193,7 @@ Status LoadStreamWriter::add_segment(uint32_t segid, const SegmentStatistics& st
             return Status::Corruption("add_segment failed, LoadStreamWriter is not inited");
         }
         DBUG_EXECUTE_IF("LoadStreamWriter.add_segment.bad_segid",
-                        { segid = _segment_file_writers.size(); });
+                        { segid = static_cast<uint32_t>(_segment_file_writers.size()); });
         RETURN_IF_ERROR(_calc_file_size(segid, FileType::SEGMENT_FILE, &segment_file_size));
         if (_inverted_file_writers.size() > 0) {
             RETURN_IF_ERROR(
@@ -216,7 +218,7 @@ Status LoadStreamWriter::_calc_file_size(uint32_t segid, FileType file_type, siz
             (file_type == FileType::SEGMENT_FILE) ? _segment_file_writers : _inverted_file_writers;
 
     DBUG_EXECUTE_IF("LoadStreamWriter._calc_file_size.unknown_segment",
-                    { segid = file_writers.size(); });
+                    { segid = static_cast<uint32_t>(file_writers.size()); });
     if (segid >= file_writers.size()) {
         return Status::Corruption("calc file size failed, file {} is never opened, file type is {}",
                                   segid, file_type);
@@ -231,8 +233,8 @@ Status LoadStreamWriter::_calc_file_size(uint32_t segid, FileType file_type, siz
     }
     DBUG_EXECUTE_IF("LoadStreamWriter._calc_file_size.file_not_closed", {
         io::FileWriterPtr fwriter;
-        static_cast<void>(_rowset_writer->create_file_writer(file_writers.size(), fwriter,
-                                                             FileType::SEGMENT_FILE));
+        static_cast<void>(_rowset_writer->create_file_writer(
+                static_cast<uint32_t>(file_writers.size()), fwriter, FileType::SEGMENT_FILE));
         file_writers.push_back(std::move(fwriter));
         file_writer = file_writers.back().get();
     });
@@ -265,7 +267,8 @@ Status LoadStreamWriter::_pre_close() {
     DBUG_EXECUTE_IF("LoadStreamWriter.close.inverted_writers_size_not_match", {
         io::FileWriterPtr file_writer;
         static_cast<void>(_rowset_writer->create_file_writer(
-                _inverted_file_writers.size(), file_writer, FileType::INVERTED_INDEX_FILE));
+                static_cast<uint32_t>(_inverted_file_writers.size()), file_writer,
+                FileType::INVERTED_INDEX_FILE));
         _inverted_file_writers.push_back(std::move(file_writer));
     });
     if (_inverted_file_writers.size() > 0 &&
@@ -277,8 +280,9 @@ Status LoadStreamWriter::_pre_close() {
     }
     DBUG_EXECUTE_IF("LoadStreamWriter.close.file_not_closed", {
         io::FileWriterPtr file_writer;
-        static_cast<void>(_rowset_writer->create_file_writer(_segment_file_writers.size(),
-                                                             file_writer, FileType::SEGMENT_FILE));
+        static_cast<void>(_rowset_writer->create_file_writer(
+                static_cast<uint32_t>(_segment_file_writers.size()), file_writer,
+                FileType::SEGMENT_FILE));
         _segment_file_writers.push_back(std::move(file_writer));
     });
     for (const auto& writer : _segment_file_writers) {
@@ -291,7 +295,8 @@ Status LoadStreamWriter::_pre_close() {
     DBUG_EXECUTE_IF("LoadStreamWriter.close.inverted_file_not_closed", {
         io::FileWriterPtr file_writer;
         static_cast<void>(_rowset_writer->create_file_writer(
-                _inverted_file_writers.size(), file_writer, FileType::INVERTED_INDEX_FILE));
+                static_cast<uint32_t>(_inverted_file_writers.size()), file_writer,
+                FileType::INVERTED_INDEX_FILE));
         _inverted_file_writers.push_back(std::move(file_writer));
     });
     for (const auto& writer : _inverted_file_writers) {
@@ -320,4 +325,5 @@ Status LoadStreamWriter::close() {
     return Status::OK();
 }
 
+#include "common/compile_check_end.h"
 } // namespace doris
diff --git a/be/src/runtime/stream_load/stream_load_context.cpp b/be/src/runtime/stream_load/stream_load_context.cpp
index dbabf7fa421..176846fb5f3 100644
--- a/be/src/runtime/stream_load/stream_load_context.cpp
+++ b/be/src/runtime/stream_load/stream_load_context.cpp
@@ -31,6 +31,7 @@
 #include "common/logging.h"
 
 namespace doris {
+#include "common/compile_check_begin.h"
 using namespace ErrorCode;
 
 std::string StreamLoadContext::to_json() const {
@@ -106,9 +107,9 @@ std::string StreamLoadContext::to_json() const {
     writer.Key("ReadDataTimeMs");
     writer.Int64(read_data_cost_nanos / 1000000);
     writer.Key("WriteDataTimeMs");
-    writer.Int(write_data_cost_nanos / 1000000);
+    writer.Int64(write_data_cost_nanos / 1000000);
     writer.Key("ReceiveDataTimeMs");
-    writer.Int((receive_and_read_data_cost_nanos - read_data_cost_nanos) / 1000000);
+    writer.Int64((receive_and_read_data_cost_nanos - read_data_cost_nanos) / 1000000);
     if (!group_commit) {
         writer.Key("CommitAndPublishTimeMs");
         writer.Int64(commit_and_publish_txn_cost_nanos / 1000000);
@@ -133,37 +134,40 @@ std::string StreamLoadContext::prepare_stream_load_record(const std::string& str
     rapidjson::Document::AllocatorType& allocator = document.GetAllocator();
 
     rapidjson::Value cluster_value(rapidjson::kStringType);
-    cluster_value.SetString(auth.cluster.c_str(), auth.cluster.size());
+    cluster_value.SetString(auth.cluster.c_str(),
+                            static_cast<rapidjson::SizeType>(auth.cluster.size()));
     if (!cluster_value.IsNull()) {
         document.AddMember("cluster", cluster_value, allocator);
     }
 
     rapidjson::Value db_value(rapidjson::kStringType);
-    db_value.SetString(db.c_str(), db.size());
+    db_value.SetString(db.c_str(), static_cast<rapidjson::SizeType>(db.size()));
     if (!db_value.IsNull()) {
         document.AddMember("Db", db_value, allocator);
     }
 
     rapidjson::Value table_value(rapidjson::kStringType);
-    table_value.SetString(table.c_str(), table.size());
+    table_value.SetString(table.c_str(), static_cast<rapidjson::SizeType>(table.size()));
     if (!table_value.IsNull()) {
         document.AddMember("Table", table_value, allocator);
     }
 
     rapidjson::Value user_value(rapidjson::kStringType);
-    user_value.SetString(auth.user.c_str(), auth.user.size());
+    user_value.SetString(auth.user.c_str(), static_cast<rapidjson::SizeType>(auth.user.size()));
     if (!user_value.IsNull()) {
         document.AddMember("User", user_value, allocator);
     }
 
     rapidjson::Value client_ip_value(rapidjson::kStringType);
-    client_ip_value.SetString(auth.user_ip.c_str(), auth.user_ip.size());
+    client_ip_value.SetString(auth.user_ip.c_str(),
+                              static_cast<rapidjson::SizeType>(auth.user_ip.size()));
     if (!client_ip_value.IsNull()) {
         document.AddMember("ClientIp", client_ip_value, allocator);
     }
 
     rapidjson::Value comment_value(rapidjson::kStringType);
-    comment_value.SetString(load_comment.c_str(), load_comment.size());
+    comment_value.SetString(load_comment.c_str(),
+                            static_cast<rapidjson::SizeType>(load_comment.size()));
     if (!comment_value.IsNull()) {
         document.AddMember("Comment", comment_value, allocator);
     }
@@ -359,4 +363,5 @@ bool StreamLoadContext::is_mow_table() const {
             put_result.pipeline_params.is_mow_table);
 }
 
+#include "common/compile_check_end.h"
 } // namespace doris
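
A note on the stream_load_context.cpp hunk above: besides making the size_t -> rapidjson::SizeType narrowing explicit in SetString(), it switches the elapsed-time fields from rapidjson's 32-bit Int() to Int64(), since the *_cost_nanos counters are int64_t. A minimal standalone illustration of the same pattern (the rapidjson calls are the real API; the surrounding main(), variable names, and values are made up for the example):

    #include <cstdint>
    #include <string>
    #include <rapidjson/stringbuffer.h>
    #include <rapidjson/writer.h>

    int main() {
        rapidjson::StringBuffer buf;
        rapidjson::Writer<rapidjson::StringBuffer> writer(buf);

        int64_t write_data_cost_nanos = 3'000'000'000'000'000; // ~35 days in nanoseconds
        std::string comment = "example";

        writer.StartObject();
        writer.Key("WriteDataTimeMs");
        // writer.Int(write_data_cost_nanos / 1000000);  // int64_t -> int narrows; flagged by the checks
        writer.Int64(write_data_cost_nanos / 1000000);    // matches the fix in this patch
        writer.Key("Comment");
        // String()/SetString() take rapidjson::SizeType (by default an unsigned 32-bit type),
        // so the size_t returned by std::string::size() has to be narrowed explicitly.
        writer.String(comment.c_str(), static_cast<rapidjson::SizeType>(comment.size()));
        writer.EndObject();
        return 0;
    }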

