This is an automated email from the ASF dual-hosted git repository.

gavinchou pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new b4d9c47f8e6 branch-3.0: [chore](log) Standardize S3 failure log formats to enable critical operation monitoring #49813 (#49828)
b4d9c47f8e6 is described below

commit b4d9c47f8e656a93406fd21e33eb849f7e8451db
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Mon Apr 7 18:01:07 2025 +0800

    branch-3.0: [chore](log) Standardize S3 failure log formats to enable critical operation monitoring #49813 (#49828)
    
    Cherry-picked from #49813
    
    Co-authored-by: Gavin Chou <ga...@selectdb.com>
---
 be/src/io/fs/s3_file_reader.cpp | 16 +++++++++++++---
 be/src/io/fs/s3_file_system.cpp |  2 +-
 be/src/io/fs/s3_file_writer.cpp | 15 ++++++++-------
 3 files changed, 22 insertions(+), 11 deletions(-)
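
Note (not part of the commit): the standardized pattern in the hunks below is a fixed
"failed to <operation>" prefix followed by space-separated key=value pairs, emitted at
WARNING and echoed into the returned Status, so one log-scraping rule can catch every
critical S3 failure. A minimal C++ sketch of that pattern, assuming fmt and glog as the
BE already uses them; the helper name s3_failure_msg is illustrative only, not code from
this commit:

    #include <string>
    #include <fmt/format.h>     // fmt::format, as used in the patch below
    #include <glog/logging.h>   // LOG(WARNING), as used in the patch below

    // Illustrative helper: builds a message in the standardized
    // "failed to <op>, k=v k=v ..." form adopted by this change.
    static std::string s3_failure_msg(const std::string& op, const std::string& path,
                                      size_t offset, size_t bytes_req, size_t bytes_read,
                                      size_t file_size, int tries) {
        return fmt::format(
                "failed to {}, path={} offset={} bytes_req={} bytes_read={} file_size={} tries={}",
                op, path, offset, bytes_req, bytes_read, file_size, tries);
    }

    // Usage mirroring the reader hunks below (Status is the Doris type):
    //   std::string msg = s3_failure_msg("get object", _path.native(), offset,
    //                                    bytes_req, *bytes_read, _file_size, retry_count + 1);
    //   LOG(WARNING) << msg;
    //   return Status::InternalError(msg);

A monitor can then match all of these messages with a single pattern such as
"failed to (get object|upload part|put object|complete multipart upload)", which is the
operation-monitoring goal named in the title.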

diff --git a/be/src/io/fs/s3_file_reader.cpp b/be/src/io/fs/s3_file_reader.cpp
index 86590d91632..80c58235300 100644
--- a/be/src/io/fs/s3_file_reader.cpp
+++ b/be/src/io/fs/s3_file_reader.cpp
@@ -132,6 +132,7 @@ Status S3FileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_rea
 
     int total_sleep_time = 0;
     while (retry_count <= max_retries) {
+        *bytes_read = 0;
         s3_file_reader_read_counter << 1;
         // clang-format off
         auto resp = client->get_object( { .bucket = _bucket, .key = _key, },
@@ -157,8 +158,12 @@ Status S3FileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_rea
             }
         }
         if (*bytes_read != bytes_req) {
-            return Status::InternalError("failed to read (bytes read: {}, bytes req: {})",
-                                         *bytes_read, bytes_req);
+            std::string msg = fmt::format(
+                    "failed to get object, path={} offset={} bytes_req={} 
bytes_read={} "
+                    "file_size={} tries={}",
+                    _path.native(), offset, bytes_req, *bytes_read, 
_file_size, (retry_count + 1));
+            LOG(WARNING) << msg;
+            return Status::InternalError(msg);
         }
         _s3_stats.total_bytes_read += bytes_req;
         s3_bytes_read_total << bytes_req;
@@ -170,7 +175,12 @@ Status S3FileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_rea
         }
         return Status::OK();
     }
-    return Status::InternalError("failed to read from s3, exceeded maximum retries");
+    std::string msg = fmt::format(
+            "failed to get object, path={} offset={} bytes_req={} 
bytes_read={} file_size={} "
+            "tries={}",
+            _path.native(), offset, bytes_req, *bytes_read, _file_size, 
(max_retries + 1));
+    LOG(WARNING) << msg;
+    return Status::InternalError(msg);
 }
 
 void S3FileReader::_collect_profile_before_close() {
diff --git a/be/src/io/fs/s3_file_system.cpp b/be/src/io/fs/s3_file_system.cpp
index e265ee24a3f..bc6aefd92cc 100644
--- a/be/src/io/fs/s3_file_system.cpp
+++ b/be/src/io/fs/s3_file_system.cpp
@@ -105,7 +105,7 @@ Status ObjClientHolder::reset(const S3ClientConf& conf) {
         return Status::InvalidArgument("failed to init s3 client with conf {}", conf.to_string());
     }
 
-    LOG(INFO) << "reset s3 client with new conf: " << conf.to_string();
+    LOG(WARNING) << "reset s3 client with new conf: " << conf.to_string();
 
     {
         std::lock_guard lock(_mtx);
diff --git a/be/src/io/fs/s3_file_writer.cpp b/be/src/io/fs/s3_file_writer.cpp
index edec1e17aef..d5a5cd00cea 100644
--- a/be/src/io/fs/s3_file_writer.cpp
+++ b/be/src/io/fs/s3_file_writer.cpp
@@ -292,15 +292,15 @@ void S3FileWriter::_upload_one_part(int64_t part_num, UploadFileBuffer& buf) {
     }
     const auto& client = _obj_client->get();
     if (nullptr == client) {
-        LOG_WARNING("failed at key: {}, load part {} bacause of invalid obj 
client",
+        LOG_WARNING("failed to upload part, key={}, part_num={} bacause of 
null obj client",
                     _obj_storage_path_opts.key, part_num);
         buf.set_status(Status::InternalError<false>("invalid obj storage client"));
         return;
     }
     auto resp = client->upload_part(_obj_storage_path_opts, buf.get_string_view_data(), part_num);
     if (resp.resp.status.code != ErrorCode::OK) {
-        LOG_INFO("failed at key: {}, load part {}, st {}", 
_obj_storage_path_opts.key, part_num,
-                 resp.resp.status.msg);
+        LOG_WARNING("failed to upload part, key={}, part_num={}, status={}",
+                    _obj_storage_path_opts.key, part_num, resp.resp.status.msg);
         buf.set_status(Status(resp.resp.status.code, std::move(resp.resp.status.msg)));
         return;
     }
@@ -347,7 +347,8 @@ Status S3FileWriter::_complete() {
     if (_failed || _completed_parts.size() != expected_num_parts1 ||
         expected_num_parts1 != expected_num_parts2) {
         _st = Status::InternalError(
-                "error status={} failed={} #complete_parts={} 
#expected_parts={} "
+                "failed to complete multipart upload, error status={} 
failed={} #complete_parts={} "
+                "#expected_parts={} "
                 "completed_parts_list={} file_path={} file_size={} has left 
buffer not uploaded={}",
                 _st, _failed, _completed_parts.size(), expected_num_parts1, 
_dump_completed_part(),
                 _obj_storage_path_opts.path.native(), _bytes_appended, 
_pending_buf != nullptr);
@@ -363,7 +364,7 @@ Status S3FileWriter::_complete() {
               << " s3_write_buffer_size=" << config::s3_write_buffer_size;
     auto resp = client->complete_multipart_upload(_obj_storage_path_opts, _completed_parts);
     if (resp.status.code != ErrorCode::OK) {
-        LOG_WARNING("Compltet multi part upload failed because {}, file path 
{}", resp.status.msg,
+        LOG_WARNING("failed to complete multipart upload, err={}, 
file_path={}", resp.status.msg,
                     _obj_storage_path_opts.path.native());
         return {resp.status.code, std::move(resp.status.msg)};
     }
@@ -408,8 +409,8 @@ void S3FileWriter::_put_object(UploadFileBuffer& buf) {
     TEST_SYNC_POINT_RETURN_WITH_VOID("S3FileWriter::_put_object", this, &buf);
     auto resp = client->put_object(_obj_storage_path_opts, buf.get_string_view_data());
     if (resp.status.code != ErrorCode::OK) {
-        LOG_WARNING("put object failed because {}, file path {}", 
resp.status.msg,
-                    _obj_storage_path_opts.path.native());
+        LOG_WARNING("failed to put object, put object failed because {}, file 
path {}",
+                    resp.status.msg, _obj_storage_path_opts.path.native());
         buf.set_status({resp.status.code, std::move(resp.status.msg)});
         return;
     }
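
A side note on the first reader hunk above: it also resets *bytes_read to 0 at the top of
every retry, presumably so a stale value from an earlier attempt cannot leak into the
"*bytes_read != bytes_req" check or into the new key=value failure message. A simplified,
self-contained sketch of that loop shape (names and types reduced; not the literal Doris
code):

    #include <cstddef>

    // Stand-in for client->get_object(); returns how many bytes it produced,
    // or 0 on a transient failure. Purely illustrative.
    static size_t get_object_once(size_t /*offset*/, size_t bytes_req) { return bytes_req; }

    static bool read_with_retries(size_t offset, size_t bytes_req, size_t* bytes_read,
                                  int max_retries) {
        int retry_count = 0;
        while (retry_count <= max_retries) {
            *bytes_read = 0;  // reset per attempt, as the patch does
            size_t got = get_object_once(offset, bytes_req);
            if (got == 0) {   // transient failure: retry
                ++retry_count;
                continue;
            }
            *bytes_read = got;
            if (*bytes_read != bytes_req) {
                return false; // short read: log the key=value message and fail, as above
            }
            return true;      // success
        }
        return false;         // retries exhausted: log the key=value message and fail, as above
    }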


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
