This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-4.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-4.0 by this push:
     new 44a73f08c4c branch-4.0: [fix](hdfs)Fix be coredump in HDFS reader 
during profile collection. #56806 (#56951)
44a73f08c4c is described below

commit 44a73f08c4cff86bddc029f629195c2de8c71be5
Author: github-actions[bot] 
<41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Thu Oct 16 09:52:51 2025 +0800

    branch-4.0: [fix](hdfs)Fix be coredump in HDFS reader during profile 
collection. #56806 (#56951)
    
    Cherry-picked from #56806
    
    Co-authored-by: daidai <[email protected]>
---
 be/src/io/fs/hdfs_file_reader.cpp | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/be/src/io/fs/hdfs_file_reader.cpp 
b/be/src/io/fs/hdfs_file_reader.cpp
index cb8b71f67d7..0e278dff0c8 100644
--- a/be/src/io/fs/hdfs_file_reader.cpp
+++ b/be/src/io/fs/hdfs_file_reader.cpp
@@ -119,6 +119,7 @@ Status HdfsFileReader::read_at_impl(size_t offset, Slice 
result, size_t* bytes_r
                                     const IOContext* io_ctx) {
     auto st = do_read_at_impl(offset, result, bytes_read, io_ctx);
     if (!st.ok()) {
+        _handle = nullptr;
         _accessor.destroy();
     }
     return st;
@@ -131,6 +132,11 @@ Status HdfsFileReader::do_read_at_impl(size_t offset, 
Slice result, size_t* byte
         return Status::InternalError("read closed file: {}", _path.native());
     }
 
+    if (_handle == nullptr) [[unlikely]] {
+        return Status::InternalError("cached hdfs file handle has been 
destroyed: {}",
+                                     _path.native());
+    }
+
     if (offset > _handle->file_size()) {
         return Status::IOError("offset exceeds file size(offset: {}, file 
size: {}, path: {})",
                                offset, _handle->file_size(), _path.native());
@@ -245,6 +251,10 @@ Status HdfsFileReader::do_read_at_impl(size_t offset, 
Slice result, size_t* byte
 void HdfsFileReader::_collect_profile_before_close() {
     if (_profile != nullptr && is_hdfs(_fs_name)) {
 #ifdef USE_HADOOP_HDFS
+        if (_handle == nullptr) [[unlikely]] {
+            return;
+        }
+
         struct hdfsReadStatistics* hdfs_statistics = nullptr;
         auto r = hdfsFileGetReadStatistics(_handle->file(), &hdfs_statistics);
         if (r != 0) {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to