This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 8b8425c4340 branch-3.1: [fix](hdfs)Fix be coredump in HDFS reader 
during profile collection. #56806 (#56950)
8b8425c4340 is described below

commit 8b8425c4340cb288a0c7deea57f6566ab878c71a
Author: github-actions[bot] 
<41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Thu Oct 16 23:29:43 2025 +0800

    branch-3.1: [fix](hdfs)Fix be coredump in HDFS reader during profile 
collection. #56806 (#56950)
    
    Cherry-picked from #56806
    
    Co-authored-by: daidai <[email protected]>
---
 be/src/io/fs/hdfs_file_reader.cpp | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/be/src/io/fs/hdfs_file_reader.cpp 
b/be/src/io/fs/hdfs_file_reader.cpp
index 87a5756496e..e628d814ff3 100644
--- a/be/src/io/fs/hdfs_file_reader.cpp
+++ b/be/src/io/fs/hdfs_file_reader.cpp
@@ -118,6 +118,7 @@ Status HdfsFileReader::read_at_impl(size_t offset, Slice 
result, size_t* bytes_r
                                     const IOContext* io_ctx) {
     auto st = do_read_at_impl(offset, result, bytes_read, io_ctx);
     if (!st.ok()) {
+        _handle = nullptr;
         _accessor.destroy();
     }
     return st;
@@ -130,6 +131,11 @@ Status HdfsFileReader::do_read_at_impl(size_t offset, 
Slice result, size_t* byte
         return Status::InternalError("read closed file: {}", _path.native());
     }
 
+    if (_handle == nullptr) [[unlikely]] {
+        return Status::InternalError("cached hdfs file handle has been 
destroyed: {}",
+                                     _path.native());
+    }
+
     if (offset > _handle->file_size()) {
         return Status::IOError("offset exceeds file size(offset: {}, file 
size: {}, path: {})",
                                offset, _handle->file_size(), _path.native());
@@ -241,6 +247,10 @@ Status HdfsFileReader::do_read_at_impl(size_t offset, 
Slice result, size_t* byte
 void HdfsFileReader::_collect_profile_before_close() {
     if (_profile != nullptr && is_hdfs(_fs_name)) {
 #ifdef USE_HADOOP_HDFS
+        if (_handle == nullptr) [[unlikely]] {
+            return;
+        }
+
         struct hdfsReadStatistics* hdfs_statistics = nullptr;
         auto r = hdfsFileGetReadStatistics(_handle->file(), &hdfs_statistics);
         if (r != 0) {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to