This is an automated email from the ASF dual-hosted git repository.

zclll pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 69262cb5902 [fix](cache) Fix null pointer dereference in prefetch 
dryrun fallback path (#60870)
69262cb5902 is described below

commit 69262cb5902911cbc8a4f2e868c6c54f3ec1e42a
Author: Yixuan Wang <[email protected]>
AuthorDate: Sat Feb 28 11:42:51 2026 +0800

    [fix](cache) Fix null pointer dereference in prefetch dryrun fallback path 
(#60870)
    
    Fixes the bug introduced by https://github.com/apache/doris/pull/59482
    
    A null buffer pointer (used by the prefetch dryrun mode) must never be passed into the S3 IO path; previously the fallback remote read dereferenced it:
    ```
    23:58:57   26# doris::io::S3FileReader::read_at_impl(unsigned long, 
doris::Slice, unsigned long*, doris::io::IOContext const*) at 
/root/doris/be/build_ASAN/../src/io/fs/s3_file_reader.cpp:166
    23:58:57   27# doris::io::FileReader::read_at(unsigned long, doris::Slice, 
unsigned long*, doris::io::IOContext const*) at 
/root/doris/be/build_ASAN/../src/io/fs/file_reader.cpp:34
    23:58:57   28# doris::io::CachedRemoteFileReader::read_at_impl(unsigned 
long, doris::Slice, unsigned long*, doris::io::IOContext const*) at 
/root/doris/be/build_ASAN/../src/io/cache/cached_remote_file_reader.cpp:556
    23:58:57   29# std::_Function_handler<void (), 
doris::io::CachedRemoteFileReader::prefetch_range(unsigned long, unsigned long, 
doris::io::IOContext const*)::$_0>::_M_invoke(std::_Any_data const&) at 
/usr/local/ldb-toolchain-v0.26/bin/../lib/gcc/x86_64-pc-linux-gnu/15/include/g++-v15/bits/std_function.h:292
    ```
---
 be/src/io/cache/cached_remote_file_reader.cpp | 29 ++++++++++++++++-----------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/be/src/io/cache/cached_remote_file_reader.cpp 
b/be/src/io/cache/cached_remote_file_reader.cpp
index ce2cfbcf283..4871b2dee67 100644
--- a/be/src/io/cache/cached_remote_file_reader.cpp
+++ b/be/src/io/cache/cached_remote_file_reader.cpp
@@ -546,18 +546,23 @@ Status CachedRemoteFileReader::read_at_impl(size_t 
offset, Slice result, size_t*
                 }
             }
             if (!st || block_state != FileBlock::State::DOWNLOADED) {
-                LOG(WARNING) << "Read data failed from file cache downloaded 
by others. err="
-                             << st.msg() << ", block state=" << block_state;
-                size_t nest_bytes_read {0};
-                stats.hit_cache = false;
-                stats.from_peer_cache = false;
-                s3_read_counter << 1;
-                SCOPED_RAW_TIMER(&stats.remote_read_timer);
-                RETURN_IF_ERROR(_remote_file_reader->read_at(
-                        current_offset, Slice(result.data + (current_offset - 
offset), read_size),
-                        &nest_bytes_read));
-                indirect_read_bytes += read_size;
-                DCHECK(nest_bytes_read == read_size);
+                if (is_dryrun) [[unlikely]] {
+                    // dryrun mode uses a null buffer, skip actual remote IO
+                } else {
+                    LOG(WARNING) << "Read data failed from file cache 
downloaded by others. err="
+                                 << st.msg() << ", block state=" << 
block_state;
+                    size_t nest_bytes_read {0};
+                    stats.hit_cache = false;
+                    stats.from_peer_cache = false;
+                    s3_read_counter << 1;
+                    SCOPED_RAW_TIMER(&stats.remote_read_timer);
+                    RETURN_IF_ERROR(_remote_file_reader->read_at(
+                            current_offset,
+                            Slice(result.data + (current_offset - offset), 
read_size),
+                            &nest_bytes_read));
+                    indirect_read_bytes += read_size;
+                    DCHECK(nest_bytes_read == read_size);
+                }
             }
         }
         *bytes_read += read_size;


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to