This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 0c38e5e06ed [fix](filecache) fix load_cache_info_into_memory crash 
(#51684)
0c38e5e06ed is described below

commit 0c38e5e06ed23e95abe12884af2132850ef936b8
Author: zhengyu <[email protected]>
AuthorDate: Wed Jun 18 21:31:09 2025 +0800

    [fix](filecache) fix load_cache_info_into_memory crash (#51684)
    
    Keep _storage declared last so it is destroyed first. Members are
    destroyed in reverse order of declaration; destroying _storage first
    joins the async load_cache_info_into_memory thread before the other
    members of BlockFileCache are torn down. Otherwise that thread could
    crash by accessing already-destroyed members.
    
    Signed-off-by: zhengyu <[email protected]>
---
 be/src/io/cache/block_file_cache.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/be/src/io/cache/block_file_cache.h 
b/be/src/io/cache/block_file_cache.h
index e5632ec8507..7c046cc1627 100644
--- a/be/src/io/cache/block_file_cache.h
+++ b/be/src/io/cache/block_file_cache.h
@@ -496,7 +496,6 @@ private:
     size_t _max_query_cache_size = 0;
 
     mutable std::mutex _mutex;
-    std::unique_ptr<FileCacheStorage> _storage;
     bool _close {false};
     std::mutex _close_mtx;
     std::condition_variable _close_cv;
@@ -577,6 +576,11 @@ private:
     std::shared_ptr<bvar::LatencyRecorder> _evict_in_advance_latency_us;
     std::shared_ptr<bvar::LatencyRecorder> _recycle_keys_length_recorder;
     std::shared_ptr<bvar::LatencyRecorder> _ttl_gc_latency_us;
+    // keep _storage last so it will deconstruct first
+    // otherwise, load_cache_info_into_memory might crash
+    // coz it will use other members of BlockFileCache
+    // so join this async load thread first
+    std::unique_ptr<FileCacheStorage> _storage;
 };
 
 } // namespace doris::io


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to