This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 1abe9d438414cab92313ee45787dcd2102611d10
Author: Xinyi Zou <zouxiny...@gmail.com>
AuthorDate: Wed Feb 21 15:00:18 2024 +0800

    [fix](memory) Fix LRU cache stale sweep (#31122)
    
    Remove LRUCacheValueBase, move last_visit_time into LRUHandle, and
    automatically update last_visit_time with the current timestamp
    during cache insert and lookup.

    Do not rely on call sites updating last_visit_time externally,
    which is easy to forget.
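
    For illustration only, a minimal standalone sketch of the idea
    (ToyLRUCache and ToyHandle are hypothetical names, not the Doris
    classes; PrunedInfo mirrors the struct added by this patch): the
    cache itself stamps last_visit_time on insert and lookup, and
    prune_if passes the whole handle to the predicate, so a stale
    sweep can read the timestamp and entry size without any
    bookkeeping at call sites.

    #include <chrono>
    #include <cstdint>
    #include <functional>
    #include <string>
    #include <unordered_map>

    struct ToyHandle {
        std::string key;
        int64_t bytes = 0;
        int64_t last_visit_time = 0; // stamped by the cache, never by callers
    };

    struct PrunedInfo {
        int64_t pruned_count = 0;
        int64_t pruned_size = 0;
    };

    class ToyLRUCache {
    public:
        using PrunePredicate = std::function<bool(const ToyHandle*)>;

        void insert(const std::string& key, int64_t bytes) {
            _entries[key] = ToyHandle {key, bytes, now_ms()}; // timestamp set on insert
        }

        const ToyHandle* lookup(const std::string& key) {
            auto it = _entries.find(key);
            if (it == _entries.end()) {
                return nullptr;
            }
            it->second.last_visit_time = now_ms(); // refreshed on every hit
            return &it->second;
        }

        // Erase every entry the predicate accepts and report how many
        // entries and bytes were freed (the PrunedInfo idea).
        PrunedInfo prune_if(const PrunePredicate& pred) {
            PrunedInfo info;
            for (auto it = _entries.begin(); it != _entries.end();) {
                if (pred(&it->second)) {
                    ++info.pruned_count;
                    info.pruned_size += it->second.bytes;
                    it = _entries.erase(it);
                } else {
                    ++it;
                }
            }
            return info;
        }

    private:
        // Wall-clock milliseconds, standing in for UnixMillis().
        static int64_t now_ms() {
            using namespace std::chrono;
            return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
        }

        std::unordered_map<std::string, ToyHandle> _entries;
    };

    A stale sweep then reduces to a single predicate over the handle,
    e.g. cache.prune_if([=](const ToyHandle* h) { return
    h->last_visit_time + ttl_ms < now; }); with ttl_ms and now
    computed by the caller.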
---
 be/src/olap/lru_cache.cpp                          | 39 +++++++-----
 be/src/olap/lru_cache.h                            | 39 +++++++-----
 be/src/olap/page_cache.cpp                         |  2 -
 be/src/olap/page_cache.h                           |  7 +--
 .../rowset/segment_v2/inverted_index_cache.cpp     |  2 -
 .../olap/rowset/segment_v2/inverted_index_cache.h  |  6 +-
 be/src/olap/schema_cache.h                         |  4 +-
 be/src/olap/segment_loader.h                       |  3 +-
 be/src/olap/storage_engine.cpp                     |  2 -
 be/src/olap/storage_engine.h                       |  2 +-
 be/src/olap/tablet_schema_cache.cpp                |  2 -
 be/src/olap/tablet_schema_cache.h                  |  2 +-
 be/src/runtime/memory/lru_cache_policy.h           | 34 ++++------
 be/src/service/point_query_executor.h              |  4 +-
 be/test/olap/lru_cache_test.cpp                    | 73 ++++++++++++----------
 15 files changed, 113 insertions(+), 108 deletions(-)

diff --git a/be/src/olap/lru_cache.cpp b/be/src/olap/lru_cache.cpp
index 2077bf263e5..95114ec54c9 100644
--- a/be/src/olap/lru_cache.cpp
+++ b/be/src/olap/lru_cache.cpp
@@ -14,6 +14,7 @@
 #include "gutil/bits.h"
 #include "runtime/thread_context.h"
 #include "util/doris_metrics.h"
+#include "util/time.h"
 
 using std::string;
 using std::stringstream;
@@ -243,6 +244,7 @@ Cache::Handle* LRUCache::lookup(const CacheKey& key, uint32_t hash) {
         }
         e->refs++;
         ++_hit_count;
+        e->last_visit_time = UnixMillis();
     }
     return reinterpret_cast<Cache::Handle*>(e);
 }
@@ -371,6 +373,7 @@ Cache::Handle* LRUCache::insert(const CacheKey& key, uint32_t hash, void* value,
     e->mem_tracker = tracker;
     e->type = _type;
     memcpy(e->key_data, key.data(), key.size());
+    e->last_visit_time = UnixMillis();
    // The memory of the parameter value should be recorded in the tls mem tracker,
    // transfer the memory ownership of the value to ShardedLRUCache::_mem_tracker.
     THREAD_MEM_TRACKER_TRANSFER_TO(e->bytes, tracker);
@@ -440,7 +443,7 @@ void LRUCache::erase(const CacheKey& key, uint32_t hash) {
     }
 }
 
-int64_t LRUCache::prune() {
+PrunedInfo LRUCache::prune() {
     LRUHandle* to_remove_head = nullptr;
     {
         std::lock_guard l(_mutex);
@@ -458,23 +461,25 @@ int64_t LRUCache::prune() {
         }
     }
     int64_t pruned_count = 0;
+    int64_t pruned_size = 0;
     while (to_remove_head != nullptr) {
         ++pruned_count;
+        pruned_size += to_remove_head->bytes;
         LRUHandle* next = to_remove_head->next;
         to_remove_head->free();
         to_remove_head = next;
     }
-    return pruned_count;
+    return {pruned_count, pruned_size};
 }
 
-int64_t LRUCache::prune_if(CacheValuePredicate pred, bool lazy_mode) {
+PrunedInfo LRUCache::prune_if(CachePrunePredicate pred, bool lazy_mode) {
     LRUHandle* to_remove_head = nullptr;
     {
         std::lock_guard l(_mutex);
         LRUHandle* p = _lru_normal.next;
         while (p != &_lru_normal) {
             LRUHandle* next = p->next;
-            if (pred(p->value)) {
+            if (pred(p)) {
                 _evict_one_entry(p);
                 p->next = to_remove_head;
                 to_remove_head = p;
@@ -487,7 +492,7 @@ int64_t LRUCache::prune_if(CacheValuePredicate pred, bool lazy_mode) {
         p = _lru_durable.next;
         while (p != &_lru_durable) {
             LRUHandle* next = p->next;
-            if (pred(p->value)) {
+            if (pred(p)) {
                 _evict_one_entry(p);
                 p->next = to_remove_head;
                 to_remove_head = p;
@@ -498,13 +503,15 @@ int64_t LRUCache::prune_if(CacheValuePredicate pred, bool lazy_mode) {
         }
     }
     int64_t pruned_count = 0;
+    int64_t pruned_size = 0;
     while (to_remove_head != nullptr) {
         ++pruned_count;
+        pruned_size += to_remove_head->bytes;
         LRUHandle* next = to_remove_head->next;
         to_remove_head->free();
         to_remove_head = next;
     }
-    return pruned_count;
+    return {pruned_count, pruned_size};
 }
 
 void LRUCache::set_cache_value_time_extractor(CacheValueTimeExtractor cache_value_time_extractor) {
@@ -622,20 +629,24 @@ uint64_t ShardedLRUCache::new_id() {
     return _last_id.fetch_add(1, std::memory_order_relaxed);
 }
 
-int64_t ShardedLRUCache::prune() {
-    int64_t num_prune = 0;
+PrunedInfo ShardedLRUCache::prune() {
+    PrunedInfo pruned_info;
     for (int s = 0; s < _num_shards; s++) {
-        num_prune += _shards[s]->prune();
+        PrunedInfo info = _shards[s]->prune();
+        pruned_info.pruned_count += info.pruned_count;
+        pruned_info.pruned_size += info.pruned_size;
     }
-    return num_prune;
+    return pruned_info;
 }
 
-int64_t ShardedLRUCache::prune_if(CacheValuePredicate pred, bool lazy_mode) {
-    int64_t num_prune = 0;
+PrunedInfo ShardedLRUCache::prune_if(CachePrunePredicate pred, bool lazy_mode) {
+    PrunedInfo pruned_info;
     for (int s = 0; s < _num_shards; s++) {
-        num_prune += _shards[s]->prune_if(pred, lazy_mode);
+        PrunedInfo info = _shards[s]->prune_if(pred, lazy_mode);
+        pruned_info.pruned_count += info.pruned_count;
+        pruned_info.pruned_size += info.pruned_size;
     }
-    return num_prune;
+    return pruned_info;
 }
 
 int64_t ShardedLRUCache::mem_consumption() {
diff --git a/be/src/olap/lru_cache.h b/be/src/olap/lru_cache.h
index 2a7d0ac86b9..d5faba6a2f6 100644
--- a/be/src/olap/lru_cache.h
+++ b/be/src/olap/lru_cache.h
@@ -53,6 +53,7 @@ namespace doris {
 
 class Cache;
 class LRUCachePolicy;
+struct LRUHandle;
 
 enum LRUCacheType {
    SIZE, // The capacity of cache is based on the memory size of cache entry, memory size = handle size + charge.
@@ -150,11 +151,15 @@ private:
 // The entry with smaller CachePriority will evict firstly
 enum class CachePriority { NORMAL = 0, DURABLE = 1 };
 
-using CacheValuePredicate = std::function<bool(const void*)>;
+using CachePrunePredicate = std::function<bool(const LRUHandle*)>;
 // CacheValueTimeExtractor can extract timestamp
 // in cache value through the specified function,
 // such as last_visit_time in InvertedIndexSearcherCache::CacheValue
 using CacheValueTimeExtractor = std::function<int64_t(const void*)>;
+struct PrunedInfo {
+    int64_t pruned_count = 0;
+    int64_t pruned_size = 0;
+};
 
 class Cache {
 public:
@@ -219,12 +224,12 @@ public:
     // encouraged to override the default implementation.  A future release of
     // leveldb may change prune() to a pure abstract method.
     // return num of entries being pruned.
-    virtual int64_t prune() { return 0; }
+    virtual PrunedInfo prune() { return {0, 0}; }
 
    // Same as prune(), but the entry will only be pruned if the predicate matched.
    // NOTICE: the predicate should be simple enough, or the prune_if() function
     // may hold lock for a long time to execute predicate.
-    virtual int64_t prune_if(CacheValuePredicate pred, bool lazy_mode = false) { return 0; }
+    virtual PrunedInfo prune_if(CachePrunePredicate pred, bool lazy_mode = false) { return {0, 0}; }
 
     virtual int64_t mem_consumption() = 0;
 
@@ -246,15 +251,17 @@ struct LRUHandle {
     struct LRUHandle* prev = nullptr;      // previous entry in lru list
     size_t charge;
     size_t key_length;
-    size_t total_size; // including key length
-    size_t bytes;      // Used by LRUCacheType::NUMBER, LRUCacheType::SIZE equal to total_size.
-    bool in_cache;     // Whether entry is in the cache.
+    size_t total_size; // Entry charge, used to limit cache capacity, LRUCacheType::SIZE including key length.
+    size_t bytes;  // Used by LRUCacheType::NUMBER, LRUCacheType::SIZE equal to total_size.
+    bool in_cache; // Whether entry is in the cache.
     uint32_t refs;
     uint32_t hash; // Hash of key(); used for fast sharding and comparisons
     CachePriority priority = CachePriority::NORMAL;
     MemTrackerLimiter* mem_tracker;
     LRUCacheType type;
-    char key_data[1]; // Beginning of key
+    int64_t last_visit_time; // Save the last visit time of this cache entry.
+    char key_data[1];        // Beginning of key
+    // Note! key_data must be at the end.
 
     CacheKey key() const {
         // For cheaper lookups, we allow a temporary Handle object
@@ -345,8 +352,8 @@ public:
     Cache::Handle* lookup(const CacheKey& key, uint32_t hash);
     void release(Cache::Handle* handle);
     void erase(const CacheKey& key, uint32_t hash);
-    int64_t prune();
-    int64_t prune_if(CacheValuePredicate pred, bool lazy_mode = false);
+    PrunedInfo prune();
+    PrunedInfo prune_if(CachePrunePredicate pred, bool lazy_mode = false);
 
     void set_cache_value_time_extractor(CacheValueTimeExtractor cache_value_time_extractor);
     void set_cache_value_check_timestamp(bool cache_value_check_timestamp);
@@ -384,8 +391,8 @@ private:
 
     HandleTable _table;
 
-    uint64_t _lookup_count = 0; // cache查找总次数
-    uint64_t _hit_count = 0;    // 命中cache的总次数
+    uint64_t _lookup_count = 0; // number of cache lookups
+    uint64_t _hit_count = 0;    // number of cache hits
 
     CacheValueTimeExtractor _cache_value_time_extractor;
     bool _cache_value_check_timestamp = false;
@@ -408,8 +415,8 @@ public:
     virtual void* value(Handle* handle) override;
     Slice value_slice(Handle* handle) override;
     virtual uint64_t new_id() override;
-    virtual int64_t prune() override;
-    int64_t prune_if(CacheValuePredicate pred, bool lazy_mode = false) override;
+    PrunedInfo prune() override;
+    PrunedInfo prune_if(CachePrunePredicate pred, bool lazy_mode = false) override;
     int64_t mem_consumption() override;
     int64_t get_usage() override;
     size_t get_total_capacity() override { return _total_capacity; };
@@ -479,8 +486,10 @@ public:
     void* value(Handle* handle) override;
     Slice value_slice(Handle* handle) override;
     uint64_t new_id() override { return 0; };
-    int64_t prune() override { return 0; };
-    int64_t prune_if(CacheValuePredicate pred, bool lazy_mode = false) override { return 0; };
+    PrunedInfo prune() override { return {0, 0}; };
+    PrunedInfo prune_if(CachePrunePredicate pred, bool lazy_mode = false) override {
+        return {0, 0};
+    };
     int64_t mem_consumption() override { return 0; };
     int64_t get_usage() override { return 0; };
     size_t get_total_capacity() override { return 0; };
diff --git a/be/src/olap/page_cache.cpp b/be/src/olap/page_cache.cpp
index 6fbcbbe19f3..db22457b329 100644
--- a/be/src/olap/page_cache.cpp
+++ b/be/src/olap/page_cache.cpp
@@ -60,7 +60,6 @@ bool StoragePageCache::lookup(const CacheKey& key, PageCacheHandle* handle,
         return false;
     }
     *handle = PageCacheHandle(cache, lru_handle);
-    handle->update_last_visit_time();
     return true;
 }
 
@@ -79,7 +78,6 @@ void StoragePageCache::insert(const CacheKey& key, DataPage* data, PageCacheHand
     auto cache = _get_page_cache(page_type);
    auto lru_handle = cache->insert(key.encode(), data, data->capacity(), deleter, priority);
     *handle = PageCacheHandle(cache, lru_handle);
-    handle->update_last_visit_time();
 }
 
 } // namespace doris
diff --git a/be/src/olap/page_cache.h b/be/src/olap/page_cache.h
index a604d205503..2ea7c9a674a 100644
--- a/be/src/olap/page_cache.h
+++ b/be/src/olap/page_cache.h
@@ -37,7 +37,7 @@ namespace doris {
 class PageCacheHandle;
 
 template <typename TAllocator>
-class PageBase : private TAllocator, public LRUCacheValueBase {
+class PageBase : private TAllocator {
 public:
     PageBase() : _data(nullptr), _size(0), _capacity(0) {}
 
@@ -218,11 +218,6 @@ public:
         return Slice(cache_value->data(), cache_value->size());
     }
 
-    void update_last_visit_time() {
-        DataPage* cache_value = (DataPage*)_cache->value(_handle);
-        cache_value->last_visit_time = UnixMillis();
-    }
-
 private:
     Cache* _cache = nullptr;
     Cache::Handle* _handle = nullptr;
diff --git a/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp b/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp
index 139ad9bd906..8359a2f896f 100644
--- a/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp
+++ b/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp
@@ -142,9 +142,7 @@ void InvertedIndexQueryCache::insert(const CacheKey& key, std::shared_ptr<roarin
 
     std::unique_ptr<InvertedIndexQueryCache::CacheValue> cache_value_ptr =
             std::make_unique<InvertedIndexQueryCache::CacheValue>();
-    cache_value_ptr->last_visit_time = UnixMillis();
     cache_value_ptr->bitmap = bitmap;
-    cache_value_ptr->size = bitmap->getSizeInBytes();
     if (key.encode().empty()) {
         return;
     }
diff --git a/be/src/olap/rowset/segment_v2/inverted_index_cache.h b/be/src/olap/rowset/segment_v2/inverted_index_cache.h
index 503ff5fe5c7..cbeb8adf6ef 100644
--- a/be/src/olap/rowset/segment_v2/inverted_index_cache.h
+++ b/be/src/olap/rowset/segment_v2/inverted_index_cache.h
@@ -81,8 +81,10 @@ public:
 
     // The cache value of index_searcher lru cache.
     // Holding an opened index_searcher.
-    struct CacheValue : public LRUCacheValueBase {
+    struct CacheValue {
         IndexSearcherPtr index_searcher;
+        size_t size = 0;
+        int64_t last_visit_time;
 
         CacheValue() = default;
        explicit CacheValue(IndexSearcherPtr searcher, size_t mem_size, int64_t visit_time)
@@ -230,7 +232,7 @@ public:
         }
     };
 
-    struct CacheValue : public LRUCacheValueBase {
+    struct CacheValue {
         std::shared_ptr<roaring::Roaring> bitmap;
     };
 
diff --git a/be/src/olap/schema_cache.h b/be/src/olap/schema_cache.h
index 58bf05bbfc8..326d8875f81 100644
--- a/be/src/olap/schema_cache.h
+++ b/be/src/olap/schema_cache.h
@@ -69,7 +69,6 @@ public:
         if (lru_handle) {
            Defer release([cache = cache(), lru_handle] { cache->release(lru_handle); });
             auto value = (CacheValue*)cache()->value(lru_handle);
-            value->last_visit_time = UnixMillis();
             VLOG_DEBUG << "use cache schema";
             if constexpr (std::is_same_v<SchemaType, TabletSchemaSPtr>) {
                 return value->tablet_schema;
@@ -88,7 +87,6 @@ public:
             return;
         }
         CacheValue* value = new CacheValue;
-        value->last_visit_time = UnixMillis();
         if constexpr (std::is_same_v<SchemaType, TabletSchemaSPtr>) {
             value->type = Type::TABLET_SCHEMA;
             value->tablet_schema = schema;
@@ -108,7 +106,7 @@ public:
     // Try to prune the cache if expired.
     Status prune();
 
-    struct CacheValue : public LRUCacheValueBase {
+    struct CacheValue {
         Type type;
         // either tablet_schema or schema
         TabletSchemaSPtr tablet_schema = nullptr;
diff --git a/be/src/olap/segment_loader.h b/be/src/olap/segment_loader.h
index a53ff4bfc99..d0e2ae81509 100644
--- a/be/src/olap/segment_loader.h
+++ b/be/src/olap/segment_loader.h
@@ -72,7 +72,7 @@ public:
 
     // The cache value of segment lru cache.
     // Holding all opened segments of a rowset.
-    struct CacheValue : public LRUCacheValueBase {
+    struct CacheValue {
         segment_v2::SegmentSharedPtr segment;
     };
 
@@ -133,7 +133,6 @@ public:
 
     void push_segment(Cache* cache, Cache::Handle* handle) {
         segments.push_back(((SegmentCache::CacheValue*)cache->value(handle))->segment);
-        ((SegmentCache::CacheValue*)cache->value(handle))->last_visit_time = UnixMillis();
         cache->release(handle);
     }
 
diff --git a/be/src/olap/storage_engine.cpp b/be/src/olap/storage_engine.cpp
index 37c3c8f19aa..a4ed16cd262 100644
--- a/be/src/olap/storage_engine.cpp
+++ b/be/src/olap/storage_engine.cpp
@@ -1462,7 +1462,6 @@ int CreateTabletIdxCache::get_index(const std::string& key) {
     if (lru_handle) {
        Defer release([cache = cache(), lru_handle] { cache->release(lru_handle); });
         auto value = (CacheValue*)cache()->value(lru_handle);
-        value->last_visit_time = UnixMillis();
         VLOG_DEBUG << "use create tablet idx cache key=" << key << " value=" 
<< value->idx;
         return value->idx;
     }
@@ -1472,7 +1471,6 @@ int CreateTabletIdxCache::get_index(const std::string& key) {
 void CreateTabletIdxCache::set_index(const std::string& key, int next_idx) {
     assert(next_idx >= 0);
     CacheValue* value = new CacheValue;
-    value->last_visit_time = UnixMillis();
     value->idx = next_idx;
     auto deleter = [](const doris::CacheKey& key, void* value) {
         CacheValue* cache_value = (CacheValue*)value;
diff --git a/be/src/olap/storage_engine.h b/be/src/olap/storage_engine.h
index b3467b9c348..b06e431c6e8 100644
--- a/be/src/olap/storage_engine.h
+++ b/be/src/olap/storage_engine.h
@@ -496,7 +496,7 @@ public:
 
     void set_index(const std::string& key, int next_idx);
 
-    struct CacheValue : public LRUCacheValueBase {
+    struct CacheValue {
         int idx = 0;
     };
 
diff --git a/be/src/olap/tablet_schema_cache.cpp b/be/src/olap/tablet_schema_cache.cpp
index a1d3bc662ac..6cadee3a001 100644
--- a/be/src/olap/tablet_schema_cache.cpp
+++ b/be/src/olap/tablet_schema_cache.cpp
@@ -32,11 +32,9 @@ std::pair<Cache::Handle*, TabletSchemaSPtr> TabletSchemaCache::insert(const std:
     TabletSchemaSPtr tablet_schema_ptr;
     if (lru_handle) {
         auto* value = (CacheValue*)cache()->value(lru_handle);
-        value->last_visit_time = UnixMillis();
         tablet_schema_ptr = value->tablet_schema;
     } else {
         auto* value = new CacheValue;
-        value->last_visit_time = UnixMillis();
         tablet_schema_ptr = std::make_shared<TabletSchema>();
         TabletSchemaPB pb;
         pb.ParseFromString(key);
diff --git a/be/src/olap/tablet_schema_cache.h b/be/src/olap/tablet_schema_cache.h
index 5824f3c1870..6eb93105f83 100644
--- a/be/src/olap/tablet_schema_cache.h
+++ b/be/src/olap/tablet_schema_cache.h
@@ -43,7 +43,7 @@ public:
     void release(Cache::Handle*);
 
 private:
-    struct CacheValue : public LRUCacheValueBase {
+    struct CacheValue {
         TabletSchemaSPtr tablet_schema;
     };
 };
diff --git a/be/src/runtime/memory/lru_cache_policy.h b/be/src/runtime/memory/lru_cache_policy.h
index 717c90d9ef2..bfd1a2568a3 100644
--- a/be/src/runtime/memory/lru_cache_policy.h
+++ b/be/src/runtime/memory/lru_cache_policy.h
@@ -25,14 +25,6 @@
 
 namespace doris {
 
-// Base of the lru cache value.
-struct LRUCacheValueBase {
-    // Save the last visit time of this cache entry.
-    // Use atomic because it may be modified by multi threads.
-    std::atomic<int64_t> last_visit_time = 0;
-    size_t size = 0;
-};
-
 // Base of lru cache, allow prune stale entry and prune all entry.
 class LRUCachePolicy : public CachePolicy {
 public:
@@ -84,6 +76,8 @@ public:
 
     // Try to prune the cache if expired.
     void prune_stale() override {
+        COUNTER_SET(_freed_entrys_counter, (int64_t)0);
+        COUNTER_SET(_freed_memory_counter, (int64_t)0);
        if (_stale_sweep_time_s <= 0 && _cache == ExecEnv::GetInstance()->get_dummy_lru_cache()) {
             return;
         }
@@ -91,19 +85,15 @@ public:
             COUNTER_SET(_cost_timer, (int64_t)0);
             SCOPED_TIMER(_cost_timer);
             const int64_t curtime = UnixMillis();
-            int64_t byte_size = 0L;
-            auto pred = [this, curtime, &byte_size](const void* value) -> bool {
-                auto* cache_value = (LRUCacheValueBase*)value;
-                if ((cache_value->last_visit_time + _stale_sweep_time_s * 1000) < curtime) {
-                    byte_size += cache_value->size;
-                    return true;
-                }
-                return false;
+            auto pred = [this, curtime](const LRUHandle* handle) -> bool {
+                return static_cast<bool>((handle->last_visit_time + _stale_sweep_time_s * 1000) <
+                                         curtime);
             };
 
            // Prune cache in lazy mode to save cpu and minimize the time holding write lock
-            COUNTER_SET(_freed_entrys_counter, _cache->prune_if(pred, true));
-            COUNTER_SET(_freed_memory_counter, byte_size);
+            PrunedInfo pruned_info = _cache->prune_if(pred, true);
+            COUNTER_SET(_freed_entrys_counter, pruned_info.pruned_count);
+            COUNTER_SET(_freed_memory_counter, pruned_info.pruned_size);
             COUNTER_UPDATE(_prune_stale_number_counter, 1);
             LOG(INFO) << fmt::format("{} prune stale {} entries, {} bytes, {} 
times prune",
                                      type_string(_type), 
_freed_entrys_counter->value(),
@@ -113,6 +103,8 @@ public:
     }
 
     void prune_all(bool clear) override {
+        COUNTER_SET(_freed_entrys_counter, (int64_t)0);
+        COUNTER_SET(_freed_memory_counter, (int64_t)0);
         if (_cache == ExecEnv::GetInstance()->get_dummy_lru_cache()) {
             return;
         }
@@ -120,9 +112,9 @@ public:
             _cache->mem_consumption() > CACHE_MIN_FREE_SIZE) {
             COUNTER_SET(_cost_timer, (int64_t)0);
             SCOPED_TIMER(_cost_timer);
-            auto size = _cache->mem_consumption();
-            COUNTER_SET(_freed_entrys_counter, _cache->prune());
-            COUNTER_SET(_freed_memory_counter, size);
+            PrunedInfo pruned_info = _cache->prune();
+            COUNTER_SET(_freed_entrys_counter, pruned_info.pruned_count);
+            COUNTER_SET(_freed_memory_counter, pruned_info.pruned_size);
             COUNTER_UPDATE(_prune_all_number_counter, 1);
             LOG(INFO) << fmt::format(
                     "{} prune all {} entries, {} bytes, {} times prune, is 
clear: {}",
diff --git a/be/src/service/point_query_executor.h b/be/src/service/point_query_executor.h
index 77fa8c69cdf..ddde73d9d6d 100644
--- a/be/src/service/point_query_executor.h
+++ b/be/src/service/point_query_executor.h
@@ -215,7 +215,6 @@ private:
     void add(__int128_t cache_id, std::shared_ptr<Reusable> item) {
         std::string key = encode_key(cache_id);
         CacheValue* value = new CacheValue;
-        value->last_visit_time = UnixMillis();
         value->item = item;
         auto deleter = [](const doris::CacheKey& key, void* value) {
             CacheValue* cache_value = (CacheValue*)value;
@@ -236,13 +235,12 @@ private:
         if (lru_handle) {
            Defer release([cache = cache(), lru_handle] { cache->release(lru_handle); });
             auto value = (CacheValue*)cache()->value(lru_handle);
-            value->last_visit_time = UnixMillis();
             return value->item;
         }
         return nullptr;
     }
 
-    struct CacheValue : public LRUCacheValueBase {
+    struct CacheValue {
         std::shared_ptr<Reusable> item;
     };
 };
diff --git a/be/test/olap/lru_cache_test.cpp b/be/test/olap/lru_cache_test.cpp
index bf76f6b8f2a..e8a8b81efe6 100644
--- a/be/test/olap/lru_cache_test.cpp
+++ b/be/test/olap/lru_cache_test.cpp
@@ -48,9 +48,9 @@ uint32_t DecodeFixed32(const char* ptr) {
 }
 
 // Conversions between numeric keys/values and the types expected by Cache.
-const CacheKey EncodeKey(std::string* result, int k) {
+CacheKey EncodeKey(std::string* result, int k) {
     PutFixed32(result, k);
-    return CacheKey(result->c_str(), result->size());
+    return {result->c_str(), result->size()};
 }
 
 static int DecodeKey(const CacheKey& k) {
@@ -90,11 +90,11 @@ public:
 
    CacheTest() : _cache(new CacheTestPolicy(kCacheSize)) { _s_current = this; }
 
-    ~CacheTest() { delete _cache; }
+    ~CacheTest() override { delete _cache; }
 
-    Cache* cache() { return _cache->cache(); }
+    Cache* cache() const { return _cache->cache(); }
 
-    int Lookup(int key) {
+    int Lookup(int key) const {
         std::string result;
         Cache::Handle* handle = cache()->lookup(EncodeKey(&result, key));
        const int r = (handle == nullptr) ? -1 : DecodeValue(cache()->value(handle));
@@ -106,26 +106,26 @@ public:
         return r;
     }
 
-    void Insert(int key, int value, int charge) {
+    void Insert(int key, int value, int charge) const {
         std::string result;
        cache()->release(cache()->insert(EncodeKey(&result, key), EncodeValue(value), charge,
                                          &CacheTest::Deleter));
     }
 
-    void InsertDurable(int key, int value, int charge) {
+    void InsertDurable(int key, int value, int charge) const {
         std::string result;
        cache()->release(cache()->insert(EncodeKey(&result, key), EncodeValue(value), charge,
                                         &CacheTest::Deleter, CachePriority::DURABLE));
     }
 
-    void Erase(int key) {
+    void Erase(int key) const {
         std::string result;
         cache()->erase(EncodeKey(&result, key));
     }
 
-    void SetUp() {}
+    void SetUp() override {}
 
-    void TearDown() {}
+    void TearDown() override {}
 };
 CacheTest* CacheTest::_s_current;
 
@@ -254,37 +254,37 @@ static void insert_number_LRUCache(LRUCache& cache, const CacheKey& key, int val
 
 TEST_F(CacheTest, Usage) {
     LRUCache cache(LRUCacheType::SIZE);
-    cache.set_capacity(1040);
+    cache.set_capacity(1050);
 
     // The lru usage is handle_size + charge.
-    // handle_size = sizeof(handle) - 1 + key size = 104 - 1 + 3 = 106
+    // handle_size = sizeof(handle) - 1 + key size = 120 - 1 + 3 = 122
     CacheKey key1("100");
     insert_LRUCache(cache, key1, 100, CachePriority::NORMAL);
-    ASSERT_EQ(206, cache.get_usage()); // 100 + 106
+    ASSERT_EQ(222, cache.get_usage()); // 100 + 122
 
     CacheKey key2("200");
     insert_LRUCache(cache, key2, 200, CachePriority::DURABLE);
-    ASSERT_EQ(512, cache.get_usage()); // 206 + 306(d), d = DURABLE
+    ASSERT_EQ(544, cache.get_usage()); // 222 + 322(d), d = DURABLE
 
     CacheKey key3("300");
     insert_LRUCache(cache, key3, 300, CachePriority::NORMAL);
-    ASSERT_EQ(918, cache.get_usage()); // 206 + 306(d) + 406
+    ASSERT_EQ(966, cache.get_usage()); // 222 + 322(d) + 422
 
     CacheKey key4("400");
     insert_LRUCache(cache, key4, 400, CachePriority::NORMAL);
-    ASSERT_EQ(812, cache.get_usage()); // 306(d) + 506, evict 206 406
+    ASSERT_EQ(844, cache.get_usage()); // 322(d) + 522, evict 222 422
 
     CacheKey key5("500");
     insert_LRUCache(cache, key5, 500, CachePriority::NORMAL);
-    ASSERT_EQ(912, cache.get_usage()); // 306(d) + 606, evict 506
+    ASSERT_EQ(944, cache.get_usage()); // 322(d) + 622, evict 522
 
     CacheKey key6("600");
     insert_LRUCache(cache, key6, 600, CachePriority::NORMAL);
-    ASSERT_EQ(1012, cache.get_usage()); // 306(d) + 706, evict 506
+    ASSERT_EQ(1044, cache.get_usage()); // 322(d) + 722, evict 622
 
     CacheKey key7("950");
     insert_LRUCache(cache, key7, 950, CachePriority::DURABLE);
-    ASSERT_EQ(0, cache.get_usage()); // evict 306 706, because 950 + 106 > 1040, so insert failed
+    ASSERT_EQ(0, cache.get_usage()); // evict 322 722, because 950 + 122 > 1050, so insert failed
 }
 
 TEST_F(CacheTest, Prune) {
@@ -320,11 +320,13 @@ TEST_F(CacheTest, Prune) {
     insert_number_LRUCache(cache, key7, 700, 1, CachePriority::DURABLE);
     EXPECT_EQ(5, cache.get_usage());
 
-    auto pred = [](const void* value) -> bool { return false; };
+    auto pred = [](const LRUHandle* handle) -> bool { return false; };
     cache.prune_if(pred);
     EXPECT_EQ(5, cache.get_usage());
 
-    auto pred2 = [](const void* value) -> bool { return DecodeValue((void*)value) > 400; };
+    auto pred2 = [](const LRUHandle* handle) -> bool {
+        return DecodeValue((void*)(handle->value)) > 400;
+    };
     cache.prune_if(pred2);
     EXPECT_EQ(2, cache.get_usage());
 
@@ -335,7 +337,7 @@ TEST_F(CacheTest, Prune) {
        insert_number_LRUCache(cache, CacheKey {std::to_string(i)}, i, 1, CachePriority::NORMAL);
         EXPECT_EQ(i, cache.get_usage());
     }
-    cache.prune_if([](const void*) { return true; });
+    cache.prune_if([](const LRUHandle*) { return true; });
     EXPECT_EQ(0, cache.get_usage());
 }
 
@@ -372,20 +374,25 @@ TEST_F(CacheTest, PruneIfLazyMode) {
     insert_number_LRUCache(cache, key7, 700, 1, CachePriority::DURABLE);
     EXPECT_EQ(7, cache.get_usage());
 
-    auto pred = [](const void* value) -> bool { return false; };
+    auto pred = [](const LRUHandle* handle) -> bool { return false; };
     cache.prune_if(pred, true);
     EXPECT_EQ(7, cache.get_usage());
 
    // in lazy mode, the first item not satisfied the pred2, `prune_if` then stopped
     // and no item's removed.
-    auto pred2 = [](const void* value) -> bool { return DecodeValue((void*)value) > 400; };
+    auto pred2 = [](const LRUHandle* handle) -> bool {
+        return DecodeValue((void*)(handle->value)) > 400;
+    };
     cache.prune_if(pred2, true);
     EXPECT_EQ(7, cache.get_usage());
 
     // in normal priority, 100, 300 are removed
     // in durable priority, 200 is removed
-    auto pred3 = [](const void* value) -> bool { return DecodeValue((void*)value) <= 600; };
-    EXPECT_EQ(3, cache.prune_if(pred3, true));
+    auto pred3 = [](const LRUHandle* handle) -> bool {
+        return DecodeValue((void*)(handle->value)) <= 600;
+    };
+    PrunedInfo pruned_info = cache.prune_if(pred3, true);
+    EXPECT_EQ(3, pruned_info.pruned_count);
     EXPECT_EQ(4, cache.get_usage());
 }
 
@@ -429,11 +436,13 @@ TEST_F(CacheTest, Number) {
     insert_number_LRUCache(cache, key8, 100, 1, CachePriority::DURABLE);
     EXPECT_EQ(2, cache.get_usage());
 
-    auto pred = [](const void* value) -> bool { return false; };
+    auto pred = [](const LRUHandle* handle) -> bool { return false; };
     cache.prune_if(pred);
     EXPECT_EQ(2, cache.get_usage());
 
-    auto pred2 = [](const void* value) -> bool { return DecodeValue((void*)value) > 100; };
+    auto pred2 = [](const LRUHandle* handle) -> bool {
+        return DecodeValue((void*)(handle->value)) > 100;
+    };
     cache.prune_if(pred2);
     EXPECT_EQ(1, cache.get_usage());
 
@@ -498,7 +507,7 @@ TEST(CacheHandleTest, HandleTableTest) {
     LRUHandle* hs[count];
     for (int i = 0; i < count; ++i) {
         CacheKey* key = &keys[i];
-        LRUHandle* h = reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key->size()));
+        auto* h = reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key->size()));
         h->value = nullptr;
         h->deleter = nullptr;
         h->charge = 1;
@@ -531,7 +540,7 @@ TEST(CacheHandleTest, HandleTableTest) {
 
     for (int i = 0; i < count; ++i) {
         CacheKey* key = &keys[i];
-        LRUHandle* h = reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key->size()));
+        auto* h = reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key->size()));
         h->value = nullptr;
         h->deleter = nullptr;
         h->charge = 1;
@@ -574,8 +583,8 @@ TEST(CacheHandleTest, HandleTableTest) {
 
     EXPECT_EQ(ht._elems, count - 4);
 
-    for (int i = 0; i < count; ++i) {
-        free(hs[i]);
+    for (auto& h : hs) {
+        free(h);
     }
 }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
