This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new a0c3ddf902b [fix](memory) Fix LRUCacheType::NUMBER charge (#29588)
a0c3ddf902b is described below
commit a0c3ddf902b3f0d83ea7c1ead14151488135bdf5
Author: Xinyi Zou <[email protected]>
AuthorDate: Sat Jan 6 10:37:56 2024 +0800
[fix](memory) Fix LRUCacheType::NUMBER charge (#29588)
For LRUCacheType::NUMBER, the charge should not include handle_size, because
in this case the charge is no longer a memory size but an independent weight.
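For illustration, a minimal C++ sketch of the corrected accounting (hypothetical
names, not the actual Doris LRUCache API): SIZE caches add the per-entry handle
overhead to the charge, while NUMBER caches use the charge as-is.

    #include <cstddef>

    // Hypothetical stand-in for LRUCacheType; not the actual Doris type.
    enum class CacheTypeSketch { SIZE, NUMBER };

    // Mirrors the corrected expression in LRUCache::insert(): SIZE adds the
    // per-entry handle overhead to the charge (a memory size), while NUMBER
    // uses the charge as-is (an independent weight).
    std::size_t entry_total_size(CacheTypeSketch type, std::size_t handle_size,
                                 std::size_t charge) {
        return type == CacheTypeSketch::SIZE ? handle_size + charge : charge;
    }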
---
be/src/olap/lru_cache.cpp | 8 +++-
be/src/olap/lru_cache.h | 4 +-
be/src/runtime/exec_env_init.cpp | 2 +-
be/test/olap/lru_cache_test.cpp | 95 +++++++++++++++++++++++++++++++++-------
4 files changed, 88 insertions(+), 21 deletions(-)
diff --git a/be/src/olap/lru_cache.cpp b/be/src/olap/lru_cache.cpp
index efc92870470..2077bf263e5 100644
--- a/be/src/olap/lru_cache.cpp
+++ b/be/src/olap/lru_cache.cpp
@@ -356,8 +356,12 @@ Cache::Handle* LRUCache::insert(const CacheKey& key, uint32_t hash, void* value,
e->deleter = deleter;
e->charge = charge;
e->key_length = key.size();
- e->total_size = (_type == LRUCacheType::SIZE ? handle_size + charge : 1);
+ // if LRUCacheType::NUMBER, charge does not include handle_size,
+ // because the charge is no longer a memory size but an independent weight.
+ e->total_size = (_type == LRUCacheType::SIZE ? handle_size + charge : charge);
DCHECK(_type == LRUCacheType::SIZE || bytes != -1) << " _type " << _type;
+ // if LRUCacheType::NUMBER and bytes equals 0 (e.g. caches that cannot accurately track memory size),
+ // the cache mem tracker value divided by handle_size (106) gives the number of cache entries.
e->bytes = (_type == LRUCacheType::SIZE ? handle_size + charge : handle_size + bytes);
e->hash = hash;
e->refs = 2; // one for the returned handle, one for LRUCache.
@@ -670,7 +674,7 @@ void ShardedLRUCache::update_cache_metrics() const {
Cache::Handle* DummyLRUCache::insert(const CacheKey& key, void* value, size_t charge,
void (*deleter)(const CacheKey& key, void* value),
CachePriority priority, size_t bytes) {
- size_t handle_size = sizeof(LRUHandle) - 1 + key.size();
+ size_t handle_size = sizeof(LRUHandle);
auto* e = reinterpret_cast<LRUHandle*>(malloc(handle_size));
e->value = value;
e->deleter = deleter;
diff --git a/be/src/olap/lru_cache.h b/be/src/olap/lru_cache.h
index 6ecab80022d..2a7d0ac86b9 100644
--- a/be/src/olap/lru_cache.h
+++ b/be/src/olap/lru_cache.h
@@ -55,8 +55,8 @@ class Cache;
class LRUCachePolicy;
enum LRUCacheType {
- SIZE, // The capacity of cache is based on the size of cache entry.
- NUMBER // The capacity of cache is based on the number of cache entry.
+ SIZE,  // The capacity of cache is based on the memory size of cache entries, memory size = handle size + charge.
+ NUMBER // The capacity of cache is based on the number of cache entries, where number = charge, the weight of an entry.
};
static constexpr LRUCacheType DEFAULT_LRU_CACHE_TYPE = LRUCacheType::SIZE;
diff --git a/be/src/runtime/exec_env_init.cpp b/be/src/runtime/exec_env_init.cpp
index eae038aaa46..1f9595e3bb1 100644
--- a/be/src/runtime/exec_env_init.cpp
+++ b/be/src/runtime/exec_env_init.cpp
@@ -626,7 +626,7 @@ void ExecEnv::destroy() {
SAFE_DELETE(_external_scan_context_mgr);
SAFE_DELETE(_user_function_cache);
- // cache_manager must be destoried after _inverted_index_query_cache
+ // cache_manager must be destroyed after all caches.
// https://github.com/apache/doris/issues/24082#issuecomment-1712544039
SAFE_DELETE(_cache_manager);
diff --git a/be/test/olap/lru_cache_test.cpp b/be/test/olap/lru_cache_test.cpp
index 60dbd0402b1..bf76f6b8f2a 100644
--- a/be/test/olap/lru_cache_test.cpp
+++ b/be/test/olap/lru_cache_test.cpp
@@ -236,11 +236,22 @@ static void insert_LRUCache(LRUCache& cache, const CacheKey& key, int value,
CachePriority priority) {
uint32_t hash = key.hash(key.data(), key.size(), 0);
static std::unique_ptr<MemTrackerLimiter> lru_cache_tracker =
- std::make_unique<MemTrackerLimiter>(MemTrackerLimiter::Type::GLOBAL, "TestLruCache");
+ std::make_unique<MemTrackerLimiter>(MemTrackerLimiter::Type::GLOBAL,
+ "TestSizeLruCache");
cache.release(cache.insert(key, hash, EncodeValue(value), value, &deleter,
lru_cache_tracker.get(), priority, value));
}
+static void insert_number_LRUCache(LRUCache& cache, const CacheKey& key, int value, int charge,
+ CachePriority priority) {
+ uint32_t hash = key.hash(key.data(), key.size(), 0);
+ static std::unique_ptr<MemTrackerLimiter> lru_cache_tracker =
+ std::make_unique<MemTrackerLimiter>(MemTrackerLimiter::Type::GLOBAL,
+ "TestNumberLruCache");
+ cache.release(cache.insert(key, hash, EncodeValue(value), charge, &deleter,
+ lru_cache_tracker.get(), priority, value));
+}
+
TEST_F(CacheTest, Usage) {
LRUCache cache(LRUCacheType::SIZE);
cache.set_capacity(1040);
@@ -282,31 +293,31 @@ TEST_F(CacheTest, Prune) {
// The lru usage is 1, add one entry
CacheKey key1("100");
- insert_LRUCache(cache, key1, 100, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, key1, 100, 1, CachePriority::NORMAL);
EXPECT_EQ(1, cache.get_usage());
CacheKey key2("200");
- insert_LRUCache(cache, key2, 200, CachePriority::DURABLE);
+ insert_number_LRUCache(cache, key2, 200, 1, CachePriority::DURABLE);
EXPECT_EQ(2, cache.get_usage());
CacheKey key3("300");
- insert_LRUCache(cache, key3, 300, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, key3, 300, 1, CachePriority::NORMAL);
EXPECT_EQ(3, cache.get_usage());
CacheKey key4("400");
- insert_LRUCache(cache, key4, 400, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, key4, 400, 1, CachePriority::NORMAL);
EXPECT_EQ(4, cache.get_usage());
CacheKey key5("500");
- insert_LRUCache(cache, key5, 500, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, key5, 500, 1, CachePriority::NORMAL);
EXPECT_EQ(5, cache.get_usage());
CacheKey key6("600");
- insert_LRUCache(cache, key6, 600, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, key6, 600, 1, CachePriority::NORMAL);
EXPECT_EQ(5, cache.get_usage());
CacheKey key7("700");
- insert_LRUCache(cache, key7, 700, CachePriority::DURABLE);
+ insert_number_LRUCache(cache, key7, 700, 1, CachePriority::DURABLE);
EXPECT_EQ(5, cache.get_usage());
auto pred = [](const void* value) -> bool { return false; };
@@ -321,7 +332,7 @@ TEST_F(CacheTest, Prune) {
EXPECT_EQ(0, cache.get_usage());
for (int i = 1; i <= 5; ++i) {
- insert_LRUCache(cache, CacheKey {std::to_string(i)}, i, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, CacheKey {std::to_string(i)}, i, 1, CachePriority::NORMAL);
EXPECT_EQ(i, cache.get_usage());
}
cache.prune_if([](const void*) { return true; });
@@ -334,31 +345,31 @@ TEST_F(CacheTest, PruneIfLazyMode) {
// The lru usage is 1, add one entry
CacheKey key1("100");
- insert_LRUCache(cache, key1, 100, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, key1, 100, 1, CachePriority::NORMAL);
EXPECT_EQ(1, cache.get_usage());
CacheKey key2("200");
- insert_LRUCache(cache, key2, 200, CachePriority::DURABLE);
+ insert_number_LRUCache(cache, key2, 200, 1, CachePriority::DURABLE);
EXPECT_EQ(2, cache.get_usage());
CacheKey key3("300");
- insert_LRUCache(cache, key3, 300, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, key3, 300, 1, CachePriority::NORMAL);
EXPECT_EQ(3, cache.get_usage());
CacheKey key4("666");
- insert_LRUCache(cache, key4, 666, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, key4, 666, 1, CachePriority::NORMAL);
EXPECT_EQ(4, cache.get_usage());
CacheKey key5("500");
- insert_LRUCache(cache, key5, 500, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, key5, 500, 1, CachePriority::NORMAL);
EXPECT_EQ(5, cache.get_usage());
CacheKey key6("600");
- insert_LRUCache(cache, key6, 600, CachePriority::NORMAL);
+ insert_number_LRUCache(cache, key6, 600, 1, CachePriority::NORMAL);
EXPECT_EQ(6, cache.get_usage());
CacheKey key7("700");
- insert_LRUCache(cache, key7, 700, CachePriority::DURABLE);
+ insert_number_LRUCache(cache, key7, 700, 1, CachePriority::DURABLE);
EXPECT_EQ(7, cache.get_usage());
auto pred = [](const void* value) -> bool { return false; };
@@ -378,6 +389,58 @@ TEST_F(CacheTest, PruneIfLazyMode) {
EXPECT_EQ(4, cache.get_usage());
}
+TEST_F(CacheTest, Number) {
+ LRUCache cache(LRUCacheType::NUMBER);
+ cache.set_capacity(5);
+
+ // The lru usage is 1, add one entry
+ CacheKey key1("1");
+ insert_number_LRUCache(cache, key1, 0, 1, CachePriority::NORMAL);
+ EXPECT_EQ(1, cache.get_usage());
+
+ CacheKey key2("2");
+ insert_number_LRUCache(cache, key2, 0, 2, CachePriority::DURABLE);
+ EXPECT_EQ(3, cache.get_usage());
+
+ CacheKey key3("3");
+ insert_number_LRUCache(cache, key3, 0, 3, CachePriority::NORMAL);
+ EXPECT_EQ(5, cache.get_usage());
+
+ CacheKey key4("4");
+ insert_number_LRUCache(cache, key4, 0, 3, CachePriority::NORMAL);
+ EXPECT_EQ(5, cache.get_usage()); // evict key3, because key3 is NORMAL, key2 is DURABLE.
+
+ CacheKey key5("5");
+ insert_number_LRUCache(cache, key5, 0, 5, CachePriority::NORMAL);
+ EXPECT_EQ(5, cache.get_usage()); // evict key2, key4
+
+ CacheKey key6("6");
+ insert_number_LRUCache(cache, key6, 0, 6, CachePriority::NORMAL);
+ EXPECT_EQ(0, cache.get_usage()); // evict key5; key6 is evicted at the same time as release.
+
+ CacheKey key7("7");
+ insert_number_LRUCache(cache, key7, 200, 1, CachePriority::NORMAL);
+ EXPECT_EQ(1, cache.get_usage());
+
+ CacheKey key8("8");
+ insert_number_LRUCache(cache, key8, 100, 1, CachePriority::DURABLE);
+ EXPECT_EQ(2, cache.get_usage());
+
+ insert_number_LRUCache(cache, key8, 100, 1, CachePriority::DURABLE);
+ EXPECT_EQ(2, cache.get_usage());
+
+ auto pred = [](const void* value) -> bool { return false; };
+ cache.prune_if(pred);
+ EXPECT_EQ(2, cache.get_usage());
+
+ auto pred2 = [](const void* value) -> bool { return DecodeValue((void*)value) > 100; };
+ cache.prune_if(pred2);
+ EXPECT_EQ(1, cache.get_usage());
+
+ cache.prune();
+ EXPECT_EQ(0, cache.get_usage());
+}
+
TEST_F(CacheTest, HeavyEntries) {
// Add a bunch of light and heavy entries and then count the combined
// size of items still in the cache, which must be approximately the
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]