This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch 3.1
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/3.1 by this push:
     new be410feb68 replace cache usage with a map (#4855)
be410feb68 is described below

commit be410feb680730e490f540a4e0d93f8dfcb39c3d
Author: Keith Turner <ktur...@apache.org>
AuthorDate: Fri Sep 13 14:01:54 2024 -0400

    replace cache usage with a map (#4855)
    
    Introduced a cache in #4853, but found that it was not needed because
    the code clears it on each pass through all of the tablets. It had been
    assumed to be a long-lived cache.
---
 .../tserver/memory/LargestFirstMemoryManager.java     | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/memory/LargestFirstMemoryManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/memory/LargestFirstMemoryManager.java
index ad66b4a848..42623f7027 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/memory/LargestFirstMemoryManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/memory/LargestFirstMemoryManager.java
@@ -34,9 +34,6 @@ import org.apache.accumulo.server.ServerContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.github.benmanes.caffeine.cache.Cache;
-import com.github.benmanes.caffeine.cache.Caffeine;
-
 /**
  * The LargestFirstMemoryManager attempts to keep memory between 80% and 90% full. It adapts over
  * time the point at which it should start a compaction based on how full memory gets between
@@ -60,8 +57,7 @@ public class LargestFirstMemoryManager {
   private double compactionThreshold;
   private long maxObserved;
   private final HashMap<TableId,Long> mincIdleThresholds = new HashMap<>();
-  private final Cache<TableId,Long> mincAgeThresholds =
-      Caffeine.newBuilder().expireAfterWrite(5, TimeUnit.MINUTES).build();
+  private final HashMap<TableId,Long> mincAgeThresholds = new HashMap<>();
   private ServerContext context = null;
 
   private static class TabletInfo {
@@ -145,17 +141,14 @@ public class LargestFirstMemoryManager {
   @SuppressWarnings("deprecation")
   protected long getMinCIdleThreshold(KeyExtent extent) {
     TableId tableId = extent.tableId();
-    if (!mincIdleThresholds.containsKey(tableId)) {
-      mincIdleThresholds.put(tableId, context.getTableConfiguration(tableId)
-          .getTimeInMillis(Property.TABLE_MINC_COMPACT_IDLETIME));
-    }
-    return mincIdleThresholds.get(tableId);
+    return mincIdleThresholds.computeIfAbsent(tableId, tid -> context.getTableConfiguration(tid)
+        .getTimeInMillis(Property.TABLE_MINC_COMPACT_IDLETIME));
   }
 
   protected long getMaxAge(KeyExtent extent) {
     TableId tableId = extent.tableId();
-    return mincAgeThresholds.asMap().computeIfAbsent(tableId, tid -> context
-        .getTableConfiguration(tid).getTimeInMillis(Property.TABLE_MINC_COMPACT_MAXAGE));
+    return mincAgeThresholds.computeIfAbsent(tableId, tid -> context.getTableConfiguration(tid)
+        .getTimeInMillis(Property.TABLE_MINC_COMPACT_MAXAGE));
   }
 
   protected boolean tableExists(TableId tableId) {
@@ -176,6 +169,8 @@ public class LargestFirstMemoryManager {
     final int maxMinCs = maxConcurrentMincs * numWaitingMultiplier;
 
     mincIdleThresholds.clear();
+    mincAgeThresholds.clear();
+
     final List<KeyExtent> tabletsToMinorCompact = new ArrayList<>();
 
     LargestMap largestMemTablets = new LargestMap(maxMinCs);
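
For readers skimming the diff: the pattern the commit lands on is a plain
HashMap used as a per-pass memo, cleared at the start of each pass over the
tablets. Below is a minimal, self-contained sketch of that pattern; the class
and names here (PerPassThresholds, loadThreshold) are hypothetical stand-ins,
while the real code keys on TableId and reads the TABLE_MINC_COMPACT_*
properties from the table configuration.

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the per-pass memoization pattern (hypothetical names).
    class PerPassThresholds {
      // A plain map suffices here: entries only need to live for one pass
      // over the tablets, so a Caffeine cache with time-based expiry is
      // unnecessary.
      private final Map<String,Long> thresholds = new HashMap<>();

      void beginPass() {
        thresholds.clear(); // discard values memoized during the last pass
      }

      long thresholdFor(String tableId) {
        // Look up the configuration at most once per table per pass.
        return thresholds.computeIfAbsent(tableId, this::loadThreshold);
      }

      private long loadThreshold(String tableId) {
        return 60_000L; // placeholder for a table-configuration lookup
      }
    }

Clearing the map at the start of each pass also bounds how stale a memoized
threshold can get to a single pass, which is why the five-minute expiry the
Caffeine cache provided is no longer needed.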
