This is an automated email from the ASF dual-hosted git repository.

cshannon pushed a commit to branch 2.1
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/2.1 by this push:
     new ccf4eec33b Replace RecentLogs LinkedHashMap with Caffeine (#4881)
ccf4eec33b is described below

commit ccf4eec33b13257bd2869b7208fe71a55fed7c7c
Author: Christopher L. Shannon <cshan...@apache.org>
AuthorDate: Thu Sep 12 18:43:52 2024 -0400

    Replace RecentLogs LinkedHashMap with Caffeine (#4881)
    
    This improves concurrency by removing the synchronized blocks and
    replacing the LinkedHashMap with a Caffeine cache. The cache size is set
    to a max of 50 which matches the previous map limit.
    
    This closes #4876
---
 server/monitor/pom.xml                             |  4 ++
 .../accumulo/monitor/util/logging/RecentLogs.java  | 48 ++++++++++------------
 2 files changed, 25 insertions(+), 27 deletions(-)

diff --git a/server/monitor/pom.xml b/server/monitor/pom.xml
index fe59bf92a8..5ca84506d8 100644
--- a/server/monitor/pom.xml
+++ b/server/monitor/pom.xml
@@ -35,6 +35,10 @@
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.github.ben-manes.caffeine</groupId>
+      <artifactId>caffeine</artifactId>
+    </dependency>
     <dependency>
       <groupId>com.google.auto.service</groupId>
       <artifactId>auto-service</artifactId>
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/util/logging/RecentLogs.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/util/logging/RecentLogs.java
index e95da38c7f..6ca1543ea5 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/util/logging/RecentLogs.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/util/logging/RecentLogs.java
@@ -18,15 +18,17 @@
  */
 package org.apache.accumulo.monitor.util.logging;
 
-import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 
 import org.apache.accumulo.monitor.rest.logs.LogResource;
 import org.apache.accumulo.monitor.rest.logs.SanitizedLogEvent;
 import org.apache.accumulo.monitor.rest.logs.SingleLogEvent;
 
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+
 /**
  * A recent logs cache for the monitor that holds log messages received from
  * {@link AccumuloMonitorAppender} instances at the monitor's REST endpoint
@@ -38,52 +40,44 @@ public class RecentLogs {
 
   private static final int MAX_LOGS = 50;
 
+  private final Cache<String,DedupedEvent> events =
+      Caffeine.newBuilder().maximumSize(MAX_LOGS).build();
+
   /**
   * Internal class for keeping the current count and most recent event that matches a given cache
   * key (derived from the event's application, logger, level, and message fields).
    */
   private static class DedupedEvent {
     private final SingleLogEvent event;
-    private final int count;
+    private final AtomicInteger count;
 
-    private DedupedEvent(SingleLogEvent event, int count) {
+    private DedupedEvent(SingleLogEvent event) {
       this.event = event;
-      this.count = count;
+      this.count = new AtomicInteger();
     }
   }
 
-  private final LinkedHashMap<String,DedupedEvent> events =
-      new LinkedHashMap<>(MAX_LOGS + 1, (float) .75, true) {
-
-        private static final long serialVersionUID = 1L;
-
-        @Override
-        protected boolean removeEldestEntry(Map.Entry<String,DedupedEvent> eldest) {
-          return size() > MAX_LOGS;
-        }
-      };
-
-  public synchronized void addEvent(SingleLogEvent event) {
+  public void addEvent(SingleLogEvent event) {
     String key = event.application + ":" + event.logger + ":" + event.level + ":" + event.message;
-    int count = events.containsKey(key) ? events.remove(key).count + 1 : 1;
-    events.put(key, new DedupedEvent(event, count));
+    events.asMap().computeIfAbsent(key, k -> new DedupedEvent(event)).count.incrementAndGet();
   }
 
-  public synchronized void clearEvents() {
-    events.clear();
+  public void clearEvents() {
+    events.invalidateAll();
   }
 
-  public synchronized int numEvents() {
-    return events.size();
+  public int numEvents() {
+    return events.asMap().size();
   }
 
-  public synchronized boolean eventsIncludeErrors() {
-    return events.values().stream().anyMatch(
+  public boolean eventsIncludeErrors() {
+    return events.asMap().values().stream().anyMatch(
         x -> x.event.level.equalsIgnoreCase("ERROR") || x.event.level.equalsIgnoreCase("FATAL"));
   }
 
-  public synchronized List<SanitizedLogEvent> getSanitizedEvents() {
-    return events.values().stream().map(ev -> new SanitizedLogEvent(ev.event, ev.count))
+  public List<SanitizedLogEvent> getSanitizedEvents() {
+    return events.asMap().values().stream()
+        .map(ev -> new SanitizedLogEvent(ev.event, ev.count.get())).limit(MAX_LOGS)
         .collect(Collectors.toList());
   }
 

Reply via email to