aokolnychyi commented on code in PR #8755:
URL: https://github.com/apache/iceberg/pull/8755#discussion_r1446667326


##########
spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/SparkExecutorCache.java:
##########
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import java.time.Duration;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An executor cache for reducing the computation and IO overhead in tasks.
+ *
+ * <p>The cache is configured and controlled through Spark SQL properties. It 
supports both limits
+ * on the total cache size and maximum size for individual entries. 
Additionally, it implements
+ * automatic eviction of entries after a specified duration of inactivity. The 
cache will respect
+ * the SQL configuration valid at the time of initialization. All subsequent 
changes to the
+ * configuration will have no effect.
+ *
+ * <p>The cache is accessed and populated via {@link #getOrLoad(String, 
String, Supplier, long)}. If
+ * the value is not present in the cache, it is computed using the provided 
supplier and stored in
+ * the cache, subject to the defined size constraints. When a key is added, it 
must be associated
+ * with a particular group ID. Once a group is no longer needed, it is 
recommended to explicitly
+ * invalidate its state by calling {@link #invalidate(String)} instead of 
relying on automatic
+ * eviction.
+ *
+ * <p>Note that this class employs the singleton pattern to ensure only one 
cache exists per JVM.
+ */
+public class SparkExecutorCache {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(SparkExecutorCache.class);
+
+  // The SQL configuration is captured once here, at class-initialization time;
+  // later changes to the configuration have no effect (see class javadoc).
+  private static final SparkConfParser CONF_PARSER = new SparkConfParser();
+  private static final boolean CACHE_ENABLED = parseCacheEnabledConf();
+  private static final Duration TIMEOUT = parseTimeoutConf();
+  private static final long MAX_ENTRY_SIZE = parseMaxEntrySizeConf();
+  private static final long MAX_TOTAL_SIZE = parseMaxTotalSizeConf();
+  private static final String EXECUTOR_DESC = SparkUtil.executorDesc();
+
+  // Singleton instance; volatile so the unsynchronized read in getOrCreate()'s
+  // double-checked locking is safe.
+  private static volatile SparkExecutorCache instance = null;
+
+  // Backing Caffeine cache; presumably created lazily via state() — TODO confirm
+  // against the rest of the class (not visible in this hunk).
+  private volatile Cache<String, CacheValue> state;
+
+  // Private: instances are obtained only through getOrCreate().
+  private SparkExecutorCache() {}
+
+  /**
+   * Returns the singleton cache, creating it on first call.
+   *
+   * <p>Note this method returns null if caching is disabled.
+   */
+  public static SparkExecutorCache getOrCreate() {
+    // Double-checked locking: the first, unsynchronized read is safe because
+    // 'instance' is volatile; the second read under the lock prevents two
+    // threads from both creating an instance.
+    if (instance == null && CACHE_ENABLED) {
+      synchronized (SparkExecutorCache.class) {
+        if (instance == null) {
+          SparkExecutorCache.instance = new SparkExecutorCache();
+        }
+      }
+    }
+
+    return instance;
+  }
+
+  /** Returns the already-created cache instance, or null if none has been created yet. */
+  public static SparkExecutorCache get() {
+    return SparkExecutorCache.instance;
+  }
+
+  /** Returns the maximum size, in bytes, of a single entry that may be cached. */
+  public long maxEntrySize() {
+    return SparkExecutorCache.MAX_ENTRY_SIZE;
+  }
+
+  /** Returns how long an entry may remain inactive before it is evicted. */
+  public Duration timeout() {
+    return SparkExecutorCache.TIMEOUT;
+  }
+
+  /** Returns the upper bound, in bytes, on the combined size of all cached entries. */
+  public long maxTotalSize() {
+    return SparkExecutorCache.MAX_TOTAL_SIZE;
+  }
+
+  /**
+   * Gets the cached value for the key or populates the cache with a new mapping.
+   *
+   * @param group a group ID (used to scope keys so a whole group can be invalidated)
+   * @param key a cache key
+   * @param valueSupplier a supplier to compute the value if absent
+   * @param valueSize an estimated memory size of the value in bytes
+   * @return the cached or computed value
+   */
+  public <V> V getOrLoad(String group, String key, Supplier<V> valueSupplier, 
long valueSize) {
+    // Values larger than the max entry size bypass the cache entirely and are
+    // recomputed on every call.
+    if (valueSize > MAX_ENTRY_SIZE) {
+      return valueSupplier.get();
+    }
+
+    // NOTE(review): "_" as the separator can produce colliding keys if group IDs
+    // or keys may themselves contain "_" — confirm that cannot happen.
+    String internalKey = group + "_" + key;
+    CacheValue value = state().get(internalKey, loadFunc(valueSupplier, 
valueSize));
+    Preconditions.checkNotNull(value, "Loaded value must not be null");
+    // CacheValue.get() is defined elsewhere in this class — presumably it casts
+    // the stored object back to V; TODO confirm.
+    return value.get();
+  }
+
+  private <V> Function<String, CacheValue> loadFunc(Supplier<V> valueSupplier, 
long valueSize) {
+    return key -> {
+      long start = System.currentTimeMillis();
+      V value = valueSupplier.get();
+      long end = System.currentTimeMillis();
+      LOG.info("Loaded value for {} with size {} in {} ms", key, valueSize, 
(end - start));

Review Comment:
   I am going back and forth on the log level here. I'd say it is a fragile 
place and it is better to always have more logs for now. I don't expect a huge 
number of these lines. We do have pretty detailed logs for broadcasts in Spark, 
for instance. 
   
   That said, we can switch to debug if everyone thinks it would be better.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org

Reply via email to