amogh-jahagirdar commented on code in PR #14161:
URL: https://github.com/apache/iceberg/pull/14161#discussion_r2434085699

##########
aws/src/test/java/org/apache/iceberg/aws/TestS3FileIOSharedResourceManagement.java:
##########
@@ -0,0 +1,249 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.aws;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatCode;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.iceberg.aws.s3.S3FileIO;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+public class TestS3FileIOSharedResourceManagement {
+
+  private Map<String, String> properties;
+
+  @BeforeEach
+  public void before() {
+    properties = Maps.newHashMap();
+    properties.put("client.region", "us-east-1");
+    properties.put("s3.endpoint", "https://localhost:9000");
+    properties.put("s3.path-style-access", "true");
+    properties.put("s3.delete.enabled", "false"); // Don't actually delete during tests
+  }
+
+  @Test
+  public void testMultipleS3FileIOInstancesShareResources() {
+    // Create multiple S3FileIO instances with same configuration
+    S3FileIO fileIO1 = new S3FileIO();
+    fileIO1.initialize(properties);
+
+    S3FileIO fileIO2 = new S3FileIO();
+    fileIO2.initialize(properties);
+
+    S3FileIO fileIO3 = new S3FileIO();
+    fileIO3.initialize(properties);
+
+    // Verify they can coexist without resource conflicts
+    assertThat(fileIO1.client()).isNotNull();
+    assertThat(fileIO2.client()).isNotNull();
+    assertThat(fileIO3.client()).isNotNull();
+
+    // Close some instances - others should continue working
+    fileIO1.close();
+    fileIO2.close();
+
+    // The remaining instance should still work
+    assertThatCode(() -> fileIO3.client()).doesNotThrowAnyException();
+
+    fileIO3.close();
+  }
+
+  @Test
+  public void testSerializationDeserialization() throws Exception {
+    // This test simulates Spark's broadcast variable serialization/deserialization
+    S3FileIO originalFileIO = new S3FileIO();
+    originalFileIO.initialize(properties);
+
+    // Serialize the S3FileIO
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
+      oos.writeObject(originalFileIO);
+    }
+
+    // Deserialize on "executor"
+    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+    S3FileIO deserializedFileIO;
+    try (ObjectInputStream ois = new ObjectInputStream(bais)) {
+      deserializedFileIO = (S3FileIO) ois.readObject();
+    }
+
+    // Both should work without conflicts
+    assertThat(originalFileIO.client()).isNotNull();
+    assertThat(deserializedFileIO.client()).isNotNull();
+
+    // Close original (simulating broadcast cleanup) - deserialized should still work
+    originalFileIO.close();
+    assertThatCode(deserializedFileIO::client).doesNotThrowAnyException();
+
+    deserializedFileIO.close();
+  }
+
+  @Test
+  public void testConcurrentAccessFromMultipleThreads() throws InterruptedException {
+    int threadCount = 10;
+    ExecutorService executor = Executors.newFixedThreadPool(threadCount);
+    CountDownLatch startLatch = new CountDownLatch(1);
+    CountDownLatch completionLatch = new CountDownLatch(threadCount);
+    AtomicInteger successCount = new AtomicInteger(0);
+    AtomicReference<Exception> firstException = new AtomicReference<>();
+
+    // Create multiple threads that each create and use S3FileIO instances
+    for (int i = 0; i < threadCount; i++) {
+      executor.submit(
+          () -> {
+            try {
+              startLatch.await(); // Wait for all threads to be ready
+
+              S3FileIO fileIO = new S3FileIO();
+              fileIO.initialize(properties);
+
+              // Simulate some operations
+              assertThat(fileIO.client()).isNotNull();
+
+              // Simulate random close timing (like Spark GC cleanup)
+              if (Math.random() > 0.5) {
+                Thread.sleep((long) (Math.random() * 100));
+              }
+
+              fileIO.close();
+              successCount.incrementAndGet();
+
+            } catch (Exception e) {
+              firstException.compareAndSet(null, e);
+            } finally {
+              completionLatch.countDown();
+            }
+          });
+    }
+
+    // Start all threads simultaneously
+    startLatch.countDown();
+
+    // Wait for completion
+    assertThat(completionLatch.await(30, TimeUnit.SECONDS)).isTrue();
+
+    // Verify no exceptions occurred
+    if (firstException.get() != null) {
+      throw new RuntimeException("Test failed with exception", firstException.get());
+    }
+
+    assertThat(successCount.get()).isEqualTo(threadCount);
+
+    executor.shutdown();
+    assertThat(executor.awaitTermination(5, TimeUnit.SECONDS)).isTrue();
+  }
+
+  @Test
+  public void testMemoryPressureSimulation() throws InterruptedException {

Review Comment:
   I'm pretty hesitant to add tests like this, though I understand the degree of confidence it builds. The issue is that across different environments this test may not be reproducible, right? We're not really guaranteed that `System.gc()` does anything, etc.
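
   If we do want some GC-related coverage, a more deterministic sketch (rough idea, untested; assumes a `java.lang.ref.WeakReference` import) would poll a `WeakReference` under a deadline rather than trusting a single `System.gc()` call:

   ```java
   // Hypothetical variant: wait (bounded) for the instance to become
   // unreachable instead of assuming one System.gc() call reclaims anything.
   S3FileIO fileIO = new S3FileIO();
   fileIO.initialize(properties);
   WeakReference<S3FileIO> ref = new WeakReference<>(fileIO);
   fileIO = null; // drop the only strong reference

   long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
   while (ref.get() != null && System.currentTimeMillis() < deadline) {
     System.gc(); // still only a hint; the deadline keeps the test bounded
     Thread.sleep(50);
   }
   assertThat(ref.get()).isNull();
   ```

   Even that can stall until the timeout on some collectors, so I'd still lean toward leaving this kind of test out.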

##########
aws/src/main/java/org/apache/iceberg/aws/ManagedHttpClientRegistry.java:
##########
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.aws;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import java.lang.ref.Cleaner;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.http.SdkHttpClient;
+
+/**
+ * A registry that manages the lifecycle of shared HTTP clients for AWS SDK v2. Resources are
+ * cleaned up when garbage collected.
+ */
+public class ManagedHttpClientRegistry {
+  private static final Logger LOG = LoggerFactory.getLogger(ManagedHttpClientRegistry.class);
+  private static final Cleaner CLEANER = Cleaner.create();
+
+  private final Cache<String, ManagedHttpClient> clientCache;
+
+  private static volatile ManagedHttpClientRegistry instance;
+
+  public static ManagedHttpClientRegistry getInstance() {
+    if (instance == null) {
+      synchronized (ManagedHttpClientRegistry.class) {
+        if (instance == null) {
+          instance = new ManagedHttpClientRegistry();
+        }
+      }
+    }
+    return instance;
+  }
+
+  private ManagedHttpClientRegistry() {
+    this.clientCache = Caffeine.newBuilder().build();

Review Comment:
   Looks like this is unbounded, and it's not really a cache. Should we just use a map here?
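
   Something like this sketch (reusing the PR's names; imports for `java.util.concurrent.ConcurrentHashMap`/`ConcurrentMap` assumed) would keep the create-once-per-key behavior without the cache dependency:

   ```java
   // Sketch only: ConcurrentHashMap.computeIfAbsent runs the factory at most
   // once per key, same as Caffeine's get(key, mappingFunction), and makes the
   // unbounded nature of the store explicit.
   private final ConcurrentMap<String, ManagedHttpClient> clients = new ConcurrentHashMap<>();

   public SdkHttpClient getOrCreateClient(
       String clientKey, Supplier<SdkHttpClient> clientFactory, Map<String, String> properties) {
     return clients
         .computeIfAbsent(
             clientKey, k -> new ManagedHttpClient(clientFactory.get(), k, properties))
         .getHttpClient();
   }
   ```

   Both versions block concurrent callers on the same key while the factory runs, so behavior shouldn't change.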

##########
aws/src/main/java/org/apache/iceberg/aws/ManagedHttpClientRegistry.java:
##########
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.aws;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import java.lang.ref.Cleaner;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.http.SdkHttpClient;
+
+/**
+ * A registry that manages the lifecycle of shared HTTP clients for AWS SDK v2. Resources are
+ * cleaned up when garbage collected.
+ */
+public class ManagedHttpClientRegistry {
+  private static final Logger LOG = LoggerFactory.getLogger(ManagedHttpClientRegistry.class);
+  private static final Cleaner CLEANER = Cleaner.create();
+
+  private final Cache<String, ManagedHttpClient> clientCache;
+
+  private static volatile ManagedHttpClientRegistry instance;
+
+  public static ManagedHttpClientRegistry getInstance() {
+    if (instance == null) {
+      synchronized (ManagedHttpClientRegistry.class) {
+        if (instance == null) {
+          instance = new ManagedHttpClientRegistry();
+        }
+      }
+    }
+    return instance;
+  }
+
+  private ManagedHttpClientRegistry() {
+    this.clientCache = Caffeine.newBuilder().build();
+  }
+
+  /**
+   * Get or create a managed HTTP client for the given configuration.
+   *
+   * @param clientKey unique key identifying the client configuration
+   * @param clientFactory factory to create the HTTP client if not cached
+   * @param properties configuration properties for this client
+   * @return a managed HTTP client that handles proper cleanup via Cleaner
+   */
+  public SdkHttpClient getOrCreateClient(
+      String clientKey, Supplier<SdkHttpClient> clientFactory, Map<String, String> properties) {
+    return clientCache
+        .get(
+            clientKey,
+            k -> {
+              LOG.debug("Creating new managed HTTP client for key: {}", k);
+              SdkHttpClient httpClient = clientFactory.get();
+              return new ManagedHttpClient(httpClient, k, properties);
+            })
+        .getHttpClient();
+  }
+
+  @VisibleForTesting
+  Cache<String, ManagedHttpClient> getClientCache() {
+    return clientCache;
+  }
+
+  @VisibleForTesting
+  void shutdown() {
+    clientCache.invalidateAll();
+    clientCache.cleanUp();
+  }
+
+  /**
+   * Managed HTTP client wrapper that provides cleanup using java.lang.ref.Cleaner. The Cleaner
+   * ensures resources are eventually closed when the wrapper is garbage collected.
+   */
+  static class ManagedHttpClient implements AutoCloseable {
+    private static final AtomicLong INSTANCE_COUNTER = new AtomicLong(0);
+
+    private final SdkHttpClient httpClient;
+    private final String clientKey;
+    private final long instanceId;

Review Comment:
   I'm not really sure that storing an additional `instanceId` buys us much. Not against it, but it looks like it's only marginally useful in case of failures closing the HTTP client.
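
   If the only use is log correlation on close failures, the key plus the identity hash would probably do, e.g. (hypothetical sketch of the cleanup path):

   ```java
   try {
     httpClient.close();
   } catch (RuntimeException e) {
     // Hypothetical: clientKey plus the identity hash already pinpoints the
     // instance in logs, without maintaining a separate AtomicLong counter.
     LOG.warn(
         "Failed to close HTTP client for key {} (instance {})",
         clientKey,
         System.identityHashCode(httpClient),
         e);
   }
   ```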

##########
aws/src/main/java/org/apache/iceberg/aws/ManagedHttpClientRegistry.java:
##########
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.aws;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import java.lang.ref.Cleaner;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.http.SdkHttpClient;
+
+/**
+ * A registry that manages the lifecycle of shared HTTP clients for AWS SDK v2. Resources are
+ * cleaned up when garbage collected.
+ */
+public class ManagedHttpClientRegistry {

Review Comment:
   Are we sure this is something we want to expose? This really is something we configure specifically for the AwsClient. I don't know if it's beneficial to expose this concept to consumers of the Iceberg AWS module.
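
   If it only needs to be reachable from this package, a package-private singleton would keep it off the public surface. A minimal sketch of what I mean:

   ```java
   package org.apache.iceberg.aws;

   // Sketch: dropping `public` keeps the registry an implementation detail of
   // the iceberg-aws module rather than API we'd have to support long term.
   class ManagedHttpClientRegistry {

     private static volatile ManagedHttpClientRegistry instance;

     private ManagedHttpClientRegistry() {}

     static ManagedHttpClientRegistry getInstance() {
       if (instance == null) {
         synchronized (ManagedHttpClientRegistry.class) {
           if (instance == null) {
             instance = new ManagedHttpClientRegistry();
           }
         }
       }
       return instance;
     }
   }
   ```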
