This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 4846e97b1a HDDS-11543. Track OzoneClient object leaks via LeakDetector framework. (#7285)
4846e97b1a is described below

commit 4846e97b1ae984637e97137fc46cfd2c2d7e3ed2
Author: Sadanand Shenoy <[email protected]>
AuthorDate: Wed Oct 9 20:06:14 2024 +0530

    HDDS-11543. Track OzoneClient object leaks via LeakDetector framework. (#7285)
---
 .../java/org/apache/hadoop/hdds/HddsUtils.java     | 13 +++++++
 .../utils/db/managed/ManagedRocksObjectUtils.java  |  7 +---
 .../org/apache/ozone/test/GenericTestUtils.java    | 15 ++++++++
 .../apache/hadoop/ozone/client/OzoneClient.java    |  8 +++-
 .../hadoop/ozone/client/OzoneClientFactory.java    | 27 +++++++++++--
 .../fs/ozone/AbstractOzoneFileSystemTest.java      | 21 +++++++----
 .../hadoop/fs/ozone/OzoneFileSystemTests.java      | 18 +++++----
 .../hadoop/fs/ozone/TestOzoneFSBucketLayout.java   | 12 +++++-
 .../apache/hadoop/ozone/TestOMSortDatanodes.java   |  7 ++++
 .../ozone/client/rpc/OzoneRpcClientTests.java      | 44 ++++++++++++++++++++--
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 14 +++++--
 .../ozone/om/TestOmContainerLocationCache.java     |  5 ++-
 .../hadoop/ozone/shell/TestOzoneShellHA.java       |  3 --
 .../org/apache/hadoop/ozone/om/OmTestManagers.java |  8 +++-
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      | 30 +++++++++------
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   | 10 ++++-
 16 files changed, 189 insertions(+), 53 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 662f4b3964..88489652e0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -878,4 +878,17 @@ public final class HddsUtils {
         ? Thread.currentThread().getStackTrace()
         : null;
   }
+
+  /**
+   * Logs a warning to report that the class is not closed properly.
+   */
+  public static void reportLeak(Class<?> clazz, String stackTrace, Logger log) {
+    String warning = String.format("%s is not closed properly", clazz.getSimpleName());
+    if (stackTrace != null && log.isDebugEnabled()) {
+      String debugMessage = String.format("%nStackTrace for unclosed instance: %s",
+          stackTrace);
+      warning = warning.concat(debugMessage);
+    }
+    log.warn(warning);
+  }
 }
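
For reference, a minimal sketch of how this shared helper pairs with a LeakDetector; the TrackedResource class and detector name below are illustrative only and not part of this commit:

    import org.apache.hadoop.hdds.HddsUtils;
    import org.apache.hadoop.hdds.utils.LeakDetector;
    import org.apache.ratis.util.UncheckedAutoCloseable;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class TrackedResource implements AutoCloseable {
      private static final Logger LOG = LoggerFactory.getLogger(TrackedResource.class);
      private static final LeakDetector LEAK_DETECTOR = new LeakDetector("TrackedResource");

      // Register this instance with the detector; the callback runs only if the
      // object becomes unreachable without close() having been called.
      // A captured stack trace may be passed instead of null, as OzoneClientFactory does.
      private final UncheckedAutoCloseable leakTracker = LEAK_DETECTOR.track(this,
          () -> HddsUtils.reportLeak(TrackedResource.class, null, LOG));

      @Override
      public void close() {
        // Unregister from the detector so no leak is reported for this instance.
        leakTracker.close();
      }
    }
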
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
index 148abee7fc..d58f70495f 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
@@ -55,12 +55,7 @@ public final class ManagedRocksObjectUtils {
 
   static void reportLeak(Class<?> clazz, String stackTrace) {
     ManagedRocksObjectMetrics.INSTANCE.increaseLeakObject();
-    String warning = String.format("%s is not closed properly", clazz.getSimpleName());
-    if (stackTrace != null && LOG.isDebugEnabled()) {
-      String debugMessage = String.format("%nStackTrace for unclosed instance: %s", stackTrace);
-      warning = warning.concat(debugMessage);
-    }
-    LOG.warn(warning);
+    HddsUtils.reportLeak(clazz, stackTrace, LOG);
   }
 
   private static @Nullable StackTraceElement[] getStackTrace() {
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
index 9a3a5c7a8f..48abd5e986 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
@@ -29,6 +29,7 @@ import java.lang.reflect.Method;
 import java.time.Instant;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.Callable;
 import java.util.concurrent.TimeoutException;
 
 import com.google.common.base.Preconditions;
@@ -205,6 +206,20 @@ public abstract class GenericTestUtils {
     }
   }
 
+  public static <T extends Throwable> T assertThrows(
+      Class<T> expectedType,
+      Callable<? extends AutoCloseable> func) {
+    return Assertions.assertThrows(expectedType, () -> {
+      final AutoCloseable closeable = func.call();
+      try {
+        if (closeable != null) {
+          closeable.close();
+        }
+      } catch (Exception ignored) {
+      }
+    });
+  }
+
   /**
    * @deprecated use sl4fj based version
    */
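
For context, tests elsewhere in this change call the new overload roughly as follows (a sketch mirroring AbstractOzoneFileSystemTest; the config object is whatever the test builds):

    // The Callable may return an AutoCloseable such as a FileSystem. If the
    // call unexpectedly succeeds, the helper closes the returned instance
    // before the assertion fails, so the accidentally created object does not leak.
    IllegalArgumentException e = GenericTestUtils.assertThrows(
        IllegalArgumentException.class, () -> FileSystem.get(config));
    assertThat(e.getMessage()).contains("does not support file system semantics");
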
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java
index 3a63a59346..8bd648545d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java
@@ -26,6 +26,7 @@ import java.io.Closeable;
 import java.io.IOException;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.ratis.util.UncheckedAutoCloseable;
 
 /**
  * OzoneClient connects to Ozone Cluster and
@@ -76,6 +77,7 @@ public class OzoneClient implements Closeable {
   private final ClientProtocol proxy;
   private final ObjectStore objectStore;
   private  ConfigurationSource conf;
+  private final UncheckedAutoCloseable leakTracker = OzoneClientFactory.track(this);
 
   /**
    * Creates a new OzoneClient object, generally constructed
@@ -119,7 +121,11 @@ public class OzoneClient implements Closeable {
    */
   @Override
   public void close() throws IOException {
-    proxy.close();
+    try {
+      proxy.close();
+    } finally {
+      leakTracker.close();
+    }
   }
 
   /**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 80a495a1d1..1c673618d0 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -23,9 +23,11 @@ import java.io.DataInputStream;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.LeakDetector;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -34,13 +36,17 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
+import org.apache.ratis.util.UncheckedAutoCloseable;
+
+import com.google.common.base.Preconditions;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
+
 /**
  * Factory class to create OzoneClients.
  */
@@ -54,6 +60,21 @@ public final class OzoneClientFactory {
    */
   private OzoneClientFactory() { }
 
+  private static final LeakDetector OZONE_CLIENT_LEAK_DETECTOR =
+      new LeakDetector("OzoneClientObject");
+
+  public static UncheckedAutoCloseable track(AutoCloseable object) {
+    final Class<?> clazz = object.getClass();
+    final StackTraceElement[] stackTrace = HddsUtils.getStackTrace(LOG);
+    return OZONE_CLIENT_LEAK_DETECTOR.track(object,
+        () -> HddsUtils.reportLeak(clazz,
+            HddsUtils.formatStackTrace(stackTrace, 4), LOG));
+  }
+
+  public static Logger getLogger() {
+    return LOG;
+  }
+
 
   /**
    * Constructs and return an OzoneClient with default configuration.
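
On the caller side the tracking is transparent; clients obtained from the factory just need to be closed, e.g. (a sketch, assuming an OzoneConfiguration named conf):

    // close() releases the underlying proxy and unregisters the leak tracker.
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      ObjectStore store = client.getObjectStore();
      // ... use the store ...
    }
    // A client that becomes unreachable without being closed triggers the
    // LeakDetector callback, which logs "OzoneClient is not closed properly".
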
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
index 69242d2b1f..e7c4cbee1d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
@@ -409,7 +409,7 @@ abstract class AbstractOzoneFileSystemTest {
   }
 
   private void checkInvalidPath(Path path) {
-    InvalidPathException pathException = assertThrows(
+    InvalidPathException pathException = GenericTestUtils.assertThrows(
         InvalidPathException.class, () -> fs.create(path, false)
     );
     assertThat(pathException.getMessage()).contains("Invalid path Name");
@@ -1831,12 +1831,14 @@ abstract class AbstractOzoneFileSystemTest {
     String rootPath = String.format("%s://%s.%s/",
         OzoneConsts.OZONE_URI_SCHEME, linkBucket1Name, linksVolume);
 
-    try {
-      FileSystem.get(URI.create(rootPath), cluster.getConf());
-      fail("Should throw Exception due to loop in Link Buckets");
+    try (FileSystem fileSystem = FileSystem.get(URI.create(rootPath),
+        cluster.getConf())) {
+      fail("Should throw Exception due to loop in Link Buckets" +
+          " while initialising fs with URI " + fileSystem.getUri());
     } catch (OMException oe) {
       // Expected exception
-      assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, oe.getResult());
+      assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS,
+          oe.getResult());
     } finally {
       volume.deleteBucket(linkBucket1Name);
       volume.deleteBucket(linkBucket2Name);
@@ -1854,13 +1856,17 @@ abstract class AbstractOzoneFileSystemTest {
     String rootPath2 = String.format("%s://%s.%s/",
         OzoneConsts.OZONE_URI_SCHEME, danglingLinkBucketName, linksVolume);
 
+    FileSystem fileSystem = null;
     try {
-      FileSystem.get(URI.create(rootPath2), cluster.getConf());
+      fileSystem = FileSystem.get(URI.create(rootPath2), cluster.getConf());
     } catch (OMException oe) {
       // Expected exception
       fail("Should not throw Exception and show orphan buckets");
     } finally {
       volume.deleteBucket(danglingLinkBucketName);
+      if (fileSystem != null) {
+        fileSystem.close();
+      }
     }
   }
 
@@ -2230,7 +2236,8 @@ abstract class AbstractOzoneFileSystemTest {
       OzoneConfiguration config = new OzoneConfiguration(fs.getConf());
       config.set(FS_DEFAULT_NAME_KEY, obsRootPath);
 
-      IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> FileSystem.get(config));
+      IllegalArgumentException e = GenericTestUtils.assertThrows(IllegalArgumentException.class,
+              () -> FileSystem.get(config));
       assertThat(e.getMessage()).contains("OBJECT_STORE, which does not support file system semantics");
     }
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java
index 47c584e048..67baea8835 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java
@@ -65,15 +65,17 @@ final class OzoneFileSystemTests {
     URI uri = FileSystem.getDefaultUri(config);
     config.setBoolean(
         String.format("fs.%s.impl.disable.cache", uri.getScheme()), true);
-    FileSystem subject = FileSystem.get(uri, config);
-    Path dir = new Path(Objects.requireNonNull(rootPath), "listStatusIterator");
-    try {
-      Set<String> paths = new TreeSet<>();
-      for (int dirCount : dirCounts) {
-        listStatusIterator(subject, dir, paths, dirCount);
+    try (FileSystem subject = FileSystem.get(uri, config)) {
+      Path dir = new Path(Objects.requireNonNull(rootPath),
+          "listStatusIterator");
+      try {
+        Set<String> paths = new TreeSet<>();
+        for (int dirCount : dirCounts) {
+          listStatusIterator(subject, dir, paths, dirCount);
+        }
+      } finally {
+        subject.delete(dir, true);
       }
-    } finally {
-      subject.delete(dir, true);
     }
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java
index f1cedf59c3..8e8cc63a7d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java
@@ -122,8 +122,16 @@ class TestOzoneFSBucketLayout {
   void fileSystemWithUnsupportedDefaultBucketLayout(String layout) {
     OzoneConfiguration conf = configWithDefaultBucketLayout(layout);
 
-    OMException e = assertThrows(OMException.class,
-        () -> FileSystem.newInstance(conf));
+    OMException e = assertThrows(OMException.class, () -> {
+      FileSystem fileSystem = null;
+      try {
+        fileSystem = FileSystem.newInstance(conf);
+      } finally {
+        if (fileSystem != null) {
+          fileSystem.close();
+        }
+      }
+    });
     assertThat(e.getMessage())
         .contains(ERROR_MAP.get(layout));
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java
index cef872597e..2964912c40 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.net.StaticMapping;
 
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.om.KeyManagerImpl;
 import org.apache.hadoop.ozone.om.OmTestManagers;
 import org.apache.hadoop.ozone.om.OzoneManager;
@@ -74,6 +75,8 @@ public class TestOMSortDatanodes {
       "edge1", "/rack1"
   );
 
+  private static OzoneClient ozoneClient;
+
   @BeforeAll
   public static void setup() throws Exception {
     config = new OzoneConfiguration();
@@ -109,11 +112,15 @@ public class TestOMSortDatanodes {
         = new OmTestManagers(config, scm.getBlockProtocolServer(),
         mockScmContainerClient);
     om = omTestManagers.getOzoneManager();
+    ozoneClient = omTestManagers.getRpcClient();
     keyManager = (KeyManagerImpl)omTestManagers.getKeyManager();
   }
 
   @AfterAll
   public static void cleanup() throws Exception {
+    if (ozoneClient != null) {
+      ozoneClient.close();
+    }
     if (scm != null) {
       scm.stop();
       scm.join();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
index eb9f35f518..6edef789b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
@@ -32,12 +32,14 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.TreeMap;
 import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
@@ -188,6 +190,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 import static org.slf4j.event.Level.DEBUG;
 
+import org.apache.ozone.test.tag.Unhealthy;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.MethodOrderer;
 import org.junit.jupiter.api.Test;
@@ -221,6 +224,7 @@ abstract class OzoneRpcClientTests extends OzoneTestBase {
   private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP,
       remoteGroupName, ACCESS, READ);
   private static MessageDigest eTagProvider;
+  private static Set<OzoneClient> ozoneClients = new HashSet<>();
 
   @BeforeAll
   public static void initialize() throws NoSuchAlgorithmException {
@@ -250,6 +254,7 @@ abstract class OzoneRpcClientTests extends OzoneTestBase {
         .build();
     cluster.waitForClusterToBeReady();
     ozClient = OzoneClientFactory.getRpcClient(conf);
+    ozoneClients.add(ozClient);
     store = ozClient.getObjectStore();
     storageContainerLocationClient =
         cluster.getStorageContainerLocationClient();
@@ -259,10 +264,9 @@ abstract class OzoneRpcClientTests extends OzoneTestBase {
   /**
    * Close OzoneClient and shutdown MiniOzoneCluster.
    */
-  static void shutdownCluster() throws IOException {
-    if (ozClient != null) {
-      ozClient.close();
-    }
+  static void shutdownCluster() {
+    org.apache.hadoop.hdds.utils.IOUtils.closeQuietly(ozoneClients);
+    ozoneClients.clear();
 
     if (storageContainerLocationClient != null) {
       storageContainerLocationClient.close();
@@ -274,6 +278,7 @@ abstract class OzoneRpcClientTests extends OzoneTestBase {
   }
 
   private static void setOzClient(OzoneClient ozClient) {
+    ozoneClients.add(ozClient);
     OzoneRpcClientTests.ozClient = ozClient;
   }
 
@@ -3140,6 +3145,37 @@ abstract class OzoneRpcClientTests extends OzoneTestBase {
     doMultipartUpload(bucket, keyName, (byte)97, replication);
 
   }
+
+  /**
+   * This test prints a memory-leak warning to the test logs, which CI
+   * post-processing catches, thereby failing the CI run. Hence, this
+   * test is disabled for CI.
+   */
+  @Unhealthy
+  public void testClientLeakDetector() throws Exception {
+    OzoneClient client = OzoneClientFactory.getRpcClient(cluster.getConf());
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+    GenericTestUtils.LogCapturer ozoneClientFactoryLogCapturer =
+        GenericTestUtils.LogCapturer.captureLogs(
+            OzoneClientFactory.getLogger());
+
+    client.getObjectStore().createVolume(volumeName);
+    OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    byte[] data = new byte[10];
+    Arrays.fill(data, (byte) 1);
+    try (OzoneOutputStream out = bucket.createKey(keyName, 10,
+        ReplicationConfig.fromTypeAndFactor(RATIS, ONE), new HashMap<>())) {
+      out.write(data);
+    }
+    client = null;
+    System.gc();
+    GenericTestUtils.waitFor(() -> ozoneClientFactoryLogCapturer.getOutput()
+        .contains("is not closed properly"), 100, 2000);
+  }
   @Test
   public void testMultipartUploadOwner() throws Exception {
     // Save the old user, and switch to the old user after test
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index efa2963842..e7df69a01d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -171,6 +172,7 @@ public class TestKeyManagerImpl {
   private static final String VERSIONED_BUCKET_NAME = "versionedbucket1";
   private static final String VOLUME_NAME = "vol1";
   private static OzoneManagerProtocol writeClient;
+  private static OzoneClient rpcClient;
   private static OzoneManager om;
 
   @BeforeAll
@@ -219,6 +221,7 @@ public class TestKeyManagerImpl {
     keyManager = (KeyManagerImpl)omTestManagers.getKeyManager();
     prefixManager = omTestManagers.getPrefixManager();
     writeClient = omTestManagers.getWriteClient();
+    rpcClient = omTestManagers.getRpcClient();
 
     mockContainerClient();
 
@@ -235,6 +238,8 @@ public class TestKeyManagerImpl {
 
   @AfterAll
   public static void cleanup() throws Exception {
+    writeClient.close();
+    rpcClient.close();
     scm.stop();
     scm.join();
     om.stop();
@@ -252,10 +257,11 @@ public class TestKeyManagerImpl {
   public void cleanupTest() throws IOException {
     mockContainerClient();
     org.apache.hadoop.fs.Path volumePath = new org.apache.hadoop.fs.Path(OZONE_URI_DELIMITER, VOLUME_NAME);
-    FileSystem fs = FileSystem.get(conf);
-    fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true);
-    fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true);
-    fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true);
+    try (FileSystem fs = FileSystem.get(conf)) {
+      fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true);
+      fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true);
+      fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true);
+    }
   }
 
   private static void mockContainerClient() {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
index f25bb47f0d..5c7a0c3128 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
@@ -162,7 +163,7 @@ public class TestOmContainerLocationCache {
   private static final DatanodeDetails DN5 =
       MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID());
   private static final AtomicLong CONTAINER_ID = new AtomicLong(1);
-
+  private static OzoneClient ozoneClient;
 
   @BeforeAll
   public static void setUp() throws Exception {
@@ -184,6 +185,7 @@ public class TestOmContainerLocationCache {
     OmTestManagers omTestManagers = new OmTestManagers(conf,
         mockScmBlockLocationProtocol, mockScmContainerClient);
     om = omTestManagers.getOzoneManager();
+    ozoneClient = omTestManagers.getRpcClient();
     metadataManager = omTestManagers.getMetadataManager();
 
     rpcClient = new RpcClient(conf, null) {
@@ -204,6 +206,7 @@ public class TestOmContainerLocationCache {
 
   @AfterAll
   public static void cleanup() throws Exception {
+    ozoneClient.close();
     om.stop();
     FileUtils.deleteDirectory(dir);
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
index 4c5325edab..9cccd56d4d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
@@ -23,7 +23,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.io.UnsupportedEncodingException;
-import java.net.URI;
 import java.util.Map;
 import java.util.Arrays;
 import java.util.HashSet;
@@ -1149,8 +1148,6 @@ public class TestOzoneShellHA {
             getClientConfForOFS(hostPrefix, cluster.getConf());
     int pageSize = 20;
     clientConf.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize);
-    URI uri = FileSystem.getDefaultUri(clientConf);
-    clientConf.setBoolean(String.format("fs.%s.impl.disable.cache", uri.getScheme()), true);
     OzoneFsShell shell = new OzoneFsShell(clientConf);
 
     String volName = "testlistbucket";
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java
index edffd5ed74..c7a14bb6ee 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.client.ScmTopologyClient;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager;
@@ -52,6 +53,7 @@ public final class OmTestManagers {
   private final BucketManager bucketManager;
   private final PrefixManager prefixManager;
   private final ScmBlockLocationProtocol scmBlockClient;
+  private final OzoneClient rpcClient;
 
   public OzoneManager getOzoneManager() {
     return om;
@@ -77,6 +79,9 @@ public final class OmTestManagers {
   public ScmBlockLocationProtocol getScmBlockClient() {
     return scmBlockClient;
   }
+  public OzoneClient getRpcClient() {
+    return rpcClient;
+  }
 
   public OmTestManagers(OzoneConfiguration conf)
       throws AuthenticationException, IOException, InterruptedException, TimeoutException {
@@ -121,7 +126,8 @@ public final class OmTestManagers {
     waitFor(() -> om.getOmRatisServer().checkLeaderStatus() == RaftServerStatus.LEADER_AND_READY,
         10, 10_000);
 
-    writeClient = OzoneClientFactory.getRpcClient(conf)
+    rpcClient = OzoneClientFactory.getRpcClient(conf);
+    writeClient = rpcClient
         .getObjectStore().getClientProxy().getOzoneManagerClient();
     metadataManager = (OmMetadataManagerImpl) HddsWhiteboxTestUtils
         .getInternalState(om, "metadataManager");
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index df8ece0348..618a837b16 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -193,18 +193,24 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
           OzoneClientFactory.getRpcClient(conf);
     }
     objectStore = ozoneClient.getObjectStore();
-    this.volume = objectStore.getVolume(volumeStr);
-    this.bucket = volume.getBucket(bucketStr);
-    bucketReplicationConfig = this.bucket.getReplicationConfig();
-    nextReplicationConfigRefreshTime =
-        clock.millis() + bucketRepConfigRefreshPeriodMS;
-
-    // resolve the bucket layout in case of Link Bucket
-    BucketLayout resolvedBucketLayout =
-        OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore,
-            new HashSet<>());
-
-    OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout);
+    try {
+      this.volume = objectStore.getVolume(volumeStr);
+      this.bucket = volume.getBucket(bucketStr);
+      bucketReplicationConfig = this.bucket.getReplicationConfig();
+      nextReplicationConfigRefreshTime = clock.millis() + bucketRepConfigRefreshPeriodMS;
+
+      // resolve the bucket layout in case of Link Bucket
+      BucketLayout resolvedBucketLayout =
+          OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, new HashSet<>());
+
+      OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout);
+    } catch (IOException | RuntimeException exception) {
+      // in case of exception, the adapter object will not be initialised,
+      // making the client object unreachable; close the client to release
+      // resources and rethrow.
+      ozoneClient.close();
+      throw exception;
+    }
 
     this.configuredDnPort = conf.getInt(
         OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 31889ed2a5..fefc87184f 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -221,7 +221,15 @@ public class BasicRootedOzoneClientAdapterImpl
           OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
 
       // Fetches the bucket layout to be used by OFS.
-      initDefaultFsBucketLayout(conf);
+      try {
+        initDefaultFsBucketLayout(conf);
+      } catch (IOException | RuntimeException exception) {
+        // in case of exception, the adapter object will not be initialised,
+        // making the client object unreachable; close the client to release
+        // resources and rethrow.
+        ozoneClient.close();
+        throw exception;
+      }
 
       config = conf;
     } finally {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
