This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new a21e362b6a HDDS-12547. Container creation and import use the same VolumeChoosingPolicy (#8090)
a21e362b6a is described below

commit a21e362b6ab78fcf71ba00d24cacd6a4d760c3d2
Author: Peter Lee <[email protected]>
AuthorDate: Fri Apr 11 22:51:52 2025 +0800

    HDDS-12547. Container creation and import use the same VolumeChoosingPolicy (#8090)
---
 .../ozone/container/common/interfaces/Handler.java |  9 +++++---
 .../common/statemachine/DatanodeStateMachine.java  | 13 ++++++++++--
 .../common/volume/VolumeChoosingPolicyFactory.java | 11 +++++-----
 .../ozone/container/keyvalue/KeyValueHandler.java  | 16 +++++----------
 .../ozone/container/ozoneimpl/OzoneContainer.java  | 13 +++++++-----
 .../container/replication/ContainerImporter.java   | 10 +++------
 .../ozone/container/common/ContainerTestUtils.java |  4 +++-
 .../container/common/TestDatanodeStateMachine.java |  4 ++++
 .../container/common/impl/TestHddsDispatcher.java  | 16 ++++++++++++---
 .../container/common/interfaces/TestHandler.java   |  4 +++-
 .../container/keyvalue/TestKeyValueHandler.java    | 24 +---------------------
 .../replication/TestContainerImporter.java         | 10 ++++++---
 .../replication/TestReplicationSupervisor.java     | 11 +++++++---
 .../TestSendContainerRequestHandler.java           |  7 ++++++-
 .../ozone/container/common/TestEndPoint.java       | 22 ++++++++++++++------
 .../ozone/client/rpc/TestECKeyOutputStream.java    |  3 ++-
 .../container/metrics/TestContainerMetrics.java    |  7 +++++--
 .../ozoneimpl/TestOzoneContainerWithTLS.java       |  5 ++++-
 .../ozoneimpl/TestSecureOzoneContainer.java        |  6 +++++-
 .../container/server/TestContainerServer.java      |  6 +++++-
 .../server/TestSecureContainerServer.java          |  7 +++++--
 .../datanode/container/ContainerCommands.java      |  4 ++++
 .../ozone/freon/ClosedContainerReplicator.java     |  6 +++++-
 23 files changed, 135 insertions(+), 83 deletions(-)
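
For context, a minimal sketch of the wiring this patch introduces: one VolumeChoosingPolicy is created via VolumeChoosingPolicyFactory in DatanodeStateMachine and the same instance is handed to both the container-creation path (KeyValueHandler, via OzoneContainer) and the import path (ContainerImporter). The types and constructors below are simplified placeholders for illustration, not the real Ozone signatures; the default policy class and the HDDS_DATANODE_VOLUME_CHOOSING_POLICY key are taken from the diff itself.

    // Illustrative only: simplified placeholders, not the actual Ozone classes.
    interface VolumeChoosingPolicy { }

    class CapacityVolumeChoosingPolicy implements VolumeChoosingPolicy { }

    final class VolumeChoosingPolicyFactory {
      private VolumeChoosingPolicyFactory() { }

      // The real factory resolves the policy class from configuration
      // (HDDS_DATANODE_VOLUME_CHOOSING_POLICY); the default is assumed here.
      static VolumeChoosingPolicy getPolicy() {
        return new CapacityVolumeChoosingPolicy();
      }
    }

    class KeyValueHandler {            // container creation path
      private final VolumeChoosingPolicy policy;

      KeyValueHandler(VolumeChoosingPolicy policy) {
        this.policy = policy;
      }
    }

    class ContainerImporter {          // container import (replication) path
      private final VolumeChoosingPolicy policy;

      ContainerImporter(VolumeChoosingPolicy policy) {
        this.policy = policy;
      }
    }

    class DatanodeStateMachine {
      // A single policy instance, created once and shared; previously each
      // component called the factory and built its own copy.
      private final VolumeChoosingPolicy volumeChoosingPolicy =
          VolumeChoosingPolicyFactory.getPolicy();

      private final KeyValueHandler handler =
          new KeyValueHandler(volumeChoosingPolicy);
      private final ContainerImporter importer =
          new ContainerImporter(volumeChoosingPolicy);

      VolumeChoosingPolicy getVolumeChoosingPolicy() {
        return volumeChoosingPolicy;
      }
    }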

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index fabc92ff8b..2cb757f1a1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.time.Clock;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
@@ -67,16 +68,18 @@ protected Handler(ConfigurationSource config, String datanodeId,
     this.icrSender = icrSender;
   }
 
+  @SuppressWarnings("checkstyle:ParameterNumber")
   public static Handler getHandlerForContainerType(
       final ContainerType containerType, final ConfigurationSource config,
       final String datanodeId, final ContainerSet contSet,
-      final VolumeSet volumeSet, final ContainerMetrics metrics,
+      final VolumeSet volumeSet, final VolumeChoosingPolicy volumeChoosingPolicy,
+      final ContainerMetrics metrics,
       IncrementalReportSender<Container> icrSender) {
     switch (containerType) {
     case KeyValueContainer:
       return new KeyValueHandler(config,
-          datanodeId, contSet, volumeSet, metrics,
-          icrSender);
+          datanodeId, contSet, volumeSet, volumeChoosingPolicy, metrics,
+          icrSender, Clock.systemUTC());
     default:
       throw new IllegalArgumentException("Handler for ContainerType: " +
           containerType + "doesn't exist.");
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 9423d8b996..e2860f0fd8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.HddsDatanodeStopService;
 import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.report.ReportManager;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CloseContainerCommandHandler;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ClosePipelineCommandHandler;
@@ -61,6 +62,7 @@
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.RefreshVolumeUsageCommandHandler;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ReplicateContainerCommandHandler;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.SetNodeOperationalStateCommandHandler;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator;
 import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
@@ -97,6 +99,7 @@ public class DatanodeStateMachine implements Closeable {
   private final SCMConnectionManager connectionManager;
   private final ECReconstructionCoordinator ecReconstructionCoordinator;
   private StateContext context;
+  private VolumeChoosingPolicy volumeChoosingPolicy;
   private final OzoneContainer container;
   private final DatanodeDetails datanodeDetails;
   private final CommandDispatcher commandDispatcher;
@@ -173,13 +176,14 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService,
     connectionManager = new SCMConnectionManager(conf);
     context = new StateContext(this.conf, DatanodeStates.getInitState(), this,
         threadNamePrefix);
+    volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
     // OzoneContainer instance is used in a non-thread safe way by the context
     // past to its constructor, so we much synchronize its access. See
     // HDDS-3116 for more details.
     constructionLock.writeLock().lock();
     try {
       container = new OzoneContainer(hddsDatanodeService, this.datanodeDetails,
-          conf, context, certClient, secretKeyClient);
+          conf, context, certClient, secretKeyClient, volumeChoosingPolicy);
     } finally {
       constructionLock.writeLock().unlock();
     }
@@ -188,7 +192,8 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService,
     ContainerImporter importer = new ContainerImporter(conf,
         container.getContainerSet(),
         container.getController(),
-        container.getVolumeSet());
+        container.getVolumeSet(),
+        volumeChoosingPolicy);
     ContainerReplicator pullReplicator = new DownloadAndImportReplicator(
         conf, container.getContainerSet(),
         importer,
@@ -744,4 +749,8 @@ public DatanodeQueueMetrics getQueueMetrics() {
   public ReconfigurationHandler getReconfigurationHandler() {
     return reconfigurationHandler;
   }
+
+  public VolumeChoosingPolicy getVolumeChoosingPolicy() {
+    return volumeChoosingPolicy;
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java
index bb5f967ba8..28ad10a158 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
+import org.apache.ratis.util.ReflectionUtils;
 
 /**
  * A factory to create volume choosing policy instance based on configuration
@@ -35,10 +36,10 @@ public final class VolumeChoosingPolicyFactory {
   private VolumeChoosingPolicyFactory() {
   }
 
-  public static VolumeChoosingPolicy getPolicy(ConfigurationSource conf)
-      throws InstantiationException, IllegalAccessException {
-    return conf.getClass(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
-            DEFAULT_VOLUME_CHOOSING_POLICY, VolumeChoosingPolicy.class)
-        .newInstance();
+  public static VolumeChoosingPolicy getPolicy(ConfigurationSource conf) {
+    Class<? extends VolumeChoosingPolicy> policyClass = conf.getClass(
+        HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
+        DEFAULT_VOLUME_CHOOSING_POLICY, VolumeChoosingPolicy.class);
+    return ReflectionUtils.newInstance(policyClass);
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index eb810fcd87..408897c6bb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -157,13 +157,15 @@ public KeyValueHandler(ConfigurationSource config,
                          VolumeSet volSet,
                          ContainerMetrics metrics,
                          IncrementalReportSender<Container> icrSender) {
-    this(config, datanodeId, contSet, volSet, metrics, icrSender, Clock.systemUTC());
+    this(config, datanodeId, contSet, volSet, null, metrics, icrSender, Clock.systemUTC());
   }
 
+  @SuppressWarnings("checkstyle:ParameterNumber")
   public KeyValueHandler(ConfigurationSource config,
                          String datanodeId,
                          ContainerSet contSet,
                          VolumeSet volSet,
+                         VolumeChoosingPolicy volumeChoosingPolicy,
                          ContainerMetrics metrics,
                          IncrementalReportSender<Container> icrSender,
                          Clock clock) {
@@ -174,11 +176,8 @@ public KeyValueHandler(ConfigurationSource config,
         DatanodeConfiguration.class).isChunkDataValidationCheck();
     chunkManager = ChunkManagerFactory.createChunkManager(config, blockManager,
         volSet);
-    try {
-      volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
+    this.volumeChoosingPolicy = volumeChoosingPolicy != null ? volumeChoosingPolicy
+        : VolumeChoosingPolicyFactory.getPolicy(config);
 
     maxContainerSize = (long) config.getStorageSize(
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
@@ -216,11 +215,6 @@ public KeyValueHandler(ConfigurationSource config,
     }
   }
 
-  @VisibleForTesting
-  public VolumeChoosingPolicy getVolumeChoosingPolicyForTesting() {
-    return volumeChoosingPolicy;
-  }
-
   @Override
   public StateMachine.DataChannel getStreamDataChannel(
       Container container, ContainerCommandRequestProto msg)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 750bb6e283..22d040c6e8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -74,6 +74,7 @@
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -152,7 +153,8 @@ enum InitializingStatus {
   public OzoneContainer(HddsDatanodeService hddsDatanodeService,
       DatanodeDetails datanodeDetails, ConfigurationSource conf,
       StateContext context, CertificateClient certClient,
-      SecretKeyVerifierClient secretKeyClient) throws IOException {
+      SecretKeyVerifierClient secretKeyClient,
+      VolumeChoosingPolicy volumeChoosingPolicy) throws IOException {
     config = conf;
     this.datanodeDetails = datanodeDetails;
     this.context = context;
@@ -214,7 +216,7 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService,
           Handler.getHandlerForContainerType(
               containerType, conf,
               context.getParent().getDatanodeDetails().getUuidString(),
-              containerSet, volumeSet, metrics, icrSender));
+              containerSet, volumeSet, volumeChoosingPolicy, metrics, icrSender));
     }
 
     SecurityConfig secConf = new SecurityConfig(conf);
@@ -239,7 +241,7 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService,
         secConf,
         certClient,
         new ContainerImporter(conf, containerSet, controller,
-            volumeSet),
+            volumeSet, volumeChoosingPolicy),
         datanodeDetails.threadNamePrefix());
 
     readChannel = new XceiverServerGrpc(
@@ -299,8 +301,9 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService,
   @VisibleForTesting
   public OzoneContainer(
       DatanodeDetails datanodeDetails, ConfigurationSource conf,
-      StateContext context) throws IOException {
-    this(null, datanodeDetails, conf, context, null, null);
+      StateContext context, VolumeChoosingPolicy volumeChoosingPolicy)
+      throws IOException {
+    this(null, datanodeDetails, conf, context, null, null, volumeChoosingPolicy);
   }
 
   public GrpcTlsConfig getTlsClientConfig() {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
index b5ea8902c0..f69516f94e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
@@ -40,7 +40,6 @@
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
-import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
@@ -71,15 +70,12 @@ public class ContainerImporter {
   public ContainerImporter(@Nonnull ConfigurationSource conf,
                            @Nonnull ContainerSet containerSet,
                            @Nonnull ContainerController controller,
-                           @Nonnull MutableVolumeSet volumeSet) {
+                           @Nonnull MutableVolumeSet volumeSet,
+                           @Nonnull VolumeChoosingPolicy volumeChoosingPolicy) {
     this.containerSet = containerSet;
     this.controller = controller;
     this.volumeSet = volumeSet;
-    try {
-      volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
+    this.volumeChoosingPolicy = volumeChoosingPolicy;
     containerSize = (long) conf.getStorageSize(
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
index 3cff94ed9b..33c188c6d7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
@@ -68,6 +68,7 @@
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
@@ -132,7 +133,8 @@ public static OzoneContainer getOzoneContainer(
       DatanodeDetails datanodeDetails, OzoneConfiguration conf)
       throws IOException {
     StateContext context = getMockContext(datanodeDetails, conf);
-    return new OzoneContainer(datanodeDetails, conf, context);
+    VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
+    return new OzoneContainer(datanodeDetails, conf, context, volumeChoosingPolicy);
   }
 
   public static StateContext getMockContext(DatanodeDetails datanodeDetails,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 0ee12be723..1a38939909 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -44,12 +44,14 @@
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
 import org.apache.hadoop.ozone.container.common.states.DatanodeState;
 import org.apache.hadoop.ozone.container.common.states.datanode.InitDatanodeState;
 import org.apache.hadoop.ozone.container.common.states.datanode.RunningDatanodeState;
+import org.apache.hadoop.ozone.container.common.volume.CapacityVolumeChoosingPolicy;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
@@ -204,6 +206,8 @@ public void testDatanodeStateContext() throws IOException,
     ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf);
     try (DatanodeStateMachine stateMachine =
              new DatanodeStateMachine(datanodeDetails, conf)) {
+      VolumeChoosingPolicy volumeChoosingPolicy = stateMachine.getVolumeChoosingPolicy();
+      assertEquals(CapacityVolumeChoosingPolicy.class, volumeChoosingPolicy.getClass());
       DatanodeStateMachine.DatanodeStates currentState =
           stateMachine.getContext().getState();
       assertEquals(DatanodeStateMachine.DatanodeStates.INIT,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index 5da884caa9..5a2e362f14 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -76,6 +76,7 @@
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -87,6 +88,7 @@
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
@@ -94,6 +96,7 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
@@ -111,10 +114,17 @@ public class TestHddsDispatcher {
   @TempDir
   private File testDir;
 
+  private static VolumeChoosingPolicy volumeChoosingPolicy;
+
   public static final IncrementalReportSender<Container> NO_OP_ICR_SENDER =
       c -> {
       };
 
+  @BeforeAll
+  public static void init() {
+    volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(new OzoneConfiguration());
+  }
+
   @ContainerLayoutTestInfo.ContainerTest
   public void testContainerCloseActionWhenFull(
       ContainerLayoutVersion layout) throws IOException {
@@ -147,7 +157,7 @@ public void testContainerCloseActionWhenFull(
         handlers.put(containerType,
             Handler.getHandlerForContainerType(containerType, conf,
                 context.getParent().getDatanodeDetails().getUuidString(),
-                containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
+                containerSet, volumeSet, volumeChoosingPolicy, metrics, NO_OP_ICR_SENDER));
       }
       HddsDispatcher hddsDispatcher = new HddsDispatcher(
           conf, containerSet, volumeSet, handlers, context, metrics, null);
@@ -284,7 +294,7 @@ public void testContainerCloseActionWhenVolumeFull(
         handlers.put(containerType,
             Handler.getHandlerForContainerType(containerType, conf,
                 context.getParent().getDatanodeDetails().getUuidString(),
-                containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
+                containerSet, volumeSet, volumeChoosingPolicy, metrics, NO_OP_ICR_SENDER));
       }
       HddsDispatcher hddsDispatcher = new HddsDispatcher(
           conf, containerSet, volumeSet, handlers, context, metrics, null);
@@ -531,7 +541,7 @@ static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
       handlers.put(containerType,
           Handler.getHandlerForContainerType(containerType, conf,
               context.getParent().getDatanodeDetails().getUuidString(),
-              containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
+              containerSet, volumeSet, volumeChoosingPolicy, metrics, NO_OP_ICR_SENDER));
     }
 
     final HddsDispatcher hddsDispatcher = new HddsDispatcher(conf,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
index 708e109c0e..829f8c55ee 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 import org.junit.jupiter.api.AfterEach;
@@ -56,6 +57,7 @@ public void setup() throws Exception {
     this.conf = new OzoneConfiguration();
     this.containerSet = mock(ContainerSet.class);
     this.volumeSet = mock(MutableVolumeSet.class);
+    VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
     DatanodeDetails datanodeDetails = mock(DatanodeDetails.class);
     StateContext context = ContainerTestUtils.getMockContext(
         datanodeDetails, conf);
@@ -67,7 +69,7 @@ public void setup() throws Exception {
           Handler.getHandlerForContainerType(
               containerType, conf,
               context.getParent().getDatanodeDetails().getUuidString(),
-              containerSet, volumeSet, metrics,
+              containerSet, volumeSet, volumeChoosingPolicy, metrics,
               TestHddsDispatcher.NO_OP_ICR_SENDER));
     }
     this.dispatcher = new HddsDispatcher(
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index be274562df..de26f0fbbf 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.ozone.container.keyvalue;
 
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY;
@@ -26,7 +25,6 @@
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.doCallRealMethod;
 import static org.mockito.Mockito.mock;
@@ -111,7 +109,6 @@ public void setup() throws StorageContainerException {
         mock(ContainerMetrics.class),
         mock(TokenVerifier.class)
     );
-
   }
 
   /**
@@ -282,14 +279,6 @@ public void testVolumeSetInKeyValueHandler() throws Exception {
       DatanodeDetails datanodeDetails = mock(DatanodeDetails.class);
       StateContext context = ContainerTestUtils.getMockContext(
           datanodeDetails, conf);
-      KeyValueHandler keyValueHandler = new KeyValueHandler(conf,
-          context.getParent().getDatanodeDetails().getUuidString(), cset,
-          volumeSet, metrics, c -> {
-      });
-      assertEquals("org.apache.hadoop.ozone.container.common" +
-          ".volume.CapacityVolumeChoosingPolicy",
-          keyValueHandler.getVolumeChoosingPolicyForTesting()
-              .getClass().getName());
 
       // Ensures that KeyValueHandler falls back to FILE_PER_BLOCK.
       conf.set(OZONE_SCM_CONTAINER_LAYOUT_KEY, "FILE_PER_CHUNK");
@@ -297,17 +286,6 @@ public void testVolumeSetInKeyValueHandler() throws Exception {
           metrics, c -> { });
       assertEquals(ContainerLayoutVersion.FILE_PER_BLOCK,
           conf.getEnum(OZONE_SCM_CONTAINER_LAYOUT_KEY, ContainerLayoutVersion.FILE_PER_CHUNK));
-
-      //Set a class which is not of sub class of VolumeChoosingPolicy
-      conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
-          "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher");
-      RuntimeException exception = assertThrows(RuntimeException.class,
-          () -> new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet,
-              metrics, c -> { }));
-
-      assertThat(exception).hasMessageEndingWith(
-          "class org.apache.hadoop.ozone.container.common.impl.HddsDispatcher 
" +
-              "not 
org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy");
     } finally {
       volumeSet.shutdown();
       FileUtil.fullyDelete(datanodeDir);
@@ -492,7 +470,7 @@ public void testDeleteContainerTimeout() throws IOException {
     final AtomicInteger icrReceived = new AtomicInteger(0);
 
     final KeyValueHandler kvHandler = new KeyValueHandler(conf,
-        datanodeId, containerSet, volumeSet, metrics,
+        datanodeId, containerSet, volumeSet, null, metrics,
         c -> icrReceived.incrementAndGet(), clock);
     kvHandler.setClusterID(clusterId);
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
index c283d5a984..8b9d0d1d6d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
@@ -51,8 +51,10 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
@@ -72,11 +74,13 @@ class TestContainerImporter {
   private File tempDir;
 
   private OzoneConfiguration conf;
+  private VolumeChoosingPolicy volumeChoosingPolicy;
 
   @BeforeEach
   void setup() {
     conf = new OzoneConfiguration();
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath());
+    volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
   }
 
   @Test
@@ -93,7 +97,7 @@ void importSameContainerWhenAlreadyImport() throws Exception {
     MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
     ContainerImporter containerImporter = new ContainerImporter(conf,
-        containerSet, controllerMock, volumeSet);
+        containerSet, controllerMock, volumeSet, volumeChoosingPolicy);
     File tarFile = new File("dummy.tar");
     // second import should fail immediately
     StorageContainerException ex = assertThrows(StorageContainerException.class,
@@ -123,7 +127,7 @@ void importSameContainerWhenFirstInProgress() throws Exception {
     MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
     ContainerImporter containerImporter = new ContainerImporter(conf,
-        containerSet, controllerMock, volumeSet);
+        containerSet, controllerMock, volumeSet, volumeChoosingPolicy);
     // run import async first time having delay
     File tarFile = containerTarFile(containerId, containerData);
     CompletableFuture.runAsync(() -> {
@@ -162,7 +166,7 @@ public void testInconsistentChecksumContainerShouldThrowError() throws Exception
     MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
     ContainerImporter containerImporter = spy(new ContainerImporter(conf,
-        containerSet, controllerMock, volumeSet));
+        containerSet, controllerMock, volumeSet, volumeChoosingPolicy));
 
     TarContainerPacker packer = mock(TarContainerPacker.class);
     when(packer.unpackContainerDescriptor(any())).thenReturn("test".getBytes(
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
index 1cc0121ad3..0884d73010 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
@@ -87,12 +87,14 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCommandInfo;
 import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator;
 import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinatorTask;
@@ -143,6 +145,8 @@ public class TestReplicationSupervisor {
   private TestClock clock;
   private DatanodeDetails datanode;
 
+  private VolumeChoosingPolicy volumeChoosingPolicy;
+
   @BeforeEach
   public void setUp() throws Exception {
     clock = new TestClock(Instant.now(), ZoneId.systemDefault());
@@ -155,6 +159,7 @@ public void setUp() throws Exception {
     context.setTermOfLeaderSCM(CURRENT_TERM);
     datanode = MockDatanodeDetails.randomDatanodeDetails();
     when(stateMachine.getDatanodeDetails()).thenReturn(datanode);
+    volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(new OzoneConfiguration());
   }
 
   @AfterEach
@@ -332,7 +337,7 @@ public void testDownloadAndImportReplicatorFailure(ContainerLayoutVersion layout
     ContainerController mockedCC =
         mock(ContainerController.class);
     ContainerImporter importer =
-        new ContainerImporter(conf, set, mockedCC, volumeSet);
+        new ContainerImporter(conf, set, mockedCC, volumeSet, volumeChoosingPolicy);
     ContainerReplicator replicator =
         new DownloadAndImportReplicator(conf, set, importer, moc);
 
@@ -391,7 +396,7 @@ public void testReplicationImportReserveSpace(ContainerLayoutVersion layout)
         .thenReturn(tarFile.toPath());
 
     ContainerImporter importer =
-        new ContainerImporter(conf, set, controllerMock, volumeSet);
+        new ContainerImporter(conf, set, controllerMock, volumeSet, volumeChoosingPolicy);
 
     HddsVolume vol1 = (HddsVolume) volumeSet.getVolumesList().get(0);
     // Initially volume has 0 commit space
@@ -569,7 +574,7 @@ public void testMultipleReplication(ContainerLayoutVersion layout,
       when(volumeSet.getVolumesList()).thenReturn(singletonList(
           new HddsVolume.Builder(testDir).conf(conf).build()));
       ContainerController mockedCC = mock(ContainerController.class);
-      ContainerImporter importer = new ContainerImporter(conf, set, mockedCC, volumeSet);
+      ContainerImporter importer = new ContainerImporter(conf, set, mockedCC, volumeSet, volumeChoosingPolicy);
       ContainerReplicator replicator = new DownloadAndImportReplicator(
           conf, set, importer, moc);
       replicatorRef.set(replicator);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
index 226cb07c0c..0d15e265ad 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
@@ -32,8 +32,10 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
@@ -53,10 +55,13 @@ class TestSendContainerRequestHandler {
 
   private OzoneConfiguration conf;
 
+  private VolumeChoosingPolicy volumeChoosingPolicy;
+
   @BeforeEach
   void setup() {
     conf = new OzoneConfiguration();
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath());
+    volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
   }
 
   @Test
@@ -67,7 +72,7 @@ void testReceiveDataForExistingContainer() throws Exception {
     MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
     ContainerImporter containerImporter = new ContainerImporter(conf,
-        newContainerSet(0), mock(ContainerController.class), volumeSet);
+        newContainerSet(0), mock(ContainerController.class), volumeSet, volumeChoosingPolicy);
     KeyValueContainerData containerData = new KeyValueContainerData(containerId,
         ContainerLayoutVersion.FILE_PER_BLOCK, 100, "test", "test");
     // add container to container set
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 82f5ff2fbf..b39e61c5f6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -64,6 +64,7 @@
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -74,6 +75,7 @@
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
@@ -97,6 +99,7 @@ public class TestEndPoint {
   @TempDir
   private static File testDir;
   private static OzoneConfiguration ozoneConf;
+  private static VolumeChoosingPolicy volumeChoosingPolicy;
   private static DatanodeLayoutStorage layoutStorage;
   private static DatanodeDetails dnDetails;
 
@@ -122,6 +125,7 @@ static void setUp() throws Exception {
     layoutStorage.initialize();
     scmServer = SCMTestUtils.startScmRpcServer(ozoneConf,
         scmServerImpl, serverAddress, 10);
+    volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(ozoneConf);
   }
 
   /**
@@ -156,7 +160,8 @@ public void testGetVersionTask() throws Exception {
       ozoneConf.setBoolean(
           OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
       OzoneContainer ozoneContainer = new OzoneContainer(dnDetails,
-          ozoneConf, ContainerTestUtils.getMockContext(dnDetails, ozoneConf));
+          ozoneConf, ContainerTestUtils.getMockContext(dnDetails, ozoneConf),
+          volumeChoosingPolicy);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           ozoneConf, ozoneContainer);
@@ -230,7 +235,8 @@ public void testCheckVersionResponse() throws Exception {
         serverAddress, 1000)) {
       LogCapturer logCapturer = LogCapturer.captureLogs(VersionEndpointTask.class);
       OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, ozoneConf,
-          ContainerTestUtils.getMockContext(dnDetails, ozoneConf));
+          ContainerTestUtils.getMockContext(dnDetails, ozoneConf),
+          volumeChoosingPolicy);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           ozoneConf, ozoneContainer);
@@ -273,7 +279,8 @@ public void testDnLayoutVersionFile() throws Exception {
     try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf,
         serverAddress, 1000)) {
       OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, ozoneConf,
-          ContainerTestUtils.getMockContext(dnDetails, ozoneConf));
+          ContainerTestUtils.getMockContext(dnDetails, ozoneConf),
+          volumeChoosingPolicy);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           ozoneConf, ozoneContainer);
@@ -335,7 +342,8 @@ public void testGetVersionToInvalidEndpoint() throws Exception {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       DatanodeDetails datanodeDetails = randomDatanodeDetails();
       OzoneContainer ozoneContainer = new OzoneContainer(datanodeDetails,
-          conf, ContainerTestUtils.getMockContext(datanodeDetails, ozoneConf));
+          conf, ContainerTestUtils.getMockContext(datanodeDetails, ozoneConf),
+          volumeChoosingPolicy);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           conf, ozoneContainer);
       EndpointStateMachine.EndPointStates newState = versionTask.call();
@@ -362,7 +370,8 @@ public void testGetVersionAssertRpcTimeOut() throws Exception {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       DatanodeDetails datanodeDetails = randomDatanodeDetails();
       OzoneContainer ozoneContainer = new OzoneContainer(datanodeDetails, conf,
-          ContainerTestUtils.getMockContext(datanodeDetails, ozoneConf));
+          ContainerTestUtils.getMockContext(datanodeDetails, ozoneConf),
+          volumeChoosingPolicy);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           conf, ozoneContainer);
 
@@ -655,7 +664,8 @@ public void testHeartbeatTaskRpcTimeOut() throws Exception {
   private OzoneContainer createVolume(OzoneConfiguration conf)
       throws IOException {
     OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, conf,
-        ContainerTestUtils.getMockContext(dnDetails, ozoneConf));
+        ContainerTestUtils.getMockContext(dnDetails, ozoneConf),
+        volumeChoosingPolicy);
 
     String clusterId = scmServerImpl.getClusterId();
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
index 2bc5947a70..4a03bbad8a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
@@ -198,7 +198,8 @@ public void testECKeyCreatetWithDatanodeIdChange()
     OzoneClient client1 = null;
     try (MockedStatic<Handler> mockedHandler = Mockito.mockStatic(Handler.class, Mockito.CALLS_REAL_METHODS)) {
       Map<String, Handler> handlers = new HashMap<>();
-      mockedHandler.when(() -> Handler.getHandlerForContainerType(any(), any(), any(), any(), any(), any(), any()))
+      mockedHandler.when(() -> Handler.getHandlerForContainerType(any(), any(), any(), any(), any(), any(), any(),
+          any()))
           .thenAnswer(i -> {
             Handler handler = Mockito.spy((Handler) i.callRealMethod());
             handlers.put(handler.getDatanodeId(), handler);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index a5fea3dfd7..fdc2f3feb0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
@@ -63,6 +64,7 @@
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.ratis.util.function.CheckedBiConsumer;
@@ -86,6 +88,7 @@ public class TestContainerMetrics {
   private Path tempDir;
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
   private static final int DFS_METRICS_PERCENTILES_INTERVALS = 1;
+  private static VolumeChoosingPolicy volumeChoosingPolicy;
 
   @BeforeAll
   public static void setup() {
@@ -94,7 +97,7 @@ public static void setup() {
         DFS_METRICS_PERCENTILES_INTERVALS);
     CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false);
     CONF.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.toString());
-
+    volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(CONF);
   }
 
   @AfterEach
@@ -143,7 +146,7 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, VolumeSet volumeSet)
       handlers.put(containerType,
           Handler.getHandlerForContainerType(containerType, CONF,
               context.getParent().getDatanodeDetails().getUuidString(),
-              containerSet, volumeSet, metrics,
+              containerSet, volumeSet, volumeChoosingPolicy, metrics,
               c -> { }));
     }
     HddsDispatcher dispatcher = new HddsDispatcher(CONF, containerSet,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
index 8485b12f59..0b3294b913 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
@@ -76,9 +76,11 @@
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl;
 import org.apache.hadoop.ozone.client.SecretKeyTestClient;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -304,8 +306,9 @@ private OzoneContainer createAndStartOzoneContainerInstance() {
     OzoneContainer container = null;
     try {
       StateContext stateContext = ContainerTestUtils.getMockContext(dn, conf);
+      VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
       container = new OzoneContainer(
-          null, dn, conf, stateContext, caClient, keyClient);
+          null, dn, conf, stateContext, caClient, keyClient, volumeChoosingPolicy);
       MutableVolumeSet volumeSet = container.getVolumeSet();
       StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
           .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile()));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
index de63bfd7c8..4b99635530 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
@@ -54,8 +54,10 @@
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.SecretKeyTestClient;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.ratis.util.ExitUtils;
@@ -83,6 +85,7 @@ class TestSecureOzoneContainer {
   private Path ozoneMetaPath;
 
   private OzoneConfiguration conf;
+  private VolumeChoosingPolicy volumeChoosingPolicy;
   private CertificateClientTestImpl caClient;
   private SecretKeyClient secretKeyClient;
   private ContainerTokenSecretManager secretManager;
@@ -111,6 +114,7 @@ void setup() throws Exception {
     secretKeyClient = new SecretKeyTestClient();
     secretManager = new ContainerTokenSecretManager(
         TimeUnit.DAYS.toMillis(1), secretKeyClient);
+    volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
   }
 
   @ParameterizedTest
@@ -134,7 +138,7 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken,
 
       DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
       container = new OzoneContainer(null, dn, conf, ContainerTestUtils
-          .getMockContext(dn, conf), caClient, secretKeyClient);
+          .getMockContext(dn, conf), caClient, secretKeyClient, volumeChoosingPolicy);
       MutableVolumeSet volumeSet = container.getVolumeSet();
       StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
           .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile()));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index fe83aa0881..b3fb32f072 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -58,6 +58,7 @@
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
@@ -66,6 +67,7 @@
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.ratis.rpc.RpcType;
@@ -84,6 +86,7 @@ public class TestContainerServer {
   private static Path testDir;
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
   private static CertificateClient caClient;
+  private static VolumeChoosingPolicy volumeChoosingPolicy;
   @TempDir
   private Path tempDir;
 
@@ -95,6 +98,7 @@ public static void setup() {
     DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
     caClient = new DNCertificateClient(new SecurityConfig(CONF), null,
         dn, null, null, null);
+    volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(CONF);
   }
 
   @AfterAll
@@ -204,7 +208,7 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
       handlers.put(containerType,
           Handler.getHandlerForContainerType(containerType, conf,
               dd.getUuid().toString(),
-              containerSet, volumeSet, metrics,
+              containerSet, volumeSet, volumeChoosingPolicy, metrics,
               c -> {
               }));
     }
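
[Note, illustrative and not part of the patch: Handler.getHandlerForContainerType now takes the shared policy between the volume set and the metrics arguments. A rough sketch of the dispatcher wiring in the hunk above, assuming conf, dd, containerSet, volumeSet, volumeChoosingPolicy and metrics are already constructed as in the test, and the usual java.util.Map/HashMap imports:]

    // Sketch of the new argument order; the trailing lambda is the no-op
    // incremental container report (ICR) sender used by these tests.
    Map<ContainerProtos.ContainerType, Handler> handlers = new HashMap<>();
    for (ContainerProtos.ContainerType containerType
        : ContainerProtos.ContainerType.values()) {
      handlers.put(containerType,
          Handler.getHandlerForContainerType(containerType, conf,
              dd.getUuid().toString(),
              containerSet, volumeSet, volumeChoosingPolicy, metrics,
              c -> { }));
    }
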
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index f78971af4f..be048e65b3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -89,6 +89,7 @@
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
@@ -96,6 +97,7 @@
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.security.token.Token;
@@ -121,7 +123,7 @@ public class TestSecureContainerServer {
   private static SecretKeyClient secretKeyClient;
   private static OzoneBlockTokenSecretManager blockTokenSecretManager;
   private static ContainerTokenSecretManager containerTokenSecretManager;
-
+  private static VolumeChoosingPolicy volumeChoosingPolicy;
   @BeforeAll
   public static void setup() throws Exception {
     DefaultMetricsSystem.setMiniClusterMode(true);
@@ -131,6 +133,7 @@ public static void setup() throws Exception {
     CONF.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
     caClient = new CertificateClientTestImpl(CONF);
     secretKeyClient = new SecretKeyTestClient();
+    volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(CONF);
 
     long tokenLifetime = TimeUnit.HOURS.toMillis(1);
 
@@ -179,7 +182,7 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
       handlers.put(containerType,
           Handler.getHandlerForContainerType(containerType, conf,
               dd.getUuid().toString(),
-              containerSet, volumeSet, metrics,
+              containerSet, volumeSet, volumeChoosingPolicy, metrics,
               c -> { }));
     }
     HddsDispatcher hddsDispatcher = new HddsDispatcher(
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java
index 7945c3bbfb..83f4ae3596 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java
@@ -45,11 +45,13 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerReader;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
@@ -96,6 +98,7 @@ public void loadContainersFromVolumes() throws IOException {
 
     volumeSet = new MutableVolumeSet(datanodeUuid, conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
+    VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
 
     if (VersionedDatanodeFeatures.SchemaV3.isFinalizedAndEnabled(conf)) {
       MutableVolumeSet dbVolumeSet =
@@ -118,6 +121,7 @@ public void loadContainersFromVolumes() throws IOException {
               datanodeUuid,
               containerSet,
               volumeSet,
+              volumeChoosingPolicy,
               metrics,
               containerReplicaProto -> {
               });
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
index 8d4a4e99af..452846354a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
@@ -45,11 +45,13 @@
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
 import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore;
 import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStoreImpl;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
@@ -200,6 +202,7 @@ private void initializeReplicationSupervisor(
 
     MutableVolumeSet volumeSet = new MutableVolumeSet(fakeDatanodeUuid, conf,
         null, StorageVolume.VolumeType.DATA_VOLUME, null);
+    VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf);
 
     if (VersionedDatanodeFeatures.SchemaV3.isFinalizedAndEnabled(conf)) {
       MutableVolumeSet dbVolumeSet =
@@ -225,6 +228,7 @@ private void initializeReplicationSupervisor(
               fakeDatanodeUuid,
               containerSet,
               volumeSet,
+              volumeChoosingPolicy,
               metrics,
               containerReplicaProto -> {
               });
@@ -236,7 +240,7 @@ private void initializeReplicationSupervisor(
         new ContainerController(containerSet, handlers);
 
     ContainerImporter importer = new ContainerImporter(conf, containerSet,
-        controller, volumeSet);
+        controller, volumeSet, volumeChoosingPolicy);
     replicator = new DownloadAndImportReplicator(conf, containerSet, importer,
         new SimpleContainerDownloader(conf, null));
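
[Note, illustrative and not part of the patch: the point of this change is that one VolumeChoosingPolicy instance now serves both container creation (via the handler) and container import (via ContainerImporter), so both paths choose target volumes consistently. A rough sketch of the shared wiring, assuming conf, datanodeUuid, containerSet, volumeSet and metrics are available as in the code above, plus a java.util.Collections import:]

    // One policy instance shared by creation (handler) and import (importer).
    VolumeChoosingPolicy policy = VolumeChoosingPolicyFactory.getPolicy(conf);

    Handler handler = Handler.getHandlerForContainerType(
        ContainerProtos.ContainerType.KeyValueContainer, conf, datanodeUuid,
        containerSet, volumeSet, policy, metrics, report -> { });

    ContainerController controller = new ContainerController(containerSet,
        Collections.singletonMap(
            ContainerProtos.ContainerType.KeyValueContainer, handler));

    ContainerImporter importer = new ContainerImporter(conf, containerSet,
        controller, volumeSet, policy);
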
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
