This is an automated email from the ASF dual-hosted git repository.

sammichen pushed a commit to branch HDDS-5713
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-5713 by this push:
     new 6f43e7c5985 HDDS-13666. [DiskBalancer]Avoid reverse looping in destination selection when thresholds are low (#9068)
6f43e7c5985 is described below

commit 6f43e7c59855295ad17a3c6ac09e6cc796d56899
Author: Gargi Jaiswal <[email protected]>
AuthorDate: Thu Oct 16 08:35:30 2025 +0530

    HDDS-13666. [DiskBalancer]Avoid reverse looping in destination selection when thresholds are low (#9068)
---
 .../diskbalancer/DiskBalancerService.java          |   5 +-
 .../DiskBalancerVolumeCalculation.java             |  19 +-
 .../policy/ContainerChoosingPolicy.java            |  13 +-
 .../policy/DefaultContainerChoosingPolicy.java     |  58 +++++-
 .../policy/DefaultVolumeChoosingPolicy.java        |   2 +-
 .../TestDefaultContainerChoosingPolicy.java        | 203 +++++++++++++++++++++
 .../diskbalancer/TestDiskBalancerService.java      |   2 +-
 .../scm/node/TestContainerChoosingPolicy.java      |  36 +++-
 8 files changed, 315 insertions(+), 23 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
index aa9dd257678..1f913679fa4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
@@ -388,7 +388,8 @@ public BackgroundTaskQueue getTasks() {
       }
       HddsVolume sourceVolume = pair.getLeft(), destVolume = pair.getRight();
       ContainerData toBalanceContainer = containerChoosingPolicy
-          .chooseContainer(ozoneContainer, sourceVolume, inProgressContainers);
+          .chooseContainer(ozoneContainer, sourceVolume, destVolume,
+              inProgressContainers, threshold, volumeSet, deltaSizes);
       if (toBalanceContainer != null) {
         DiskBalancerTask task = new DiskBalancerTask(toBalanceContainer, sourceVolume,
             destVolume);
@@ -643,7 +644,7 @@ public long calculateBytesToMove(ImmutableList<HddsVolume> inputVolumeSet) {
     }
 
     // Calculate ideal usage
-    double idealUsage = DiskBalancerVolumeCalculation.getIdealUsage(inputVolumeSet);
+    double idealUsage = DiskBalancerVolumeCalculation.getIdealUsage(inputVolumeSet, deltaSizes);
     double normalizedThreshold = threshold / 100.0;
 
     long totalBytesToMove = 0;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerVolumeCalculation.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerVolumeCalculation.java
index 6bfb168d1da..442cdcface4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerVolumeCalculation.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerVolumeCalculation.java
@@ -61,20 +61,26 @@ public static ImmutableList<HddsVolume> getImmutableVolumeSet(MutableVolumeSet v
    * Get ideal usage from an immutable list of volumes.
    * 
    * @param volumes Immutable list of volumes
+   * @param deltaMap A map that tracks the total bytes which will be freed
+   * from each source volume during container moves
    * @return Ideal usage as a ratio (used space / total capacity)
    * @throws IllegalArgumentException if total capacity is zero
    */
-  public static double getIdealUsage(ImmutableList<HddsVolume> volumes) {
-    long totalCapacity = 0L, totalFree = 0L;
+  public static double getIdealUsage(ImmutableList<HddsVolume> volumes,
+      Map<HddsVolume, Long> deltaMap) {
+    long totalCapacity = 0L, totalEffectiveUsed = 0L;
     
     for (HddsVolume volume : volumes) {
       SpaceUsageSource usage = volume.getCurrentUsage();
       totalCapacity += usage.getCapacity();
-      totalFree += usage.getAvailable();
+      long currentUsed = usage.getCapacity() - usage.getAvailable();
+      long delta = (deltaMap != null) ? deltaMap.getOrDefault(volume, 0L) : 0L;
+      long committed = volume.getCommittedBytes();
+      totalEffectiveUsed += (currentUsed + delta + committed);
     }
     
     Preconditions.checkArgument(totalCapacity != 0);
-    return ((double) (totalCapacity - totalFree)) / totalCapacity;
+    return ((double) (totalEffectiveUsed)) / totalCapacity;
   }
   
   /**
@@ -84,7 +90,8 @@ public static double getIdealUsage(ImmutableList<HddsVolume> volumes) {
   * @param deltaMap Map of volume to delta sizes (ongoing operations), can be null
    * @return VolumeDataDensity sum across all volumes
    */
-  public static double calculateVolumeDataDensity(ImmutableList<HddsVolume> volumeSet, Map<HddsVolume, Long> deltaMap) {
+  public static double calculateVolumeDataDensity(ImmutableList<HddsVolume> volumeSet,
+      Map<HddsVolume, Long> deltaMap) {
     if (volumeSet == null) {
       LOG.warn("VolumeSet is null, returning 0.0 for VolumeDataDensity");
       return 0.0;
@@ -97,7 +104,7 @@ public static double calculateVolumeDataDensity(ImmutableList<HddsVolume> volume
       }
 
       // Calculate ideal usage using the same immutable volume snapshot
-      double idealUsage = getIdealUsage(volumeSet);
+      double idealUsage = getIdealUsage(volumeSet, deltaMap);
       double volumeDensitySum = 0.0;
 
       // Calculate density for each volume using the same snapshot
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/ContainerChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/ContainerChoosingPolicy.java
index aa80ccdf867..da52f4fcd53 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/ContainerChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/ContainerChoosingPolicy.java
@@ -17,10 +17,12 @@
 
 package org.apache.hadoop.ozone.container.diskbalancer.policy;
 
+import java.util.Map;
 import java.util.Set;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 
 /**
@@ -30,11 +32,18 @@ public interface ContainerChoosingPolicy {
   /**
    * Choose a container for balancing.
   * @param ozoneContainer the OzoneContainer instance to get all containers of a particular volume.
-   * @param volume the HddsVolume instance to choose containers from.
+   * @param srcVolume the HddsVolume instance to choose containers from.
+   * @param destVolume the destination volume to which container is being moved.
    * @param inProgressContainerIDs containerIDs present in this set should be
    - avoided as these containers are already under move by diskBalancer.
+   * @param threshold the threshold value
+   * @param volumeSet the volumeSet instance
+   * @param deltaMap the deltaMap instance of source volume
    * @return a Container
    */
   ContainerData chooseContainer(OzoneContainer ozoneContainer,
-      HddsVolume volume, Set<ContainerID> inProgressContainerIDs);
+      HddsVolume srcVolume, HddsVolume destVolume,
+      Set<ContainerID> inProgressContainerIDs,
+      Double threshold, MutableVolumeSet volumeSet,
+      Map<HddsVolume, Long> deltaMap);
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultContainerChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultContainerChoosingPolicy.java
index bfc9d15a1da..c759271f881 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultContainerChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultContainerChoosingPolicy.java
@@ -22,13 +22,18 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.ImmutableList;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.diskbalancer.DiskBalancerVolumeCalculation;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -49,31 +54,72 @@ public class DefaultContainerChoosingPolicy implements ContainerChoosingPolicy {
 
   @Override
   public ContainerData chooseContainer(OzoneContainer ozoneContainer,
-      HddsVolume hddsVolume, Set<ContainerID> inProgressContainerIDs) {
+      HddsVolume srcVolume, HddsVolume destVolume,
+      Set<ContainerID> inProgressContainerIDs,
+      Double threshold, MutableVolumeSet volumeSet,
+      Map<HddsVolume, Long> deltaMap) {
     Iterator<Container<?>> itr;
     try {
-      itr = CACHE.get().get(hddsVolume,
-          () -> ozoneContainer.getController().getContainers(hddsVolume));
+      itr = CACHE.get().get(srcVolume,
+          () -> ozoneContainer.getController().getContainers(srcVolume));
     } catch (ExecutionException e) {
-      LOG.warn("Failed to get container iterator for volume {}", hddsVolume, e);
+      LOG.warn("Failed to get container iterator for volume {}", srcVolume, e);
       return null;
     }
 
+    // Calculate maxAllowedUtilization
+    ImmutableList<HddsVolume> immutableVolumeSet = DiskBalancerVolumeCalculation.getImmutableVolumeSet(volumeSet);
+    double idealUsage = DiskBalancerVolumeCalculation.getIdealUsage(immutableVolumeSet, deltaMap);
+    double maxAllowedUtilization = idealUsage + (threshold / 100.0);
+
     while (itr.hasNext()) {
       ContainerData containerData = itr.next().getContainerData();
       if (!inProgressContainerIDs.contains(
           ContainerID.valueOf(containerData.getContainerID())) &&
           (containerData.isClosed() || (test && containerData.isQuasiClosed()))) {
-        return containerData;
+
+        // This is a candidate container. Now, check if moving it would be productive.
+        if (isMoveProductive(containerData, destVolume, maxAllowedUtilization)) {
+          return containerData;
+        }
       }
     }
 
     if (!itr.hasNext()) {
-      CACHE.get().invalidate(hddsVolume);
+      CACHE.get().invalidate(srcVolume);
     }
     return null;
   }
 
+  /**
+   * Checks if moving the given container from source to destination would
+   * result in the destination's utilization being less than or equal to the
+   * averageUtilization + threshold. This prevents "thrashing" where a move
+   * immediately makes the destination a candidate for a source volume.
+   *
+   * @param containerData The container to be moved.
+   * @param destVolume The destination volume.
+   * @param maxAllowedUtilization The maximum allowed utilization
+   * for the destination volume.
+   * @return true if the move is productive, false otherwise.
+   */
+  private boolean isMoveProductive(ContainerData containerData, HddsVolume destVolume,
+      Double maxAllowedUtilization) {
+    long containerSize = containerData.getBytesUsed();
+    SpaceUsageSource usage = destVolume.getCurrentUsage();
+
+    double newDestUtilization =
+        (double) ((usage.getCapacity() - usage.getAvailable()) + destVolume.getCommittedBytes() + containerSize)
+            / usage.getCapacity();
+
+    if (newDestUtilization <= maxAllowedUtilization) {
+      // The move is productive.
+      return true;
+    }
+
+    return false;
+  }
+
   @VisibleForTesting
   public static void setTest(boolean isTest) {
     test = isTest;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultVolumeChoosingPolicy.java
index 021488987a7..1dee4b57d43 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultVolumeChoosingPolicy.java
@@ -61,7 +61,7 @@ public Pair<HddsVolume, HddsVolume> chooseVolume(MutableVolumeSet volumeSet,
       }
       
       // Calculate ideal usage using the same immutable volume
-      double idealUsage = DiskBalancerVolumeCalculation.getIdealUsage(allVolumes);
+      double idealUsage = DiskBalancerVolumeCalculation.getIdealUsage(allVolumes, deltaMap);
 
       // Threshold is given as a percentage
       double normalizedThreshold = threshold / 100;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDefaultContainerChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDefaultContainerChoosingPolicy.java
new file mode 100644
index 00000000000..32586223159
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDefaultContainerChoosingPolicy.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.diskbalancer;
+
+import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.diskbalancer.policy.ContainerChoosingPolicy;
+import org.apache.hadoop.ozone.container.diskbalancer.policy.DefaultContainerChoosingPolicy;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+/**
+ * Unit tests for the DefaultContainerChoosingPolicy.
+ */
+public class TestDefaultContainerChoosingPolicy {
+
+  @TempDir
+  private Path baseDir;
+
+  private static final OzoneConfiguration CONF = new OzoneConfiguration();
+  private static final long MB = 1024L * 1024L;
+  private static final long VOLUME_CAPACITY = 2500L * MB; // 2500MB
+  private static final double THRESHOLD = 10.0;
+
+  private ContainerChoosingPolicy policy;
+  private OzoneContainer ozoneContainer;
+  private MutableVolumeSet volumeSet;
+  private ContainerSet containerSet;
+  private HddsVolume sourceVolume;
+  private HddsVolume destVolume1;
+  private HddsVolume destVolume2;
+  private Set<ContainerID> inProgressContainerIDs;
+  private Map<HddsVolume, Long> deltaMap;
+
+  @BeforeEach
+  public void setup() throws Exception {
+    policy = new DefaultContainerChoosingPolicy();
+    setupVolumesAndContainer();
+    inProgressContainerIDs = new HashSet<>();
+    deltaMap = new HashMap<>();
+  }
+
+  /**
+   * Sets up a scenario with three volumes and five containers on the source.
+   * Volumes:
+   * - Source Volume: 80% used (2000MB)
+   * - Destination Volume 1: 10% used (250MB)
+   * - Destination Volume 2: 50% used (1250MB)
+   * Containers on Source (Total 2000MB):
+   * - C1: 500MB, C2: 450MB, C3: 200MB, C4: 350MB, C5: 500MB
+   */
+  private void setupVolumesAndContainer() throws IOException {
+    // Create volumes with specific utilization
+    sourceVolume = createVolume("source-volume", 0.80);
+    destVolume1 = createVolume("dest-volume1", 0.10);
+    destVolume2 = createVolume("dest-volume2", 0.50);
+
+    // Create and spy on the volume set
+    String datanodeUuid = UUID.randomUUID().toString();
+    volumeSet = spy(new MutableVolumeSet(datanodeUuid, CONF, null,
+        StorageVolume.VolumeType.DATA_VOLUME, null));
+    when(volumeSet.getVolumesList())
+        .thenReturn(Arrays.asList(sourceVolume, destVolume1, destVolume2));
+
+    // Create five containers on the source volume
+    containerSet = newContainerSet();
+    Map<Long, Long> containerSizes = new HashMap<>();
+    containerSizes.put(1L, 500L * MB);
+    containerSizes.put(2L, 450L * MB);
+    containerSizes.put(3L, 200L * MB);
+    containerSizes.put(4L, 350L * MB);
+    containerSizes.put(5L, 500L * MB);
+
+    for (Map.Entry<Long, Long> entry : containerSizes.entrySet()) {
+      createContainer(entry.getKey(), entry.getValue(), sourceVolume);
+    }
+
+    // Mock OzoneContainer to return our container set
+    ozoneContainer = mock(OzoneContainer.class);
+    ContainerController controller = new ContainerController(containerSet, null);
+    when(ozoneContainer.getController()).thenReturn(controller);
+  }
+
+  /**
+   * Create volumes with specific utilization.
+   */
+  private HddsVolume createVolume(String dir, double utilization)
+      throws IOException {
+    long usedSpace = (long) (VOLUME_CAPACITY * utilization);
+    Path volumePath = baseDir.resolve(dir);
+
+    SpaceUsageSource source = MockSpaceUsageSource.fixed(VOLUME_CAPACITY,
+        VOLUME_CAPACITY - usedSpace);
+    SpaceUsageCheckFactory factory = MockSpaceUsageCheckFactory.of(
+        source, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
+
+    return new HddsVolume.Builder(volumePath.toString())
+        .conf(CONF)
+        .usageCheckFactory(factory)
+        .build();
+  }
+
+  /**
+   * Create a KeyValueContainer and add it to the containerSet.
+   */
+  private void createContainer(long id, long size, HddsVolume vol)
+      throws IOException {
+    KeyValueContainerData containerData = new KeyValueContainerData(id,
+        ContainerLayoutVersion.FILE_PER_BLOCK, size,
+        UUID.randomUUID().toString(), UUID.randomUUID().toString());
+    containerData.setState(ContainerDataProto.State.CLOSED);
+    containerData.setVolume(vol);
+    containerData.getStatistics().setBlockBytesForTesting(size);
+    KeyValueContainer container = new KeyValueContainer(containerData, CONF);
+    containerSet.addContainer(container);
+  }
+
+  @Test
+  public void testContainerChosenSuccessfully() {
+    // The policy should choose the first productive container for destVolume1.
+    // Total Used: 2000MB + 250MB + 1250MB = 3500MB
+    // Total Capacity: 3 * 2500MB = 7500MB
+    // Ideal Usage: 3500 / 7500 = ~46.67%
+    // Max Allowed Utilization: 46.67% + 10% (threshold) = ~56.67%
+    //
+    // Evaluation for destVolume1 (10% used / 250MB):
+    // - C1 (500MB) is productive: (250+500)/2500 = 30% <= 56.67%
+    // The policy iterates by container ID, so it will find and return C1.
+
+    ContainerData chosenContainer = policy.chooseContainer(ozoneContainer,
+        sourceVolume, destVolume1, inProgressContainerIDs, THRESHOLD, volumeSet, deltaMap);
+
+    // first container should be chosen
+    assertNotNull(chosenContainer);
+    assertEquals(1L, chosenContainer.getContainerID());
+  }
+
+  @Test
+  public void testContainerNotChosen() {
+    // For destVolume2, no container move should be productive.
+    // Max Allowed Utilization is ~56.67%.
+    //
+    // Evaluation for destVolume2 (50% used / 1250MB):
+    // Max productive size = (0.5667 * 2500) - 1250 = 166.75MB
+    // All containers on the source volume (smallest is 200MB) are larger
+    // than 166.75MB. Therefore, no container should be chosen.
+
+    ContainerData chosenContainer = policy.chooseContainer(ozoneContainer,
+        sourceVolume, destVolume2, inProgressContainerIDs, THRESHOLD, volumeSet, deltaMap);
+
+    // No container should be chosen
+    assertNull(chosenContainer);
+  }
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDiskBalancerService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDiskBalancerService.java
index 0d22a896062..9f20cfa53a6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDiskBalancerService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDiskBalancerService.java
@@ -293,7 +293,7 @@ public void testConcurrentTasksNotExceedThreadLimit() throws Exception {
     when(containerData.getBytesUsed()).thenReturn(100L);
 
     when(volumePolicy.chooseVolume(any(), anyDouble(), any(), anyLong())).thenReturn(Pair.of(source, dest));
-    when(containerPolicy.chooseContainer(any(), any(), any())).thenReturn(containerData);
+    when(containerPolicy.chooseContainer(any(), any(), any(), any(), any(), any(), any())).thenReturn(containerData);
 
     // Test when no tasks are in progress, it should schedule up to the limit
     BackgroundTaskQueue queue = svc.getTasks();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerChoosingPolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerChoosingPolicy.java
index 114a6404435..8a267a17784 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerChoosingPolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerChoosingPolicy.java
@@ -19,8 +19,10 @@
 
 import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
@@ -55,6 +57,8 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.diskbalancer.policy.ContainerChoosingPolicy;
 import org.apache.hadoop.ozone.container.diskbalancer.policy.DefaultContainerChoosingPolicy;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
@@ -77,6 +81,7 @@ public class TestContainerChoosingPolicy {
   private static final int NUM_THREADS = 10;
   private static final int NUM_ITERATIONS = 10000;
   private static final int MAX_IN_PROGRESS = 100;
+  private static final double THRESHOLD = 10.0;
 
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
 
@@ -88,6 +93,7 @@ public class TestContainerChoosingPolicy {
   private OzoneContainer ozoneContainer;
   private ContainerChoosingPolicy containerChoosingPolicy;
   private ExecutorService executor;
+  private MutableVolumeSet volumeSet;
 
   // Simulate containers currently being balanced (in progress)
   private Set<ContainerID> inProgressContainerIDs = ConcurrentHashMap.newKeySet();
@@ -103,6 +109,13 @@ public void setup() throws Exception {
     when(ozoneContainer.getContainerSet()).thenReturn(containerSet);
     containerChoosingPolicy = new DefaultContainerChoosingPolicy();
     executor = Executors.newFixedThreadPool(NUM_THREADS);
+
+    // Create a spied MutableVolumeSet and inject the test volumes
+    String datanodeUuid = UUID.randomUUID().toString();
+    volumeSet = spy(new MutableVolumeSet(datanodeUuid, CONF, null,
+        StorageVolume.VolumeType.DATA_VOLUME, null));
+    when(volumeSet.getVolumesList())
+        .thenReturn(new ArrayList<>(volumes));
   }
 
   @AfterEach
@@ -123,24 +136,31 @@ public void cleanUp() {
 
   @Test
   @Timeout(300)
-  public void testConcurrentVolumeChoosingPerformance() throws Exception {
+  public void testConcurrentContainerChoosingPerformance() throws Exception {
     testPolicyPerformance("ContainerChoosingPolicy", containerChoosingPolicy);
   }
 
   @Test
   public void testContainerDeletionAfterIteratorGeneration() throws Exception {
     HddsVolume volume = volumes.get(0);
+    HddsVolume destVolume = volumes.get(1);
+
     List<Container<?>> containerList = ozoneContainer.getContainerSet().getContainerMap().values().stream()
         .filter(x -> volume.getStorageID().equals(x.getContainerData().getVolume().getStorageID()))
         .filter(x -> x.getContainerData().isClosed())
         .sorted(ContainerDataScanOrder.INSTANCE)
         .collect(Collectors.toList());
     inProgressContainerIDs.clear();
-    ContainerData container = containerChoosingPolicy.chooseContainer(ozoneContainer, volume, inProgressContainerIDs);
+
+    ContainerData container = containerChoosingPolicy.chooseContainer(ozoneContainer, volume, destVolume,
+        inProgressContainerIDs, THRESHOLD, volumeSet, null);
+    assertNotNull(container);
     assertEquals(containerList.get(0).getContainerData().getContainerID(), container.getContainerID());
+
     ozoneContainer.getContainerSet().removeContainer(containerList.get(1).getContainerData().getContainerID());
     inProgressContainerIDs.add(ContainerID.valueOf(container.getContainerID()));
-    container = containerChoosingPolicy.chooseContainer(ozoneContainer, volume, inProgressContainerIDs);
+    container = containerChoosingPolicy.chooseContainer(ozoneContainer, volume,
+        destVolume, inProgressContainerIDs, THRESHOLD, volumeSet, null);
     assertEquals(containerList.get(1).getContainerData().getContainerID(), container.getContainerID());
   }
 
@@ -165,11 +185,17 @@ private void testPolicyPerformance(String policyName, ContainerChoosingPolicy po
           int containerNotChosen = 0;
           int failures = 0;
           // Choose a random volume
-          HddsVolume volume = volumes.get(rand.nextInt(NUM_VOLUMES));
+          HddsVolume srcVolume = volumes.get(rand.nextInt(NUM_VOLUMES));
+          HddsVolume destVolume;
+
+          do {
+            destVolume = volumes.get(rand.nextInt(NUM_VOLUMES));
+          } while (srcVolume.equals(destVolume));
 
           for (int j = 0; j < NUM_ITERATIONS; j++) {
             try {
-              ContainerData c = policy.chooseContainer(ozoneContainer, volume, inProgressContainerIDs);
+              ContainerData c = policy.chooseContainer(ozoneContainer, srcVolume,
+                  destVolume, inProgressContainerIDs, THRESHOLD, volumeSet, null);
               if (c == null) {
                 containerNotChosen++;
               } else {
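
The destination check this patch introduces comes down to simple arithmetic: a move is productive only if the destination stays at or below idealUsage + threshold afterwards. The short standalone sketch below is illustrative only; ProductiveMoveSketch and its helpers are not part of the patch, and committed bytes and in-flight deltas (which the patched getIdealUsage also counts) are assumed to be zero. It reruns the numbers from the new test: three 2500 MB volumes at 80%, 10% and 50% usage with a 10% threshold.

// Standalone sketch, not the patched classes: reproduces the arithmetic from
// TestDefaultContainerChoosingPolicy under the simplifying assumptions above.
public final class ProductiveMoveSketch {

  private static final long MB = 1024L * 1024L;
  private static final long CAPACITY = 2500L * MB; // each volume is 2500 MB
  private static final double THRESHOLD = 10.0;    // percent

  // Ideal usage = total used bytes / total capacity across all volumes.
  static double idealUsage(long[] usedBytes) {
    long totalUsed = 0;
    for (long used : usedBytes) {
      totalUsed += used;
    }
    return (double) totalUsed / (CAPACITY * usedBytes.length);
  }

  // A move is productive if the destination stays at or below the cap after it.
  static boolean isMoveProductive(long destUsed, long containerSize, double maxAllowedUtilization) {
    double newDestUtilization = (double) (destUsed + containerSize) / CAPACITY;
    return newDestUtilization <= maxAllowedUtilization;
  }

  public static void main(String[] args) {
    long[] used = {2000L * MB, 250L * MB, 1250L * MB}; // source, dest1, dest2
    double maxAllowed = idealUsage(used) + THRESHOLD / 100.0; // ~0.5667

    // 500 MB container onto the 10%-used volume: (250 + 500) / 2500 = 30% <= ~56.67%
    System.out.println(isMoveProductive(used[1], 500L * MB, maxAllowed)); // true
    // 200 MB container onto the 50%-used volume: (1250 + 200) / 2500 = 58% > ~56.67%
    System.out.println(isMoveProductive(used[2], 200L * MB, maxAllowed)); // false
  }
}

Running it prints true for the 10%-used destination and false for the 50%-used one, matching the two new test cases: even the smallest 200 MB container would push the half-full volume past the ~56.67% cap, so the policy skips it instead of looping back over it.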


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
