This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch HDDS-5713
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-5713 by this push:
new 03c9069750 HDDS-12678. [DiskBalancer] Use ( capacity - free ) / capacity to calculate disk utilization (#8278)
03c9069750 is described below
commit 03c9069750595ede26a97aa2aa3c81924d6450f1
Author: Gargi Jaiswal <[email protected]>
AuthorDate: Mon Apr 21 13:40:45 2025 +0530
HDDS-12678. [DiskBalancer] Use ( capacity - free ) / capacity to calculate disk utilization (#8278)
---
.../ozone/container/common/volume/MutableVolumeSet.java | 6 +++---
.../ozone/container/diskbalancer/DiskBalancerService.java | 12 ++++++------
.../diskbalancer/policy/DefaultVolumeChoosingPolicy.java | 9 ++++++---
.../org/apache/hadoop/hdds/scm/node/DiskBalancerManager.java | 8 ++++----
4 files changed, 19 insertions(+), 16 deletions(-)
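
For context, a minimal standalone Java sketch (not the Ozone API; the class name and the volume numbers are made up for illustration) of why ( capacity - free ) / capacity can differ from used / capacity when part of a disk is reserved or holds data outside the service:

    // Hypothetical numbers, assuming a volume where used + available < capacity
    // (for example, reserved space or data written outside the service).
    public final class UtilizationExample {
      public static void main(String[] args) {
        long capacity  = 1000L;  // total volume capacity
        long used      = 300L;   // space the service accounts as "used"
        long available = 500L;   // free space actually left on the disk

        double oldUtilization = (double) used / capacity;                    // 0.30
        double newUtilization = (double) (capacity - available) / capacity;  // 0.50

        System.out.printf("used / capacity              = %.2f%n", oldUtilization);
        System.out.printf("(capacity - free) / capacity = %.2f%n", newUtilization);
      }
    }

The second figure reflects what is actually occupied on the disk, which is the value the changed code below compares against its threshold.
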
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index a83288bed5..fcba2d65d9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -471,13 +471,13 @@ public StorageLocationReport[] getStorageReport() {
}
public double getIdealUsage() {
- long totalCapacity = 0L, totalUsed = 0L;
+ long totalCapacity = 0L, totalFree = 0L;
for (StorageVolume volume: volumeMap.values()) {
SpaceUsageSource usage = volume.getCurrentUsage();
totalCapacity += usage.getCapacity();
- totalUsed += usage.getUsedSpace();
+ totalFree += usage.getAvailable();
}
Preconditions.checkArgument(totalCapacity != 0);
- return (double) totalUsed / totalCapacity;
+ return ((double) (totalCapacity - totalFree)) / totalCapacity;
}
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
index c3f882298c..5bd0992e52 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
@@ -519,11 +519,11 @@ public DiskBalancerInfo getDiskBalancerInfo() {
public long calculateBytesToMove(MutableVolumeSet inputVolumeSet) {
long bytesPendingToMove = 0;
- long totalUsedSpace = 0;
+ long totalFreeSpace = 0;
long totalCapacity = 0;
for (HddsVolume volume :
StorageVolumeUtil.getHddsVolumesList(inputVolumeSet.getVolumesList())) {
- totalUsedSpace += volume.getCurrentUsage().getUsedSpace();
+ totalFreeSpace += volume.getCurrentUsage().getAvailable();
totalCapacity += volume.getCurrentUsage().getCapacity();
}
@@ -531,20 +531,20 @@ public long calculateBytesToMove(MutableVolumeSet inputVolumeSet) {
return 0;
}
- double datanodeUtilization = (double) totalUsedSpace / totalCapacity;
+ double datanodeUtilization = ((double) (totalCapacity - totalFreeSpace)) / totalCapacity;
double thresholdFraction = threshold / 100.0;
double upperLimit = datanodeUtilization + thresholdFraction;
// Calculate excess data in overused volumes
for (HddsVolume volume :
StorageVolumeUtil.getHddsVolumesList(inputVolumeSet.getVolumesList())) {
- long usedSpace = volume.getCurrentUsage().getUsedSpace();
+ long freeSpace = volume.getCurrentUsage().getAvailable();
long capacity = volume.getCurrentUsage().getCapacity();
- double volumeUtilization = (double) usedSpace / capacity;
+ double volumeUtilization = ((double) (capacity - freeSpace)) / capacity;
// Consider only volumes exceeding the upper threshold
if (volumeUtilization > upperLimit) {
- long excessData = usedSpace - (long) (upperLimit * capacity);
+ long excessData = (capacity - freeSpace) - (long) (upperLimit * capacity);
bytesPendingToMove += excessData;
}
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultVolumeChoosingPolicy.java
index 2b5f37075d..393b3e15d5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultVolumeChoosingPolicy.java
@@ -45,13 +45,16 @@ public Pair<HddsVolume, HddsVolume> chooseVolume(MutableVolumeSet volumeSet,
.stream()
.filter(volume ->
Math.abs(
- (double) (volume.getCurrentUsage().getUsedSpace() + deltaMap.getOrDefault(volume, 0L))
+ ((double)((volume.getCurrentUsage().getCapacity() - volume.getCurrentUsage().getAvailable())
+ + deltaMap.getOrDefault(volume, 0L)))
/ volume.getCurrentUsage().getCapacity() - idealUsage) >= threshold)
.sorted((v1, v2) ->
Double.compare(
- (double) (v2.getCurrentUsage().getUsedSpace() + deltaMap.getOrDefault(v2, 0L)) /
+ (double) ((v2.getCurrentUsage().getCapacity() - v2.getCurrentUsage().getAvailable())
+ + deltaMap.getOrDefault(v2, 0L)) /
v2.getCurrentUsage().getCapacity(),
- (double) (v1.getCurrentUsage().getUsedSpace() + deltaMap.getOrDefault(v1, 0L)) /
+ (double) ((v1.getCurrentUsage().getCapacity() - v1.getCurrentUsage().getAvailable())
+ + deltaMap.getOrDefault(v1, 0L)) /
v1.getCurrentUsage().getCapacity()))
.collect(Collectors.toList());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerManager.java
index f915405507..74c93bab70 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerManager.java
@@ -284,18 +284,18 @@ private double getVolumeDataDensitySumForDatanodeDetails(
DatanodeInfo datanodeInfo = (DatanodeInfo) datanodeDetails;
- double totalCapacity = 0d, totalUsed = 0d;
+ double totalCapacity = 0d, totalFree = 0d;
for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) {
totalCapacity += reportProto.getCapacity();
- totalUsed += reportProto.getScmUsed();
+ totalFree += reportProto.getRemaining();
}
Preconditions.checkArgument(totalCapacity != 0);
- double idealUsage = totalUsed / totalCapacity;
+ double idealUsage = (totalCapacity - totalFree) / totalCapacity;
double volumeDensitySum = datanodeInfo.getStorageReports().stream()
.map(report ->
- Math.abs((double)report.getScmUsed() / report.getCapacity()
+ Math.abs(((double) (report.getCapacity() - report.getRemaining())) / report.getCapacity()
- idealUsage))
.mapToDouble(Double::valueOf).sum();
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]