This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new c8813dcbc85 HDDS-13545. MutableVolumeSet code cleanup (#9755)
c8813dcbc85 is described below
commit c8813dcbc85b4e62c456b3e0375c1d8d44dbc32e
Author: Rishabh Patel <[email protected]>
AuthorDate: Thu Feb 12 07:45:42 2026 -0800
HDDS-13545. MutableVolumeSet code cleanup (#9755)
---
.../apache/hadoop/ozone/HddsDatanodeService.java | 9 +-
.../states/endpoint/VersionEndpointTask.java | 2 +-
.../container/common/volume/MutableVolumeSet.java | 96 ++--------------------
.../common/volume/TestStorageVolumeChecker.java | 8 +-
.../container/common/volume/TestVolumeSet.java | 52 ++----------
5 files changed, 20 insertions(+), 147 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 2df26ca6e26..b0fbfe30b93 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -401,12 +401,11 @@ private void startRatisForTest() throws IOException {
MutableVolumeSet volumeSet =
getDatanodeStateMachine().getContainer().getVolumeSet();
- Map<String, StorageVolume> volumeMap = volumeSet.getVolumeMap();
+ List<StorageVolume> volumeList = volumeSet.getVolumesList();
- for (Map.Entry<String, StorageVolume> entry : volumeMap.entrySet()) {
- HddsVolume hddsVolume = (HddsVolume) entry.getValue();
- boolean result = StorageVolumeUtil.checkVolume(hddsVolume, clusterId,
- clusterId, conf, LOG, null);
+ for (StorageVolume storageVolume : volumeList) {
+ HddsVolume hddsVolume = (HddsVolume) storageVolume;
+ boolean result = StorageVolumeUtil.checkVolume(hddsVolume, clusterId,
clusterId, conf, LOG, null);
if (!result) {
volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath());
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 0f2b26b148d..b9326c07c5b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -115,7 +115,7 @@ private void checkVolumeSet(MutableVolumeSet volumeSet,
try {
// If version file does not exist
// create version file and also set scm ID or cluster ID.
- for (StorageVolume volume : volumeSet.getVolumeMap().values()) {
+ for (StorageVolume volume : volumeSet.getVolumesList()) {
boolean result = StorageVolumeUtil.checkVolume(volume,
scmId, clusterId, configuration, LOG,
ozoneContainer.getDbVolumeSet());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 65d2fb70166..bef9c257178 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -21,16 +21,13 @@
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Collection;
-import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
-import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
@@ -64,15 +61,9 @@ public class MutableVolumeSet implements VolumeSet {
*/
private Map<String, StorageVolume> failedVolumeMap;
- /**
- * Maintains a list of active volumes per StorageType.
- */
- private EnumMap<StorageType, List<StorageVolume>> volumeStateMap;
-
/**
* A Reentrant Read Write Lock to synchronize volume operations in VolumeSet.
- * Any update to {@link #volumeMap}, {@link #failedVolumeMap}, or
- * {@link #volumeStateMap} should be done after acquiring the write lock.
+   * Any update to {@link #volumeMap} or {@link #failedVolumeMap} should be
+   * done after acquiring the write lock.
*/
private final ReentrantReadWriteLock volumeSetRWLock;
@@ -148,7 +139,6 @@ public StorageVolumeChecker getVolumeChecker() {
*/
private void initializeVolumeSet() throws IOException {
failedVolumeMap = new ConcurrentHashMap<>();
- volumeStateMap = new EnumMap<>(StorageType.class);
Collection<String> rawLocations;
if (volumeType == StorageVolume.VolumeType.META_VOLUME) {
@@ -159,10 +149,6 @@ private void initializeVolumeSet() throws IOException {
rawLocations = HddsServerUtil.getDatanodeStorageDirs(conf);
}
- for (StorageType storageType : StorageType.values()) {
- volumeStateMap.put(storageType, new ArrayList<>());
- }
-
for (String locationString : rawLocations) {
StorageVolume volume = null;
try {
@@ -180,7 +166,6 @@ private void initializeVolumeSet() throws IOException {
volume.getStorageDir());
}
volumeMap.put(volume.getStorageDir().getPath(), volume);
- volumeStateMap.get(volume.getStorageType()).add(volume);
volumeHealthMetrics.incrementHealthyVolumes();
} catch (IOException e) {
volumeHealthMetrics.incrementFailedVolumes();
@@ -329,45 +314,6 @@ public void writeUnlock() {
volumeSetRWLock.writeLock().unlock();
}
- // Add a volume to VolumeSet
- boolean addVolume(String dataDir) {
- return addVolume(dataDir, StorageType.DEFAULT);
- }
-
- // Add a volume to VolumeSet
- private boolean addVolume(String volumeRoot, StorageType storageType) {
- boolean success;
-
- this.writeLock();
- try {
- if (volumeMap.containsKey(volumeRoot)) {
- LOG.warn("Volume : {} already exists in VolumeMap", volumeRoot);
- success = false;
- } else {
- if (failedVolumeMap.containsKey(volumeRoot)) {
- failedVolumeMap.remove(volumeRoot);
- volumeHealthMetrics.decrementFailedVolumes();
- }
-
- StorageVolume volume =
- volumeFactory.createVolume(volumeRoot, storageType);
- volumeMap.put(volume.getStorageDir().getPath(), volume);
- volumeStateMap.get(volume.getStorageType()).add(volume);
-
- LOG.info("Added Volume : {} to VolumeSet",
- volume.getStorageDir().getPath());
- success = true;
- volumeHealthMetrics.incrementHealthyVolumes();
- }
- } catch (IOException ex) {
- LOG.error("Failed to add volume " + volumeRoot + " to VolumeSet", ex);
- success = false;
- } finally {
- this.writeUnlock();
- }
- return success;
- }
-
// Mark a volume as failed
public void failVolume(String volumeRoot) {
this.writeLock();
@@ -377,7 +323,6 @@ public void failVolume(String volumeRoot) {
volume.failVolume();
volumeMap.remove(volumeRoot);
- volumeStateMap.get(volume.getStorageType()).remove(volume);
failedVolumeMap.put(volumeRoot, volume);
volumeHealthMetrics.decrementHealthyVolumes();
volumeHealthMetrics.incrementFailedVolumes();
@@ -392,30 +337,6 @@ public void failVolume(String volumeRoot) {
}
}
- // Remove a volume from the VolumeSet completely.
- public void removeVolume(String volumeRoot) throws IOException {
- this.writeLock();
- try {
- if (volumeMap.containsKey(volumeRoot)) {
- StorageVolume volume = volumeMap.get(volumeRoot);
- volume.shutdown();
-
- volumeMap.remove(volumeRoot);
- volumeStateMap.get(volume.getStorageType()).remove(volume);
- volumeHealthMetrics.decrementHealthyVolumes();
- LOG.info("Removed Volume : {} from VolumeSet", volumeRoot);
- } else if (failedVolumeMap.containsKey(volumeRoot)) {
- failedVolumeMap.remove(volumeRoot);
- volumeHealthMetrics.decrementFailedVolumes();
- LOG.info("Removed Volume : {} from failed VolumeSet", volumeRoot);
- } else {
- LOG.warn("Volume : {} does not exist in VolumeSet", volumeRoot);
- }
- } finally {
- this.writeUnlock();
- }
- }
-
/**
* Shutdown the volumeset.
*/
@@ -435,7 +356,6 @@ public void shutdown() {
}
@Override
- @VisibleForTesting
public List<StorageVolume> getVolumesList() {
return ImmutableList.copyOf(volumeMap.values());
}
@@ -456,26 +376,20 @@ public void setVolumeMapForTesting(Map<String, StorageVolume> map) {
volumeMap.putAll(map);
}
- @VisibleForTesting
- public Map<StorageType, List<StorageVolume>> getVolumeStateMap() {
- return ImmutableMap.copyOf(volumeStateMap);
- }
-
public boolean hasEnoughVolumes() {
// Max number of bad volumes allowed, should have at least
// 1 good volume
boolean hasEnoughVolumes;
- if (maxVolumeFailuresTolerated ==
- StorageVolumeChecker.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
- hasEnoughVolumes = !getVolumesList().isEmpty();
+    if (maxVolumeFailuresTolerated == StorageVolumeChecker.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
+ hasEnoughVolumes = !volumeMap.isEmpty();
} else {
-      hasEnoughVolumes = getFailedVolumesList().size() <=
-          maxVolumeFailuresTolerated;
+ hasEnoughVolumes = failedVolumeMap.size() <= maxVolumeFailuresTolerated;
}
if (!hasEnoughVolumes) {
       LOG.error("Not enough volumes in MutableVolumeSet. DatanodeUUID: {}, VolumeType: {}, " +
               "MaxVolumeFailuresTolerated: {}, ActiveVolumes: {}, FailedVolumes: {}",
           datanodeUuid, volumeType, maxVolumeFailuresTolerated,
-          getVolumesList().size(), getFailedVolumesList().size());
+          volumeMap.size(), failedVolumeMap.size());
}
return hasEnoughVolumes;
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
index 92287bc617a..71cb7af04b7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
@@ -208,6 +208,10 @@ public void testVolumeDeletion(VolumeCheckResult checkResult,
conf.getObject(DatanodeConfiguration.class);
dnConf.setDiskCheckMinGap(Duration.ofMillis(0));
conf.setFromObject(dnConf);
+ File volParentDir =
+ new File(folder.toString(), UUID.randomUUID().toString());
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
+        conf.get(ScmConfigKeys.HDDS_DATANODE_DIR_KEY) + "," + volParentDir.getPath());
DatanodeDetails datanodeDetails =
ContainerTestUtils.createDatanodeDetails();
@@ -218,9 +222,7 @@ public void testVolumeDeletion(VolumeCheckResult checkResult,
StorageVolumeChecker volumeChecker = volumeSet.getVolumeChecker();
volumeChecker.setDelegateChecker(new DummyChecker());
- File volParentDir =
- new File(folder.toString(), UUID.randomUUID().toString());
- volumeSet.addVolume(volParentDir.getPath());
+
File volRootDir = new File(volParentDir, "hdds");
int i = 0;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 1e13c7cc2da..1bf051b27c5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -37,10 +37,10 @@
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.ozone.test.GenericTestUtils.LogCapturer;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -127,25 +127,6 @@ public void testVolumeSetInitialization() throws Exception {
assertNumVolumes(volumeSet, 2, 0);
}
- @Test
- public void testAddVolume() {
-
- assertEquals(2, volumeSet.getVolumesList().size());
-
- assertNumVolumes(volumeSet, 2, 0);
-
- // Add a volume to VolumeSet
- String volume3 = baseDir.resolve("disk3").toString();
- boolean success = volumeSet.addVolume(volume3);
-
- assertTrue(success);
- assertEquals(3, volumeSet.getVolumesList().size());
- assertTrue(checkVolumeExistsInVolumeSet(volume3),
- "AddVolume did not add requested volume to VolumeSet");
-
- assertNumVolumes(volumeSet, 3, 0);
- }
-
@Test
public void testFailVolume() throws Exception {
assertNumVolumes(volumeSet, 2, 0);
@@ -169,30 +150,6 @@ public void testFailVolume() throws Exception {
assertNumVolumes(volumeSet, 1, 1);
}
- @Test
- public void testRemoveVolume() throws Exception {
- assertNumVolumes(volumeSet, 2, 0);
-
- assertEquals(2, volumeSet.getVolumesList().size());
-
- // Remove a volume from VolumeSet
- volumeSet.removeVolume(HddsVolumeUtil.getHddsRoot(volume1));
- assertEquals(1, volumeSet.getVolumesList().size());
-
- assertNumVolumes(volumeSet, 1, 0);
-
- // Attempting to remove a volume which does not exist in VolumeSet should
- // log a warning.
- LogCapturer logs = LogCapturer.captureLogs(MutableVolumeSet.class);
- volumeSet.removeVolume(HddsVolumeUtil.getHddsRoot(volume1));
- assertEquals(1, volumeSet.getVolumesList().size());
- String expectedLogMessage = "Volume : " +
- HddsVolumeUtil.getHddsRoot(volume1) + " does not exist in VolumeSet";
- assertThat(logs.getOutput()).contains(expectedLogMessage);
-
- assertNumVolumes(volumeSet, 1, 0);
- }
-
@Test
public void testVolumeInInconsistentState() throws Exception {
assertNumVolumes(volumeSet, 2, 0);
@@ -213,14 +170,15 @@ public void testVolumeInInconsistentState() throws Exception {
// The new volume is in an inconsistent state as the root dir is
// non-empty but the version file does not exist. Add Volume should
// return false.
- boolean success = volumeSet.addVolume(volume3);
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
+        conf.get(ScmConfigKeys.HDDS_DATANODE_DIR_KEY) + "," + volume3);
+ volumeSet.shutdown();
+ initializeVolumeSet();
- assertFalse(success);
assertEquals(2, volumeSet.getVolumesList().size());
     assertFalse(checkVolumeExistsInVolumeSet(volume3), "AddVolume should fail" +
         " for an inconsistent volume");
- assertNumVolumes(volumeSet, 2, 0);
+ assertNumVolumes(volumeSet, 2, 1);
// Delete volume3
File volume = new File(volume3);
FileUtils.deleteDirectory(volume);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]