This is an automated email from the ASF dual-hosted git repository.
tanvipenumudy pushed a commit to branch HDDS-13177
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-13177 by this push:
new 01cf373244c HDDS-13180. Add replicatedSizeOfFiles to NSSummary (#8568)
01cf373244c is described below
commit 01cf373244c91ea2329135a8873e1b523a9cb682
Author: tanvipenumudy <[email protected]>
AuthorDate: Tue Jul 22 20:24:04 2025 +0530
HDDS-13180. Add replicatedSizeOfFiles to NSSummary (#8568)
---
.../TestReconInsightsForDeletedDirectories.java | 86 +++++++++++++------
.../java/org/apache/hadoop/ozone/TestDataUtil.java | 37 ++++++--
.../hadoop/ozone/om/snapshot/TestOmSnapshot.java | 2 +-
.../om/snapshot/TestOmSnapshotFileSystem.java | 2 +-
.../recon/ReconGuiceServletContextListener.java | 4 +
.../ozone/recon/api/OMDBInsightEndpoint.java | 39 +++++----
.../hadoop/ozone/recon/api/types/NSSummary.java | 13 ++-
.../hadoop/ozone/recon/codec/NSSummaryCodec.java | 5 +-
.../recon/tasks/NSSummaryTaskDbEventHandler.java | 2 +
.../ozone/recon/upgrade/ReconLayoutFeature.java | 3 +-
.../ReplicatedSizeOfFilesUpgradeAction.java | 61 ++++++++++++++
.../ozone/recon/api/TestOmDBInsightEndPoint.java | 96 ++++++++++++++++++---
.../impl/TestReconNamespaceSummaryManagerImpl.java | 6 +-
.../recon/tasks/TestNSSummaryTaskWithFSO.java | 10 ++-
.../TestReplicatedSizeOfFilesUpgradeAction.java | 98 ++++++++++++++++++++++
15 files changed, 395 insertions(+), 69 deletions(-)
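
At its core, this patch adds a second aggregate to NSSummary: alongside the raw size of files, each node now tracks their replicated (on-disk) size, and the deleted-directory insight endpoint reports both. A simplified sketch of the replicated-size math the tests assert through QuotaUtil.getReplicatedSize (illustrative only; the real computation lives in org.apache.hadoop.ozone.om.helpers.QuotaUtil):

    // Illustrative sketch, not the actual QuotaUtil implementation.
    static long ratisReplicatedSize(long dataSize, int factor) {
      return dataSize * factor; // e.g. RATIS/THREE stores three full copies
    }
    static long ecReplicatedSizeApprox(long dataSize, int data, int parity) {
      // e.g. RS-3-2 adds 2 parity chunks per 3 data chunks; for full stripes
      // the footprint is roughly dataSize * (data + parity) / data.
      return dataSize * (data + parity) / data;
    }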
diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
index 487bd116d9a..09c80590e13 100644
--- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
+++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
@@ -17,6 +17,8 @@
package org.apache.hadoop.ozone.recon;
+import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
@@ -29,6 +31,7 @@
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -39,6 +42,9 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.IOUtils;
@@ -52,6 +58,7 @@
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
import org.apache.hadoop.ozone.recon.api.OMDBInsightEndpoint;
import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
@@ -63,7 +70,9 @@
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -77,42 +86,41 @@ public class TestReconInsightsForDeletedDirectories {
LoggerFactory.getLogger(TestReconInsightsForDeletedDirectories.class);
private static MiniOzoneCluster cluster;
- private static FileSystem fs;
- private static String volumeName;
- private static String bucketName;
+ private FileSystem fs;
private static OzoneClient client;
private static ReconService recon;
+ private static OzoneConfiguration conf;
@BeforeAll
public static void init() throws Exception {
- OzoneConfiguration conf = new OzoneConfiguration();
+ conf = new OzoneConfiguration();
conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000,
TimeUnit.MILLISECONDS);
conf.setBoolean(OZONE_ACL_ENABLED, true);
recon = new ReconService(conf);
cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(3)
+ .setNumDatanodes(5)
.addService(recon)
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
- // create a volume and a bucket to be used by OzoneFileSystem
- OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client,
- BucketLayout.FILE_SYSTEM_OPTIMIZED);
- volumeName = bucket.getVolumeName();
- bucketName = bucket.getName();
-
- String rootPath = String.format("%s://%s.%s/",
- OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
-
- // Set the fs.defaultFS and start the filesystem
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
// Set the number of keys to be processed during batch operations.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+ }
- fs = FileSystem.get(conf);
+ /**
+ * Provides a list of replication configurations (RATIS and EC)
+ * to be used for parameterized tests.
+ *
+ * @return List of replication configurations as Arguments.
+ */
+ static List<Arguments> replicationConfigs() {
+ return Arrays.asList(
+ Arguments.of(ReplicationConfig.fromTypeAndFactor(RATIS, THREE)),
+ Arguments.of(new ECReplicationConfig("RS-3-2-1024k"))
+ );
}
@AfterAll
@@ -121,7 +129,6 @@ public static void teardown() {
if (cluster != null) {
cluster.shutdown();
}
- IOUtils.closeQuietly(fs);
}
@AfterEach
@@ -133,6 +140,8 @@ public void cleanup() throws IOException {
fs.delete(fileStatus.getPath(), true);
}
});
+
+ IOUtils.closeQuietly(fs);
}
/**
@@ -144,9 +153,16 @@ public void cleanup() throws IOException {
* ├── ...
* └── file10
*/
- @Test
- public void testGetDeletedDirectoryInfo()
+ @ParameterizedTest
+ @MethodSource("replicationConfigs")
+ public void testGetDeletedDirectoryInfo(ReplicationConfig replicationConfig)
throws Exception {
+ OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
+ new DefaultReplicationConfig(replicationConfig));
+ String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+ bucket.getVolumeName());
+ conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+ fs = FileSystem.get(conf);
// Create a directory structure with 10 files in dir1.
Path dir1 = new Path("/dir1");
@@ -210,6 +226,7 @@ public void testGetDeletedDirectoryInfo()
// Assert that the directory dir1 has 10 sub-files and a total size of 10 bytes.
assertEquals(10, summary.getNumOfFiles());
assertEquals(10, summary.getSizeOfFiles());
+ assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), summary.getReplicatedSizeOfFiles());
}
// Delete the entire directory dir1.
@@ -237,6 +254,7 @@ public void testGetDeletedDirectoryInfo()
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 10.
assertEquals(10, entity.getUnreplicatedDataSize());
+ assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), entity.getReplicatedDataSize());
// Cleanup the tables.
cleanupTables();
@@ -254,9 +272,16 @@ public void testGetDeletedDirectoryInfo()
* │ │ └── file3
*
*/
- @Test
- public void testGetDeletedDirectoryInfoForNestedDirectories()
+ @ParameterizedTest
+ @MethodSource("replicationConfigs")
+ public void testGetDeletedDirectoryInfoForNestedDirectories(ReplicationConfig replicationConfig)
throws Exception {
+ OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
+ new DefaultReplicationConfig(replicationConfig));
+ String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+ bucket.getVolumeName());
+ conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+ fs = FileSystem.get(conf);
// Create a directory structure with 10 files and 3 nested directories.
Path path = new Path("/dir1/dir2/dir3");
@@ -326,6 +351,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories()
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 3.
assertEquals(3, entity.getUnreplicatedDataSize());
+ assertEquals(QuotaUtil.getReplicatedSize(3, replicationConfig), entity.getReplicatedDataSize());
// Cleanup the tables.
cleanupTables();
@@ -352,9 +378,18 @@ public void testGetDeletedDirectoryInfoForNestedDirectories()
* ├── ...
* └── file10
*/
- @Test
- public void testGetDeletedDirectoryInfoWithMultipleSubdirectories()
+ @ParameterizedTest
+ @MethodSource("replicationConfigs")
+ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories(ReplicationConfig replicationConfig)
throws Exception {
+ OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
+ new DefaultReplicationConfig(replicationConfig));
+ String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+ bucket.getVolumeName());
+ bucket.getVolumeName());
+ // Set the fs.defaultFS and start the filesystem
+ conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+ fs = FileSystem.get(conf);
+
int numSubdirectories = 10;
int filesPerSubdirectory = 10;
@@ -388,6 +423,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories()
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 100.
assertEquals(100, entity.getUnreplicatedDataSize());
+ assertEquals(QuotaUtil.getReplicatedSize(100, replicationConfig), entity.getReplicatedDataSize());
// Cleanup the tables.
cleanupTables();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
index a30fc356057..7ac80ef4058 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
@@ -33,6 +33,7 @@
import java.util.Scanner;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
@@ -65,7 +66,21 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client,
}
public static OzoneBucket createVolumeAndBucket(OzoneClient client,
- String volumeName, String bucketName, BucketLayout bucketLayout)
+ String volumeName, String bucketName, BucketLayout bucketLayout) throws IOException {
+ BucketArgs omBucketArgs;
+ BucketArgs.Builder builder = BucketArgs.newBuilder();
+ builder.setStorageType(StorageType.DISK);
+ if (bucketLayout != null) {
+ builder.setBucketLayout(bucketLayout);
+ }
+ omBucketArgs = builder.build();
+
+ return createVolumeAndBucket(client, volumeName, bucketName,
+ omBucketArgs);
+ }
+
+ public static OzoneBucket createVolumeAndBucket(OzoneClient client,
+ String volumeName, String bucketName, BucketLayout bucketLayout, DefaultReplicationConfig replicationConfig)
throws IOException {
BucketArgs omBucketArgs;
BucketArgs.Builder builder = BucketArgs.newBuilder();
@@ -73,6 +88,10 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client,
if (bucketLayout != null) {
builder.setBucketLayout(bucketLayout);
}
+
+ if (replicationConfig != null) {
+ builder.setDefaultReplicationConfig(replicationConfig);
+ }
omBucketArgs = builder.build();
return createVolumeAndBucket(client, volumeName, bucketName,
@@ -197,18 +216,26 @@ public static OzoneBucket createLinkedBucket(OzoneClient client, String vol, Str
public static OzoneBucket createVolumeAndBucket(OzoneClient client,
BucketLayout bucketLayout)
throws IOException {
- return createVolumeAndBucket(client, bucketLayout, false);
+ return createVolumeAndBucket(client, bucketLayout, null, false);
}
- public static OzoneBucket createVolumeAndBucket(OzoneClient client,
- BucketLayout bucketLayout, boolean createLinkedBucket) throws IOException {
+ public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout,
+ DefaultReplicationConfig replicationConfig)
+ throws IOException {
+ return createVolumeAndBucket(client, bucketLayout, replicationConfig, false);
+ }
+
+ public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout,
+ DefaultReplicationConfig replicationConfig,
+ boolean createLinkedBucket)
+ throws IOException {
final int attempts = 5;
for (int i = 0; i < attempts; i++) {
try {
String volumeName = "volume" +
RandomStringUtils.secure().nextNumeric(5);
String bucketName = "bucket" +
RandomStringUtils.secure().nextNumeric(5);
OzoneBucket ozoneBucket = createVolumeAndBucket(client, volumeName,
bucketName,
- bucketLayout);
+ bucketLayout, replicationConfig);
if (createLinkedBucket) {
String targetBucketName = ozoneBucket.getName() + RandomStringUtils.secure().nextNumeric(5);
ozoneBucket = createLinkedBucket(client, volumeName, bucketName, targetBucketName);
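
For reference, a minimal sketch of how the new TestDataUtil overloads are invoked from the parameterized tests above (identifiers as in this patch; pass null to keep the old default-replication behavior):

    OzoneBucket ratisBucket = TestDataUtil.createVolumeAndBucket(client,
        BucketLayout.FILE_SYSTEM_OPTIMIZED,
        new DefaultReplicationConfig(ReplicationConfig.fromTypeAndFactor(RATIS, THREE)));
    OzoneBucket ecBucket = TestDataUtil.createVolumeAndBucket(client,
        BucketLayout.FILE_SYSTEM_OPTIMIZED,
        new DefaultReplicationConfig(new ECReplicationConfig("RS-3-2-1024k")));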
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
index 366f61990f4..69441f580d7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
@@ -238,7 +238,7 @@ private void init() throws Exception {
cluster.waitForClusterToBeReady();
client = cluster.newClient();
// create a volume and a bucket to be used by OzoneFileSystem
- ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBucket);
+ ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBucket);
if (createLinkedBucket) {
this.linkedBuckets.put(ozoneBucket.getName(), ozoneBucket.getSourceBucket());
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java
index 7db6c8d41db..fca8b137b72 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java
@@ -132,7 +132,7 @@ public void setupFsClient() throws IOException {
writeClient = objectStore.getClientProxy().getOzoneManagerClient();
ozoneManager = cluster().getOzoneManager();
- OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBuckets);
+ OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBuckets);
if (createLinkedBuckets) {
linkedBucketMaps.put(bucket.getName(), bucket.getSourceBucket());
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java
index d58e2a38381..e02971be6eb 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java
@@ -33,6 +33,10 @@ public Injector getInjector() {
return injector;
}
+ public static Injector getStaticInjector() {
+ return injector;
+ }
+
static void setInjector(Injector inj) {
injector = inj;
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index 048ab0ebc4e..840c14b12ac 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -42,8 +42,10 @@
import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
+import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.Deque;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
@@ -58,6 +60,7 @@
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -646,9 +649,9 @@ private void getPendingForDeletionDirInfo(
keyEntityInfo.setKey(omKeyInfo.getFileName());
keyEntityInfo.setPath(createPath(omKeyInfo));
keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime());
- keyEntityInfo.setSize(
- fetchSizeForDeletedDirectory(omKeyInfo.getObjectID()));
- keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize());
+ Pair<Long, Long> sizeInfo = fetchSizeForDeletedDirectory(omKeyInfo.getObjectID());
+ keyEntityInfo.setSize(sizeInfo.getLeft());
+ keyEntityInfo.setReplicatedSize(sizeInfo.getRight());
keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig());
pendingForDeletionKeyInfo.setUnreplicatedDataSize(
pendingForDeletionKeyInfo.getUnreplicatedDataSize() +
@@ -674,24 +677,32 @@ private void getPendingForDeletionDirInfo(
}
/**
- * Given an object ID, return total data size (no replication)
- * under this object. Note:- This method is RECURSIVE.
+ * Given an object ID, return total data size as a pair of (total size, total replicated size)
+ * under this object. Note: the traversal is iterative, via an explicit stack.
*
* @param objectId the object's ID
- * @return total used data size in bytes
+ * @return pair of (total used data size, total replicated data size), in bytes
* @throws IOException ioEx
*/
- protected long fetchSizeForDeletedDirectory(long objectId)
+ protected Pair<Long, Long> fetchSizeForDeletedDirectory(long objectId)
throws IOException {
- NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(objectId);
- if (nsSummary == null) {
- return 0L;
- }
- long totalSize = nsSummary.getSizeOfFiles();
- for (long childId : nsSummary.getChildDir()) {
- totalSize += fetchSizeForDeletedDirectory(childId);
+ long totalSize = 0;
+ long totalReplicatedSize = 0;
+ Deque<Long> stack = new ArrayDeque<>();
+ stack.push(objectId);
+
+ while (!stack.isEmpty()) {
+ long currentId = stack.pop();
+ NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(currentId);
+ if (nsSummary != null) {
+ totalSize += nsSummary.getSizeOfFiles();
+ totalReplicatedSize += nsSummary.getReplicatedSizeOfFiles();
+ for (long childId : nsSummary.getChildDir()) {
+ stack.push(childId);
+ }
+ }
}
- return totalSize;
+ return Pair.of(totalSize, totalReplicatedSize);
}
/** This method retrieves the set of directories pending for deletion.
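
Two points about the rewritten fetchSizeForDeletedDirectory: it computes both aggregates in a single traversal, and the explicit ArrayDeque replaces recursion, so arbitrarily deep directory trees cannot overflow the call stack. A sketch of consuming the pair-valued result (names as in the hunk above; dirObjectId is a caller-supplied ID):

    Pair<Long, Long> sizes = fetchSizeForDeletedDirectory(dirObjectId);
    long unreplicated = sizes.getLeft();  // sum of sizeOfFiles over the subtree
    long replicated = sizes.getRight();   // sum of replicatedSizeOfFiles over the subtree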
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
index f20fdc764af..24b43716a93 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
@@ -31,24 +31,27 @@
public class NSSummary {
private int numOfFiles;
private long sizeOfFiles;
+ private long replicatedSizeOfFiles;
private int[] fileSizeBucket;
private Set<Long> childDir;
private String dirName;
private long parentId = 0;
public NSSummary() {
- this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS],
+ this(0, 0L, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS],
new HashSet<>(), "", 0);
}
public NSSummary(int numOfFiles,
long sizeOfFiles,
+ long replicatedSizeOfFiles,
int[] bucket,
Set<Long> childDir,
String dirName,
long parentId) {
this.numOfFiles = numOfFiles;
this.sizeOfFiles = sizeOfFiles;
+ this.replicatedSizeOfFiles = replicatedSizeOfFiles;
setFileSizeBucket(bucket);
this.childDir = childDir;
this.dirName = dirName;
@@ -63,6 +66,10 @@ public long getSizeOfFiles() {
return sizeOfFiles;
}
+ public long getReplicatedSizeOfFiles() {
+ return replicatedSizeOfFiles;
+ }
+
public int[] getFileSizeBucket() {
return Arrays.copyOf(fileSizeBucket, ReconConstants.NUM_OF_FILE_SIZE_BINS);
}
@@ -83,6 +90,10 @@ public void setSizeOfFiles(long sizeOfFiles) {
this.sizeOfFiles = sizeOfFiles;
}
+ public void setReplicatedSizeOfFiles(long replicatedSizeOfFiles) {
+ this.replicatedSizeOfFiles = replicatedSizeOfFiles;
+ }
+
public void setFileSizeBucket(int[] fileSizeBucket) {
this.fileSizeBucket = Arrays.copyOf(fileSizeBucket,
ReconConstants.NUM_OF_FILE_SIZE_BINS);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
index 92068988d76..d1967a35f77 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
@@ -67,11 +67,12 @@ public byte[] toPersistedFormatImpl(NSSummary object) throws IOException {
+ (numOfChildDirs + 1) * Long.BYTES // 1 long field for parentId +
list size
+ Short.BYTES // 2 dummy shorts to track length
+ dirName.length // directory name length
- + Long.BYTES; // Added space for parentId serialization
+ + 2 * Long.BYTES; // Added space for parentId serialization and
replicated size of files
ByteArrayOutputStream out = new ByteArrayOutputStream(resSize);
out.write(integerCodec.toPersistedFormat(object.getNumOfFiles()));
out.write(longCodec.toPersistedFormat(object.getSizeOfFiles()));
+ out.write(longCodec.toPersistedFormat(object.getReplicatedSizeOfFiles()));
out.write(shortCodec.toPersistedFormat(
(short) ReconConstants.NUM_OF_FILE_SIZE_BINS));
int[] fileSizeBucket = object.getFileSizeBucket();
@@ -95,6 +96,7 @@ public NSSummary fromPersistedFormatImpl(byte[] rawData) throws IOException {
NSSummary res = new NSSummary();
res.setNumOfFiles(in.readInt());
res.setSizeOfFiles(in.readLong());
+ res.setReplicatedSizeOfFiles(in.readLong());
short len = in.readShort();
assert (len == (short) ReconConstants.NUM_OF_FILE_SIZE_BINS);
int[] fileSizeBucket = new int[len];
@@ -136,6 +138,7 @@ public NSSummary copyObject(NSSummary object) {
NSSummary copy = new NSSummary();
copy.setNumOfFiles(object.getNumOfFiles());
copy.setSizeOfFiles(object.getSizeOfFiles());
+ copy.setReplicatedSizeOfFiles(object.getReplicatedSizeOfFiles());
copy.setFileSizeBucket(object.getFileSizeBucket());
copy.setChildDir(object.getChildDir());
copy.setDirName(object.getDirName());
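
NSSummaryCodec reads and writes fields positionally, so the new long must occupy the same slot on both paths. The assumed record layout after this change (the head is taken from the write path above; the tail order is inferred from the existing codec):

    // int   numOfFiles
    // long  sizeOfFiles
    // long  replicatedSizeOfFiles   <-- new third field
    // short NUM_OF_FILE_SIZE_BINS, followed by the int[] fileSizeBucket
    // child-dir count and IDs, dirName length and bytes, long parentId

Records persisted before this change lack the extra long and cannot be decoded under the new layout, which is why the REPLICATED_SIZE_OF_FILES upgrade action below rebuilds the NSSummary tree instead of patching rows in place.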
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
index 755d966b832..85a926df4a1 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
@@ -91,6 +91,7 @@ protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map<Long,
int[] fileBucket = nsSummary.getFileSizeBucket();
nsSummary.setNumOfFiles(nsSummary.getNumOfFiles() + 1);
nsSummary.setSizeOfFiles(nsSummary.getSizeOfFiles() + keyInfo.getDataSize());
+ nsSummary.setReplicatedSizeOfFiles(nsSummary.getReplicatedSizeOfFiles() + keyInfo.getReplicatedSize());
int binIndex = ReconUtils.getFileSizeBinIndex(keyInfo.getDataSize());
++fileBucket[binIndex];
@@ -162,6 +163,7 @@ protected void handleDeleteKeyEvent(OmKeyInfo keyInfo,
// we still need children dir IDs info
nsSummary.setNumOfFiles(nsSummary.getNumOfFiles() - 1);
nsSummary.setSizeOfFiles(nsSummary.getSizeOfFiles() - keyInfo.getDataSize());
+ nsSummary.setReplicatedSizeOfFiles(nsSummary.getReplicatedSizeOfFiles() - keyInfo.getReplicatedSize());
--fileBucket[binIndex];
nsSummary.setFileSizeBucket(fileBucket);
nsSummaryMap.put(parentObjectId, nsSummary);
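
The put and delete handlers must stay symmetric for the aggregates to remain correct: whatever a put adds to the parent's replicatedSizeOfFiles, the matching delete subtracts, so a put followed by a delete of the same key is a no-op. In sketch form (same accessors as the hunks above):

    // on put:    replicatedSizeOfFiles += keyInfo.getReplicatedSize();
    // on delete: replicatedSizeOfFiles -= keyInfo.getReplicatedSize();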
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java
index 2b4569d449e..050e1b26d85 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java
@@ -31,7 +31,8 @@ public enum ReconLayoutFeature {
// Represents the starting point for Recon's layout versioning system.
INITIAL_VERSION(0, "Recon Layout Versioning Introduction"),
TASK_STATUS_STATISTICS(1, "Recon Task Status Statistics Tracking
Introduced"),
- UNHEALTHY_CONTAINER_REPLICA_MISMATCH(2, "Adding replica mismatch state to
the unhealthy container table");
+ UNHEALTHY_CONTAINER_REPLICA_MISMATCH(2, "Adding replica mismatch state to
the unhealthy container table"),
+ REPLICATED_SIZE_OF_FILES(3, "Adds replicatedSizeOfFiles to NSSummary");
private final int version;
private final String description;
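
Each constant pairs a monotonically increasing layout version with a description, and the upgrade framework is expected to finalize every feature whose version exceeds Recon's current layout version, in ascending order. A hypothetical driver loop (getVersion() and currentLayoutVersion are assumptions for illustration, not part of this patch):

    for (ReconLayoutFeature feature : ReconLayoutFeature.values()) {
      if (feature.getVersion() > currentLayoutVersion) { // hypothetical accessor and variable
        // run the feature's registered FINALIZE action, e.g.
        // ReplicatedSizeOfFilesUpgradeAction for REPLICATED_SIZE_OF_FILES
      }
    }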
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java
new file mode 100644
index 00000000000..3ecfac465f1
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.upgrade;
+
+import com.google.inject.Injector;
+import javax.sql.DataSource;
+import org.apache.hadoop.ozone.recon.ReconGuiceServletContextListener;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Upgrade action for the REPLICATED_SIZE_OF_FILES layout feature.
+ * The action triggers a full rebuild of the NSSummary tree, ensuring that the
+ * new replicatedSizeOfFiles field is correctly populated for all objects.
+ */
+@UpgradeActionRecon(feature = ReconLayoutFeature.REPLICATED_SIZE_OF_FILES,
+ type = ReconUpgradeAction.UpgradeActionType.FINALIZE)
+public class ReplicatedSizeOfFilesUpgradeAction implements ReconUpgradeAction {
+
+ private static final Logger LOG = LoggerFactory.getLogger(ReplicatedSizeOfFilesUpgradeAction.class);
+
+ @Override
+ public void execute(DataSource dataSource) {
+ try {
+ Injector injector = ReconGuiceServletContextListener.getStaticInjector();
+ if (injector == null) {
+ throw new IllegalStateException("Guice injector is not initialized.
Cannot perform NSSummary rebuild.");
+ }
+ ReconNamespaceSummaryManager nsSummaryManager = injector.getInstance(ReconNamespaceSummaryManager.class);
+ ReconOMMetadataManager omMetadataManager = injector.getInstance(ReconOMMetadataManager.class);
+ LOG.info("Starting full rebuild of NSSummary for REPLICATED_SIZE_OF_FILES upgrade...");
+ nsSummaryManager.rebuildNSSummaryTree(omMetadataManager);
+ LOG.info("Completed full rebuild of NSSummary for
REPLICATED_SIZE_OF_FILES upgrade.");
+ } catch (Exception e) {
+ LOG.error("Error during NSSummary rebuild for REPLICATED_SIZE_OF_FILES
upgrade.", e);
+ throw new RuntimeException("Failed to rebuild NSSummary during upgrade",
e);
+ }
+ }
+
+ @Override
+ public UpgradeActionType getType() {
+ return UpgradeActionType.FINALIZE;
+ }
+}
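
The @UpgradeActionRecon annotation registers the action declaratively, so the upgrade framework can discover and run it when finalizing REPLICATED_SIZE_OF_FILES. A minimal sketch of the expected invocation (dataSource is a placeholder; this particular action ignores it and resolves its dependencies through the static Guice injector):

    ReconUpgradeAction action = new ReplicatedSizeOfFilesUpgradeAction();
    assert action.getType() == ReconUpgradeAction.UpgradeActionType.FINALIZE;
    action.execute(dataSource); // triggers the full NSSummary rebuild

This mirrors the unit tests added at the end of this patch, which drive execute(...) with a mocked static injector.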
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
index 7cd08ff009a..5721038b249 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
@@ -39,6 +39,8 @@
import java.nio.file.Path;
import java.sql.Timestamp;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
@@ -49,6 +51,7 @@
import java.util.stream.Collectors;
import javax.ws.rs.core.Response;
import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -64,6 +67,7 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.recon.ReconTestInjector;
import org.apache.hadoop.ozone.recon.ReconUtils;
@@ -88,9 +92,12 @@
import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS;
import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
/**
* Unit test for OmDBInsightEndPoint.
@@ -112,9 +119,7 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest {
private Random random = new Random();
private OzoneConfiguration ozoneConfiguration;
private Set<Long> generatedIds = new HashSet<>();
-
private static final String VOLUME_ONE = "volume1";
-
private static final String OBS_BUCKET = "obs-bucket";
private static final String FSO_BUCKET = "fso-bucket";
private static final String EMPTY_OBS_BUCKET = "empty-obs-bucket";
@@ -256,6 +261,30 @@ public TestOmDBInsightEndPoint() {
super();
}
+ public static Collection<Object[]> replicationConfigValues() {
+ return Arrays.asList(new Object[][]{
+ {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.THREE)},
+ {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE)},
+ {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null,
+ toProto(3, 2, ECReplicationConfig.EcCodec.RS, 1024))},
+ {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null,
+ toProto(6, 3, ECReplicationConfig.EcCodec.RS, 1024))},
+ {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null,
+ toProto(10, 4, ECReplicationConfig.EcCodec.XOR, 4096))}
+ });
+ }
+
+ public static HddsProtos.ECReplicationConfig toProto(int data, int parity, ECReplicationConfig.EcCodec codec,
+ int ecChunkSize) {
+ return HddsProtos.ECReplicationConfig.newBuilder()
+ .setData(data)
+ .setParity(parity)
+ .setCodec(codec.toString())
+ .setEcChunkSize(ecChunkSize)
+ .build();
+ }
+
private long generateUniqueRandomLong() {
long newValue;
do {
@@ -318,6 +347,26 @@ public void setUp() throws Exception {
nsSummaryTaskWithFSO.reprocessWithFSO(reconOMMetadataManager);
}
+ /**
+ * Releases resources (network sockets, database files) after each test run.
+ * This is critical to prevent resource leaks between tests, which would otherwise cause "Too many open files" errors.
+ */
+ @AfterEach
+ public void tearDown() throws Exception {
+
+ if (ozoneStorageContainerManager != null) {
+ ozoneStorageContainerManager.stop();
+ }
+
+ if (reconOMMetadataManager != null) {
+ reconOMMetadataManager.stop();
+ }
+
+ if (omMetadataManager != null) {
+ omMetadataManager.stop();
+ }
+ }
+
@SuppressWarnings("methodlength")
private void setUpOmData() throws Exception {
List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
@@ -1391,14 +1440,24 @@ public void testGetDeletedKeysWithBothPrevKeyAndStartPrefixProvided()
private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
String keyName, boolean isFile) {
+ return buildOmKeyInfo(volumeName, bucketName, keyName, isFile,
+ StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE));
+ }
+
+ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+ String keyName, boolean isFile, ReplicationConfig replicationConfig) {
+ return buildOmKeyInfo(volumeName, bucketName, keyName, isFile, replicationConfig);
+ }
+
+ private OmKeyInfo buildOmKeyInfo(String volumeName, String bucketName,
+ String keyName, boolean isFile, ReplicationConfig replicationConfig) {
return new OmKeyInfo.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
.setKeyName(keyName)
.setFile(isFile)
.setObjectID(generateUniqueRandomLong())
- .setReplicationConfig(StandaloneReplicationConfig
- .getInstance(HddsProtos.ReplicationFactor.ONE))
+ .setReplicationConfig(replicationConfig)
.setDataSize(random.nextLong())
.build();
}
@@ -1503,15 +1562,17 @@ public void testGetDeletedDirInfo() throws Exception {
keyInsightInfoResp.getLastKey());
}
- @Test
- public void testGetDirectorySizeInfo() throws Exception {
+ @ParameterizedTest
+ @MethodSource("replicationConfigValues")
+ public void testGetDirectorySizeInfo(ReplicationConfig replicationConfig) throws Exception {
OmKeyInfo omKeyInfo1 =
- getOmKeyInfo("sampleVol", "bucketOne", "dir1", false);
+ getOmKeyInfo("sampleVol", "bucketOne", "dir1", false,
replicationConfig);
OmKeyInfo omKeyInfo2 =
- getOmKeyInfo("sampleVol", "bucketTwo", "dir2", false);
+ getOmKeyInfo("sampleVol", "bucketTwo", "dir2", false,
replicationConfig);
OmKeyInfo omKeyInfo3 =
- getOmKeyInfo("sampleVol", "bucketThree", "dir3", false);
+ getOmKeyInfo("sampleVol", "bucketThree", "dir3", false,
+ replicationConfig);
// Add 3 entries to deleted dir table for directory dir1, dir2 and dir3
// having object id 1, 2 and 3 respectively
@@ -1525,11 +1586,11 @@ public void testGetDirectorySizeInfo() throws Exception {
// Prepare NS summary data and populate the table
Table<Long, NSSummary> table = omdbInsightEndpoint.getNsSummaryTable();
// Set size of files to 5 for directory object id 1
- table.put(omKeyInfo1.getObjectID(), getNsSummary(5L));
+ table.put(omKeyInfo1.getObjectID(), getNsSummary(5L, replicationConfig));
// Set size of files to 6 for directory object id 2
- table.put(omKeyInfo2.getObjectID(), getNsSummary(6L));
+ table.put(omKeyInfo2.getObjectID(), getNsSummary(6L, replicationConfig));
// Set size of files to 7 for directory object id 3
- table.put(omKeyInfo3.getObjectID(), getNsSummary(7L));
+ table.put(omKeyInfo3.getObjectID(), getNsSummary(7L, replicationConfig));
Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(-1, "");
KeyInsightInfoResponse keyInsightInfoResp =
@@ -1540,15 +1601,23 @@ public void testGetDirectorySizeInfo() throws Exception {
// Assert the total size under directory dir1 is 5L
assertEquals(5L,
keyInsightInfoResp.getDeletedDirInfoList().get(0).getSize());
+ assertEquals(QuotaUtil.getReplicatedSize(5L, replicationConfig),
+ keyInsightInfoResp.getDeletedDirInfoList().get(0).getReplicatedSize());
// Assert the total size under directory dir2 is 6L
assertEquals(6L,
keyInsightInfoResp.getDeletedDirInfoList().get(1).getSize());
+ assertEquals(QuotaUtil.getReplicatedSize(6L, replicationConfig),
+ keyInsightInfoResp.getDeletedDirInfoList().get(1).getReplicatedSize());
// Assert the total size under directory dir3 is 7L
assertEquals(7L,
keyInsightInfoResp.getDeletedDirInfoList().get(2).getSize());
+ assertEquals(QuotaUtil.getReplicatedSize(7L, replicationConfig),
+ keyInsightInfoResp.getDeletedDirInfoList().get(2).getReplicatedSize());
// Assert the total of all the deleted directories is 18L
assertEquals(18L, keyInsightInfoResp.getUnreplicatedDataSize());
+ assertEquals(QuotaUtil.getReplicatedSize(18L, replicationConfig),
+ keyInsightInfoResp.getReplicatedDataSize());
}
@Test
@@ -2014,9 +2083,10 @@ public void testListKeysLegacyBucketWithFSEnabledAndPagination() {
assertEquals("", listKeysResponse.getLastKey());
}
- private NSSummary getNsSummary(long size) {
+ private NSSummary getNsSummary(long size, ReplicationConfig replicationConfig) {
NSSummary summary = new NSSummary();
summary.setSizeOfFiles(size);
+ summary.setReplicatedSizeOfFiles(QuotaUtil.getReplicatedSize(size, replicationConfig));
return summary;
}
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
index c0931ba6d35..e33bee04256 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
@@ -112,9 +112,9 @@ public void testInitNSSummaryTable() throws IOException {
private void putThreeNSMetadata() throws IOException {
HashMap<Long, NSSummary> hmap = new HashMap<>();
- hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1", -1));
- hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1));
- hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1));
+ hmap.put(1L, new NSSummary(1, 2, 2 * 3, testBucket, TEST_CHILD_DIR, "dir1", -1));
+ hmap.put(2L, new NSSummary(3, 4, 4 * 3, testBucket, TEST_CHILD_DIR, "dir2", -1));
+ hmap.put(3L, new NSSummary(5, 6, 6 * 3, testBucket, TEST_CHILD_DIR, "dir3", -1));
RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
for (Map.Entry entry: hmap.entrySet()) {
reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
index 75fb468c5a9..aae0b5d061a 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.recon.tasks;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
@@ -34,6 +35,7 @@
import java.util.List;
import java.util.Set;
import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -483,16 +485,16 @@ void testProcessWithFSOFlushAfterThresholdAndFailureOfLastElement()
Mockito.when(event4.getAction()).thenReturn(OMDBUpdateEvent.OMDBUpdateAction.PUT);
OmKeyInfo keyInfo1 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(2).setKeyName("key1")
- .setBucketName("bucket1")
+ .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
.setDataSize(1024).setVolumeName("volume1").build();
OmKeyInfo keyInfo2 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2")
- .setBucketName("bucket1")
+ .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
.setDataSize(1024).setVolumeName("volume1").build();
OmKeyInfo keyInfo3 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2")
- .setBucketName("bucket1")
+ .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
.setDataSize(1024).setVolumeName("volume1").build();
OmKeyInfo keyInfo4 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2")
- .setBucketName("bucket1")
+ .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
.setDataSize(1024).setVolumeName("volume1").build();
Mockito.when(event1.getValue()).thenReturn(keyInfo1);
Mockito.when(event2.getValue()).thenReturn(keyInfo2);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java
new file mode 100644
index 00000000000..46af00e9c23
--- /dev/null
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.upgrade;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import com.google.inject.Injector;
+import javax.sql.DataSource;
+import org.apache.hadoop.ozone.recon.ReconGuiceServletContextListener;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.MockedStatic;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+/**
+ * Test class for ReplicatedSizeOfFilesUpgradeAction.
+ */
+@ExtendWith(MockitoExtension.class)
+public class TestReplicatedSizeOfFilesUpgradeAction {
+
+ private ReplicatedSizeOfFilesUpgradeAction upgradeAction;
+ @Mock
+ private DataSource mockDataSource;
+ @Mock
+ private Injector mockInjector;
+ @Mock
+ private ReconNamespaceSummaryManager mockNsSummaryManager;
+ @Mock
+ private ReconOMMetadataManager mockOmMetadataManager;
+
+ @BeforeEach
+ public void setUp() {
+ upgradeAction = new ReplicatedSizeOfFilesUpgradeAction();
+ }
+
+ @Test
+ public void testExecuteSuccessfullyRebuildsNSSummary() {
+ try (MockedStatic<ReconGuiceServletContextListener> mockStaticContext =
+ mockStatic(ReconGuiceServletContextListener.class)) {
+ mockStaticContext.when(ReconGuiceServletContextListener::getStaticInjector).thenReturn(mockInjector);
+ when(mockInjector.getInstance(ReconNamespaceSummaryManager.class)).thenReturn(mockNsSummaryManager);
+ when(mockInjector.getInstance(ReconOMMetadataManager.class)).thenReturn(mockOmMetadataManager);
+
+ upgradeAction.execute(mockDataSource);
+
+ // Verify that rebuildNSSummaryTree was called exactly once.
+ verify(mockNsSummaryManager, times(1)).rebuildNSSummaryTree(mockOmMetadataManager);
+ }
+ }
+
+ @Test
+ public void testExecuteThrowsRuntimeExceptionOnRebuildFailure() {
+ try (MockedStatic<ReconGuiceServletContextListener> mockStaticContext =
+ mockStatic(ReconGuiceServletContextListener.class)) {
+ mockStaticContext.when(ReconGuiceServletContextListener::getStaticInjector).thenReturn(mockInjector);
+ when(mockInjector.getInstance(ReconNamespaceSummaryManager.class)).thenReturn(mockNsSummaryManager);
+ when(mockInjector.getInstance(ReconOMMetadataManager.class)).thenReturn(mockOmMetadataManager);
+
+ // Simulate a failure during the rebuild process
+ doThrow(new RuntimeException("Simulated rebuild
error")).when(mockNsSummaryManager)
+ .rebuildNSSummaryTree(any(ReconOMMetadataManager.class));
+
+ RuntimeException thrown = assertThrows(RuntimeException.class, () -> upgradeAction.execute(mockDataSource));
+ assertEquals("Failed to rebuild NSSummary during upgrade", thrown.getMessage());
+ }
+ }
+
+ @Test
+ public void testGetTypeReturnsFinalize() {
+ assertEquals(ReconUpgradeAction.UpgradeActionType.FINALIZE, upgradeAction.getType());
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]