This is an automated email from the ASF dual-hosted git repository.

arafat2198 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 33d4744f0ce HDDS-13758. Add replicatedSizeOfFiles to NSSummary to Calculate DiskUsage (#9127)
33d4744f0ce is described below

commit 33d4744f0ce4446ff3e23774e7bcb78c82448a2a
Author: Priyesh Karatha <[email protected]>
AuthorDate: Fri Oct 17 13:53:34 2025 +0530

    HDDS-13758. Add replicatedSizeOfFiles to NSSummary to Calculate DiskUsage (#9127)
---
 .../TestReconInsightsForDeletedDirectories.java    |  84 +++++--
 .../java/org/apache/hadoop/ozone/TestDataUtil.java |  37 +++-
 .../hadoop/ozone/om/snapshot/TestOmSnapshot.java   |   2 +-
 .../om/snapshot/TestOmSnapshotFileSystem.java      |   2 +-
 .../ozone/recon/api/OMDBInsightEndpoint.java       |  19 +-
 .../recon/api/handlers/BucketEntityHandler.java    |  24 +-
 .../recon/api/handlers/DirectoryEntityHandler.java |  24 +-
 .../ozone/recon/api/handlers/FSOBucketHandler.java |  44 +---
 .../recon/api/handlers/LegacyBucketHandler.java    |  60 +----
 .../ozone/recon/api/handlers/OBSBucketHandler.java |  36 +--
 .../hadoop/ozone/recon/api/types/NSSummary.java    |  14 +-
 .../hadoop/ozone/recon/codec/NSSummaryCodec.java   |  29 ++-
 .../recon/tasks/NSSummaryTaskDbEventHandler.java   |  47 +++-
 .../ozone/recon/upgrade/ReconLayoutFeature.java    |   3 +-
 .../ReplicatedSizeOfFilesUpgradeAction.java        |  66 ++++++
 .../recon/api/TestNSSummaryEndpointWithFSO.java    | 244 +++++++++++++++++++--
 .../recon/api/TestNSSummaryEndpointWithLegacy.java | 192 ++++++++++++++--
 .../ozone/recon/api/TestOmDBInsightEndPoint.java   |  87 ++++++--
 .../hadoop/ozone/recon/common/CommonUtils.java     |  14 +-
 .../impl/TestReconNamespaceSummaryManagerImpl.java |   6 +-
 .../recon/tasks/TestNSSummaryTaskWithFSO.java      |  10 +-
 .../TestReplicatedSizeOfFilesUpgradeAction.java    |  96 ++++++++
 22 files changed, 853 insertions(+), 287 deletions(-)

diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
index 5e6fe7e4269..793fe07ea52 100644
--- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
+++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
@@ -17,6 +17,8 @@
 
 package org.apache.hadoop.ozone.recon;
 
+import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
@@ -29,6 +31,7 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -40,6 +43,9 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.utils.IOUtils;
@@ -53,6 +59,7 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
 import org.apache.hadoop.ozone.recon.api.OMDBInsightEndpoint;
 import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
@@ -64,7 +71,9 @@
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -78,40 +87,41 @@ public class TestReconInsightsForDeletedDirectories {
       LoggerFactory.getLogger(TestReconInsightsForDeletedDirectories.class);
 
   private static MiniOzoneCluster cluster;
-  private static FileSystem fs;
+  private FileSystem fs;
   private static OzoneClient client;
   private static ReconService recon;
+  private static OzoneConfiguration conf;
 
   @BeforeAll
   public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
+    conf = new OzoneConfiguration();
     conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000);
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000,
         TimeUnit.MILLISECONDS);
     conf.setBoolean(OZONE_ACL_ENABLED, true);
     recon = new ReconService(conf);
     cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
+        .setNumDatanodes(5)
         .addService(recon)
         .build();
     cluster.waitForClusterToBeReady();
     client = cluster.newClient();
 
-    // create a volume and a bucket to be used by OzoneFileSystem
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client,
-        BucketLayout.FILE_SYSTEM_OPTIMIZED);
-    String volumeName = bucket.getVolumeName();
-    String bucketName = bucket.getName();
-
-    String rootPath = String.format("%s://%s.%s/",
-        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
-
-    // Set the fs.defaultFS and start the filesystem
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
     // Set the number of keys to be processed during batch operate.
     conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+  }
 
-    fs = FileSystem.get(conf);
+  /**
+   * Provides a list of replication configurations (RATIS and EC)
+   * to be used for parameterized tests.
+   *
+   * @return List of replication configurations as Arguments.
+   */
+  static List<Arguments> replicationConfigs() {
+    return Arrays.asList(
+        Arguments.of(ReplicationConfig.fromTypeAndFactor(RATIS, THREE)),
+        Arguments.of(new ECReplicationConfig("RS-3-2-1024k"))
+    );
   }
 
   @AfterAll
@@ -120,7 +130,6 @@ public static void teardown() {
     if (cluster != null) {
       cluster.shutdown();
     }
-    IOUtils.closeQuietly(fs);
   }
 
   @AfterEach
@@ -132,6 +141,8 @@ public void cleanup() throws IOException {
         fs.delete(fileStatus.getPath(), true);
       }
     });
+
+    IOUtils.closeQuietly(fs);
   }
 
   /**
@@ -143,9 +154,16 @@ public void cleanup() throws IOException {
    *      ├── ...
    *      └── file10
    */
-  @Test
-  public void testGetDeletedDirectoryInfo()
+  @ParameterizedTest
+  @MethodSource("replicationConfigs")
+  public void testGetDeletedDirectoryInfo(ReplicationConfig replicationConfig)
       throws Exception {
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        new DefaultReplicationConfig(replicationConfig));
+    String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+        bucket.getVolumeName());
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    fs = FileSystem.get(conf);
 
     // Create a directory structure with 10 files in dir1.
     Path dir1 = new Path("/dir1");
@@ -209,6 +227,7 @@ public void testGetDeletedDirectoryInfo()
       // Assert that the directory dir1 has 10 sub-files and size of 1000 bytes.
       assertEquals(10, summary.getNumOfFiles());
       assertEquals(10, summary.getSizeOfFiles());
+      assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), summary.getReplicatedSizeOfFiles());
     }
 
     // Delete the entire directory dir1.
@@ -236,6 +255,7 @@ public void testGetDeletedDirectoryInfo()
         (KeyInsightInfoResponse) deletedDirInfo.getEntity();
     // Assert the size of deleted directory is 10.
     assertEquals(10, entity.getUnreplicatedDataSize());
+    assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), entity.getReplicatedDataSize());
 
     // Cleanup the tables.
     cleanupTables();
@@ -253,9 +273,16 @@ public void testGetDeletedDirectoryInfo()
    *      │   │   └── file3
    *
    */
-  @Test
-  public void testGetDeletedDirectoryInfoForNestedDirectories()
+  @ParameterizedTest
+  @MethodSource("replicationConfigs")
+  public void testGetDeletedDirectoryInfoForNestedDirectories(ReplicationConfig replicationConfig)
       throws Exception {
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        new DefaultReplicationConfig(replicationConfig));
+    String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+        bucket.getVolumeName());
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    fs = FileSystem.get(conf);
 
     // Create a directory structure with 10 files and 3 nested directories.
     Path path = new Path("/dir1/dir2/dir3");
@@ -325,6 +352,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories()
         (KeyInsightInfoResponse) deletedDirInfo.getEntity();
     // Assert the size of deleted directory is 3.
     assertEquals(3, entity.getUnreplicatedDataSize());
+    assertEquals(QuotaUtil.getReplicatedSize(3, replicationConfig), entity.getReplicatedDataSize());
 
     // Cleanup the tables.
     cleanupTables();
@@ -351,9 +379,18 @@ public void testGetDeletedDirectoryInfoForNestedDirectories()
    *        ├── ...
    *        └── file10
    */
-  @Test
-  public void testGetDeletedDirectoryInfoWithMultipleSubdirectories()
+  @ParameterizedTest
+  @MethodSource("replicationConfigs")
+  public void testGetDeletedDirectoryInfoWithMultipleSubdirectories(ReplicationConfig replicationConfig)
       throws Exception {
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        new DefaultReplicationConfig(replicationConfig));
+    String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+        bucket.getVolumeName());
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    fs = FileSystem.get(conf);
+
     int numSubdirectories = 10;
     int filesPerSubdirectory = 10;
 
@@ -387,6 +424,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories()
         (KeyInsightInfoResponse) deletedDirInfo.getEntity();
     // Assert the size of deleted directory is 100.
     assertEquals(100, entity.getUnreplicatedDataSize());
+    assertEquals(QuotaUtil.getReplicatedSize(100, replicationConfig), entity.getReplicatedDataSize());
 
     // Cleanup the tables.
     cleanupTables();
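
For context, a minimal sketch of how the expected values in the new assertions above are derived. It uses QuotaUtil.getReplicatedSize and the same replication configs as the parameterized tests; the class name is illustrative only, and for RATIS/THREE the helper works out to three times the raw size, while the EC expectation comes from the same helper applied to the EC config.

    // Illustrative sketch (not part of the patch): expected replicated sizes
    // for 10 unreplicated bytes under the two replication configs used above.
    import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
    import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;

    import org.apache.hadoop.hdds.client.ECReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.ozone.om.helpers.QuotaUtil;

    public class ReplicatedSizeSketch {
      public static void main(String[] args) {
        ReplicationConfig ratis = ReplicationConfig.fromTypeAndFactor(RATIS, THREE);
        // RATIS/THREE: 10 raw bytes are stored as three replicas, i.e. 30 bytes.
        System.out.println(QuotaUtil.getReplicatedSize(10, ratis));
        // EC RS-3-2: the same helper derives the on-disk size from the EC config.
        ReplicationConfig ec = new ECReplicationConfig("RS-3-2-1024k");
        System.out.println(QuotaUtil.getReplicatedSize(10, ec));
      }
    }
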
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
index a30fc356057..7ac80ef4058 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
@@ -33,6 +33,7 @@
 import java.util.Scanner;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -65,7 +66,21 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client,
   }
 
   public static OzoneBucket createVolumeAndBucket(OzoneClient client,
-      String volumeName, String bucketName, BucketLayout bucketLayout)
+      String volumeName, String bucketName, BucketLayout bucketLayout) throws IOException {
+    BucketArgs omBucketArgs;
+    BucketArgs.Builder builder = BucketArgs.newBuilder();
+    builder.setStorageType(StorageType.DISK);
+    if (bucketLayout != null) {
+      builder.setBucketLayout(bucketLayout);
+    }
+    omBucketArgs = builder.build();
+
+    return createVolumeAndBucket(client, volumeName, bucketName,
+        omBucketArgs);
+  }
+
+  public static OzoneBucket createVolumeAndBucket(OzoneClient client,
+      String volumeName, String bucketName, BucketLayout bucketLayout, DefaultReplicationConfig replicationConfig)
       throws IOException {
     BucketArgs omBucketArgs;
     BucketArgs.Builder builder = BucketArgs.newBuilder();
@@ -73,6 +88,10 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client,
     if (bucketLayout != null) {
       builder.setBucketLayout(bucketLayout);
     }
+
+    if (replicationConfig != null) {
+      builder.setDefaultReplicationConfig(replicationConfig);
+    }
     omBucketArgs = builder.build();
 
     return createVolumeAndBucket(client, volumeName, bucketName,
@@ -197,18 +216,26 @@ public static OzoneBucket createLinkedBucket(OzoneClient client, String vol, Str
   public static OzoneBucket createVolumeAndBucket(OzoneClient client,
                                                   BucketLayout bucketLayout)
       throws IOException {
-    return createVolumeAndBucket(client, bucketLayout, false);
+    return createVolumeAndBucket(client, bucketLayout, null, false);
   }
 
-  public static OzoneBucket createVolumeAndBucket(OzoneClient client,
-      BucketLayout bucketLayout, boolean createLinkedBucket) throws IOException {
+  public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout,
+                                                  DefaultReplicationConfig replicationConfig)
+      throws IOException {
+    return createVolumeAndBucket(client, bucketLayout, replicationConfig, false);
+  }
+
+  public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout,
+                                                  DefaultReplicationConfig replicationConfig,
+                                                  boolean createLinkedBucket)
+      throws IOException {
     final int attempts = 5;
     for (int i = 0; i < attempts; i++) {
       try {
         String volumeName = "volume" + RandomStringUtils.secure().nextNumeric(5);
         String bucketName = "bucket" + RandomStringUtils.secure().nextNumeric(5);
         OzoneBucket ozoneBucket = createVolumeAndBucket(client, volumeName, bucketName,
-            bucketLayout);
+            bucketLayout, replicationConfig);
         if (createLinkedBucket) {
           String targetBucketName = ozoneBucket.getName() + RandomStringUtils.secure().nextNumeric(5);
           ozoneBucket = createLinkedBucket(client, volumeName, bucketName, targetBucketName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
index aac18d5d36d..93dba945d46 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
@@ -244,7 +244,7 @@ private void init() throws Exception {
     cluster.waitForClusterToBeReady();
     client = cluster.newClient();
     // create a volume and a bucket to be used by OzoneFileSystem
-    ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBucket);
+    ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBucket);
     if (createLinkedBucket) {
       this.linkedBuckets.put(ozoneBucket.getName(), ozoneBucket.getSourceBucket());
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java
index 7db6c8d41db..fca8b137b72 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java
@@ -132,7 +132,7 @@ public void setupFsClient() throws IOException {
     writeClient = objectStore.getClientProxy().getOzoneManagerClient();
     ozoneManager = cluster().getOzoneManager();
 
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBuckets);
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBuckets);
     if (createLinkedBuckets) {
       linkedBucketMaps.put(bucket.getName(), bucket.getSourceBucket());
     }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index 7e2e57879ed..acaf348f22a 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -57,6 +57,7 @@
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -599,9 +600,9 @@ private void getPendingForDeletionDirInfo(
         keyEntityInfo.setKey(omKeyInfo.getFileName());
         keyEntityInfo.setPath(createPath(omKeyInfo));
         keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime());
-        keyEntityInfo.setSize(
-            fetchSizeForDeletedDirectory(omKeyInfo.getObjectID()));
-        keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize());
+        Pair<Long, Long> sizeInfo = fetchSizeForDeletedDirectory(omKeyInfo.getObjectID());
+        keyEntityInfo.setSize(sizeInfo.getLeft());
+        keyEntityInfo.setReplicatedSize(sizeInfo.getRight());
         keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig());
         pendingForDeletionKeyInfo.setUnreplicatedDataSize(
             pendingForDeletionKeyInfo.getUnreplicatedDataSize() +
@@ -627,20 +628,20 @@ private void getPendingForDeletionDirInfo(
   }
 
   /**
-   * Given an object ID, return total data size (no replication)
+   * Given an object ID, return total data size as a pair of Total Size, Total Replicated Size
    * under this object. Note:- This method is RECURSIVE.
    *
    * @param objectId the object's ID
-   * @return total used data size in bytes
+   * @return total used data size and replicated total used data size in bytes
    * @throws IOException ioEx
    */
-  protected long fetchSizeForDeletedDirectory(long objectId)
+  protected Pair<Long, Long> fetchSizeForDeletedDirectory(long objectId)
       throws IOException {
     NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(objectId);
-    if (nsSummary == null) {
-      return 0L;
+    if (nsSummary != null) {
+      return Pair.of(nsSummary.getSizeOfFiles(), nsSummary.getReplicatedSizeOfFiles());
     }
-    return nsSummary.getSizeOfFiles();
+    return Pair.of(0L, 0L);
   }
 
   /** This method retrieves set of directories pending for deletion.
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java
index cb233541bdf..5f55cbdb40c 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java
@@ -108,8 +108,10 @@ public DUResponse getDuResponse(
     Set<Long> bucketSubdirs = bucketNSSummary.getChildDir();
     duResponse.setKeySize(bucketNSSummary.getSizeOfFiles());
     List<DUResponse.DiskUsage> dirDUData = new ArrayList<>();
-    long bucketDataSize = duResponse.getKeySize();
-    long bucketDataSizeWithReplica = 0L;
+    long bucketDataSize = bucketNSSummary.getSizeOfFiles();
+    if (withReplica) {
+      duResponse.setSizeWithReplica(bucketNSSummary.getReplicatedSizeOfFiles());
+    }
     for (long subdirObjectId: bucketSubdirs) {
       NSSummary subdirNSSummary = getReconNamespaceSummaryManager()
               .getNSSummary(subdirObjectId);
@@ -121,26 +123,16 @@ public DUResponse getDuResponse(
       // format with leading slash and without trailing slash
       DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
       diskUsage.setSubpath(subpath);
-      long dataSize = getTotalSize(subdirObjectId);
-      bucketDataSize += dataSize;
 
       if (withReplica) {
-        long dirDU = getBucketHandler()
-            .calculateDUUnderObject(subdirObjectId);
-        diskUsage.setSizeWithReplica(dirDU);
-        bucketDataSizeWithReplica += dirDU;
+        diskUsage.setSizeWithReplica(subdirNSSummary.getReplicatedSizeOfFiles());
       }
-      diskUsage.setSize(dataSize);
+      diskUsage.setSize(subdirNSSummary.getSizeOfFiles());
       dirDUData.add(diskUsage);
     }
-    // Either listFile or withReplica is enabled, we need the directKeys info
     if (listFile || withReplica) {
-      bucketDataSizeWithReplica += getBucketHandler()
-              .handleDirectKeys(bucketObjectId, withReplica,
-                  listFile, dirDUData, getNormalizedPath());
-    }
-    if (withReplica) {
-      duResponse.setSizeWithReplica(bucketDataSizeWithReplica);
+      getBucketHandler().handleDirectKeys(bucketObjectId, withReplica,
+          listFile, dirDUData, getNormalizedPath());
     }
     duResponse.setCount(dirDUData.size());
     duResponse.setSize(bucketDataSize);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
index 866dd37091a..8ec49830156 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
@@ -100,7 +100,9 @@ public DUResponse getDuResponse(
 
     duResponse.setKeySize(dirNSSummary.getSizeOfFiles());
     long dirDataSize = duResponse.getKeySize();
-    long dirDataSizeWithReplica = 0L;
+    if (withReplica) {
+      duResponse.setSizeWithReplica(dirNSSummary.getReplicatedSizeOfFiles());
+    }
     List<DUResponse.DiskUsage> subdirDUData = new ArrayList<>();
     // iterate all subdirectories to get disk usage data
     for (long subdirObjectId: subdirs) {
@@ -129,30 +131,18 @@ public DUResponse getDuResponse(
       DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
       // reformat the response
       diskUsage.setSubpath(subpath);
-      long dataSize = getTotalSize(subdirObjectId);
-      dirDataSize += dataSize;
-
       if (withReplica) {
-        long subdirDU = getBucketHandler()
-                .calculateDUUnderObject(subdirObjectId);
-        diskUsage.setSizeWithReplica(subdirDU);
-        dirDataSizeWithReplica += subdirDU;
+        diskUsage.setSizeWithReplica(subdirNSSummary.getReplicatedSizeOfFiles());
       }
 
-      diskUsage.setSize(dataSize);
+      diskUsage.setSize(subdirNSSummary.getSizeOfFiles());
       subdirDUData.add(diskUsage);
     }
-
-    // handle direct keys under directory
     if (listFile || withReplica) {
-      dirDataSizeWithReplica += getBucketHandler()
-              .handleDirectKeys(dirObjectId, withReplica,
-                  listFile, subdirDUData, getNormalizedPath());
+      getBucketHandler().handleDirectKeys(dirObjectId, withReplica,
+              listFile, subdirDUData, getNormalizedPath());
     }
 
-    if (withReplica) {
-      duResponse.setSizeWithReplica(dirDataSizeWithReplica);
-    }
     duResponse.setCount(subdirDUData.size());
     duResponse.setSize(dirDataSize);
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
index 845e27b5bde..7d482745c21 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
@@ -24,7 +24,6 @@
 import java.nio.file.Paths;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Set;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -115,48 +114,9 @@ public EntityType determineKeyPath(String keyName)
   @Override
   public long calculateDUUnderObject(long parentId)
       throws IOException {
-    Table<String, OmKeyInfo> keyTable = getOmMetadataManager().getFileTable();
-
-    long totalDU = 0L;
-    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-            iterator = keyTable.iterator()) {
-
-      String seekPrefix = OM_KEY_PREFIX +
-          volumeId +
-          OM_KEY_PREFIX +
-          bucketId +
-          OM_KEY_PREFIX +
-          parentId +
-          OM_KEY_PREFIX;
-      iterator.seek(seekPrefix);
-      // handle direct keys
-      while (iterator.hasNext()) {
-        Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
-        String dbKey = kv.getKey();
-        // since the RocksDB is ordered, seek until the prefix isn't matched
-        if (!dbKey.startsWith(seekPrefix)) {
-          break;
-        }
-        OmKeyInfo keyInfo = kv.getValue();
-        if (keyInfo != null) {
-          totalDU += keyInfo.getReplicatedSize();
-        }
-      }
-    }
-
-    // handle nested keys (DFS)
     NSSummary nsSummary = getReconNamespaceSummaryManager()
-            .getNSSummary(parentId);
-    // empty bucket
-    if (nsSummary == null) {
-      return 0;
-    }
-
-    Set<Long> subDirIds = nsSummary.getChildDir();
-    for (long subDirId: subDirIds) {
-      totalDU += calculateDUUnderObject(subDirId);
-    }
-    return totalDU;
+        .getNSSummary(parentId);
+    return nsSummary != null ? nsSummary.getReplicatedSizeOfFiles() : 0L;
   }
 
   /**
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
index 1673d76282f..03396a63400 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
@@ -22,7 +22,6 @@
 import com.google.common.base.Preconditions;
 import java.io.IOException;
 import java.util.List;
-import java.util.Set;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -108,66 +107,9 @@ public EntityType determineKeyPath(String keyName)
   @Override
   public long calculateDUUnderObject(long parentId)
       throws IOException {
-    Table<String, OmKeyInfo> keyTable = getKeyTable();
-
-    long totalDU = 0L;
-
-    String seekPrefix = OM_KEY_PREFIX +
-        vol +
-        OM_KEY_PREFIX +
-        bucket +
-        OM_KEY_PREFIX;
-
     NSSummary nsSummary = getReconNamespaceSummaryManager()
         .getNSSummary(parentId);
-    // empty bucket
-    if (nsSummary == null) {
-      return 0;
-    }
-
-    if (omBucketInfo.getObjectID() != parentId) {
-      String dirName = nsSummary.getDirName();
-      seekPrefix += dirName;
-    }
-
-    String[] seekKeys = seekPrefix.split(OM_KEY_PREFIX);
-    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-             iterator = keyTable.iterator()) {
-      iterator.seek(seekPrefix);
-      // handle direct keys
-      while (iterator.hasNext()) {
-        Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
-        String dbKey = kv.getKey();
-        // since the RocksDB is ordered, seek until the prefix isn't matched
-        if (!dbKey.startsWith(seekPrefix)) {
-          break;
-        }
-
-        String[] keys = dbKey.split(OM_KEY_PREFIX);
-
-        // iteration moved to the next level
-        // and not handling direct keys
-        if (keys.length - seekKeys.length > 1) {
-          continue;
-        }
-
-        OmKeyInfo keyInfo = kv.getValue();
-        if (keyInfo != null) {
-          // skip directory markers, just include directKeys
-          if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) {
-            continue;
-          }
-          totalDU += keyInfo.getReplicatedSize();
-        }
-      }
-    }
-
-    // handle nested keys (DFS)
-    Set<Long> subDirIds = nsSummary.getChildDir();
-    for (long subDirId: subDirIds) {
-      totalDU += calculateDUUnderObject(subDirId);
-    }
-    return totalDU;
+    return nsSummary != null ? nsSummary.getReplicatedSizeOfFiles() : 0L;
   }
 
   /**
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java
index 8b535f626f9..7c4fb871791 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java
@@ -169,40 +169,8 @@ public long handleDirectKeys(long parentId, boolean withReplica,
    */
   @Override
   public long calculateDUUnderObject(long parentId) throws IOException {
-    // Initialize the total disk usage variable.
-    long totalDU = 0L;
-
-    // Access the key table for the bucket.
-    Table<String, OmKeyInfo> keyTable = getKeyTable();
-
-    try (
-        TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-            iterator = keyTable.iterator()) {
-      // Construct the seek prefix to filter keys under this bucket.
-      String seekPrefix =
-          OM_KEY_PREFIX + vol + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX;
-      iterator.seek(seekPrefix);
-
-      // Iterate over keys in the bucket.
-      while (iterator.hasNext()) {
-        Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
-        String keyName = kv.getKey();
-
-        // Break the loop if the current key does not start with the seekPrefix.
-        if (!keyName.startsWith(seekPrefix)) {
-          break;
-        }
-
-        // Sum the size of each key to the total disk usage.
-        OmKeyInfo keyInfo = kv.getValue();
-        if (keyInfo != null) {
-          totalDU += keyInfo.getDataSize();
-        }
-      }
-    }
-
-    // Return the total disk usage of all keys in the bucket.
-    return totalDU;
+    NSSummary nsSummary = getReconNamespaceSummaryManager().getNSSummary(parentId);
+    return nsSummary != null ? nsSummary.getReplicatedSizeOfFiles() : 0L;
   }
 
   /**
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
index b9075ca53b4..32ef6bd2485 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
@@ -37,24 +37,27 @@ public class NSSummary {
   // for performance optimization, not just direct files in this directory
   private int numOfFiles;
   private long sizeOfFiles;
+  private long replicatedSizeOfFiles;
   private int[] fileSizeBucket;
   private Set<Long> childDir;
   private String dirName;
   private long parentId = 0;
 
   public NSSummary() {
-    this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS],
+    this(0, 0L, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS],
         new HashSet<>(), "", 0);
   }
 
   public NSSummary(int numOfFiles,
                    long sizeOfFiles,
+                   long replicatedSizeOfFiles,
                    int[] bucket,
                    Set<Long> childDir,
                    String dirName,
                    long parentId) {
     this.numOfFiles = numOfFiles;
     this.sizeOfFiles = sizeOfFiles;
+    this.replicatedSizeOfFiles = replicatedSizeOfFiles;
     setFileSizeBucket(bucket);
     this.childDir = childDir;
     this.dirName = dirName;
@@ -75,6 +78,10 @@ public long getSizeOfFiles() {
     return sizeOfFiles;
   }
 
+  public long getReplicatedSizeOfFiles() {
+    return replicatedSizeOfFiles;
+  }
+
   public int[] getFileSizeBucket() {
     return Arrays.copyOf(fileSizeBucket, ReconConstants.NUM_OF_FILE_SIZE_BINS);
   }
@@ -101,6 +108,10 @@ public void setSizeOfFiles(long sizeOfFiles) {
     this.sizeOfFiles = sizeOfFiles;
   }
 
+  public void setReplicatedSizeOfFiles(long replicatedSizeOfFiles) {
+    this.replicatedSizeOfFiles = replicatedSizeOfFiles;
+  }
+
   public void setFileSizeBucket(int[] fileSizeBucket) {
     this.fileSizeBucket = Arrays.copyOf(fileSizeBucket,
             ReconConstants.NUM_OF_FILE_SIZE_BINS);
@@ -142,6 +153,7 @@ public String toString() {
         ", childDir=" + childDir +
         ", numOfFiles=" + numOfFiles +
         ", sizeOfFiles=" + sizeOfFiles +
+        ", replicatedSizeOfFiles=" + replicatedSizeOfFiles +
         ", fileSizeBucket=" + Arrays.toString(fileSizeBucket) +
         '}';
   }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
index 92068988d76..3f7ece0432a 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
@@ -67,7 +67,7 @@ public byte[] toPersistedFormatImpl(NSSummary object) throws IOException {
         + (numOfChildDirs + 1) * Long.BYTES // 1 long field for parentId + list size
         + Short.BYTES // 2 dummy shorts to track length
         + dirName.length // directory name length
-        + Long.BYTES; // Added space for parentId serialization
+        + 2 * Long.BYTES; // Added space for parentId serialization and replicated size of files
 
     ByteArrayOutputStream out = new ByteArrayOutputStream(resSize);
     out.write(integerCodec.toPersistedFormat(object.getNumOfFiles()));
@@ -85,6 +85,7 @@ public byte[] toPersistedFormatImpl(NSSummary object) throws IOException {
     out.write(integerCodec.toPersistedFormat(dirName.length));
     out.write(dirName);
     out.write(longCodec.toPersistedFormat(object.getParentId()));
+    out.write(longCodec.toPersistedFormat(object.getReplicatedSizeOfFiles()));
 
     return out.toByteArray();
   }
@@ -112,6 +113,8 @@ public NSSummary fromPersistedFormatImpl(byte[] rawData) throws IOException {
 
     int strLen = in.readInt();
     if (strLen == 0) {
+      //we need to read even though dir name is empty
+      readParentIdAndReplicatedSize(in, res);
       return res;
     }
     byte[] buffer = new byte[strLen];
@@ -119,15 +122,7 @@ public NSSummary fromPersistedFormatImpl(byte[] rawData) throws IOException {
     assert (bytesRead == strLen);
     String dirName = stringCodec.fromPersistedFormat(buffer);
     res.setDirName(dirName);
-
-    // Check if there is enough data available to read the parentId
-    if (in.available() >= Long.BYTES) {
-      long parentId = in.readLong();
-      res.setParentId(parentId);
-    } else {
-      // Set default parentId to -1 indicating it's from old format
-      res.setParentId(-1);
-    }
+    readParentIdAndReplicatedSize(in, res);
     return res;
   }
 
@@ -136,10 +131,24 @@ public NSSummary copyObject(NSSummary object) {
     NSSummary copy = new NSSummary();
     copy.setNumOfFiles(object.getNumOfFiles());
     copy.setSizeOfFiles(object.getSizeOfFiles());
+    copy.setReplicatedSizeOfFiles(object.getReplicatedSizeOfFiles());
     copy.setFileSizeBucket(object.getFileSizeBucket());
     copy.setChildDir(object.getChildDir());
     copy.setDirName(object.getDirName());
     copy.setParentId(object.getParentId());
     return copy;
   }
+
+  private void readParentIdAndReplicatedSize(DataInputStream input, NSSummary output) throws IOException {
+    if (input.available() >= 2 * Long.BYTES) {
+      output.setParentId(input.readLong());
+      output.setReplicatedSizeOfFiles(input.readLong());
+    } else if (input.available() >= Long.BYTES) {
+      output.setParentId(input.readLong());
+      output.setReplicatedSizeOfFiles(-1);
+    } else {
+      output.setParentId(-1);
+      output.setReplicatedSizeOfFiles(-1);
+    }
+  }
 }
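
For readers following the codec change: a minimal, self-contained sketch of the backward-compatible tail read that readParentIdAndReplicatedSize performs. Records written before this change carry no trailing replicated-size long (and very old ones no parentId either), so missing fields fall back to the -1 sentinel. The helper class below is illustrative only.

    // Illustrative sketch (not part of the patch): reading the optional tail
    // of a persisted NSSummary record; -1 marks a field absent in old data.
    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    final class NSSummaryTailReadSketch {
      static long[] readTail(byte[] tailBytes) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(tailBytes));
        long parentId = -1;
        long replicatedSizeOfFiles = -1;
        if (in.available() >= 2 * Long.BYTES) {
          // Current format: parentId followed by replicatedSizeOfFiles.
          parentId = in.readLong();
          replicatedSizeOfFiles = in.readLong();
        } else if (in.available() >= Long.BYTES) {
          // Older format: only parentId was persisted.
          parentId = in.readLong();
        }
        return new long[] {parentId, replicatedSizeOfFiles};
      }
    }
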
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
index 5d2f747f940..faf2008ec6b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
@@ -98,10 +98,18 @@ protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map<Long,
       nsSummary = new NSSummary();
     }
     int[] fileBucket = nsSummary.getFileSizeBucket();
-    
-    // Update immediate parent's totals (these fields now represent totals)
+
+    // Update immediate parent's totals (includes all descendant files)
     nsSummary.setNumOfFiles(nsSummary.getNumOfFiles() + 1);
     nsSummary.setSizeOfFiles(nsSummary.getSizeOfFiles() + keyInfo.getDataSize());
+    // Before arithmetic operations, check for sentinel value
+    long currentReplSize = nsSummary.getReplicatedSizeOfFiles();
+    if (currentReplSize < 0) {
+      // Old data, initialize to 0 before first use
+      currentReplSize = 0;
+      nsSummary.setReplicatedSizeOfFiles(0);
+    }
+    nsSummary.setReplicatedSizeOfFiles(currentReplSize + keyInfo.getReplicatedSize());
     int binIndex = ReconUtils.getFileSizeBinIndex(keyInfo.getDataSize());
 
     ++fileBucket[binIndex];
@@ -109,7 +117,7 @@ protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map<Long,
     nsSummaryMap.put(parentObjectId, nsSummary);
 
     // Propagate upwards to all parents in the parent chain
-    propagateSizeUpwards(parentObjectId, keyInfo.getDataSize(), 1, nsSummaryMap);
+    propagateSizeUpwards(parentObjectId, keyInfo.getDataSize(), keyInfo.getReplicatedSize(), 1, nsSummaryMap);
   }
 
   protected void handlePutDirEvent(OmDirectoryInfo directoryInfo,
@@ -130,6 +138,7 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo,
     boolean directoryAlreadyExists = (curNSSummary != null);
     long existingSizeOfFiles = directoryAlreadyExists ? curNSSummary.getSizeOfFiles() : 0;
     int existingNumOfFiles = directoryAlreadyExists ? curNSSummary.getNumOfFiles() : 0;
+    long existingReplicatedSizeOfFiles = directoryAlreadyExists ? curNSSummary.getReplicatedSizeOfFiles() : 0;
 
     if (curNSSummary == null) {
       // If we don't have it locally and in the DB we create a new instance
@@ -158,7 +167,8 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo,
 
     // If the directory already existed with content, propagate its totals upward
     if (directoryAlreadyExists && (existingSizeOfFiles > 0 || existingNumOfFiles > 0)) {
-      propagateSizeUpwards(parentObjectId, existingSizeOfFiles, existingNumOfFiles, nsSummaryMap);
+      propagateSizeUpwards(parentObjectId, existingSizeOfFiles,
+          existingReplicatedSizeOfFiles, existingNumOfFiles, nsSummaryMap);
     }
   }
 
@@ -185,12 +195,18 @@ protected void handleDeleteKeyEvent(OmKeyInfo keyInfo,
     // Decrement immediate parent's totals (these fields now represent totals)
     nsSummary.setNumOfFiles(nsSummary.getNumOfFiles() - 1);
     nsSummary.setSizeOfFiles(nsSummary.getSizeOfFiles() - keyInfo.getDataSize());
+    long currentReplSize = nsSummary.getReplicatedSizeOfFiles();
+    long keyReplSize = keyInfo.getReplicatedSize();
+    if (currentReplSize >= 0 && keyReplSize >= 0) {
+      nsSummary.setReplicatedSizeOfFiles(currentReplSize - keyReplSize);
+    }
     --fileBucket[binIndex];
     nsSummary.setFileSizeBucket(fileBucket);
     nsSummaryMap.put(parentObjectId, nsSummary);
 
     // Propagate upwards to all parents in the parent chain
-    propagateSizeUpwards(parentObjectId, -keyInfo.getDataSize(), -1, nsSummaryMap);
+    propagateSizeUpwards(parentObjectId, -keyInfo.getDataSize(),
+        -keyInfo.getReplicatedSize(), -1, nsSummaryMap);
   }
 
   protected void handleDeleteDirEvent(OmDirectoryInfo directoryInfo,
@@ -222,10 +238,18 @@ protected void handleDeleteDirEvent(OmDirectoryInfo directoryInfo,
       // Decrement parent's totals by the deleted directory's totals
       parentNsSummary.setNumOfFiles(parentNsSummary.getNumOfFiles() - deletedDirSummary.getNumOfFiles());
       parentNsSummary.setSizeOfFiles(parentNsSummary.getSizeOfFiles() - deletedDirSummary.getSizeOfFiles());
+      long parentReplSize = parentNsSummary.getReplicatedSizeOfFiles();
+      long deletedReplSize = deletedDirSummary.getReplicatedSizeOfFiles();
+      if (parentReplSize >= 0 && deletedReplSize >= 0) {
+        parentNsSummary.setReplicatedSizeOfFiles(parentReplSize - deletedReplSize);
+      }
       
       // Propagate the decrements upwards to all ancestors
-      propagateSizeUpwards(parentObjectId, -deletedDirSummary.getSizeOfFiles(),
-                          -deletedDirSummary.getNumOfFiles(), nsSummaryMap);
+      if (deletedReplSize < 0) {
+        deletedReplSize = 0;
+      }
+      propagateSizeUpwards(parentObjectId, -deletedDirSummary.getSizeOfFiles(),
+          -deletedReplSize, -deletedDirSummary.getNumOfFiles(), nsSummaryMap);
       
       // Set the deleted directory's parentId to 0 (unlink it)
       deletedDirSummary.setParentId(0);
@@ -274,7 +298,7 @@ protected boolean flushAndCommitUpdatedNSToDB(Map<Long, NSSummary> nsSummaryMap,
    * This ensures that when files are added/deleted, all ancestor directories
    * reflect the total changes in their sizeOfFiles and numOfFiles fields.
    */
-  protected void propagateSizeUpwards(long objectId, long sizeChange,
+  protected void propagateSizeUpwards(long objectId, long sizeChange, long replicatedSizeChange,
                                        int countChange, Map<Long, NSSummary> nsSummaryMap)
                                        throws IOException {
     // Get the current directory's NSSummary
@@ -297,11 +321,16 @@ protected void propagateSizeUpwards(long objectId, long sizeChange,
       if (parentSummary != null) {
         // Update parent's totals
         parentSummary.setSizeOfFiles(parentSummary.getSizeOfFiles() + sizeChange);
+        long parentReplSize = parentSummary.getReplicatedSizeOfFiles();
+        if (parentReplSize < 0) {
+          parentReplSize = 0;
+        }
+        parentSummary.setReplicatedSizeOfFiles(parentReplSize + replicatedSizeChange);
         parentSummary.setNumOfFiles(parentSummary.getNumOfFiles() + countChange);
         nsSummaryMap.put(parentId, parentSummary);
         
         // Recursively propagate to grandparents
-        propagateSizeUpwards(parentId, sizeChange, countChange, nsSummaryMap);
+        propagateSizeUpwards(parentId, sizeChange, replicatedSizeChange, countChange, nsSummaryMap);
       }
     }
   }
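
To make the new accounting concrete: a minimal sketch of the bookkeeping that handlePutKeyEvent and propagateSizeUpwards now share. Writing a 1 KB key whose replicated size is 3 KB under dir2 (a child of dir1) bumps numOfFiles by 1, sizeOfFiles by 1 KB and replicatedSizeOfFiles by 3 KB on dir2, dir1 and every further ancestor. The helper below is illustrative only and works on an in-memory map; the real handler also reads summaries from RocksDB and guards the -1 sentinel left by pre-upgrade records.

    // Illustrative sketch (not part of the patch): propagating a size change
    // up the parent chain, including the new replicated-size total.
    import java.util.Map;
    import org.apache.hadoop.ozone.recon.api.types.NSSummary;

    final class PropagationSketch {
      static void apply(Map<Long, NSSummary> summaries, long dirObjectId,
          long sizeChange, long replicatedSizeChange, int countChange) {
        NSSummary summary = summaries.computeIfAbsent(dirObjectId, id -> new NSSummary());
        summary.setNumOfFiles(summary.getNumOfFiles() + countChange);
        summary.setSizeOfFiles(summary.getSizeOfFiles() + sizeChange);
        // Treat the -1 "unknown" sentinel from old records as zero before adding.
        long repl = Math.max(summary.getReplicatedSizeOfFiles(), 0L);
        summary.setReplicatedSizeOfFiles(repl + replicatedSizeChange);
        if (summary.getParentId() > 0) {
          apply(summaries, summary.getParentId(), sizeChange, replicatedSizeChange, countChange);
        }
      }
    }
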
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java
index bd0b52ae183..e55e6410557 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java
@@ -34,7 +34,8 @@ public enum ReconLayoutFeature {
   UNHEALTHY_CONTAINER_REPLICA_MISMATCH(2, "Adding replica mismatch state to the unhealthy container table"),
 
   // HDDS-13432: Materialize NSSummary totals and rebuild tree on upgrade
-  NSSUMMARY_AGGREGATED_TOTALS(3, "Aggregated totals for NSSummary and auto-rebuild on upgrade");
+  NSSUMMARY_AGGREGATED_TOTALS(3, "Aggregated totals for NSSummary and auto-rebuild on upgrade"),
+  REPLICATED_SIZE_OF_FILES(4, "Adds replicatedSizeOfFiles to NSSummary");
 
   private final int version;
   private final String description;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java
new file mode 100644
index 00000000000..e4eea25dad6
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.upgrade;
+
+import com.google.inject.Injector;
+import javax.sql.DataSource;
+import org.apache.hadoop.ozone.recon.ReconGuiceServletContextListener;
+import org.apache.hadoop.ozone.recon.tasks.ReconTaskController;
+import org.apache.hadoop.ozone.recon.tasks.ReconTaskReInitializationEvent;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Upgrade action for the REPLICATED_SIZE_OF_FILES layout feature.
+ * The action triggers a full rebuild of the NSSummary ensuring that the new field: replicatedSizeOfFiles is correctly
+ * populated for all objects.
+ */
+@UpgradeActionRecon(feature = ReconLayoutFeature.REPLICATED_SIZE_OF_FILES,
+    type = ReconUpgradeAction.UpgradeActionType.FINALIZE)
+public class ReplicatedSizeOfFilesUpgradeAction implements ReconUpgradeAction {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ReplicatedSizeOfFilesUpgradeAction.class);
+
+  @Override
+  public void execute(DataSource dataSource) {
+    try {
+      Injector injector = ReconGuiceServletContextListener.getGlobalInjector();
+      if (injector == null) {
+        throw new IllegalStateException("Guice injector is not initialized. Cannot perform NSSummary rebuild.");
+      }
+      ReconTaskController reconTaskController = injector.getInstance(ReconTaskController.class);
+      LOG.info("Starting full rebuild of NSSummary for REPLICATED_SIZE_OF_FILES upgrade...");
+      ReconTaskController.ReInitializationResult result = reconTaskController.queueReInitializationEvent(
+          ReconTaskReInitializationEvent.ReInitializationReason.MANUAL_TRIGGER);
+      if (result != ReconTaskController.ReInitializationResult.SUCCESS) {
+        throw new RuntimeException(
+            "Failed to queue reinitialization event (result: " + result + "). " +
+                "NSSummary rebuild required for REPLICATED_SIZE_OF_FILES upgrade.");
+      }
+    } catch (Exception e) {
+      LOG.error("Error during NSSummary rebuild for REPLICATED_SIZE_OF_FILES 
upgrade.", e);
+      throw new RuntimeException("Failed to rebuild NSSummary during upgrade", 
e);
+    }
+    LOG.info("Completed full rebuild of NSSummary for REPLICATED_SIZE_OF_FILES 
upgrade.");
+  }
+
+  @Override
+  public UpgradeActionType getType() {
+    return UpgradeActionType.FINALIZE;
+  }
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
index 05b6b5f300e..557115150d2 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
@@ -19,6 +19,7 @@
 
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
 import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize;
 import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO;
@@ -43,9 +44,11 @@
 import java.util.Set;
 import javax.ws.rs.core.Response;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -62,6 +65,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
 import org.apache.hadoop.ozone.recon.ReconConstants;
 import org.apache.hadoop.ozone.recon.ReconTestInjector;
 import org.apache.hadoop.ozone.recon.ReconUtils;
@@ -81,6 +85,7 @@
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -106,6 +111,16 @@
  *   file8     dir5      file11
  *            /    \
  *        file9    file10
+ *  ----------------------------------------
+ *                    vol3
+ *                     |
+ *                 bucket5
+ *                 /      \
+ *             file12     dir6
+ *                     /    \
+ *                 file13    dir7
+ *                          /
+ *                          file14
  * This is a test for the Rest APIs only. We have tested NSSummaryTask before,
  * so there is no need to test process() on DB's updates
  */
@@ -128,10 +143,12 @@ public class TestNSSummaryEndpointWithFSO {
   // Object names in FSO-enabled format
   private static final String VOL = "vol";
   private static final String VOL_TWO = "vol2";
+  private static final String VOL_THREE = "vol3";
   private static final String BUCKET_ONE = "bucket1";
   private static final String BUCKET_TWO = "bucket2";
   private static final String BUCKET_THREE = "bucket3";
   private static final String BUCKET_FOUR = "bucket4";
+  private static final String BUCKET_FIVE = "bucket5";
   private static final String KEY_ONE = "file1";
   private static final String KEY_TWO = "dir1/dir2/file2";
   private static final String KEY_THREE = "dir1/dir3/file3";
@@ -145,6 +162,9 @@ public class TestNSSummaryEndpointWithFSO {
   private static final String KEY_ELEVEN = "file11";
   private static final String MULTI_BLOCK_KEY = "dir1/file7";
   private static final String MULTI_BLOCK_FILE = "file7";
+  private static final String KEY_TWELVE = "file12";
+  private static final String KEY_THIRTEEN = "dir6/file13";
+  private static final String KEY_FOURTEEN = "dir6/dir7/file14";
 
   private static final String FILE_ONE = "file1";
   private static final String FILE_TWO = "file2";
@@ -157,12 +177,17 @@ public class TestNSSummaryEndpointWithFSO {
   private static final String FILE_NINE = "file9";
   private static final String FILE_TEN = "file10";
   private static final String FILE_ELEVEN = "file11";
+  private static final String FILE_TWELVE = "file12";
+  private static final String FILE_THIRTEEN = "file13";
+  private static final String FILE_FOURTEEN = "file14";
 
   private static final String DIR_ONE = "dir1";
   private static final String DIR_TWO = "dir2";
   private static final String DIR_THREE = "dir3";
   private static final String DIR_FOUR = "dir4";
   private static final String DIR_FIVE = "dir5";
+  private static final String DIR_SIX = "dir6";
+  private static final String DIR_SEVEN = "dir7";
   // objects IDs
   private static final long VOL_OBJECT_ID = 0L;
   private static final long BUCKET_ONE_OBJECT_ID = 1L;
@@ -187,6 +212,13 @@ public class TestNSSummaryEndpointWithFSO {
   private static final long KEY_NINE_OBJECT_ID = 19L;
   private static final long KEY_TEN_OBJECT_ID = 20L;
   private static final long KEY_ELEVEN_OBJECT_ID = 21L;
+  private static final long VOL_THREE_OBJECT_ID = 22L;
+  private static final long DIR_SIX_OBJECT_ID = 23L;
+  private static final long DIR_SEVEN_OBJECT_ID = 24L;
+  private static final long FILE_TWELVE_OBJECT_ID = 25L;
+  private static final long FILE_THIRTEEN_OBJECT_ID = 26L;
+  private static final long FILE_FOURTEEN_OBJECT_ID = 27L;
+  private static final long BUCKET_FIVE_OBJECT_ID = 28L;
 
   // container IDs
   private static final long CONTAINER_ONE_ID = 1L;
@@ -224,6 +256,9 @@ public class TestNSSummaryEndpointWithFSO {
   private static final long KEY_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
   private static final long KEY_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
   private static final long KEY_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1
+  private static final long KEY_TWELVE_SIZE = OzoneConsts.KB;
+  private static final long KEY_THIRTEEN_SIZE = OzoneConsts.KB;
+  private static final long KEY_FOURTEEN_SIZE = OzoneConsts.KB;
 
   private static final long FILE1_SIZE_WITH_REPLICA =
       getReplicatedSize(KEY_ONE_SIZE,
@@ -258,6 +293,12 @@ public class TestNSSummaryEndpointWithFSO {
   private static final long FILE11_SIZE_WITH_REPLICA =
       getReplicatedSize(KEY_ELEVEN_SIZE,
               StandaloneReplicationConfig.getInstance(ONE));
+  private static final long FILE12_SIZE_WITH_REPLICA =
+      getReplicatedSize(KEY_TWELVE_SIZE, RatisReplicationConfig.getInstance(THREE));
+  private static final long FILE13_SIZE_WITH_REPLICA =
+      getReplicatedSize(KEY_THIRTEEN_SIZE, RatisReplicationConfig.getInstance(THREE));
+  private static final long FILE14_SIZE_WITH_REPLICA =
+      getReplicatedSize(KEY_FOURTEEN_SIZE, RatisReplicationConfig.getInstance(THREE));
   private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA
           = FILE7_SIZE_WITH_REPLICA;
   private static final long
@@ -272,7 +313,10 @@ public class TestNSSummaryEndpointWithFSO {
       + FILE8_SIZE_WITH_REPLICA
       + FILE9_SIZE_WITH_REPLICA
       + FILE10_SIZE_WITH_REPLICA
-      + FILE11_SIZE_WITH_REPLICA;
+      + FILE11_SIZE_WITH_REPLICA
+      + FILE12_SIZE_WITH_REPLICA
+      + FILE13_SIZE_WITH_REPLICA
+      + FILE14_SIZE_WITH_REPLICA;
 
   private static final long
       MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL
@@ -311,10 +355,12 @@ public class TestNSSummaryEndpointWithFSO {
   private static final long ROOT_QUOTA = 2 * (2 * OzoneConsts.MB);
   private static final long VOL_QUOTA = 2 * OzoneConsts.MB;
   private static final long VOL_TWO_QUOTA = 2 * OzoneConsts.MB;
+  private static final long VOL_THREE_QUOTA = 2 * OzoneConsts.MB;
   private static final long BUCKET_ONE_QUOTA = OzoneConsts.MB;
   private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB;
   private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB;
   private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB;
+  private static final long BUCKET_FIVE_QUOTA = OzoneConsts.MB;
 
   // mock client's path requests
   private static final String TEST_USER = "TestUser";
@@ -330,25 +376,28 @@ public class TestNSSummaryEndpointWithFSO {
   private static final String KEY_PATH = "/vol/bucket2/file4";
   private static final String MULTI_BLOCK_KEY_PATH = "/vol/bucket1/dir1/file7";
   private static final String INVALID_PATH = "/vol/path/not/found";
+  private static final String VOL_THREE_PATH = "/vol3";
 
   // some expected answers
   private static final long ROOT_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE +
-      KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE +
-      KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE;
+      KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + KEY_SEVEN_SIZE +
+      KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE +
+      FILE12_SIZE_WITH_REPLICA + FILE13_SIZE_WITH_REPLICA + FILE14_SIZE_WITH_REPLICA;
+
   private static final long VOL_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE +
-          KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE;
+          KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + KEY_SEVEN_SIZE;
 
   private static final long VOL_TWO_DATA_SIZE =
       KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE;
 
   private static final long BUCKET_ONE_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE +
-          KEY_THREE_SIZE + KEY_SIX_SIZE;
+          KEY_THREE_SIZE + KEY_SIX_SIZE + KEY_SEVEN_SIZE;
 
   private static final long BUCKET_TWO_DATA_SIZE =
           KEY_FOUR_SIZE + KEY_FIVE_SIZE;
 
   private static final long DIR_ONE_DATA_SIZE = KEY_TWO_SIZE +
-          KEY_THREE_SIZE + KEY_SIX_SIZE;
+          KEY_THREE_SIZE + KEY_SIX_SIZE + KEY_SEVEN_SIZE;
 
   @BeforeEach
   public void setUp() throws Exception {
@@ -381,6 +430,8 @@ public void setUp() throws Exception {
 
     // populate OM DB and reprocess into Recon RocksDB
     populateOMDB();
+    populateVolumeThree();
+    setUpMultiBlockReplicatedKeys();
     NSSummaryTaskWithFSO nSSummaryTaskWithFso =
         new NSSummaryTaskWithFSO(reconNamespaceSummaryManager,
             reconOMMetadataManager, 10);
@@ -451,7 +502,7 @@ public void testDiskUsageRoot() throws Exception {
     Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH,
         false, false, false);
     DUResponse duRootRes = (DUResponse) rootResponse.getEntity();
-    assertEquals(2, duRootRes.getCount());
+    assertEquals(3, duRootRes.getCount());
     List<DUResponse.DiskUsage> duRootData = duRootRes.getDuData();
     // sort based on subpath
     Collections.sort(duRootData,
@@ -555,7 +606,6 @@ public void testDiskUsageWithReplication() throws Exception {
 
   @Test
   public void testDataSizeUnderRootWithReplication() throws IOException {
-    setUpMultiBlockReplicatedKeys();
     //   withReplica is true
     Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH,
         false, true, false);
@@ -570,7 +620,6 @@ public void testDataSizeUnderRootWithReplication() throws IOException {
 
   @Test
   public void testDataSizeUnderVolWithReplication() throws IOException {
-    setUpMultiBlockReplicatedKeys();
     Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH,
         false, true, false);
     DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity();
@@ -583,7 +632,6 @@ public void testDataSizeUnderVolWithReplication() throws IOException {
 
   @Test
   public void testDataSizeUnderBucketWithReplication() throws IOException {
-    setUpMultiBlockReplicatedKeys();
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH,
         false, true, false);
     DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity();
@@ -594,6 +642,69 @@ public void testDataSizeUnderBucketWithReplication() throws IOException {
         replicaDUResponse.getDuData().get(0).getSizeWithReplica());
   }
 
+  @Test
+  public void testReplicatedSizePropagationUpwards() throws IOException {
+    // Test that replicated size propagates correctly from files up through the directory hierarchy
+
+    // Get disk usage for individual files first to establish baseline
+    DUResponse file2Response = getDiskUsageResponse(DIR_TWO_PATH + "/file2");
+    DUResponse file3Response = getDiskUsageResponse(DIR_THREE_PATH + "/file3");
+    DUResponse file6Response = getDiskUsageResponse(DIR_FOUR_PATH + "/file6");
+    DUResponse file7Response = getDiskUsageResponse(DIR_ONE_PATH + "/file7");
+
+    // Verify individual file replicated sizes
+    assertEquals(FILE2_SIZE_WITH_REPLICA, file2Response.getSizeWithReplica());
+    assertEquals(FILE3_SIZE_WITH_REPLICA, file3Response.getSizeWithReplica());
+    assertEquals(FILE6_SIZE_WITH_REPLICA, file6Response.getSizeWithReplica());
+    assertEquals(FILE7_SIZE_WITH_REPLICA, file7Response.getSizeWithReplica());
+
+    // Test dir2 (contains only file2)
+    DUResponse dir2Response = getDiskUsageResponse(DIR_TWO_PATH);
+    assertEquals(FILE2_SIZE_WITH_REPLICA, dir2Response.getSizeWithReplica());
+
+    // Test dir3 (contains only file3)
+    DUResponse dir3Response = getDiskUsageResponse(DIR_THREE_PATH);
+    assertEquals(FILE3_SIZE_WITH_REPLICA, dir3Response.getSizeWithReplica());
+
+    // Test dir4 (contains only file6)
+    DUResponse dir4Response = getDiskUsageResponse(DIR_FOUR_PATH);
+    assertEquals(FILE6_SIZE_WITH_REPLICA, dir4Response.getSizeWithReplica());
+
+    // Test dir1 (contains file7 directly + dir2, dir3, dir4 contents)
+    DUResponse dir1Response = getDiskUsageResponse(DIR_ONE_PATH);
+    long expectedDir1ReplicatedSize = FILE2_SIZE_WITH_REPLICA + FILE3_SIZE_WITH_REPLICA +
+        FILE6_SIZE_WITH_REPLICA + FILE7_SIZE_WITH_REPLICA;
+    assertEquals(expectedDir1ReplicatedSize, dir1Response.getSizeWithReplica());
+
+    // Test bucket1 (contains file1 directly + all dir1 contents)
+    DUResponse bucket1Response = getDiskUsageResponse(BUCKET_ONE_PATH);
+    long expectedBucket1ReplicatedSize = FILE1_SIZE_WITH_REPLICA + expectedDir1ReplicatedSize;
+    assertEquals(expectedBucket1ReplicatedSize, bucket1Response.getSizeWithReplica());
+
+    // Test bucket2 (contains file4 and file5)
+    DUResponse bucket2Response = getDiskUsageResponse(BUCKET_TWO_PATH);
+    long expectedBucket2ReplicatedSize = FILE4_SIZE_WITH_REPLICA + FILE5_SIZE_WITH_REPLICA;
+    assertEquals(expectedBucket2ReplicatedSize, bucket2Response.getSizeWithReplica());
+
+    // Test vol (contains bucket1 + bucket2)
+    DUResponse volResponse = getDiskUsageResponse(VOL_PATH);
+    long expectedVolReplicatedSize = expectedBucket1ReplicatedSize + expectedBucket2ReplicatedSize;
+    assertEquals(expectedVolReplicatedSize, volResponse.getSizeWithReplica());
+
+    // Test root (contains vol + vol2 + vol3)
+    DUResponse rootResponse = getDiskUsageResponse(ROOT_PATH);
+    long expectedVol2ReplicatedSize = FILE8_SIZE_WITH_REPLICA + FILE9_SIZE_WITH_REPLICA +
+        FILE10_SIZE_WITH_REPLICA + FILE11_SIZE_WITH_REPLICA;
+    long vol3TotalSize = FILE12_SIZE_WITH_REPLICA + FILE13_SIZE_WITH_REPLICA + FILE14_SIZE_WITH_REPLICA;
+    long expectedRootReplicatedSize = expectedVolReplicatedSize + expectedVol2ReplicatedSize + vol3TotalSize;
+    assertEquals(expectedRootReplicatedSize, rootResponse.getSizeWithReplica());
+  }
+
+  private DUResponse getDiskUsageResponse(String path) throws IOException {
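+    // Shared helper for this test: fetch DU for the given path with withReplica enabled.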
+    Response response = nsSummaryEndpoint.getDiskUsage(path, false, true, false);
+    return (DUResponse) response.getEntity();
+  }
+
   /**
    * When calculating DU under dir1
    * there are 3 keys, file2, file3, file6.
@@ -602,7 +713,6 @@ public void testDataSizeUnderBucketWithReplication() throws IOException {
    */
   @Test
   public void testDataSizeUnderDirWithReplication() throws IOException {
-    setUpMultiBlockReplicatedKeys();
     Response dir1Response = nsSummaryEndpoint.getDiskUsage(DIR_ONE_PATH,
         false, true, false);
     DUResponse replicaDUResponse = (DUResponse) dir1Response.getEntity();
@@ -615,7 +725,6 @@ public void testDataSizeUnderDirWithReplication() throws IOException {
 
   @Test
   public void testDataSizeUnderKeyWithReplication() throws IOException {
-    setUpMultiBlockReplicatedKeys();
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH,
         false, true, false);
     DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
@@ -674,10 +783,10 @@ public void testQuotaUsage() throws Exception {
 
   @Test
   public void testFileSizeDist() throws Exception {
-    checkFileSizeDist(ROOT_PATH, 2, 3, 4, 1);
-    checkFileSizeDist(VOL_PATH, 2, 1, 2, 1);
-    checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 1, 1);
-    checkFileSizeDist(DIR_ONE_PATH, 0, 1, 1, 1);
+    checkFileSizeDist(ROOT_PATH, 5, 3, 4, 2);
+    checkFileSizeDist(VOL_PATH, 2, 1, 2, 2);
+    checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 1, 2);
+    checkFileSizeDist(DIR_ONE_PATH, 0, 1, 1, 2);
   }
 
   public void checkFileSizeDist(String path, int bin0,
@@ -807,6 +916,36 @@ public void testConstructFullPathWithNegativeParentIdTriggersRebuild() throws IO
     Assertions.assertEquals("", fullPath, "Should return empty string when NSSummary has negative parentId");
   }
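+  // The three tests below query DU for vol3, whose keys are all written with
+  // RATIS THREE replication, and expect sizeWithReplica to match
+  // QuotaUtil.getReplicatedSize(rawSize, RATIS THREE).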
 
+  @Test
+  public void testDataSizeUnderVolumeWithRatisReplication() throws IOException {
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(VOL_THREE_PATH,
+        false, true, false);
+    DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
+    assertEquals(QuotaUtil.getReplicatedSize(replicaDUResponse.getSize(), RatisReplicationConfig.getInstance(
+            HddsProtos.ReplicationFactor.THREE)), replicaDUResponse.getSizeWithReplica());
+  }
+
+  @Test
+  public void testDataSizeUnderBucketWithRatisReplication() throws IOException {
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(VOL_THREE_PATH + "/" + BUCKET_FIVE,
+        false, true, false);
+    DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
+    assertEquals(QuotaUtil.getReplicatedSize(replicaDUResponse.getSize(), RatisReplicationConfig.getInstance(
+        HddsProtos.ReplicationFactor.THREE)), replicaDUResponse.getSizeWithReplica());
+  }
+
+  @Test
+  public void testDataSizeUnderDirWithRatisReplication() throws IOException {
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(VOL_THREE_PATH + "/" + BUCKET_FIVE + "/" + DIR_SIX,
+        false, true, false);
+    DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
+    assertEquals(QuotaUtil.getReplicatedSize(replicaDUResponse.getSize(), RatisReplicationConfig.getInstance(
+        HddsProtos.ReplicationFactor.THREE)), replicaDUResponse.getSizeWithReplica());
+  }
+
   /**
    * Write directories and keys info into OM DB.
    * @throws Exception
@@ -940,6 +1079,57 @@ private void populateOMDB() throws Exception {
           VOL_TWO_OBJECT_ID,
           KEY_ELEVEN_SIZE,
           getBucketLayout());
+
+  }
+
+  private void populateVolumeThree() throws IOException {
+
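+    // Layout of vol3/bucket5: file12 directly under the bucket, file13 under dir6,
+    // and file14 under dir6/dir7; every key is written with RATIS THREE replication.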
+    writeDirToOm(reconOMMetadataManager, DIR_SIX_OBJECT_ID,
+        BUCKET_FIVE_OBJECT_ID, BUCKET_FIVE_OBJECT_ID,
+        VOL_THREE_OBJECT_ID, DIR_SIX);
+
+    writeDirToOm(reconOMMetadataManager, DIR_SEVEN_OBJECT_ID,
+        DIR_SIX_OBJECT_ID, BUCKET_FIVE_OBJECT_ID,
+        VOL_THREE_OBJECT_ID, DIR_SEVEN);
+
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_TWELVE,
+        BUCKET_FIVE,
+        VOL_THREE,
+        FILE_TWELVE,
+        FILE_TWELVE_OBJECT_ID,
+        BUCKET_FIVE_OBJECT_ID,
+        BUCKET_FIVE_OBJECT_ID,
+        VOL_THREE_OBJECT_ID,
+        KEY_TWELVE_SIZE,
+        getBucketLayout(), RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
+        Time.now(), true);
+
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_THIRTEEN,
+        BUCKET_FIVE,
+        VOL_THREE,
+        FILE_THIRTEEN,
+        FILE_THIRTEEN_OBJECT_ID,
+        DIR_SIX_OBJECT_ID,
+        BUCKET_FIVE_OBJECT_ID,
+        VOL_THREE_OBJECT_ID,
+        KEY_THIRTEEN_SIZE,
+        getBucketLayout(), RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
+        Time.now(), true);
+
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_FOURTEEN,
+        BUCKET_FIVE,
+        VOL_THREE,
+        FILE_FOURTEEN,
+        FILE_FOURTEEN_OBJECT_ID,
+        DIR_SEVEN_OBJECT_ID,
+        BUCKET_FIVE_OBJECT_ID,
+        VOL_THREE_OBJECT_ID,
+        KEY_FOURTEEN_SIZE,
+        getBucketLayout(), RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
+        Time.now(), true);
   }
 
   /**
@@ -976,8 +1166,19 @@ private static OMMetadataManager initializeNewOmMetadataManager(
             .setQuotaInBytes(VOL_TWO_QUOTA)
             .build();
 
+    String volume3Key = omMetadataManager.getVolumeKey(VOL_THREE);
+    OmVolumeArgs args3 =
+        OmVolumeArgs.newBuilder()
+            .setObjectID(VOL_THREE_OBJECT_ID)
+            .setVolume(VOL_THREE)
+            .setAdminName(TEST_USER)
+            .setOwnerName(TEST_USER)
+            .setQuotaInBytes(VOL_THREE_QUOTA)
+            .build();
+
     omMetadataManager.getVolumeTable().put(volumeKey, args);
     omMetadataManager.getVolumeTable().put(volume2Key, args2);
+    omMetadataManager.getVolumeTable().put(volume3Key, args3);
 
     OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(VOL)
@@ -1011,6 +1212,14 @@ private static OMMetadataManager initializeNewOmMetadataManager(
         .setBucketLayout(getBucketLayout())
         .build();
 
+    OmBucketInfo bucketInfo5 = OmBucketInfo.newBuilder()
+        .setVolumeName(VOL_THREE)
+        .setBucketName(BUCKET_FIVE)
+        .setObjectID(BUCKET_FIVE_OBJECT_ID)
+        .setQuotaInBytes(BUCKET_FIVE_QUOTA)
+        .setBucketLayout(getBucketLayout())
+        .build();
+
     String bucketKey = omMetadataManager.getBucketKey(
             bucketInfo.getVolumeName(), bucketInfo.getBucketName());
     String bucketKey2 = omMetadataManager.getBucketKey(
@@ -1019,11 +1228,14 @@ private static OMMetadataManager initializeNewOmMetadataManager(
         bucketInfo3.getVolumeName(), bucketInfo3.getBucketName());
     String bucketKey4 = omMetadataManager.getBucketKey(
         bucketInfo4.getVolumeName(), bucketInfo4.getBucketName());
+    String bucketKey5 = omMetadataManager.getBucketKey(
+        bucketInfo5.getVolumeName(), bucketInfo5.getBucketName());
 
     omMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
     omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
     omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3);
     omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4);
+    omMetadataManager.getBucketTable().put(bucketKey5, bucketInfo5);
 
     return omMetadataManager;
   }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
index 6973a67930f..8844b96c022 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
@@ -19,6 +19,7 @@
 
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
 import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize;
@@ -44,9 +45,11 @@
 import java.util.Set;
 import javax.ws.rs.core.Response;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -64,6 +67,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
 import org.apache.hadoop.ozone.recon.ReconConstants;
 import org.apache.hadoop.ozone.recon.ReconTestInjector;
 import org.apache.hadoop.ozone.recon.ReconUtils;
@@ -82,6 +86,7 @@
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -107,6 +112,16 @@
  *   file8     dir5      file11
  *            /    \
  *        file9    file10
+ *  ----------------------------------------
+ *                    vol3
+ *                     |
+ *                 bucket5
+ *                 /      \
+ *             file12     dir6
+ *                     /    \
+ *                 file13    dir7
+ *                          /
+ *                          file14
  * This is a test for the Rest APIs only. We have tested NSSummaryTask before,
  * so there is no need to test process() on DB's updates
  */
@@ -129,10 +144,12 @@ public class TestNSSummaryEndpointWithLegacy {
   // Object names
   private static final String VOL = "vol";
   private static final String VOL_TWO = "vol2";
+  private static final String VOL_THREE = "vol3";
   private static final String BUCKET_ONE = "bucket1";
   private static final String BUCKET_TWO = "bucket2";
   private static final String BUCKET_THREE = "bucket3";
   private static final String BUCKET_FOUR = "bucket4";
+  private static final String BUCKET_FIVE = "bucket5";
   private static final String KEY_ONE = "file1";
   private static final String KEY_TWO = "dir1/dir2/file2";
   private static final String KEY_THREE = "dir1/dir3/file3";
@@ -146,6 +163,9 @@ public class TestNSSummaryEndpointWithLegacy {
   private static final String KEY_ELEVEN = "file11";
   private static final String MULTI_BLOCK_KEY = "dir1/file7";
   private static final String MULTI_BLOCK_FILE = "file7";
+  private static final String KEY_TWELVE = "file12";
+  private static final String KEY_THIRTEEN = "dir6/file13";
+  private static final String KEY_FOURTEEN = "dir6/dir7/file14";
 
   private static final String FILE_ONE = "file1";
   private static final String FILE_TWO = "file2";
@@ -158,12 +178,17 @@ public class TestNSSummaryEndpointWithLegacy {
   private static final String FILE_NINE = "file9";
   private static final String FILE_TEN = "file10";
   private static final String FILE_ELEVEN = "file11";
+  private static final String FILE_TWELVE = "file12";
+  private static final String FILE_THIRTEEN = "file13";
+  private static final String FILE_FOURTEEN = "file14";
 
   private static final String DIR_ONE = "dir1";
   private static final String DIR_TWO = "dir2";
   private static final String DIR_THREE = "dir3";
   private static final String DIR_FOUR = "dir4";
   private static final String DIR_FIVE = "dir5";
+  private static final String DIR_SIX = "dir6";
+  private static final String DIR_SEVEN = "dir7";
   // objects IDs
   private static final long PARENT_OBJECT_ID_ZERO = 0L;
   private static final long VOL_OBJECT_ID = 0L;
@@ -189,6 +214,13 @@ public class TestNSSummaryEndpointWithLegacy {
   private static final long KEY_NINE_OBJECT_ID = 19L;
   private static final long KEY_TEN_OBJECT_ID = 20L;
   private static final long KEY_ELEVEN_OBJECT_ID = 21L;
+  private static final long VOL_THREE_OBJECT_ID = 22L;
+  private static final long DIR_SIX_OBJECT_ID = 23L;
+  private static final long DIR_SEVEN_OBJECT_ID = 24L;
+  private static final long FILE_TWELVE_OBJECT_ID = 25L;
+  private static final long FILE_THIRTEEN_OBJECT_ID = 26L;
+  private static final long FILE_FOURTEEN_OBJECT_ID = 27L;
+  private static final long BUCKET_FIVE_OBJECT_ID = 28L;
 
   // container IDs
   private static final long CONTAINER_ONE_ID = 1L;
@@ -226,6 +258,9 @@ public class TestNSSummaryEndpointWithLegacy {
   private static final long KEY_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
   private static final long KEY_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
   private static final long KEY_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1
+  private static final long KEY_TWELVE_SIZE = OzoneConsts.KB;
+  private static final long KEY_THIRTEEN_SIZE = OzoneConsts.KB;
+  private static final long KEY_FOURTEEN_SIZE = OzoneConsts.KB;
 
   private static final long FILE1_SIZE_WITH_REPLICA =
       getReplicatedSize(KEY_ONE_SIZE,
@@ -260,6 +295,12 @@ public class TestNSSummaryEndpointWithLegacy {
   private static final long FILE11_SIZE_WITH_REPLICA =
       getReplicatedSize(KEY_ELEVEN_SIZE,
               StandaloneReplicationConfig.getInstance(ONE));
+  private static final long FILE12_SIZE_WITH_REPLICA =
+      getReplicatedSize(KEY_TWELVE_SIZE, RatisReplicationConfig.getInstance(THREE));
+  private static final long FILE13_SIZE_WITH_REPLICA =
+      getReplicatedSize(KEY_THIRTEEN_SIZE, RatisReplicationConfig.getInstance(THREE));
+  private static final long FILE14_SIZE_WITH_REPLICA =
+      getReplicatedSize(KEY_FOURTEEN_SIZE, RatisReplicationConfig.getInstance(THREE));
 
   private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA
           = FILE7_SIZE_WITH_REPLICA;
@@ -275,7 +316,10 @@ public class TestNSSummaryEndpointWithLegacy {
       + FILE8_SIZE_WITH_REPLICA
       + FILE9_SIZE_WITH_REPLICA
       + FILE10_SIZE_WITH_REPLICA
-      + FILE11_SIZE_WITH_REPLICA;
+      + FILE11_SIZE_WITH_REPLICA
+      + FILE12_SIZE_WITH_REPLICA
+      + FILE13_SIZE_WITH_REPLICA
+      + FILE14_SIZE_WITH_REPLICA;
 
   private static final long
       MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL
@@ -314,10 +358,12 @@ public class TestNSSummaryEndpointWithLegacy {
   private static final long ROOT_QUOTA = 2 * (2 * OzoneConsts.MB);
   private static final long VOL_QUOTA = 2 * OzoneConsts.MB;
   private static final long VOL_TWO_QUOTA = 2 * OzoneConsts.MB;
+  private static final long VOL_THREE_QUOTA = 2 * OzoneConsts.MB;
   private static final long BUCKET_ONE_QUOTA = OzoneConsts.MB;
   private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB;
   private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB;
   private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB;
+  private static final long BUCKET_FIVE_QUOTA = OzoneConsts.MB;
 
   // mock client's path requests
   private static final String TEST_USER = "TestUser";
@@ -333,25 +379,27 @@ public class TestNSSummaryEndpointWithLegacy {
   private static final String KEY_PATH = "/vol/bucket2/file4";
   private static final String MULTI_BLOCK_KEY_PATH = "/vol/bucket1/dir1/file7";
   private static final String INVALID_PATH = "/vol/path/not/found";
+  private static final String VOL_THREE_PATH = "/vol3";
 
   // some expected answers
   private static final long ROOT_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE +
-      KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE +
-      KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE;
+      KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + 
KEY_SEVEN_SIZE +
+      KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE +
+      FILE12_SIZE_WITH_REPLICA + FILE13_SIZE_WITH_REPLICA + 
FILE14_SIZE_WITH_REPLICA;
   private static final long VOL_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE +
-      KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE;
+      KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + 
KEY_SEVEN_SIZE;
 
   private static final long VOL_TWO_DATA_SIZE =
       KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE;
 
   private static final long BUCKET_ONE_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE 
+
-      KEY_THREE_SIZE + KEY_SIX_SIZE;
+      KEY_THREE_SIZE + KEY_SIX_SIZE + KEY_SEVEN_SIZE;
 
   private static final long BUCKET_TWO_DATA_SIZE =
       KEY_FOUR_SIZE + KEY_FIVE_SIZE;
 
   private static final long DIR_ONE_DATA_SIZE = KEY_TWO_SIZE +
-      KEY_THREE_SIZE + KEY_SIX_SIZE;
+      KEY_THREE_SIZE + KEY_SIX_SIZE + KEY_SEVEN_SIZE;
 
   @BeforeEach
   public void setUp() throws Exception {
@@ -383,6 +431,8 @@ public void setUp() throws Exception {
 
     // populate OM DB and reprocess into Recon RocksDB
     populateOMDB();
+    populateVolumeThree();
+    setUpMultiBlockReplicatedKeys();
     NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy = 
         new NSSummaryTaskWithLegacy(reconNamespaceSummaryManager, 
                                     reconOMMetadataManager, conf, 10);
@@ -452,7 +502,7 @@ public void testDiskUsageRoot() throws Exception {
     Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH,
         false, false, false);
     DUResponse duRootRes = (DUResponse) rootResponse.getEntity();
-    assertEquals(2, duRootRes.getCount());
+    assertEquals(3, duRootRes.getCount());
     List<DUResponse.DiskUsage> duRootData = duRootRes.getDuData();
     // sort based on subpath
     Collections.sort(duRootData,
@@ -552,7 +602,6 @@ public void testDiskUsageWithReplication() throws Exception {
 
   @Test
   public void testDataSizeUnderRootWithReplication() throws IOException {
-    setUpMultiBlockReplicatedKeys();
     //   withReplica is true
     Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH,
         false, true, false);
@@ -567,7 +616,6 @@ public void testDataSizeUnderRootWithReplication() throws IOException {
 
   @Test
   public void testDataSizeUnderVolWithReplication() throws IOException {
-    setUpMultiBlockReplicatedKeys();
     Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH,
         false, true, false);
     DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity();
@@ -580,7 +628,6 @@ public void testDataSizeUnderVolWithReplication() throws IOException {
 
   @Test
   public void testDataSizeUnderBucketWithReplication() throws IOException {
-    setUpMultiBlockReplicatedKeys();
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH,
         false, true, false);
     DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity();
@@ -599,7 +646,6 @@ public void testDataSizeUnderBucketWithReplication() throws IOException {
    */
   @Test
   public void testDataSizeUnderDirWithReplication() throws IOException {
-    setUpMultiBlockReplicatedKeys();
     Response dir1Response = nsSummaryEndpoint.getDiskUsage(DIR_ONE_PATH,
         false, true, false);
     DUResponse replicaDUResponse = (DUResponse) dir1Response.getEntity();
@@ -612,7 +658,6 @@ public void testDataSizeUnderDirWithReplication() throws IOException {
 
   @Test
   public void testDataSizeUnderKeyWithReplication() throws IOException {
-    setUpMultiBlockReplicatedKeys();
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH,
         false, true, false);
     DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
@@ -671,10 +716,10 @@ public void testQuotaUsage() throws Exception {
 
   @Test
   public void testFileSizeDist() throws Exception {
-    checkFileSizeDist(ROOT_PATH, 2, 3, 4, 1);
-    checkFileSizeDist(VOL_PATH, 2, 1, 2, 1);
-    checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 1, 1);
-    checkFileSizeDist(DIR_ONE_PATH, 0, 1, 1, 1);
+    checkFileSizeDist(ROOT_PATH, 5, 3, 4, 2);
+    checkFileSizeDist(VOL_PATH, 2, 1, 2, 2);
+    checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 1, 2);
+    checkFileSizeDist(DIR_ONE_PATH, 0, 1, 1, 2);
   }
 
   public void checkFileSizeDist(String path, int bin0,
@@ -733,6 +778,35 @@ public void testConstructFullPath() throws IOException {
     Assertions.assertEquals(expectedPath, fullPath);
   }
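+  // As in the FSO test, the vol3 tests below expect sizeWithReplica to match
+  // QuotaUtil.getReplicatedSize(rawSize, RATIS THREE) for a volume, bucket and directory.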
 
+  @Test
+  public void testDataSizeUnderVolumeWithRatisReplication() throws IOException {
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(VOL_THREE_PATH,
+        false, true, false);
+    DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
+    assertEquals(QuotaUtil.getReplicatedSize(replicaDUResponse.getSize(), RatisReplicationConfig.getInstance(
+        HddsProtos.ReplicationFactor.THREE)), replicaDUResponse.getSizeWithReplica());
+  }
+
+  @Test
+  public void testDataSizeUnderBucketWithRatisReplication() throws IOException {
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(VOL_THREE_PATH + "/" + BUCKET_FIVE,
+        false, true, false);
+    DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
+    assertEquals(QuotaUtil.getReplicatedSize(replicaDUResponse.getSize(), RatisReplicationConfig.getInstance(
+        HddsProtos.ReplicationFactor.THREE)), replicaDUResponse.getSizeWithReplica());
+  }
+
+  @Test
+  public void testDataSizeUnderDirWithRatisReplication() throws IOException {
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(VOL_THREE_PATH + "/" + BUCKET_FIVE + "/" + DIR_SIX,
+        false, true, false);
+    DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
+    assertEquals(QuotaUtil.getReplicatedSize(replicaDUResponse.getSize(), RatisReplicationConfig.getInstance(
+        HddsProtos.ReplicationFactor.THREE)), replicaDUResponse.getSizeWithReplica());
+  }
 
   /**
    * Write directories and keys info into OM DB.
@@ -905,6 +979,70 @@ private void populateOMDB() throws Exception {
           getBucketLayout());
   }
 
+  private void populateVolumeThree() throws IOException {
+
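+    // Layout of vol3/bucket5 (legacy layout keys): file12 directly under the bucket,
+    // file13 under dir6, and file14 under dir6/dir7; all written with RATIS THREE replication.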
+    writeDirToOm(reconOMMetadataManager,
+        (DIR_SIX + OM_KEY_PREFIX),
+        BUCKET_FIVE,
+        VOL_THREE,
+        DIR_SIX,
+        DIR_SIX_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_FIVE_OBJECT_ID,
+        VOL_THREE_OBJECT_ID,
+        getBucketLayout());
+
+    writeDirToOm(reconOMMetadataManager,
+        (DIR_SIX + OM_KEY_PREFIX + DIR_SEVEN + OM_KEY_PREFIX),
+        BUCKET_FIVE,
+        VOL_THREE,
+        DIR_SEVEN,
+        DIR_SEVEN_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_FIVE_OBJECT_ID,
+        VOL_THREE_OBJECT_ID,
+        getBucketLayout());
+
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_TWELVE,
+        BUCKET_FIVE,
+        VOL_THREE,
+        FILE_TWELVE,
+        FILE_TWELVE_OBJECT_ID,
+        BUCKET_FIVE_OBJECT_ID,
+        BUCKET_FIVE_OBJECT_ID,
+        VOL_THREE_OBJECT_ID,
+        KEY_TWELVE_SIZE,
+        getBucketLayout(), RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
+        Time.now(), true);
+
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_THIRTEEN,
+        BUCKET_FIVE,
+        VOL_THREE,
+        FILE_THIRTEEN,
+        FILE_THIRTEEN_OBJECT_ID,
+        DIR_SIX_OBJECT_ID,
+        BUCKET_FIVE_OBJECT_ID,
+        VOL_THREE_OBJECT_ID,
+        KEY_THIRTEEN_SIZE,
+        getBucketLayout(), RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
+        Time.now(), true);
+
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_FOURTEEN,
+        BUCKET_FIVE,
+        VOL_THREE,
+        FILE_FOURTEEN,
+        FILE_FOURTEEN_OBJECT_ID,
+        DIR_SEVEN_OBJECT_ID,
+        BUCKET_FIVE_OBJECT_ID,
+        VOL_THREE_OBJECT_ID,
+        KEY_FOURTEEN_SIZE,
+        getBucketLayout(), RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
+        Time.now(), true);
+  }
+
   /**
    * Create a new OM Metadata manager instance with one user, one vol, and two
    * buckets.
@@ -941,8 +1079,19 @@ private static OMMetadataManager initializeNewOmMetadataManager(
             .setQuotaInBytes(VOL_TWO_QUOTA)
             .build();
 
+    String volume3Key = omMetadataManager.getVolumeKey(VOL_THREE);
+    OmVolumeArgs args3 =
+        OmVolumeArgs.newBuilder()
+            .setObjectID(VOL_THREE_OBJECT_ID)
+            .setVolume(VOL_THREE)
+            .setAdminName(TEST_USER)
+            .setOwnerName(TEST_USER)
+            .setQuotaInBytes(VOL_THREE_QUOTA)
+            .build();
+
     omMetadataManager.getVolumeTable().put(volumeKey, args);
     omMetadataManager.getVolumeTable().put(volume2Key, args2);
+    omMetadataManager.getVolumeTable().put(volume3Key, args3);
 
     OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(VOL)
@@ -976,6 +1125,14 @@ private static OMMetadataManager initializeNewOmMetadataManager(
         .setBucketLayout(getBucketLayout())
         .build();
 
+    OmBucketInfo bucketInfo5 = OmBucketInfo.newBuilder()
+        .setVolumeName(VOL_THREE)
+        .setBucketName(BUCKET_FIVE)
+        .setObjectID(BUCKET_FIVE_OBJECT_ID)
+        .setQuotaInBytes(BUCKET_FIVE_QUOTA)
+        .setBucketLayout(getBucketLayout())
+        .build();
+
     String bucketKey = omMetadataManager.getBucketKey(
         bucketInfo.getVolumeName(), bucketInfo.getBucketName());
     String bucketKey2 = omMetadataManager.getBucketKey(
@@ -984,11 +1141,14 @@ private static OMMetadataManager initializeNewOmMetadataManager(
         bucketInfo3.getVolumeName(), bucketInfo3.getBucketName());
     String bucketKey4 = omMetadataManager.getBucketKey(
         bucketInfo4.getVolumeName(), bucketInfo4.getBucketName());
+    String bucketKey5 = omMetadataManager.getBucketKey(
+        bucketInfo5.getVolumeName(), bucketInfo5.getBucketName());
 
     omMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
     omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
     omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3);
     omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4);
+    omMetadataManager.getBucketTable().put(bucketKey5, bucketInfo5);
 
     return omMetadataManager;
   }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
index 22b649dd986..1e83f1354a2 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
@@ -39,6 +39,8 @@
 import java.nio.file.Path;
 import java.sql.Timestamp;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
@@ -49,6 +51,7 @@
 import java.util.stream.Collectors;
 import javax.ws.rs.core.Response;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -64,6 +67,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.recon.ReconTestInjector;
 import org.apache.hadoop.ozone.recon.ReconUtils;
@@ -88,9 +92,12 @@
 import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS;
 import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
 import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 
 /**
  * Unit test for OmDBInsightEndPoint.
@@ -106,9 +113,7 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest {
   private Random random = new Random();
   private OzoneConfiguration ozoneConfiguration;
   private Set<Long> generatedIds = new HashSet<>();
-
   private static final String VOLUME_ONE = "volume1";
-
   private static final String OBS_BUCKET = "obs-bucket";
   private static final String FSO_BUCKET = "fso-bucket";
   private static final String EMPTY_OBS_BUCKET = "empty-obs-bucket";
@@ -250,6 +255,30 @@ public TestOmDBInsightEndPoint() {
     super();
   }
 
+  public static Collection<Object[]> replicationConfigValues() {
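+    // Parameter source for the replication-aware assertions below:
+    // RATIS ONE/THREE plus several EC (RS and XOR) layouts.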
+    return Arrays.asList(new Object[][]{
+        {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE)},
+        {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE)},
+        {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null,
+            toProto(3, 2, ECReplicationConfig.EcCodec.RS, 1024))},
+        {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null,
+            toProto(6, 3, ECReplicationConfig.EcCodec.RS, 1024))},
+        {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null,
+            toProto(10, 4, ECReplicationConfig.EcCodec.XOR, 4096))}
+    });
+  }
+
+  public static HddsProtos.ECReplicationConfig toProto(int data, int parity, ECReplicationConfig.EcCodec codec,
+                                                       int ecChunkSize) {
+    return HddsProtos.ECReplicationConfig.newBuilder()
+        .setData(data)
+        .setParity(parity)
+        .setCodec(codec.toString())
+        .setEcChunkSize(ecChunkSize)
+        .build();
+  }
+
   private long generateUniqueRandomLong() {
     long newValue;
     do {
@@ -312,6 +341,17 @@ public void setUp() throws Exception {
     nsSummaryTaskWithFSO.reprocessWithFSO(reconOMMetadataManager);
   }
 
+  /**
+   * Releases resources (network sockets, database files) after each test run.
+   * This is critical to prevent resource leaks between tests, which would otherwise cause "Too many open files" errors.
+   */
+  @AfterEach
+  public void tearDown() throws Exception {
+    if (reconOMMetadataManager != null) {
+      reconOMMetadataManager.stop();
+    }
+  }
+
   @SuppressWarnings("methodlength")
   private void setUpOmData() throws Exception {
     List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
@@ -1385,14 +1425,24 @@ public void testGetDeletedKeysWithBothPrevKeyAndStartPrefixProvided()
 
   private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
                                  String keyName, boolean isFile) {
+    return buildOmKeyInfo(volumeName, bucketName, keyName, isFile,
+        StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE));
+  }
+
+  private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+                                 String keyName, boolean isFile, ReplicationConfig replicationConfig) {
+    return buildOmKeyInfo(volumeName, bucketName, keyName, isFile, replicationConfig);
+  }
+
+  private OmKeyInfo buildOmKeyInfo(String volumeName, String bucketName,
+                                   String keyName, boolean isFile, ReplicationConfig replicationConfig) {
     return new OmKeyInfo.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
         .setFile(isFile)
         .setObjectID(generateUniqueRandomLong())
-        .setReplicationConfig(StandaloneReplicationConfig
-            .getInstance(HddsProtos.ReplicationFactor.ONE))
+        .setReplicationConfig(replicationConfig)
         .setDataSize(random.nextLong())
         .build();
   }
@@ -1497,15 +1547,17 @@ public void testGetDeletedDirInfo() throws Exception {
         keyInsightInfoResp.getLastKey());
   }
 
-  @Test
-  public void testGetDirectorySizeInfo() throws Exception {
+  @ParameterizedTest
+  @MethodSource("replicationConfigValues")
+  public void testGetDirectorySizeInfo(ReplicationConfig replicationConfig) throws Exception {
 
     OmKeyInfo omKeyInfo1 =
-        getOmKeyInfo("sampleVol", "bucketOne", "dir1", false);
+        getOmKeyInfo("sampleVol", "bucketOne", "dir1", false, replicationConfig);
     OmKeyInfo omKeyInfo2 =
-        getOmKeyInfo("sampleVol", "bucketTwo", "dir2", false);
+        getOmKeyInfo("sampleVol", "bucketTwo", "dir2", false, replicationConfig);
     OmKeyInfo omKeyInfo3 =
-        getOmKeyInfo("sampleVol", "bucketThree", "dir3", false);
+        getOmKeyInfo("sampleVol", "bucketThree", "dir3", false,
+            replicationConfig);
 
     // Add 3 entries to deleted dir table for directory dir1, dir2 and dir3
     // having object id 1, 2 and 3 respectively
@@ -1519,11 +1571,11 @@ public void testGetDirectorySizeInfo() throws Exception {
     // Prepare NS summary data and populate the table
     Table<Long, NSSummary> table = omdbInsightEndpoint.getNsSummaryTable();
     // Set size of files to 5 for directory object id 1
-    table.put(omKeyInfo1.getObjectID(), getNsSummary(5L));
+    table.put(omKeyInfo1.getObjectID(), getNsSummary(5L, replicationConfig));
     // Set size of files to 6 for directory object id 2
-    table.put(omKeyInfo2.getObjectID(), getNsSummary(6L));
+    table.put(omKeyInfo2.getObjectID(), getNsSummary(6L, replicationConfig));
     // Set size of files to 7 for directory object id 3
-    table.put(omKeyInfo3.getObjectID(), getNsSummary(7L));
+    table.put(omKeyInfo3.getObjectID(), getNsSummary(7L, replicationConfig));
 
     Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(-1, "");
     KeyInsightInfoResponse keyInsightInfoResp =
@@ -1534,15 +1586,23 @@ public void testGetDirectorySizeInfo() throws Exception {
     // Assert the total size under directory dir1 is 5L
     assertEquals(5L,
         keyInsightInfoResp.getDeletedDirInfoList().get(0).getSize());
+    assertEquals(QuotaUtil.getReplicatedSize(5L, replicationConfig),
+        keyInsightInfoResp.getDeletedDirInfoList().get(0).getReplicatedSize());
     // Assert the total size under directory dir2 is 6L
     assertEquals(6L,
         keyInsightInfoResp.getDeletedDirInfoList().get(1).getSize());
+    assertEquals(QuotaUtil.getReplicatedSize(6L, replicationConfig),
+        keyInsightInfoResp.getDeletedDirInfoList().get(1).getReplicatedSize());
     // Assert the total size under directory dir3 is 7L
     assertEquals(7L,
         keyInsightInfoResp.getDeletedDirInfoList().get(2).getSize());
+    assertEquals(QuotaUtil.getReplicatedSize(7L, replicationConfig),
+        keyInsightInfoResp.getDeletedDirInfoList().get(2).getReplicatedSize());
 
     // Assert the total of all the deleted directories is 18L
     assertEquals(18L, keyInsightInfoResp.getUnreplicatedDataSize());
+    assertEquals(QuotaUtil.getReplicatedSize(18L, replicationConfig),
+        keyInsightInfoResp.getReplicatedDataSize());
   }
 
   @Test
@@ -2008,9 +2068,10 @@ public void testListKeysLegacyBucketWithFSEnabledAndPagination() {
     assertEquals("", listKeysResponse.getLastKey());
   }
 
-  private NSSummary getNsSummary(long size) {
+  private NSSummary getNsSummary(long size, ReplicationConfig replicationConfig) {
     NSSummary summary = new NSSummary();
     summary.setSizeOfFiles(size);
+    summary.setReplicatedSizeOfFiles(QuotaUtil.getReplicatedSize(size, replicationConfig));
     return summary;
   }
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java
index df211867264..f5acc8d2ed5 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java
@@ -87,10 +87,10 @@ public void testNSSummaryBasicInfoRoot(
     NamespaceSummaryResponse rootResponseObj =
         (NamespaceSummaryResponse) rootResponse.getEntity();
     assertEquals(EntityType.ROOT, rootResponseObj.getEntityType());
-    assertEquals(2, rootResponseObj.getCountStats().getNumVolume());
-    assertEquals(4, rootResponseObj.getCountStats().getNumBucket());
-    assertEquals(5, rootResponseObj.getCountStats().getNumTotalDir());
-    assertEquals(10, rootResponseObj.getCountStats().getNumTotalKey());
+    assertEquals(3, rootResponseObj.getCountStats().getNumVolume());
+    assertEquals(5, rootResponseObj.getCountStats().getNumBucket());
+    assertEquals(7, rootResponseObj.getCountStats().getNumTotalDir());
+    assertEquals(14, rootResponseObj.getCountStats().getNumTotalKey());
     assertEquals("USER",
         rootResponseObj.getObjectDBInfo().getAcls().get(0).getType());
     assertEquals("WRITE", rootResponseObj.getObjectDBInfo().getAcls().get(0)
@@ -112,7 +112,7 @@ public void testNSSummaryBasicInfoVolume(
         volResponseObj.getEntityType());
     assertEquals(2, volResponseObj.getCountStats().getNumBucket());
     assertEquals(4, volResponseObj.getCountStats().getNumTotalDir());
-    assertEquals(6, volResponseObj.getCountStats().getNumTotalKey());
+    assertEquals(7, volResponseObj.getCountStats().getNumTotalKey());
     assertEquals("TestUser", ((VolumeObjectDBInfo) volResponseObj.
             getObjectDBInfo()).getAdmin());
     assertEquals("TestUser", ((VolumeObjectDBInfo) volResponseObj.
@@ -130,7 +130,7 @@ public void testNSSummaryBasicInfoBucketOne(BucketLayout bucketLayout,
         (NamespaceSummaryResponse) bucketOneResponse.getEntity();
     assertEquals(EntityType.BUCKET, bucketOneObj.getEntityType());
     assertEquals(4, bucketOneObj.getCountStats().getNumTotalDir());
-    assertEquals(4, bucketOneObj.getCountStats().getNumTotalKey());
+    assertEquals(5, bucketOneObj.getCountStats().getNumTotalKey());
     assertEquals("vol",
         ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getVolumeName());
     assertEquals(StorageType.DISK,
@@ -172,7 +172,7 @@ public void testNSSummaryBasicInfoDir(
         (NamespaceSummaryResponse) dirOneResponse.getEntity();
     assertEquals(EntityType.DIRECTORY, dirOneObj.getEntityType());
     assertEquals(3, dirOneObj.getCountStats().getNumTotalDir());
-    assertEquals(3, dirOneObj.getCountStats().getNumTotalKey());
+    assertEquals(4, dirOneObj.getCountStats().getNumTotalKey());
     assertEquals("dir1", dirOneObj.getObjectDBInfo().getName());
     assertEquals(0, dirOneObj.getObjectDBInfo().getMetadata().size());
     assertEquals(0, dirOneObj.getObjectDBInfo().getQuotaInBytes());
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
index cd5618cc9a8..b4e62e9d03c 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
@@ -111,9 +111,9 @@ public void testInitNSSummaryTable() throws IOException {
 
   private void putThreeNSMetadata() throws IOException {
     HashMap<Long, NSSummary> hmap = new HashMap<>();
-    hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1", -1));
-    hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1));
-    hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1));
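+    // Third constructor argument is the replicated size of files
+    // (3x the raw size in this test data).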
+    hmap.put(1L, new NSSummary(1, 2, 2 * 3, testBucket, TEST_CHILD_DIR, "dir1", -1));
+    hmap.put(2L, new NSSummary(3, 4, 4 * 3, testBucket, TEST_CHILD_DIR, "dir2", -1));
+    hmap.put(3L, new NSSummary(5, 6, 6 * 3, testBucket, TEST_CHILD_DIR, "dir3", -1));
     RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
     for (Map.Entry entry: hmap.entrySet()) {
       reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
index fcee6893276..5978e11ba65 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
@@ -17,6 +17,7 @@
 
 package org.apache.hadoop.ozone.recon.tasks;
 
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
@@ -34,6 +35,7 @@
 import java.util.List;
 import java.util.Set;
 import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -478,16 +480,16 @@ void testProcessWithFSOFlushAfterThresholdAndFailureOfLastElement()
       Mockito.when(event4.getAction()).thenReturn(OMDBUpdateEvent.OMDBUpdateAction.PUT);
 
       OmKeyInfo keyInfo1 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(2).setKeyName("key1")
-          .setBucketName("bucket1")
+          .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
           .setDataSize(1024).setVolumeName("volume1").build();
       OmKeyInfo keyInfo2 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2")
-          .setBucketName("bucket1")
+          .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
           .setDataSize(1024).setVolumeName("volume1").build();
       OmKeyInfo keyInfo3 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2")
-          .setBucketName("bucket1")
+          .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
           .setDataSize(1024).setVolumeName("volume1").build();
       OmKeyInfo keyInfo4 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2")
-          .setBucketName("bucket1")
+          .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
           .setDataSize(1024).setVolumeName("volume1").build();
           .setDataSize(1024).setVolumeName("volume1").build();
       Mockito.when(event1.getValue()).thenReturn(keyInfo1);
       Mockito.when(event2.getValue()).thenReturn(keyInfo2);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java
new file mode 100644
index 00000000000..8bc7863f073
--- /dev/null
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.upgrade;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import com.google.inject.Injector;
+import javax.sql.DataSource;
+import org.apache.hadoop.ozone.recon.ReconGuiceServletContextListener;
+import org.apache.hadoop.ozone.recon.tasks.ReconTaskController;
+import org.apache.hadoop.ozone.recon.tasks.ReconTaskReInitializationEvent;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.MockedStatic;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+/**
+ * Test class for ReplicatedSizeOfFilesUpgradeAction.
+ */
+@ExtendWith(MockitoExtension.class)
+public class TestReplicatedSizeOfFilesUpgradeAction {
+
+  private ReplicatedSizeOfFilesUpgradeAction upgradeAction;
+  @Mock
+  private DataSource mockDataSource;
+  @Mock
+  private Injector mockInjector;
+  @Mock
+  private ReconTaskController mockReconTaskController;
+
+  @BeforeEach
+  public void setUp() {
+    upgradeAction = new ReplicatedSizeOfFilesUpgradeAction();
+  }
+
+  @Test
+  public void testExecuteSuccessfullyRebuildsNSSummary() {
+    try (MockedStatic<ReconGuiceServletContextListener> mockStaticContext =
+             mockStatic(ReconGuiceServletContextListener.class)) {
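+      // Route the mocked global injector to the action so it can reach the ReconTaskController.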
+      mockStaticContext.when(ReconGuiceServletContextListener::getGlobalInjector).thenReturn(mockInjector);
+      when(mockInjector.getInstance(ReconTaskController.class)).thenReturn(mockReconTaskController);
+      when(mockReconTaskController.queueReInitializationEvent(
+          any(ReconTaskReInitializationEvent.ReInitializationReason.class)))
+          .thenReturn(ReconTaskController.ReInitializationResult.SUCCESS);
+      upgradeAction.execute(mockDataSource);
+
+      // Verify that the re-initialization event was queued exactly once.
+      verify(mockReconTaskController, times(1)).queueReInitializationEvent(any());
+    }
+  }
+
+  @Test
+  public void testExecuteThrowsRuntimeExceptionOnRebuildFailure() {
+    try (MockedStatic<ReconGuiceServletContextListener> mockStaticContext =
+             mockStatic(ReconGuiceServletContextListener.class)) {
+      mockStaticContext.when(ReconGuiceServletContextListener::getGlobalInjector).thenReturn(mockInjector);
+      when(mockInjector.getInstance(ReconTaskController.class)).thenReturn(mockReconTaskController);
+
+      // Simulate a failure during the rebuild process
+      doThrow(new RuntimeException("Simulated rebuild error")).when(mockReconTaskController)
+          .queueReInitializationEvent(any(ReconTaskReInitializationEvent.ReInitializationReason.class));
+
+      RuntimeException thrown = assertThrows(RuntimeException.class, () -> upgradeAction.execute(mockDataSource));
+      assertEquals("Failed to rebuild NSSummary during upgrade", thrown.getMessage());
+    }
+  }
+
+  @Test
+  public void testGetTypeReturnsFinalize() {
+    assertEquals(ReconUpgradeAction.UpgradeActionType.FINALIZE, upgradeAction.getType());
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
