This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new d2da18f69b HDDS-12346. Reduce code duplication among TestNSSummaryTask classes (#8287)
d2da18f69b is described below

commit d2da18f69bf3954b8dcf21edca02ad27e9ba3303
Author: Priyesh Karatha <[email protected]>
AuthorDate: Wed May 7 14:07:52 2025 +0530

    HDDS-12346. Reduce code duplication among TestNSSummaryTask classes (#8287)
---
 .../recon/tasks/AbstractNSSummaryTaskTest.java     | 767 +++++++++++++++++++++
 .../ozone/recon/tasks/TestNSSummaryTask.java       | 325 +--------
 .../recon/tasks/TestNSSummaryTaskWithFSO.java      | 326 ++-------
 .../recon/tasks/TestNSSummaryTaskWithLegacy.java   | 408 +----------
 .../TestNSSummaryTaskWithLegacyOBSLayout.java      | 316 +--------
 .../recon/tasks/TestNSSummaryTaskWithOBS.java      | 305 +-------
 6 files changed, 939 insertions(+), 1508 deletions(-)
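
In short, each TestNSSummaryTask* class now extends AbstractNSSummaryTaskTest
and drives the shared setup through an OMConfigParameter. A minimal sketch of
the resulting subclass shape (the subclass name below is illustrative, not
part of this commit; the flag values mirror the FSO test):

    @TestInstance(TestInstance.Lifecycle.PER_CLASS)
    public class TestNSSummaryTaskExample extends AbstractNSSummaryTaskTest {
      @BeforeAll
      void setUp(@TempDir File tmpDir) throws Exception {
        // args: isFSO, isOBS, layout, flushThreshold, overrideConfig,
        //       enableFSPaths, legacyPopulate
        commonSetup(tmpDir, new OMConfigParameter(true, false,
            BucketLayout.FILE_SYSTEM_OPTIMIZED, 3, true, true, false));
      }
    }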

diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/AbstractNSSummaryTaskTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/AbstractNSSummaryTaskTest.java
new file mode 100644
index 0000000000..68b891cc42
--- /dev/null
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/AbstractNSSummaryTaskTest.java
@@ -0,0 +1,767 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.tasks;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmConfig;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils;
+import org.apache.hadoop.ozone.recon.ReconConstants;
+import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
+import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+
+/**
+ * Abstract base class holding the constants, setup logic and helper methods
+ * shared by the TestNSSummaryTask* test classes.
+ */
+public abstract class AbstractNSSummaryTaskTest {
+  // User and Volume Constants
+  protected static final String TEST_USER = "TestUser";
+  protected static final String VOL = "vol";
+  protected static final long PARENT_OBJECT_ID_ZERO = 0L;
+  protected static final long VOL_OBJECT_ID = 0L;
+  // Bucket Constants
+  protected static final String BUCKET_ONE = "bucket1";
+  protected static final String BUCKET_TWO = "bucket2";
+  protected static final String BUCKET_THREE = "bucket3";
+  protected static final long BUCKET_ONE_OBJECT_ID = 1L;
+  protected static final long BUCKET_TWO_OBJECT_ID = 2L;
+  protected static final long BUCKET_THREE_OBJECT_ID = 4L;
+  // File/Key Constants
+  protected static final String KEY_ONE = "file1";
+  protected static final String KEY_TWO = "file2";
+  protected static final String KEY_THREE = "file3";
+  protected static final String KEY_FOUR = "file4";
+  protected static final String KEY_FIVE = "file5";
+  protected static final String KEY_SIX = "key6";
+  protected static final String KEY_SEVEN = "/////key7";
+  protected static final String KEY_THREE_1 = "dir1/dir2/file3";
+  protected static final String FILE_ONE = "file1";
+  protected static final String FILE_TWO = "file2";
+  protected static final String FILE_THREE = "file3";
+  protected static final String FILE_FOUR = "file4";
+  protected static final String FILE_FIVE = "file5";
+  protected static final long KEY_ONE_OBJECT_ID = 3L;
+  protected static final long KEY_TWO_OBJECT_ID = 5L;
+  protected static final long KEY_THREE_OBJECT_ID = 8L;
+  protected static final long KEY_FOUR_OBJECT_ID = 6L;
+  protected static final long KEY_FIVE_OBJECT_ID = 9L;
+  protected static final long KEY_SIX_OBJECT_ID = 10L;
+  protected static final long KEY_SEVEN_OBJECT_ID = 11L;
+  protected static final long KEY_ONE_SIZE = 500L;
+  protected static final long KEY_TWO_SIZE = 1025L;
+  protected static final long KEY_TWO_OLD_SIZE = 1025L;
+  protected static final long KEY_TWO_UPDATE_SIZE = 1023L;
+  protected static final long KEY_THREE_SIZE = ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L;
+  protected static final long KEY_FOUR_SIZE = 2050L;
+  protected static final long KEY_FIVE_SIZE = 100L;
+  protected static final long KEY_SIX_SIZE = 6000L;
+  protected static final long KEY_SEVEN_SIZE = 7000L;
+  // Directory Constants
+  protected static final String DIR_ONE = "dir1";
+  protected static final String DIR_ONE_RENAME = "dir1_new";
+  protected static final String DIR_TWO = "dir2";
+  protected static final String DIR_THREE = "dir3";
+  protected static final String DIR_FOUR = "dir4";
+  protected static final String DIR_FIVE = "dir5";
+  protected static final long DIR_ONE_OBJECT_ID = 4L;
+  protected static final long DIR_TWO_OBJECT_ID = 7L;
+  protected static final long DIR_THREE_OBJECT_ID = 10L;
+  protected static final long DIR_FOUR_OBJECT_ID = 11L;
+  protected static final long DIR_FIVE_OBJECT_ID = 12L;
+
+  private OzoneConfiguration ozoneConfiguration;
+  private OzoneConfiguration omConfiguration;
+  private OMMetadataManager omMetadataManager;
+  private ReconOMMetadataManager reconOMMetadataManager;
+  private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
+
+  // Helper Methods
+  protected void commonSetup(File tmpDir, OMConfigParameter configParameter) throws Exception {
+
+    if (configParameter.overrideConfig) {
+      setOzoneConfiguration(new OzoneConfiguration());
+      getOzoneConfiguration().setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, configParameter.enableFSPaths);
+      getOzoneConfiguration().setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, configParameter.flushThreshold);
+    }
+
+    initializeNewOmMetadataManager(new File(tmpDir, "om"), configParameter.layout);
+    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
+        configParameter.isFSO ? getMockOzoneManagerServiceProviderWithFSO()
+            : getMockOzoneManagerServiceProvider();
+
+    setReconOMMetadataManager(getTestReconOmMetadataManager(getOmMetadataManager(), new File(tmpDir, "recon")));
+    ReconTestInjector reconTestInjector =
+        new ReconTestInjector.Builder(tmpDir)
+            .withReconOm(getReconOMMetadataManager())
+            .withOmServiceProvider(ozoneManagerServiceProvider)
+            .withReconSqlDb()
+            .withContainerDB()
+            .build();
+
+    setReconNamespaceSummaryManager(reconTestInjector.getInstance(ReconNamespaceSummaryManager.class));
+    assertNull(getReconNamespaceSummaryManager().getNSSummary(BUCKET_ONE_OBJECT_ID));
+
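+    // Choose how to populate the OM DB from the layout flags: the
+    // mixed-layout common tree, an OBS tree, or a legacy/FSO-specific tree.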
+    if (!configParameter.legacyPopulate && !configParameter.isFSO && !configParameter.isOBS) {
+      populateOMDBCommon();
+    } else if (configParameter.isOBS) {
+      populateOMDBOBS(configParameter.layout);
+    } else if (configParameter.legacyPopulate) {
+      populateOMDB(configParameter.layout, true);
+    } else {
+      populateOMDB(configParameter.layout, false);
+    }
+  }
+
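+  /**
+   * Writes a stale NSSummary, runs the given reprocess task, verifies the
+   * stale entry was cleaned up, and returns the (non-null) NSSummary for
+   * each of the given bucket object IDs, in order.
+   */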
+  public List<NSSummary> commonSetUpTestReprocess(Runnable reprocessTask, long... bucketObjectIds) throws IOException {
+
+    List<NSSummary> result = new ArrayList<>();
+    NSSummary staleNSSummary = new NSSummary();
+    RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
+    getReconNamespaceSummaryManager().batchStoreNSSummaries(rdbBatchOperation, -1L, staleNSSummary);
+    getReconNamespaceSummaryManager().commitBatchOperation(rdbBatchOperation);
+
+    // Verify commit
+    assertNotNull(getReconNamespaceSummaryManager().getNSSummary(-1L));
+    // Reinitialize Recon RocksDB's namespace CF.
+    getReconNamespaceSummaryManager().clearNSSummaryTable();
+    // Run specific reprocess task
+    reprocessTask.run();
+    // Verify cleanup
+    assertNull(getReconNamespaceSummaryManager().getNSSummary(-1L));
+    // Assign and verify NSSummaries for buckets
+    if (bucketObjectIds.length > 0) {
+      NSSummary nsSummaryForBucket1 = getReconNamespaceSummaryManager().getNSSummary(bucketObjectIds[0]);
+      assertNotNull(nsSummaryForBucket1);
+      result.add(nsSummaryForBucket1);
+    }
+    if (bucketObjectIds.length > 1) {
+      NSSummary nsSummaryForBucket2 = getReconNamespaceSummaryManager().getNSSummary(bucketObjectIds[1]);
+      assertNotNull(nsSummaryForBucket2);
+      result.add(nsSummaryForBucket2);
+    }
+    if (bucketObjectIds.length > 2) {
+      NSSummary nsSummaryForBucket3 = getReconNamespaceSummaryManager().getNSSummary(bucketObjectIds[2]);
+      assertNotNull(nsSummaryForBucket3);
+      result.add(nsSummaryForBucket3);
+    }
+    return result;
+  }
+
+  /**
+   * Build a key info for put/update action.
+   *
+   * @param volume         volume name
+   * @param bucket         bucket name
+   * @param key            key name
+   * @param fileName       file name
+   * @param objectID       object ID
+   * @param parentObjectId parent object ID
+   * @param dataSize       file size
+   * @return the KeyInfo
+   */
+  protected static OmKeyInfo buildOmKeyInfo(String volume, String bucket, String key, String fileName,
+                                            long objectID, long parentObjectId, long dataSize) {
+    return new OmKeyInfo.Builder()
+        .setBucketName(bucket)
+        .setVolumeName(volume)
+        .setKeyName(key)
+        .setFileName(fileName)
+        .setReplicationConfig(StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
+        .setObjectID(objectID)
+        .setParentObjectID(parentObjectId)
+        .setDataSize(dataSize)
+        .build();
+  }
+
+  /**
+   * Build a key info for delete action.
+   *
+   * @param volume         volume name
+   * @param bucket         bucket name
+   * @param key            key name
+   * @param fileName       file name
+   * @param objectID       object ID
+   * @param parentObjectId parent object ID
+   * @return the KeyInfo
+   */
+  protected static OmKeyInfo buildOmKeyInfo(String volume, String bucket, String key, String fileName,
+                                            long objectID, long parentObjectId) {
+    return new OmKeyInfo.Builder()
+        .setBucketName(bucket)
+        .setVolumeName(volume)
+        .setKeyName(key)
+        .setFileName(fileName)
+        .setReplicationConfig(StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
+        .setObjectID(objectID)
+        .setParentObjectID(parentObjectId)
+        .build();
+  }
+
+  protected static OmDirectoryInfo buildOmDirInfo(String dirName, long objectId, long parentObjectId) {
+    return new OmDirectoryInfo.Builder()
+        .setName(dirName)
+        .setObjectID(objectId)
+        .setParentObjectID(parentObjectId)
+        .build();
+  }
+
+  /**
+   * Build a directory as key info for put/update action.
+   * We don't need to set size.
+   * @param volume volume name
+   * @param bucket bucket name
+   * @param key key name
+   * @param fileName file name
+   * @param objectID object ID
+   * @return the KeyInfo
+   */
+  protected static OmKeyInfo buildOmDirKeyInfo(String volume,
+                                               String bucket,
+                                               String key,
+                                               String fileName,
+                                               long objectID) {
+    return new OmKeyInfo.Builder()
+        .setBucketName(bucket)
+        .setVolumeName(volume)
+        .setKeyName(key)
+        .setFileName(fileName)
+        .setReplicationConfig(
+            StandaloneReplicationConfig.getInstance(
+                HddsProtos.ReplicationFactor.ONE))
+        .setObjectID(objectID)
+        .build();
+  }
+
+  protected static BucketLayout getFSOBucketLayout() {
+    return BucketLayout.FILE_SYSTEM_OPTIMIZED;
+  }
+
+  protected static BucketLayout getLegacyBucketLayout() {
+    return BucketLayout.LEGACY;
+  }
+
+  protected static BucketLayout getOBSBucketLayout() {
+    return BucketLayout.OBJECT_STORE;
+  }
+
+  protected void initializeNewOmMetadataManager(File omDbDir, BucketLayout layout) throws IOException {
+
+    if (layout == null) {
+      initializeNewOmMetadataManager(omDbDir);
+      return;
+    }
+
+    if (layout == BucketLayout.FILE_SYSTEM_OPTIMIZED) {
+      omMetadataManager = OMMetadataManagerTestUtils.initializeNewOmMetadataManager(omDbDir);
+      return;
+    }
+
+    omConfiguration = new OzoneConfiguration();
+    omConfiguration.set(OZONE_OM_DB_DIRS, omDbDir.getAbsolutePath());
+    omConfiguration.set(OmConfig.Keys.ENABLE_FILESYSTEM_PATHS, "true");
+    omConfiguration.set(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, "10");
+
+    omMetadataManager = new OmMetadataManagerImpl(omConfiguration, null);
+
+    String volumeKey = omMetadataManager.getVolumeKey(VOL);
+    OmVolumeArgs args = OmVolumeArgs.newBuilder()
+        .setObjectID(VOL_OBJECT_ID)
+        .setVolume(VOL)
+        .setAdminName(TEST_USER)
+        .setOwnerName(TEST_USER)
+        .build();
+    omMetadataManager.getVolumeTable().put(volumeKey, args);
+
+    OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder()
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(BUCKET_ONE_OBJECT_ID)
+        .setBucketLayout(layout)
+        .build();
+
+    OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_TWO)
+        .setObjectID(BUCKET_TWO_OBJECT_ID)
+        .setBucketLayout(layout)
+        .build();
+
+    String bucketKey1 = omMetadataManager.getBucketKey(VOL, BUCKET_ONE);
+    String bucketKey2 = omMetadataManager.getBucketKey(VOL, BUCKET_TWO);
+    omMetadataManager.getBucketTable().put(bucketKey1, bucketInfo1);
+    omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
+  }
+
+  // Helper method to check if an array contains a specific value
+  protected boolean contains(int[] arr, int value) {
+    for (int num : arr) {
+      if (num == value) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private void initializeNewOmMetadataManager(
+      File omDbDir)
+      throws IOException {
+    omConfiguration = new OzoneConfiguration();
+    omConfiguration.set(OZONE_OM_DB_DIRS,
+        omDbDir.getAbsolutePath());
+    omConfiguration.set(OMConfigKeys
+        .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true");
+    omMetadataManager = new OmMetadataManagerImpl(
+        omConfiguration, null);
+
+    String volumeKey = omMetadataManager.getVolumeKey(VOL);
+    OmVolumeArgs args =
+        OmVolumeArgs.newBuilder()
+            .setObjectID(VOL_OBJECT_ID)
+            .setVolume(VOL)
+            .setAdminName(TEST_USER)
+            .setOwnerName(TEST_USER)
+            .build();
+    omMetadataManager.getVolumeTable().put(volumeKey, args);
+
+    OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder()
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(BUCKET_ONE_OBJECT_ID)
+        .setBucketLayout(getFSOBucketLayout())
+        .build();
+
+    OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_TWO)
+        .setObjectID(BUCKET_TWO_OBJECT_ID)
+        .setBucketLayout(getLegacyBucketLayout())
+        .build();
+
+    OmBucketInfo bucketInfo3 = OmBucketInfo.newBuilder()
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_THREE)
+        .setObjectID(BUCKET_THREE_OBJECT_ID)
+        .setBucketLayout(getOBSBucketLayout())
+        .build();
+
+    String bucketKey = omMetadataManager.getBucketKey(
+        bucketInfo1.getVolumeName(), bucketInfo1.getBucketName());
+    String bucketKey2 = omMetadataManager.getBucketKey(
+        bucketInfo2.getVolumeName(), bucketInfo2.getBucketName());
+    String bucketKey3 = omMetadataManager.getBucketKey(
+        bucketInfo3.getVolumeName(), bucketInfo3.getBucketName());
+
+    omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1);
+    omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
+    omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3);
+  }
+
+  protected void populateOMDB(BucketLayout layout, boolean isLegacy) throws IOException {
+    if (layout == BucketLayout.FILE_SYSTEM_OPTIMIZED) {
+      populateOMDBFSO();
+    } else if (layout == BucketLayout.LEGACY) {
+      populateOMDBLegacy();
+    } else if (isLegacy && layout == BucketLayout.OBJECT_STORE) {
+      populateOMDBOBS(getLegacyBucketLayout());
+    } else {
+      populateOMDBOBS(getOBSBucketLayout());
+    }
+  }
+
+  /**
+   * Populate OMDB with the following structure.
+   *                 vol
+   *           /      |       \
+   *      bucket1   bucket2    bucket3
+   *         /        |         \
+   *       file1    file2     file3
+   *
+   * @throws IOException
+   */
+  protected void populateOMDBCommon() throws IOException {
+    // Bucket1 FSO layout
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_ONE,
+        BUCKET_ONE,
+        VOL,
+        FILE_ONE,
+        KEY_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_ONE_SIZE,
+        getFSOBucketLayout());
+
+    // Bucket2 Legacy layout
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_TWO,
+        BUCKET_TWO,
+        VOL,
+        FILE_TWO,
+        KEY_TWO_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_TWO_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_TWO_SIZE,
+        getLegacyBucketLayout());
+
+    // Bucket3 OBS layout
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_THREE,
+        BUCKET_THREE,
+        VOL,
+        FILE_THREE,
+        KEY_THREE_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_THREE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_THREE_SIZE,
+        getOBSBucketLayout());
+  }
+
+  /**
+   * Populate OMDB with the following structure.
+   *              vol
+   *            /     \
+   *        bucket1   bucket2
+   *        /    \      /    \
+   *     file1  dir1  file2  file4
+   *            /   \
+   *         dir2   dir3
+   *          /
+   *        file3
+   *
+   * @throws IOException
+   */
+  private void populateOMDBFSO() throws IOException {
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_ONE,
+        BUCKET_ONE,
+        VOL,
+        FILE_ONE,
+        KEY_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_ONE_SIZE,
+        getFSOBucketLayout());
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_TWO,
+        BUCKET_TWO,
+        VOL,
+        FILE_TWO,
+        KEY_TWO_OBJECT_ID,
+        BUCKET_TWO_OBJECT_ID,
+        BUCKET_TWO_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_TWO_OLD_SIZE,
+        getFSOBucketLayout());
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_THREE,
+        BUCKET_ONE,
+        VOL,
+        FILE_THREE,
+        KEY_THREE_OBJECT_ID,
+        DIR_TWO_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_THREE_SIZE,
+        getFSOBucketLayout());
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_FOUR,
+        BUCKET_TWO,
+        VOL,
+        FILE_FOUR,
+        KEY_FOUR_OBJECT_ID,
+        BUCKET_TWO_OBJECT_ID,
+        BUCKET_TWO_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_FOUR_SIZE,
+        getFSOBucketLayout());
+    writeDirToOm(reconOMMetadataManager, DIR_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID, DIR_ONE);
+    writeDirToOm(reconOMMetadataManager, DIR_TWO_OBJECT_ID,
+        DIR_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID, DIR_TWO);
+    writeDirToOm(reconOMMetadataManager, DIR_THREE_OBJECT_ID,
+        DIR_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID, DIR_THREE);
+  }
+
+  /**
+   * Populate OMDB with the following structure.
+   *              vol
+   *            /     \
+   *        bucket1   bucket2
+   *        /    \      /    \
+   *     file1  dir1  file2  file4
+   *            /   \
+   *         dir2   dir3
+   *          /
+   *        file3
+   *
+   * @throws IOException
+   */
+  private void populateOMDBLegacy() throws IOException {
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_ONE,
+        BUCKET_ONE,
+        VOL,
+        FILE_ONE,
+        KEY_ONE_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_ONE_SIZE,
+        getLegacyBucketLayout());
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_TWO,
+        BUCKET_TWO,
+        VOL,
+        FILE_TWO,
+        KEY_TWO_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_TWO_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_TWO_OLD_SIZE,
+        getLegacyBucketLayout());
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_THREE_1,
+        BUCKET_ONE,
+        VOL,
+        FILE_THREE,
+        KEY_THREE_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_THREE_SIZE,
+        getLegacyBucketLayout());
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_FOUR,
+        BUCKET_TWO,
+        VOL,
+        FILE_FOUR,
+        KEY_FOUR_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_TWO_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_FOUR_SIZE,
+        getLegacyBucketLayout());
+
+    writeDirToOm(reconOMMetadataManager,
+        (DIR_ONE + OM_KEY_PREFIX),
+        BUCKET_ONE,
+        VOL,
+        DIR_ONE,
+        DIR_ONE_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        getLegacyBucketLayout());
+    writeDirToOm(reconOMMetadataManager,
+        (DIR_ONE + OM_KEY_PREFIX +
+            DIR_TWO + OM_KEY_PREFIX),
+        BUCKET_ONE,
+        VOL,
+        DIR_TWO,
+        DIR_TWO_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        getLegacyBucketLayout());
+    writeDirToOm(reconOMMetadataManager,
+        (DIR_ONE + OM_KEY_PREFIX +
+            DIR_THREE + OM_KEY_PREFIX),
+        BUCKET_ONE,
+        VOL,
+        DIR_THREE,
+        DIR_THREE_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        getLegacyBucketLayout());
+  }
+
+  /**
+   * Populate OMDB with the following structure.
+   *                 vol
+   *              /       \
+   *          bucket1     bucket2
+   *        /    \   \        \  \
+   *     key1  key2   key3   key4 key5
+   *
+   * @throws IOException
+   */
+  protected void populateOMDBOBS(BucketLayout layout) throws IOException {
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_ONE,
+        BUCKET_ONE,
+        VOL,
+        KEY_ONE,
+        KEY_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_ONE_SIZE,
+        layout);
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_TWO,
+        BUCKET_ONE,
+        VOL,
+        KEY_TWO,
+        KEY_TWO_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_TWO_OLD_SIZE,
+        layout);
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_THREE,
+        BUCKET_ONE,
+        VOL,
+        KEY_THREE,
+        KEY_THREE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_THREE_SIZE,
+        layout);
+
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_FOUR,
+        BUCKET_TWO,
+        VOL,
+        KEY_FOUR,
+        KEY_FOUR_OBJECT_ID,
+        BUCKET_TWO_OBJECT_ID,
+        BUCKET_TWO_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_FOUR_SIZE,
+        layout);
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_FIVE,
+        BUCKET_TWO,
+        VOL,
+        KEY_FIVE,
+        KEY_FIVE_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_TWO_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_FIVE_SIZE,
+        layout);
+  }
+
+  public OzoneConfiguration getOzoneConfiguration() {
+    return ozoneConfiguration;
+  }
+
+  public void setOzoneConfiguration(OzoneConfiguration ozoneConfiguration) {
+    this.ozoneConfiguration = ozoneConfiguration;
+  }
+
+  public OzoneConfiguration getOmConfiguration() {
+    return omConfiguration;
+  }
+
+  public void setOmConfiguration(OzoneConfiguration omConfiguration) {
+    this.omConfiguration = omConfiguration;
+  }
+
+  public OMMetadataManager getOmMetadataManager() {
+    return omMetadataManager;
+  }
+
+  public void setOmMetadataManager(OMMetadataManager omMetadataManager) {
+    this.omMetadataManager = omMetadataManager;
+  }
+
+  public ReconOMMetadataManager getReconOMMetadataManager() {
+    return reconOMMetadataManager;
+  }
+
+  public void setReconOMMetadataManager(ReconOMMetadataManager reconOMMetadataManager) {
+    this.reconOMMetadataManager = reconOMMetadataManager;
+  }
+
+  public ReconNamespaceSummaryManager getReconNamespaceSummaryManager() {
+    return reconNamespaceSummaryManager;
+  }
+
+  public void setReconNamespaceSummaryManager(ReconNamespaceSummaryManager reconNamespaceSummaryManager) {
+    this.reconNamespaceSummaryManager = reconNamespaceSummaryManager;
+  }
+
+  /**
+   * Parameter object describing how commonSetup should configure the
+   * managers and populate the OM DB for a given bucket layout.
+   */
+  public static class OMConfigParameter {
+
+    private final boolean isFSO;
+    private final boolean isOBS;
+    private final BucketLayout layout;
+    private final long flushThreshold;
+    private final boolean overrideConfig;
+    private final boolean enableFSPaths;
+    private final boolean legacyPopulate;
+
+    public OMConfigParameter(boolean isFSO,
+                             boolean isOBS,
+                             BucketLayout layout,
+                             long flushThreshold,
+                             boolean overrideConfig,
+                             boolean enableFSPaths,
+                             boolean legacyPopulate) {
+      this.isFSO = isFSO;
+      this.isOBS = isOBS;
+      this.layout = layout;
+      this.flushThreshold = flushThreshold;
+      this.overrideConfig = overrideConfig;
+      this.enableFSPaths = enableFSPaths;
+      this.legacyPopulate = legacyPopulate;
+    }
+  }
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
index ea7efaddfd..617edc3b9b 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
@@ -18,10 +18,6 @@
 package org.apache.hadoop.ozone.recon.tasks;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
@@ -30,24 +26,11 @@
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.List;
 import java.util.Set;
-import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.recon.ReconConstants;
-import org.apache.hadoop.ozone.recon.ReconTestInjector;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
-import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Nested;
@@ -62,72 +45,25 @@
  * for the OBS bucket is null.
  */
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
-public class TestNSSummaryTask {
+public class TestNSSummaryTask extends AbstractNSSummaryTaskTest {
 
-  private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
-  private OMMetadataManager omMetadataManager;
-  private ReconOMMetadataManager reconOMMetadataManager;
   private NSSummaryTask nSSummaryTask;
-  private OzoneConfiguration omConfiguration;
-
-  // Object names
-  private static final String VOL = "vol";
-  private static final String BUCKET_ONE = "bucket1";
-  private static final String BUCKET_TWO = "bucket2";
-  private static final String BUCKET_THREE = "bucket3";
-  private static final String KEY_ONE = "file1";
-  private static final String KEY_TWO = "file2";
-  private static final String KEY_THREE = "file3";
-  private static final String KEY_FIVE = "file5";
-  private static final String FILE_ONE = "file1";
-  private static final String FILE_TWO = "file2";
-  private static final String FILE_THREE = "file3";
-  private static final String FILE_FIVE = "file5";
-
-  private static final String TEST_USER = "TestUser";
-
-  private static final long PARENT_OBJECT_ID_ZERO = 0L;
-  private static final long VOL_OBJECT_ID = 0L;
-  private static final long BUCKET_ONE_OBJECT_ID = 1L;
-  private static final long BUCKET_TWO_OBJECT_ID = 2L;
-  private static final long BUCKET_THREE_OBJECT_ID = 4L;
-  private static final long KEY_ONE_OBJECT_ID = 3L;
-  private static final long KEY_TWO_OBJECT_ID = 5L;
-  private static final long KEY_THREE_OBJECT_ID = 8L;
-  private static final long KEY_FIVE_OBJECT_ID = 9L;
-
-  private static final long KEY_ONE_SIZE = 500L;
-  private static final long KEY_TWO_SIZE = 1025L;
-  private static final long KEY_THREE_SIZE =
-      ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L;
-  private static final long KEY_FIVE_SIZE = 100L;
 
   @BeforeAll
   void setUp(@TempDir File tmpDir) throws Exception {
-    initializeNewOmMetadataManager(new File(tmpDir, "om"));
-    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
-        getMockOzoneManagerServiceProvider();
-    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
-        new File(tmpDir, "recon"));
-
-    ReconTestInjector reconTestInjector =
-        new ReconTestInjector.Builder(tmpDir)
-            .withReconOm(reconOMMetadataManager)
-            .withOmServiceProvider(ozoneManagerServiceProvider)
-            .withReconSqlDb()
-            .withContainerDB()
-            .build();
-    reconNamespaceSummaryManager =
-        reconTestInjector.getInstance(ReconNamespaceSummaryManager.class);
-
-    NSSummary nonExistentSummary =
-        reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
-    assertNull(nonExistentSummary);
-
-    populateOMDB();
-
-    nSSummaryTask = new NSSummaryTask(reconNamespaceSummaryManager,
-        reconOMMetadataManager, omConfiguration);
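+    // OMConfigParameter args: isFSO, isOBS, layout, flushThreshold,
+    // overrideConfig, enableFSPaths, legacyPopulate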
+    commonSetup(tmpDir,
+        new OMConfigParameter(false,
+            false,
+            null, 0,
+            false,
+            true,
+            false));
+
+    nSSummaryTask = new NSSummaryTask(
+        getReconNamespaceSummaryManager(),
+        getReconOMMetadataManager(),
+        getOmConfiguration()
+    );
   }
 
   /**
@@ -142,43 +78,30 @@ public class TestReprocess {
 
     @BeforeEach
     public void setUp() throws Exception {
-      // write a NSSummary prior to reprocess
-      // verify it got cleaned up after.
-      NSSummary staleNSSummary = new NSSummary();
-      RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
-      reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L,
-          staleNSSummary);
-      reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation);
-
-      // Verify commit
-      assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L));
-
-      nSSummaryTask.reprocess(reconOMMetadataManager);
-      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
-
-      nsSummaryForBucket1 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
-      nsSummaryForBucket2 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
-      nsSummaryForBucket3 =
-      reconNamespaceSummaryManager.getNSSummary(BUCKET_THREE_OBJECT_ID);
-      assertNotNull(nsSummaryForBucket1);
-      assertNotNull(nsSummaryForBucket2);
-      assertNotNull(nsSummaryForBucket3);
+      List<NSSummary> result = commonSetUpTestReprocess(
+          () -> nSSummaryTask.reprocess(getReconOMMetadataManager()),
+          BUCKET_ONE_OBJECT_ID,
+          BUCKET_TWO_OBJECT_ID,
+          BUCKET_THREE_OBJECT_ID);
+      nsSummaryForBucket1 = result.get(0);
+      nsSummaryForBucket2 = result.get(1);
+      nsSummaryForBucket3 = result.get(2);
     }
 
     @Test
     public void testReprocessNSSummaryNull() throws IOException {
-      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
+      assertNull(getReconNamespaceSummaryManager().getNSSummary(-1L));
     }
 
     @Test
     public void testReprocessGetFiles() {
       assertEquals(1, nsSummaryForBucket1.getNumOfFiles());
       assertEquals(1, nsSummaryForBucket2.getNumOfFiles());
+      assertEquals(1, nsSummaryForBucket3.getNumOfFiles());
 
       assertEquals(KEY_ONE_SIZE, nsSummaryForBucket1.getSizeOfFiles());
       assertEquals(KEY_TWO_SIZE, nsSummaryForBucket2.getSizeOfFiles());
+      assertEquals(KEY_THREE_SIZE, nsSummaryForBucket3.getSizeOfFiles());
     }
 
     @Test
@@ -220,17 +143,17 @@ public class TestProcess {
 
     @BeforeEach
     public void setUp() throws IOException {
-      nSSummaryTask.reprocess(reconOMMetadataManager);
+      nSSummaryTask.reprocess(getReconOMMetadataManager());
       nSSummaryTask.process(processEventBatch(), Collections.emptyMap());
 
       nsSummaryForBucket1 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_ONE_OBJECT_ID);
       assertNotNull(nsSummaryForBucket1);
       nsSummaryForBucket2 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_TWO_OBJECT_ID);
       assertNotNull(nsSummaryForBucket2);
       nsSummaryForBucket3 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_THREE_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_THREE_OBJECT_ID);
       assertNotNull(nsSummaryForBucket3);
     }
 
@@ -246,7 +169,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omPutKey)
           .setValue(omPutKeyInfo)
-          .setTable(omMetadataManager.getKeyTable(getLegacyBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getLegacyBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
           .build();
@@ -260,7 +183,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omDeleteKey)
           .setValue(omDeleteInfo)
-          .setTable(omMetadataManager.getKeyTable(getFSOBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getFSOBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
           .build();
@@ -299,188 +222,4 @@ public void testProcessBucket() throws IOException {
       }
     }
   }
-
-  /**
-   * Build a key info for put/update action.
-   * @param volume         volume name
-   * @param bucket         bucket name
-   * @param key            key name
-   * @param fileName       file name
-   * @param objectID       object ID
-   * @param parentObjectId parent object ID
-   * @param dataSize       file size
-   * @return the KeyInfo
-   */
-  private static OmKeyInfo buildOmKeyInfo(String volume,
-                                          String bucket,
-                                          String key,
-                                          String fileName,
-                                          long objectID,
-                                          long parentObjectId,
-                                          long dataSize) {
-    return new OmKeyInfo.Builder()
-        .setBucketName(bucket)
-        .setVolumeName(volume)
-        .setKeyName(key)
-        .setFileName(fileName)
-        .setReplicationConfig(
-            StandaloneReplicationConfig.getInstance(
-                HddsProtos.ReplicationFactor.ONE))
-        .setObjectID(objectID)
-        .setParentObjectID(parentObjectId)
-        .setDataSize(dataSize)
-        .build();
-  }
-
-  /**
-   * Build a key info for delete action.
-   * @param volume         volume name
-   * @param bucket         bucket name
-   * @param key            key name
-   * @param fileName       file name
-   * @param objectID       object ID
-   * @param parentObjectId parent object ID
-   * @return the KeyInfo
-   */
-  private static OmKeyInfo buildOmKeyInfo(String volume,
-                                          String bucket,
-                                          String key,
-                                          String fileName,
-                                          long objectID,
-                                          long parentObjectId) {
-    return new OmKeyInfo.Builder()
-        .setBucketName(bucket)
-        .setVolumeName(volume)
-        .setKeyName(key)
-        .setFileName(fileName)
-        .setReplicationConfig(
-            StandaloneReplicationConfig.getInstance(
-                HddsProtos.ReplicationFactor.ONE))
-        .setObjectID(objectID)
-        .setParentObjectID(parentObjectId)
-        .build();
-  }
-
-  /**
-   * Populate OMDB with the following configs.
-   *             vol
-   *      /       \       \
-   * bucket1   bucket2    bucket3
-   *    /        /        /
-   * file1    file2     file3
-   *
-   * @throws IOException
-   */
-  private void populateOMDB() throws IOException {
-    // Bucket1 FSO layout
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_ONE,
-        BUCKET_ONE,
-        VOL,
-        FILE_ONE,
-        KEY_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_ONE_SIZE,
-        getFSOBucketLayout());
-
-    // Bucket2 Legacy layout
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_TWO,
-        BUCKET_TWO,
-        VOL,
-        FILE_TWO,
-        KEY_TWO_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-          KEY_TWO_SIZE,
-        getLegacyBucketLayout());
-
-    // Bucket3 OBS layout
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_THREE,
-        BUCKET_THREE,
-        VOL,
-        FILE_THREE,
-        KEY_THREE_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_THREE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_THREE_SIZE,
-        getOBSBucketLayout());
-  }
-
-  /**
-   * Create a new OM Metadata manager instance with one user, one vol, and two
-   * buckets. Bucket1 will have FSO layout, bucket2 will have Legacy layout
-   * and bucket3 will have OBS layout.
-   * @throws IOException ioEx
-   */
-  private void initializeNewOmMetadataManager(
-      File omDbDir)
-      throws IOException {
-    omConfiguration = new OzoneConfiguration();
-    omConfiguration.set(OZONE_OM_DB_DIRS,
-        omDbDir.getAbsolutePath());
-    omConfiguration.set(OMConfigKeys
-        .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true");
-    omMetadataManager = new OmMetadataManagerImpl(
-        omConfiguration, null);
-
-    String volumeKey = omMetadataManager.getVolumeKey(VOL);
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setObjectID(VOL_OBJECT_ID)
-            .setVolume(VOL)
-            .setAdminName(TEST_USER)
-            .setOwnerName(TEST_USER)
-            .build();
-    omMetadataManager.getVolumeTable().put(volumeKey, args);
-
-    OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_ONE)
-        .setObjectID(BUCKET_ONE_OBJECT_ID)
-        .setBucketLayout(getFSOBucketLayout())
-        .build();
-
-    OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_TWO)
-        .setObjectID(BUCKET_TWO_OBJECT_ID)
-        .setBucketLayout(getLegacyBucketLayout())
-        .build();
-
-    OmBucketInfo bucketInfo3 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_THREE)
-        .setObjectID(BUCKET_THREE_OBJECT_ID)
-        .setBucketLayout(getOBSBucketLayout())
-        .build();
-
-    String bucketKey = omMetadataManager.getBucketKey(
-        bucketInfo1.getVolumeName(), bucketInfo1.getBucketName());
-    String bucketKey2 = omMetadataManager.getBucketKey(
-        bucketInfo2.getVolumeName(), bucketInfo2.getBucketName());
-    String bucketKey3 = omMetadataManager.getBucketKey(
-        bucketInfo3.getVolumeName(), bucketInfo3.getBucketName());
-
-    omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1);
-    omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
-    omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3);
-  }
-
-  private static BucketLayout getFSOBucketLayout() {
-    return BucketLayout.FILE_SYSTEM_OPTIMIZED;
-  }
-
-  private static BucketLayout getLegacyBucketLayout() {
-    return BucketLayout.LEGACY;
-  }
-
-  private static BucketLayout getOBSBucketLayout() {
-    return BucketLayout.OBJECT_STORE;
-  }
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
index a98c432dee..75fb468c5a 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
@@ -19,11 +19,6 @@
 
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -36,21 +31,14 @@
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Set;
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.recon.ReconConstants;
-import org.apache.hadoop.ozone.recon.ReconTestInjector;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider;
 import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeAll;
@@ -65,94 +53,33 @@
  * Test for NSSummaryTaskWithFSO.
  */
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
-public class TestNSSummaryTaskWithFSO {
-
-  private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
-  private OMMetadataManager omMetadataManager;
-  private ReconOMMetadataManager reconOMMetadataManager;
-  private NSSummaryTaskWithFSO nSSummaryTaskWithFso;
-
-  private OzoneConfiguration ozoneConfiguration;
-
-  // Object names in FSO-enabled format
-  private static final String VOL = "vol";
-  private static final String BUCKET_ONE = "bucket1";
-  private static final String BUCKET_TWO = "bucket2";
-  private static final String KEY_ONE = "file1";
-  private static final String KEY_TWO = "file2";
-  private static final String KEY_THREE = "dir1/dir2/file3";
-  private static final String KEY_FOUR = "file4";
-  private static final String KEY_FIVE = "file5";
-  private static final String FILE_ONE = "file1";
-  private static final String FILE_TWO = "file2";
-  private static final String FILE_THREE = "file3";
-  private static final String FILE_FOUR = "file4";
-  private static final String FILE_FIVE = "file5";
-  private static final String DIR_ONE = "dir1";
-  private static final String DIR_ONE_RENAME = "dir1_new";
-  private static final String DIR_TWO = "dir2";
-  private static final String DIR_THREE = "dir3";
-  private static final String DIR_FOUR = "dir4";
-  private static final String DIR_FIVE = "dir5";
-
-  private static final long VOL_OBJECT_ID = 0L;
-  private static final long BUCKET_ONE_OBJECT_ID = 1L;
-  private static final long BUCKET_TWO_OBJECT_ID = 2L;
-  private static final long KEY_ONE_OBJECT_ID = 3L;
-  private static final long DIR_ONE_OBJECT_ID = 4L;
-  private static final long KEY_TWO_OBJECT_ID = 5L;
-  private static final long KEY_FOUR_OBJECT_ID = 6L;
-  private static final long DIR_TWO_OBJECT_ID = 7L;
-  private static final long KEY_THREE_OBJECT_ID = 8L;
-  private static final long KEY_FIVE_OBJECT_ID = 9L;
-  private static final long DIR_THREE_OBJECT_ID = 10L;
-  private static final long DIR_FOUR_OBJECT_ID = 11L;
-  private static final long DIR_FIVE_OBJECT_ID = 12L;
-
-  private static final long KEY_ONE_SIZE = 500L;
-  private static final long KEY_TWO_OLD_SIZE = 1025L;
-  private static final long KEY_TWO_UPDATE_SIZE = 1023L;
-  private static final long KEY_THREE_SIZE =
-          ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L;
-  private static final long KEY_FOUR_SIZE = 2050L;
-  private static final long KEY_FIVE_SIZE = 100L;
+public class TestNSSummaryTaskWithFSO extends AbstractNSSummaryTaskTest {
 
+  // Answer Sets
   private static Set<Long> bucketOneAns = new HashSet<>();
   private static Set<Long> bucketTwoAns = new HashSet<>();
   private static Set<Long> dirOneAns = new HashSet<>();
+  private NSSummaryTaskWithFSO nSSummaryTaskWithFso;
+
+  private static BucketLayout getBucketLayout() {
+    return BucketLayout.FILE_SYSTEM_OPTIMIZED;
+  }
 
   @BeforeAll
   void setUp(@TempDir File tmpDir) throws Exception {
-    ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD,
-        3);
-    omMetadataManager = initializeNewOmMetadataManager(new File(tmpDir, "om"));
-    OzoneManagerServiceProvider ozoneManagerServiceProvider =
-        getMockOzoneManagerServiceProviderWithFSO();
-    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
-            new File(tmpDir, "recon"));
-
-    ReconTestInjector reconTestInjector =
-        new ReconTestInjector.Builder(tmpDir)
-            .withReconOm(reconOMMetadataManager)
-            .withOmServiceProvider(ozoneManagerServiceProvider)
-            .withReconSqlDb()
-            .withContainerDB()
-            .build();
-    reconNamespaceSummaryManager =
-            reconTestInjector.getInstance(ReconNamespaceSummaryManager.class);
-
-    NSSummary nonExistentSummary =
-            reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
-    assertNull(nonExistentSummary);
-
-    populateOMDB();
-
-    long nsSummaryFlushToDBMaxThreshold = ozoneConfiguration.getLong(
-        OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 3);
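+    // OMConfigParameter args: isFSO, isOBS, layout, flushThreshold,
+    // overrideConfig, enableFSPaths, legacyPopulate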
+    commonSetup(tmpDir,
+        new OMConfigParameter(true,
+          false,
+          getBucketLayout(),
+          3,
+          true,
+          true,
+          false));
+    long threshold = getOzoneConfiguration().getLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 3);
     nSSummaryTaskWithFso = new NSSummaryTaskWithFSO(
-        reconNamespaceSummaryManager, reconOMMetadataManager,
-         nsSummaryFlushToDBMaxThreshold);
+        getReconNamespaceSummaryManager(),
+        getReconOMMetadataManager(),
+        threshold);
   }
 
   /**
@@ -168,32 +95,16 @@ public class TestReprocess {
     public void setUp() throws IOException {
       // write a NSSummary prior to reprocess
       // verify it got cleaned up after.
-      NSSummary staleNSSummary = new NSSummary();
-      RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
-      reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L,
-          staleNSSummary);
-      reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation);
-
-      // Verify commit
-      assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L));
-
-      // reinit Recon RocksDB's namespace CF.
-      reconNamespaceSummaryManager.clearNSSummaryTable();
-
-      nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
-      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
-
-      nsSummaryForBucket1 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
-      nsSummaryForBucket2 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
-      assertNotNull(nsSummaryForBucket1);
-      assertNotNull(nsSummaryForBucket2);
+      List<NSSummary> result =
+          commonSetUpTestReprocess(() -> nSSummaryTaskWithFso.reprocessWithFSO(getReconOMMetadataManager()),
+              BUCKET_ONE_OBJECT_ID, BUCKET_TWO_OBJECT_ID);
+      nsSummaryForBucket1 = result.get(0);
+      nsSummaryForBucket2 = result.get(1);
     }
 
     @Test
     public void testReprocessNSSummaryNull() throws IOException {
-      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
+      assertNull(getReconNamespaceSummaryManager().getNSSummary(-1L));
     }
 
     @Test
@@ -204,7 +115,7 @@ public void testReprocessGetFiles() {
       assertEquals(KEY_ONE_SIZE, nsSummaryForBucket1.getSizeOfFiles());
       assertEquals(KEY_TWO_OLD_SIZE + KEY_FOUR_SIZE,
           nsSummaryForBucket2.getSizeOfFiles());
-    } 
+    }
 
     @Test
     public void testReprocessFileBucketSize() {
@@ -245,7 +156,7 @@ public void testReprocessBucketDirs() {
     public void testReprocessDirsUnderDir() throws Exception {
 
       // Dir 1 has two dir: dir2 and dir3.
-      NSSummary nsSummaryInDir1 = reconNamespaceSummaryManager
+      NSSummary nsSummaryInDir1 = getReconNamespaceSummaryManager()
           .getNSSummary(DIR_ONE_OBJECT_ID);
       assertNotNull(nsSummaryInDir1);
       Set<Long> childDirForDirOne = nsSummaryInDir1.getChildDir();
@@ -255,7 +166,7 @@ public void testReprocessDirsUnderDir() throws Exception {
       dirOneAns.add(DIR_THREE_OBJECT_ID);
       assertEquals(dirOneAns, childDirForDirOne);
 
-      NSSummary nsSummaryInDir2 = reconNamespaceSummaryManager
+      NSSummary nsSummaryInDir2 = getReconNamespaceSummaryManager()
           .getNSSummary(DIR_TWO_OBJECT_ID);
       assertEquals(1, nsSummaryInDir2.getNumOfFiles());
       assertEquals(KEY_THREE_SIZE, nsSummaryInDir2.getSizeOfFiles());
@@ -280,11 +191,11 @@ public void testReprocessDirsUnderDir() throws Exception {
     @Test
     public void testDirectoryParentIdAssignment() throws Exception {
       // Trigger reprocess to simulate reading from OM DB and processing into NSSummary.
-      nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
+      nSSummaryTaskWithFso.reprocessWithFSO(getReconOMMetadataManager());
 
       // Fetch NSSummary for DIR_ONE and verify its parent ID matches BUCKET_ONE_OBJECT_ID.
       NSSummary nsSummaryDirOne =
-          reconNamespaceSummaryManager.getNSSummary(DIR_ONE_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(DIR_ONE_OBJECT_ID);
       assertNotNull(nsSummaryDirOne,
           "NSSummary for DIR_ONE should not be null.");
       assertEquals(BUCKET_ONE_OBJECT_ID, nsSummaryDirOne.getParentId(),
@@ -292,7 +203,7 @@ public void testDirectoryParentIdAssignment() throws Exception {
 
       // Fetch NSSummary for DIR_TWO and verify its parent ID matches DIR_ONE_OBJECT_ID.
       NSSummary nsSummaryDirTwo =
-          reconNamespaceSummaryManager.getNSSummary(DIR_TWO_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(DIR_TWO_OBJECT_ID);
       assertNotNull(nsSummaryDirTwo,
           "NSSummary for DIR_TWO should not be null.");
       assertEquals(DIR_ONE_OBJECT_ID, nsSummaryDirTwo.getParentId(),
@@ -300,7 +211,7 @@ public void testDirectoryParentIdAssignment() throws Exception {
 
       // Fetch NSSummary for DIR_THREE and verify its parent ID matches DIR_ONE_OBJECT_ID.
       NSSummary nsSummaryDirThree =
-          reconNamespaceSummaryManager.getNSSummary(DIR_THREE_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(DIR_THREE_OBJECT_ID);
       assertNotNull(nsSummaryDirThree,
           "NSSummary for DIR_THREE should not be null.");
       assertEquals(DIR_ONE_OBJECT_ID, nsSummaryDirThree.getParentId(),
@@ -326,7 +237,7 @@ public class TestProcess {
 
     @BeforeEach
     public void setUp() throws IOException {
-      nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
+      nSSummaryTaskWithFso.reprocessWithFSO(getReconOMMetadataManager());
       result = nSSummaryTaskWithFso.processWithFSO(processEventBatch(), 0);
     }
 
@@ -340,7 +251,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omPutKey)
           .setValue(omPutKeyInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
           .build();
@@ -354,7 +265,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omDeleteKey)
           .setValue(omDeleteInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
           .build();
@@ -372,7 +283,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setKey(omUpdateKey)
           .setValue(omUpdateInfo)
           .setOldValue(omOldInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE)
           .build();
@@ -387,7 +298,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setKey(omDirPutKey1)
           .setValue(omDirPutValue1)
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
-          .setTable(omMetadataManager.getDirectoryTable().getName())
+          .setTable(getOmMetadataManager().getDirectoryTable().getName())
           .build();
 
       // add dir 5 under bucket 2
@@ -399,7 +310,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setKey(omDirPutKey2)
           .setValue(omDirPutValue2)
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
-          .setTable(omMetadataManager.getDirectoryTable().getName())
+          .setTable(getOmMetadataManager().getDirectoryTable().getName())
           .build();
 
       // delete dir 3 under dir 1
@@ -411,7 +322,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setKey(omDirDeleteKey)
           .setValue(omDirDeleteValue)
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
-          .setTable(omMetadataManager.getDirectoryTable().getName())
+          .setTable(getOmMetadataManager().getDirectoryTable().getName())
           .build();
 
       // rename dir1
@@ -426,7 +337,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setValue(omDirUpdateValue)
           .setOldValue(omDirOldValue)
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE)
-          .setTable(omMetadataManager.getDirectoryTable().getName())
+          .setTable(getOmMetadataManager().getDirectoryTable().getName())
           .build();
 
       return new OMUpdateEventBatch(Arrays.asList(
@@ -438,7 +349,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
     @Test
     public void testProcessUpdateFileSize() throws IOException {
       NSSummary nsSummaryForBucket1 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_ONE_OBJECT_ID);
       // file 1 is gone, so bucket 1 is empty now
       assertNotNull(nsSummaryForBucket1);
       assertEquals(0, nsSummaryForBucket1.getNumOfFiles());
@@ -455,7 +366,7 @@ public void testProcessUpdateFileSize() throws IOException {
     @Test
     public void testProcessBucket() throws IOException {
       NSSummary nsSummaryForBucket2 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_TWO_OBJECT_ID);
       // file 5 is added under bucket 2, so bucket 2 has 3 keys now
       // file 2 is updated with new datasize,
       // so file size dist for bucket 2 should be updated
@@ -489,7 +400,7 @@ public void testProcessBucket() throws IOException {
     @Test
     public void testProcessDirDeleteRename() throws IOException {
       // after delete dir 3, dir 1 now has only one dir: dir2
-      NSSummary nsSummaryForDir1 = reconNamespaceSummaryManager
+      NSSummary nsSummaryForDir1 = getReconNamespaceSummaryManager()
           .getNSSummary(DIR_ONE_OBJECT_ID);
       assertNotNull(nsSummaryForDir1);
       Set<Long> childDirForDir1 = nsSummaryForDir1.getChildDir();
@@ -507,7 +418,7 @@ public void testParentIdAfterProcessEventBatch() throws IOException {
 
       // Verify the parent ID of DIR_FOUR after it's added under BUCKET_ONE.
       NSSummary nsSummaryDirFour =
-          reconNamespaceSummaryManager.getNSSummary(DIR_FOUR_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(DIR_FOUR_OBJECT_ID);
       assertNotNull(nsSummaryDirFour,
           "NSSummary for DIR_FOUR should not be null.");
       assertEquals(BUCKET_ONE_OBJECT_ID, nsSummaryDirFour.getParentId(),
@@ -515,7 +426,7 @@ public void testParentIdAfterProcessEventBatch() throws IOException {
 
       // Verify the parent ID of DIR_FIVE after it's added under BUCKET_TWO.
       NSSummary nsSummaryDirFive =
-          reconNamespaceSummaryManager.getNSSummary(DIR_FIVE_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(DIR_FIVE_OBJECT_ID);
       assertNotNull(nsSummaryDirFive,
           "NSSummary for DIR_FIVE should not be null.");
       assertEquals(BUCKET_TWO_OBJECT_ID, nsSummaryDirFive.getParentId(),
@@ -614,149 +525,4 @@ void testProcessWithFSOFlushAfterThresholdAndFailureOfLastElement()
      Mockito.verify(taskSpy, Mockito.times(1)).flushAndCommitNSToDB(Mockito.anyMap());
     }
   }
-
-  /**
-   * Build a key info for put/update action.
-   * @param volume volume name
-   * @param bucket bucket name
-   * @param key key name
-   * @param fileName file name
-   * @param objectID object ID
-   * @param parentObjectId parent object ID
-   * @param dataSize file size
-   * @return the KeyInfo
-   */
-  private static OmKeyInfo buildOmKeyInfo(String volume,
-                                          String bucket,
-                                          String key,
-                                          String fileName,
-                                          long objectID,
-                                          long parentObjectId,
-                                          long dataSize) {
-    return new OmKeyInfo.Builder()
-        .setBucketName(bucket)
-        .setVolumeName(volume)
-        .setKeyName(key)
-        .setFileName(fileName)
-        .setReplicationConfig(
-            StandaloneReplicationConfig.getInstance(
-                HddsProtos.ReplicationFactor.ONE))
-        .setObjectID(objectID)
-        .setParentObjectID(parentObjectId)
-        .setDataSize(dataSize)
-        .build();
-  }
-
-  /**
-   * Build a key info for delete action.
-   * @param volume volume name
-   * @param bucket bucket name
-   * @param key key name
-   * @param fileName file name
-   * @param objectID object ID
-   * @param parentObjectId parent object ID
-   * @return the KeyInfo
-   */
-  private static OmKeyInfo buildOmKeyInfo(String volume,
-                                          String bucket,
-                                          String key,
-                                          String fileName,
-                                          long objectID,
-                                          long parentObjectId) {
-    return new OmKeyInfo.Builder()
-            .setBucketName(bucket)
-            .setVolumeName(volume)
-            .setKeyName(key)
-            .setFileName(fileName)
-            .setReplicationConfig(
-                    StandaloneReplicationConfig.getInstance(
-                            HddsProtos.ReplicationFactor.ONE))
-            .setObjectID(objectID)
-            .setParentObjectID(parentObjectId)
-            .build();
-  }
-
-  private static OmDirectoryInfo buildOmDirInfo(String dirName,
-                                                long objectId,
-                                                long parentObjectId) {
-    return new OmDirectoryInfo.Builder()
-            .setName(dirName)
-            .setObjectID(objectId)
-            .setParentObjectID(parentObjectId)
-            .build();
-  }
-
-  /**
-   * Populate OMDB with the following configs.
-   *              vol
-   *            /     \
-   *        bucket1   bucket2
-   *        /    \      /    \
-   *     file1  dir1  file2  file4
-   *            /   \
-   *         dir2   dir3
-   *          /
-   *        file3
-   *
-   * @throws IOException
-   */
-  private void populateOMDB() throws IOException {
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_ONE,
-        BUCKET_ONE,
-        VOL,
-        FILE_ONE,
-        KEY_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_ONE_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_TWO,
-        BUCKET_TWO,
-        VOL,
-        FILE_TWO,
-        KEY_TWO_OBJECT_ID,
-        BUCKET_TWO_OBJECT_ID,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_TWO_OLD_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_THREE,
-        BUCKET_ONE,
-        VOL,
-        FILE_THREE,
-        KEY_THREE_OBJECT_ID,
-        DIR_TWO_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_THREE_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_FOUR,
-        BUCKET_TWO,
-        VOL,
-        FILE_FOUR,
-        KEY_FOUR_OBJECT_ID,
-        BUCKET_TWO_OBJECT_ID,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_FOUR_SIZE,
-        getBucketLayout());
-    writeDirToOm(reconOMMetadataManager, DIR_ONE_OBJECT_ID,
-            BUCKET_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID,
-            VOL_OBJECT_ID, DIR_ONE);
-    writeDirToOm(reconOMMetadataManager, DIR_TWO_OBJECT_ID,
-            DIR_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID,
-            VOL_OBJECT_ID, DIR_TWO);
-    writeDirToOm(reconOMMetadataManager, DIR_THREE_OBJECT_ID,
-            DIR_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID,
-            VOL_OBJECT_ID, DIR_THREE);
-  }
-
-  private static BucketLayout getBucketLayout() {
-    return BucketLayout.FILE_SYSTEM_OPTIMIZED;
-  }
 }
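
Every TestReprocess.setUp removed in this commit repeated the same sequence: store a stale NSSummary under the sentinel ID -1L, commit and verify it, clear the namespace column family, run the layout-specific reprocess, assert the sentinel is gone, then fetch and null-check the per-bucket summaries. A minimal sketch of how the shared commonSetUpTestReprocess helper in AbstractNSSummaryTaskTest could implement that sequence is given below; the ReprocessAction functional interface and the exact signature are assumptions for illustration, not the committed code.

    @FunctionalInterface
    interface ReprocessAction {
      // Checked variant of Runnable so the lambda may throw IOException.
      void run() throws IOException;
    }

    protected List<NSSummary> commonSetUpTestReprocess(
        ReprocessAction reprocess, long... bucketObjectIds) throws IOException {
      // Write a stale NSSummary prior to reprocess and verify the commit.
      NSSummary staleNSSummary = new NSSummary();
      RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
      getReconNamespaceSummaryManager()
          .batchStoreNSSummaries(rdbBatchOperation, -1L, staleNSSummary);
      getReconNamespaceSummaryManager().commitBatchOperation(rdbBatchOperation);
      assertNotNull(getReconNamespaceSummaryManager().getNSSummary(-1L));

      // Reinit Recon RocksDB's namespace CF, then run the subclass-supplied
      // reprocess (FSO, Legacy, or OBS); the stale entry must be gone after.
      getReconNamespaceSummaryManager().clearNSSummaryTable();
      reprocess.run();
      assertNull(getReconNamespaceSummaryManager().getNSSummary(-1L));

      // Fetch and null-check the NSSummary for each requested bucket.
      List<NSSummary> summaries = new ArrayList<>(bucketObjectIds.length);
      for (long objectId : bucketObjectIds) {
        NSSummary summary =
            getReconNamespaceSummaryManager().getNSSummary(objectId);
        assertNotNull(summary);
        summaries.add(summary);
      }
      return summaries;
    }
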
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java
index 4d1f58e671..f1e08c50b3 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java
@@ -18,11 +18,6 @@
 package org.apache.hadoop.ozone.recon.tasks;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -32,24 +27,12 @@
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
-import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmConfig;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.recon.ReconConstants;
-import org.apache.hadoop.ozone.recon.ReconTestInjector;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
-import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Nested;
@@ -61,93 +44,28 @@
  * Test for NSSummaryTaskWithLegacy.
  */
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
-public class TestNSSummaryTaskWithLegacy {
+public class TestNSSummaryTaskWithLegacy extends AbstractNSSummaryTaskTest {
 
-  private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
-  private OMMetadataManager omMetadataManager;
-  private ReconOMMetadataManager reconOMMetadataManager;
   private NSSummaryTaskWithLegacy nSSummaryTaskWithLegacy;
-  private OzoneConfiguration omConfiguration;
-
-  // Object names
-  private static final String VOL = "vol";
-  private static final String BUCKET_ONE = "bucket1";
-  private static final String BUCKET_TWO = "bucket2";
-  private static final String KEY_ONE = "file1";
-  private static final String KEY_TWO = "file2";
-  private static final String KEY_THREE = "dir1/dir2/file3";
-  private static final String KEY_FOUR = "file4";
-  private static final String KEY_FIVE = "file5";
-  private static final String FILE_ONE = "file1";
-  private static final String FILE_TWO = "file2";
-  private static final String FILE_THREE = "file3";
-  private static final String FILE_FOUR = "file4";
-  private static final String FILE_FIVE = "file5";
-  private static final String DIR_ONE = "dir1";
-  private static final String DIR_ONE_RENAME = "dir1_new";
-  private static final String DIR_TWO = "dir2";
-  private static final String DIR_THREE = "dir3";
-  private static final String DIR_FOUR = "dir4";
-  private static final String DIR_FIVE = "dir5";
-
-  private static final String TEST_USER = "TestUser";
-
-  private static final long PARENT_OBJECT_ID_ZERO = 0L;
-  private static final long VOL_OBJECT_ID = 0L;
-  private static final long BUCKET_ONE_OBJECT_ID = 1L;
-  private static final long BUCKET_TWO_OBJECT_ID = 2L;
-  private static final long KEY_ONE_OBJECT_ID = 3L;
-  private static final long DIR_ONE_OBJECT_ID = 4L;
-  private static final long KEY_TWO_OBJECT_ID = 5L;
-  private static final long KEY_FOUR_OBJECT_ID = 6L;
-  private static final long DIR_TWO_OBJECT_ID = 7L;
-  private static final long KEY_THREE_OBJECT_ID = 8L;
-  private static final long KEY_FIVE_OBJECT_ID = 9L;
-  private static final long DIR_THREE_OBJECT_ID = 10L;
-  private static final long DIR_FOUR_OBJECT_ID = 11L;
-  private static final long DIR_FIVE_OBJECT_ID = 12L;
-
-  private static final long KEY_ONE_SIZE = 500L;
-  private static final long KEY_TWO_OLD_SIZE = 1025L;
-  private static final long KEY_TWO_UPDATE_SIZE = 1023L;
-  private static final long KEY_THREE_SIZE =
-      ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L;
-  private static final long KEY_FOUR_SIZE = 2050L;
-  private static final long KEY_FIVE_SIZE = 100L;
-
-  private final Set<Long> bucketOneAns = new HashSet<>();
-  private final Set<Long> bucketTwoAns = new HashSet<>();
-  private final Set<Long> dirOneAns = new HashSet<>();
+  // Answer Sets
+  private static Set<Long> bucketOneAns = new HashSet<>();
+  private static Set<Long> bucketTwoAns = new HashSet<>();
+  private static Set<Long> dirOneAns = new HashSet<>();
 
   @BeforeAll
   void setUp(@TempDir File tmpDir) throws Exception {
-    initializeNewOmMetadataManager(new File(tmpDir, "om"));
-    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
-        getMockOzoneManagerServiceProvider();
-    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
-        new File(tmpDir, "recon"));
-
-    ReconTestInjector reconTestInjector =
-        new ReconTestInjector.Builder(tmpDir)
-            .withReconOm(reconOMMetadataManager)
-            .withOmServiceProvider(ozoneManagerServiceProvider)
-            .withReconSqlDb()
-            .withContainerDB()
-            .build();
-    reconNamespaceSummaryManager =
-        reconTestInjector.getInstance(ReconNamespaceSummaryManager.class);
-
-    NSSummary nonExistentSummary =
-        reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
-    assertNull(nonExistentSummary);
-
-    populateOMDB();
-
-    long nsSummaryFlushToDBMaxThreshold = omConfiguration.getLong(
-        OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 10);
-    nSSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy(
-        reconNamespaceSummaryManager,
-        reconOMMetadataManager, omConfiguration, nsSummaryFlushToDBMaxThreshold);
+    commonSetup(tmpDir,
+          new OMConfigParameter(false,
+          false,
+          getBucketLayout(),
+          10,
+          false,
+          true,
+          true));
+    long threshold = getOmConfiguration().getLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 10);
+    nSSummaryTaskWithLegacy =
+        new NSSummaryTaskWithLegacy(getReconNamespaceSummaryManager(), getReconOMMetadataManager(),
+            getOmConfiguration(), threshold);
   }
 
   /**
@@ -161,34 +79,16 @@ public class TestReprocess {
 
     @BeforeEach
     public void setUp() throws IOException {
-      // write a NSSummary prior to reprocess
-      // verify it got cleaned up after.
-      NSSummary staleNSSummary = new NSSummary();
-      RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
-      reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L,
-          staleNSSummary);
-      reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation);
-
-      // Verify commit
-      assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L));
-
-      // reinit Recon RocksDB's namespace CF.
-      reconNamespaceSummaryManager.clearNSSummaryTable();
-
-      nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager);
-      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
-
-      nsSummaryForBucket1 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
-      nsSummaryForBucket2 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
-      assertNotNull(nsSummaryForBucket1);
-      assertNotNull(nsSummaryForBucket2);
+      List<NSSummary> result =
+          commonSetUpTestReprocess(() -> nSSummaryTaskWithLegacy.reprocessWithLegacy(getReconOMMetadataManager()),
+              BUCKET_ONE_OBJECT_ID, BUCKET_TWO_OBJECT_ID);
+      nsSummaryForBucket1 = result.get(0);
+      nsSummaryForBucket2 = result.get(1);
     }
 
     @Test
     public void testReprocessNSSummaryNull() throws IOException {
-      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
+      assertNull(getReconNamespaceSummaryManager().getNSSummary(-1L));
     }
 
     @Test
@@ -240,7 +140,7 @@ public void testReprocessBucketDirs() {
     public void testReprocessDirsUnderDir() throws Exception {
 
       // Dir 1 has two dir: dir2 and dir3.
-      NSSummary nsSummaryInDir1 = reconNamespaceSummaryManager
+      NSSummary nsSummaryInDir1 = getReconNamespaceSummaryManager()
           .getNSSummary(DIR_ONE_OBJECT_ID);
       assertNotNull(nsSummaryInDir1);
       Set<Long> childDirForDirOne = nsSummaryInDir1.getChildDir();
@@ -250,7 +150,7 @@ public void testReprocessDirsUnderDir() throws Exception {
       dirOneAns.add(DIR_THREE_OBJECT_ID);
       assertEquals(dirOneAns, childDirForDirOne);
 
-      NSSummary nsSummaryInDir2 = reconNamespaceSummaryManager
+      NSSummary nsSummaryInDir2 = getReconNamespaceSummaryManager()
           .getNSSummary(DIR_TWO_OBJECT_ID);
       assertEquals(1, nsSummaryInDir2.getNumOfFiles());
       assertEquals(KEY_THREE_SIZE, nsSummaryInDir2.getSizeOfFiles());
@@ -258,8 +158,7 @@ public void testReprocessDirsUnderDir() throws Exception {
       int[] fileDistForDir2 = nsSummaryInDir2.getFileSizeBucket();
       assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS,
           fileDistForDir2.length);
-      assertEquals(1,
-          fileDistForDir2[fileDistForDir2.length - 1]);
+      assertEquals(1, fileDistForDir2[fileDistForDir2.length - 1]);
       for (int i = 0; i < ReconConstants.NUM_OF_FILE_SIZE_BINS - 1; ++i) {
         assertEquals(0, fileDistForDir2[i]);
       }
@@ -294,14 +193,14 @@ public class TestProcess {
 
     @BeforeEach
     public void setUp() throws IOException {
-      nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager);
+      nSSummaryTaskWithLegacy.reprocessWithLegacy(getReconOMMetadataManager());
       nSSummaryTaskWithLegacy.processWithLegacy(processEventBatch(), 0);
 
       nsSummaryForBucket1 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_ONE_OBJECT_ID);
       assertNotNull(nsSummaryForBucket1);
       nsSummaryForBucket2 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_TWO_OBJECT_ID);
       assertNotNull(nsSummaryForBucket2);
     }
 
@@ -317,7 +216,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omPutKey)
           .setValue(omPutKeyInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
           .build();
@@ -334,7 +233,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omDeleteKey)
           .setValue(omDeleteInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
           .build();
@@ -355,7 +254,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setKey(omUpdateKey)
           .setValue(omUpdateInfo)
           .setOldValue(omOldInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE)
           .build();
@@ -373,7 +272,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setKey(omDirPutKey1)
           .setValue(omDirPutValue1)
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout()).getName())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout()).getName())
           .build();
 
       // add dir 5 under bucket 2
@@ -389,7 +288,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setKey(omDirPutKey2)
           .setValue(omDirPutValue2)
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout()).getName())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout()).getName())
           .build();
 
       // delete dir 3 under dir 1
@@ -406,7 +305,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setKey(omDirDeleteKey)
           .setValue(omDirDeleteValue)
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout()).getName())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout()).getName())
           .build();
 
       // rename dir1
@@ -426,7 +325,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setValue(omDirUpdateValue)
           .setOldValue(omDirOldValue)
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout()).getName())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout()).getName())
           .build();
 
       return new OMUpdateEventBatch(Arrays.asList(
@@ -485,7 +384,7 @@ public void testProcessBucket() throws IOException {
     @Test
     public void testProcessDirDeleteRename() throws IOException {
       // after delete dir 3, dir 1 now has only one dir: dir2
-      NSSummary nsSummaryForDir1 = reconNamespaceSummaryManager
+      NSSummary nsSummaryForDir1 = getReconNamespaceSummaryManager()
           .getNSSummary(DIR_ONE_OBJECT_ID);
       assertNotNull(nsSummaryForDir1);
       Set<Long> childDirForDir1 = nsSummaryForDir1.getChildDir();
@@ -499,237 +398,6 @@ public void testProcessDirDeleteRename() throws IOException {
     }
   }
 
-  /**
-   * Build a key info for put/update action.
-   * @param volume volume name
-   * @param bucket bucket name
-   * @param key key name
-   * @param fileName file name
-   * @param objectID object ID
-   * @param parentObjectId parent object ID
-   * @param dataSize file size
-   * @return the KeyInfo
-   */
-  private static OmKeyInfo buildOmKeyInfo(String volume,
-                                          String bucket,
-                                          String key,
-                                          String fileName,
-                                          long objectID,
-                                          long parentObjectId,
-                                          long dataSize) {
-    return new OmKeyInfo.Builder()
-        .setBucketName(bucket)
-        .setVolumeName(volume)
-        .setKeyName(key)
-        .setFileName(fileName)
-        .setReplicationConfig(
-            StandaloneReplicationConfig.getInstance(
-                HddsProtos.ReplicationFactor.ONE))
-        .setObjectID(objectID)
-        .setParentObjectID(parentObjectId)
-        .setDataSize(dataSize)
-        .build();
-  }
-
-  /**
-   * Build a key info for delete action.
-   * @param volume volume name
-   * @param bucket bucket name
-   * @param key key name
-   * @param fileName file name
-   * @param objectID object ID
-   * @param parentObjectId parent object ID
-   * @return the KeyInfo
-   */
-  private static OmKeyInfo buildOmKeyInfo(String volume,
-                                          String bucket,
-                                          String key,
-                                          String fileName,
-                                          long objectID,
-                                          long parentObjectId) {
-    return new OmKeyInfo.Builder()
-        .setBucketName(bucket)
-        .setVolumeName(volume)
-        .setKeyName(key)
-        .setFileName(fileName)
-        .setReplicationConfig(
-            StandaloneReplicationConfig.getInstance(
-                HddsProtos.ReplicationFactor.ONE))
-        .setObjectID(objectID)
-        .setParentObjectID(parentObjectId)
-        .build();
-  }
-
-  /**
-   * Build a directory as key info for put/update action.
-   * We don't need to set size.
-   * @param volume volume name
-   * @param bucket bucket name
-   * @param key key name
-   * @param fileName file name
-   * @param objectID object ID
-   * @return the KeyInfo
-   */
-  private static OmKeyInfo buildOmDirKeyInfo(String volume,
-                                             String bucket,
-                                             String key,
-                                             String fileName,
-                                             long objectID) {
-    return new OmKeyInfo.Builder()
-        .setBucketName(bucket)
-        .setVolumeName(volume)
-        .setKeyName(key)
-        .setFileName(fileName)
-        .setReplicationConfig(
-            StandaloneReplicationConfig.getInstance(
-                HddsProtos.ReplicationFactor.ONE))
-        .setObjectID(objectID)
-        .build();
-  }
-
-  /**
-   * Populate OMDB with the following configs.
-   *              vol
-   *            /     \
-   *        bucket1   bucket2
-   *        /    \      /    \
-   *     file1  dir1  file2  file4
-   *            /   \
-   *         dir2   dir3
-   *          /
-   *        file3
-   *
-   * @throws IOException
-   */
-  private void populateOMDB() throws IOException {
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_ONE,
-        BUCKET_ONE,
-        VOL,
-        FILE_ONE,
-        KEY_ONE_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_ONE_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_TWO,
-        BUCKET_TWO,
-        VOL,
-        FILE_TWO,
-        KEY_TWO_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_TWO_OLD_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_THREE,
-        BUCKET_ONE,
-        VOL,
-        FILE_THREE,
-        KEY_THREE_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_THREE_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_FOUR,
-        BUCKET_TWO,
-        VOL,
-        FILE_FOUR,
-        KEY_FOUR_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_FOUR_SIZE,
-        getBucketLayout());
-
-    writeDirToOm(reconOMMetadataManager,
-        (DIR_ONE + OM_KEY_PREFIX),
-        BUCKET_ONE,
-        VOL,
-        DIR_ONE,
-        DIR_ONE_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        getBucketLayout());
-    writeDirToOm(reconOMMetadataManager,
-        (DIR_ONE + OM_KEY_PREFIX +
-            DIR_TWO + OM_KEY_PREFIX),
-        BUCKET_ONE,
-        VOL,
-        DIR_TWO,
-        DIR_TWO_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        getBucketLayout());
-    writeDirToOm(reconOMMetadataManager,
-        (DIR_ONE + OM_KEY_PREFIX +
-            DIR_THREE + OM_KEY_PREFIX),
-        BUCKET_ONE,
-        VOL,
-        DIR_THREE,
-        DIR_THREE_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        getBucketLayout());
-  }
-
-  /**
-   * Create a new OM Metadata manager instance with one user, one vol, and two
-   * buckets.
-   * @throws IOException ioEx
-   */
-  private void initializeNewOmMetadataManager(
-      File omDbDir)
-      throws IOException {
-    omConfiguration = new OzoneConfiguration();
-    omConfiguration.set(OZONE_OM_DB_DIRS,
-        omDbDir.getAbsolutePath());
-    omConfiguration.set(OmConfig.Keys.ENABLE_FILESYSTEM_PATHS, "true");
-    omConfiguration.set(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, "10");
-    omMetadataManager = new OmMetadataManagerImpl(
-        omConfiguration, null);
-
-    String volumeKey = omMetadataManager.getVolumeKey(VOL);
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setObjectID(VOL_OBJECT_ID)
-            .setVolume(VOL)
-            .setAdminName(TEST_USER)
-            .setOwnerName(TEST_USER)
-            .build();
-    omMetadataManager.getVolumeTable().put(volumeKey, args);
-
-    OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_ONE)
-        .setObjectID(BUCKET_ONE_OBJECT_ID)
-        .setBucketLayout(getBucketLayout())
-        .build();
-
-    OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_TWO)
-        .setObjectID(BUCKET_TWO_OBJECT_ID)
-        .setBucketLayout(getBucketLayout())
-        .build();
-
-    String bucketKey = omMetadataManager.getBucketKey(
-        bucketInfo1.getVolumeName(), bucketInfo1.getBucketName());
-    String bucketKey2 = omMetadataManager.getBucketKey(
-        bucketInfo2.getVolumeName(), bucketInfo2.getBucketName());
-
-    omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1);
-    omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
-  }
-
   private static BucketLayout getBucketLayout() {
     return BucketLayout.LEGACY;
   }
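
This subclass, like the others, keeps @TestInstance(TestInstance.Lifecycle.PER_CLASS) so that the @BeforeAll setup can be a non-static instance method and call inherited helpers such as commonSetup. A self-contained sketch of that JUnit 5 pattern follows; the class and field names are illustrative only.

    import java.io.File;
    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.TestInstance;
    import org.junit.jupiter.api.io.TempDir;

    @TestInstance(TestInstance.Lifecycle.PER_CLASS)
    class PerClassLifecycleSketch {

      private String sharedState;

      @BeforeAll
      void setUp(@TempDir File tmpDir) {
        // Under PER_CLASS one test instance is reused for all test methods,
        // so @BeforeAll may be non-static and may initialize instance state.
        sharedState = tmpDir.getAbsolutePath();
      }

      @Test
      void sharedStateIsVisibleToTests() {
        Assertions.assertNotNull(sharedState);
      }
    }
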
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java
index f8ecd911c5..8dd2a6583d 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java
@@ -18,10 +18,6 @@
 package org.apache.hadoop.ozone.recon.tasks;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -30,24 +26,12 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Set;
-import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.recon.ReconConstants;
-import org.apache.hadoop.ozone.recon.ReconTestInjector;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
-import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Nested;
@@ -59,87 +43,25 @@
  * Test for NSSummaryTaskWithLegacy focusing on the OBS (Object Store) layout.
  */
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
-public class TestNSSummaryTaskWithLegacyOBSLayout {
+public class TestNSSummaryTaskWithLegacyOBSLayout extends AbstractNSSummaryTaskTest {
 
-  private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
-  private ReconOMMetadataManager reconOMMetadataManager;
-  private OzoneConfiguration ozoneConfiguration;
   private NSSummaryTaskWithLegacy nSSummaryTaskWithLegacy;
 
-  private OMMetadataManager omMetadataManager;
-  private OzoneConfiguration omConfiguration;
-
-  // Object names
-  private static final String VOL = "vol";
-  private static final String BUCKET_ONE = "bucket1";
-  private static final String BUCKET_TWO = "bucket2";
-  private static final String KEY_ONE = "key1";
-  private static final String KEY_TWO = "key2";
-  private static final String KEY_THREE = "dir1/dir2/key3";
-  private static final String KEY_FOUR = "key4///////////";
-  private static final String KEY_FIVE = "//////////";
-  private static final String KEY_SIX = "key6";
-  private static final String KEY_SEVEN = "/////key7";
-
-  private static final String TEST_USER = "TestUser";
-
-  private static final long PARENT_OBJECT_ID_ZERO = 0L;
-  private static final long VOL_OBJECT_ID = 0L;
-  private static final long BUCKET_ONE_OBJECT_ID = 1L;
-  private static final long BUCKET_TWO_OBJECT_ID = 2L;
-  private static final long KEY_ONE_OBJECT_ID = 3L;
-  private static final long KEY_TWO_OBJECT_ID = 5L;
-  private static final long KEY_FOUR_OBJECT_ID = 6L;
-  private static final long KEY_THREE_OBJECT_ID = 8L;
-  private static final long KEY_FIVE_OBJECT_ID = 9L;
-  private static final long KEY_SIX_OBJECT_ID = 10L;
-  private static final long KEY_SEVEN_OBJECT_ID = 11L;
-
-  private static final long KEY_ONE_SIZE = 500L;
-  private static final long KEY_TWO_OLD_SIZE = 1025L;
-  private static final long KEY_TWO_UPDATE_SIZE = 1023L;
-  private static final long KEY_THREE_SIZE =
-      ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L;
-  private static final long KEY_FOUR_SIZE = 2050L;
-  private static final long KEY_FIVE_SIZE = 100L;
-  private static final long KEY_SIX_SIZE = 6000L;
-  private static final long KEY_SEVEN_SIZE = 7000L;
-
   @BeforeAll
   void setUp(@TempDir File tmpDir) throws Exception {
-    initializeNewOmMetadataManager(new File(tmpDir, "om"));
-    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
-        getMockOzoneManagerServiceProviderWithFSO();
-    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
-        new File(tmpDir, "recon"));
-    ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-        false);
-    ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD,
-        10);
-
-    ReconTestInjector reconTestInjector =
-        new ReconTestInjector.Builder(tmpDir)
-            .withReconOm(reconOMMetadataManager)
-            .withOmServiceProvider(ozoneManagerServiceProvider)
-            .withReconSqlDb()
-            .withContainerDB()
-            .build();
-    reconNamespaceSummaryManager =
-        reconTestInjector.getInstance(ReconNamespaceSummaryManager.class);
-
-    NSSummary nonExistentSummary =
-        reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
-    assertNull(nonExistentSummary);
-
-    populateOMDB();
-
-    long nsSummaryFlushToDBMaxThreshold = ozoneConfiguration.getLong(
-        OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 10);
-    nSSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy(
-        reconNamespaceSummaryManager,
-        reconOMMetadataManager, ozoneConfiguration,
-        nsSummaryFlushToDBMaxThreshold);
+    commonSetup(tmpDir,
+        new OMConfigParameter(true,
+          true,
+          getBucketLayout(),
+          10,
+          true,
+          false,
+          false));
+    long threshold = getOzoneConfiguration().getLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 10);
+    nSSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy(getReconNamespaceSummaryManager(),
+        getReconOMMetadataManager(),
+        getOzoneConfiguration(),
+        threshold);
   }
 
   /**
@@ -153,34 +75,16 @@ public class TestReprocess {
 
     @BeforeEach
     public void setUp() throws IOException {
-      // write a NSSummary prior to reprocess
-      // verify it got cleaned up after.
-      NSSummary staleNSSummary = new NSSummary();
-      RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
-      reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L,
-          staleNSSummary);
-      reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation);
-
-      // Verify commit
-      assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L));
-
-      // reinit Recon RocksDB's namespace CF.
-      reconNamespaceSummaryManager.clearNSSummaryTable();
-
-      nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager);
-      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
-
-      nsSummaryForBucket1 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
-      nsSummaryForBucket2 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
-      assertNotNull(nsSummaryForBucket1);
-      assertNotNull(nsSummaryForBucket2);
+      List<NSSummary> result = commonSetUpTestReprocess(() ->
+          nSSummaryTaskWithLegacy.reprocessWithLegacy(getReconOMMetadataManager()),
+          BUCKET_ONE_OBJECT_ID, BUCKET_TWO_OBJECT_ID);
+      nsSummaryForBucket1 = result.get(0);
+      nsSummaryForBucket2 = result.get(1);
     }
 
     @Test
     public void testReprocessNSSummaryNull() throws IOException {
-      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
+      assertNull(getReconNamespaceSummaryManager().getNSSummary(-1L));
     }
 
     @Test
@@ -243,15 +147,15 @@ public class TestProcess {
     @BeforeEach
     public void setUp() throws IOException {
       // reinit Recon RocksDB's namespace CF.
-      reconNamespaceSummaryManager.clearNSSummaryTable();
-      nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager);
+      getReconNamespaceSummaryManager().clearNSSummaryTable();
+      nSSummaryTaskWithLegacy.reprocessWithLegacy(getReconOMMetadataManager());
       nSSummaryTaskWithLegacy.processWithLegacy(processEventBatch(), 0);
 
       nsSummaryForBucket1 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_ONE_OBJECT_ID);
       assertNotNull(nsSummaryForBucket1);
       nsSummaryForBucket2 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_TWO_OBJECT_ID);
       assertNotNull(nsSummaryForBucket2);
     }
 
@@ -268,7 +172,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omPutKey)
           .setValue(omPutKeyInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
           .build();
@@ -283,7 +187,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omPutKey)
           .setValue(omPutKeyInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
           .build();
@@ -299,7 +203,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
       keyEvent3 = new OMDBUpdateEvent.
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omDeleteKey)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setValue(omDeleteKeyInfo)
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
@@ -322,7 +226,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setKey(omResizeKey)
           .setOldValue(oldOmResizeKeyInfo)
           .setValue(newOmResizeKeyInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE)
           .build();
@@ -387,168 +291,6 @@ public void testProcessFileBucketSize() {
 
   }
 
-  /**
-   * Populate OMDB with the following configs.
-   *         vol
-   *      /       \
-   * bucket1     bucket2
-   * /    \   \        \  \
-   * key1  key2   key3   key4 key5
-   *
-   * @throws IOException
-   */
-  private void populateOMDB() throws IOException {
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_ONE,
-        BUCKET_ONE,
-        VOL,
-        KEY_ONE,
-        KEY_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_ONE_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_TWO,
-        BUCKET_ONE,
-        VOL,
-        KEY_TWO,
-        KEY_TWO_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_TWO_OLD_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_THREE,
-        BUCKET_ONE,
-        VOL,
-        KEY_THREE,
-        KEY_THREE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_THREE_SIZE,
-        getBucketLayout());
-
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_FOUR,
-        BUCKET_TWO,
-        VOL,
-        KEY_FOUR,
-        KEY_FOUR_OBJECT_ID,
-        BUCKET_TWO_OBJECT_ID,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_FOUR_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_FIVE,
-        BUCKET_TWO,
-        VOL,
-        KEY_FIVE,
-        KEY_FIVE_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_FIVE_SIZE,
-        getBucketLayout());
-  }
-
-  /**
-   * Create a new OM Metadata manager instance with one user, one vol, and two
-   * buckets.
-   *
-   * @throws IOException ioEx
-   */
-  private void initializeNewOmMetadataManager(
-      File omDbDir)
-      throws IOException {
-    omConfiguration = new OzoneConfiguration();
-    omConfiguration.set(OZONE_OM_DB_DIRS,
-        omDbDir.getAbsolutePath());
-    omConfiguration.set(OMConfigKeys
-        .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true");
-    omMetadataManager = new OmMetadataManagerImpl(
-        omConfiguration, null);
-
-    String volumeKey = omMetadataManager.getVolumeKey(VOL);
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setObjectID(VOL_OBJECT_ID)
-            .setVolume(VOL)
-            .setAdminName(TEST_USER)
-            .setOwnerName(TEST_USER)
-            .build();
-    omMetadataManager.getVolumeTable().put(volumeKey, args);
-
-    OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_ONE)
-        .setObjectID(BUCKET_ONE_OBJECT_ID)
-        .setBucketLayout(getBucketLayout())
-        .build();
-
-    OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_TWO)
-        .setObjectID(BUCKET_TWO_OBJECT_ID)
-        .setBucketLayout(getBucketLayout())
-        .build();
-
-    String bucketKey = omMetadataManager.getBucketKey(
-        bucketInfo1.getVolumeName(), bucketInfo1.getBucketName());
-    String bucketKey2 = omMetadataManager.getBucketKey(
-        bucketInfo2.getVolumeName(), bucketInfo2.getBucketName());
-
-    omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1);
-    omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
-  }
-
-  /**
-   * Build a key info for put/update action.
-   *
-   * @param volume         volume name
-   * @param bucket         bucket name
-   * @param key            key name
-   * @param fileName       file name
-   * @param objectID       object ID
-   * @param parentObjectId parent object ID
-   * @param dataSize       file size
-   * @return the KeyInfo
-   */
-  private static OmKeyInfo buildOmKeyInfo(String volume,
-                                          String bucket,
-                                          String key,
-                                          String fileName,
-                                          long objectID,
-                                          long parentObjectId,
-                                          long dataSize) {
-    return new OmKeyInfo.Builder()
-        .setBucketName(bucket)
-        .setVolumeName(volume)
-        .setKeyName(key)
-        .setFileName(fileName)
-        .setReplicationConfig(
-            StandaloneReplicationConfig.getInstance(
-                HddsProtos.ReplicationFactor.ONE))
-        .setObjectID(objectID)
-        .setParentObjectID(parentObjectId)
-        .setDataSize(dataSize)
-        .build();
-  }
-
-  // Helper method to check if an array contains a specific value
-  private boolean contains(int[] arr, int value) {
-    for (int num : arr) {
-      if (num == value) {
-        return true;
-      }
-    }
-    return false;
-  }
-
   private static BucketLayout getBucketLayout() {
     return BucketLayout.LEGACY;
   }
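
Each of the files above also drops a byte-for-byte identical copy of buildOmKeyInfo. After the refactor a single copy presumably lives in AbstractNSSummaryTaskTest, roughly as below; the builder chain mirrors the removed per-class copies, while the protected static visibility is an assumption.

    /**
     * Build a key info for put/update events; shared by all layout subclasses.
     */
    protected static OmKeyInfo buildOmKeyInfo(String volume,
                                              String bucket,
                                              String key,
                                              String fileName,
                                              long objectID,
                                              long parentObjectId,
                                              long dataSize) {
      return new OmKeyInfo.Builder()
          .setBucketName(bucket)
          .setVolumeName(volume)
          .setKeyName(key)
          .setFileName(fileName)
          .setReplicationConfig(
              StandaloneReplicationConfig.getInstance(
                  HddsProtos.ReplicationFactor.ONE))
          .setObjectID(objectID)
          .setParentObjectID(parentObjectId)
          .setDataSize(dataSize)
          .build();
    }
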
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java
index b275e1b66e..d748e9c05f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java
@@ -18,10 +18,6 @@
 package org.apache.hadoop.ozone.recon.tasks;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
-import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD_DEFAULT;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -31,24 +27,12 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Set;
-import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmConfig;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.recon.ReconConstants;
-import org.apache.hadoop.ozone.recon.ReconTestInjector;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
-import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
-import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Nested;
@@ -60,80 +44,24 @@
  * Unit test for NSSummaryTaskWithOBS.
  */
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
-public class TestNSSummaryTaskWithOBS {
-  private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
-  private OMMetadataManager omMetadataManager;
-  private ReconOMMetadataManager reconOMMetadataManager;
+public class TestNSSummaryTaskWithOBS extends AbstractNSSummaryTaskTest {
+
   private NSSummaryTaskWithOBS nSSummaryTaskWithOBS;
-  private OzoneConfiguration omConfiguration;
-
-  // Object names
-  private static final String VOL = "vol";
-  private static final String BUCKET_ONE = "bucket1";
-  private static final String BUCKET_TWO = "bucket2";
-  private static final String KEY_ONE = "key1";
-  private static final String KEY_TWO = "key2";
-  private static final String KEY_THREE = "dir1/dir2/key3";
-  private static final String KEY_FOUR = "key4///////////";
-  private static final String KEY_FIVE = "//////////";
-  private static final String KEY_SIX = "key6";
-  private static final String KEY_SEVEN = "key7";
-
-  private static final String TEST_USER = "TestUser";
-
-  private static final long PARENT_OBJECT_ID_ZERO = 0L;
-  private static final long VOL_OBJECT_ID = 0L;
-  private static final long BUCKET_ONE_OBJECT_ID = 1L;
-  private static final long BUCKET_TWO_OBJECT_ID = 2L;
-  private static final long KEY_ONE_OBJECT_ID = 3L;
-  private static final long KEY_TWO_OBJECT_ID = 5L;
-  private static final long KEY_FOUR_OBJECT_ID = 6L;
-  private static final long KEY_THREE_OBJECT_ID = 8L;
-  private static final long KEY_FIVE_OBJECT_ID = 9L;
-  private static final long KEY_SIX_OBJECT_ID = 10L;
-  private static final long KEY_SEVEN_OBJECT_ID = 11L;
-
-  private static final long KEY_ONE_SIZE = 500L;
-  private static final long KEY_TWO_OLD_SIZE = 1025L;
-  private static final long KEY_TWO_UPDATE_SIZE = 1023L;
-  private static final long KEY_THREE_SIZE =
-      ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L;
-  private static final long KEY_FOUR_SIZE = 2050L;
-  private static final long KEY_FIVE_SIZE = 100L;
-  private static final long KEY_SIX_SIZE = 6000L;
-  private static final long KEY_SEVEN_SIZE = 7000L;
 
   @BeforeAll
   void setUp(@TempDir File tmpDir) throws Exception {
-    initializeNewOmMetadataManager(new File(tmpDir, "om"));
-    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
-        getMockOzoneManagerServiceProviderWithFSO();
-    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
-        new File(tmpDir, "recon"));
-
-    ReconTestInjector reconTestInjector =
-        new ReconTestInjector.Builder(tmpDir)
-            .withReconOm(reconOMMetadataManager)
-            .withOmServiceProvider(ozoneManagerServiceProvider)
-            .withReconSqlDb()
-            .withContainerDB()
-            .build();
-    reconNamespaceSummaryManager =
-        reconTestInjector.getInstance(ReconNamespaceSummaryManager.class);
-
-    NSSummary nonExistentSummary =
-        reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
-    assertNull(nonExistentSummary);
-
-    populateOMDB();
-
-    long nsSummaryFlushToDBMaxThreshold = omConfiguration.getLong(
+    commonSetup(tmpDir, new OMConfigParameter(true,
+        false, getBucketLayout(),
+        OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD_DEFAULT,
+        false,
+        true,
+        false));
+    long threshold = getOmConfiguration().getLong(
         OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD,
         OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD_DEFAULT);
-    nSSummaryTaskWithOBS = new NSSummaryTaskWithOBS(
-        reconNamespaceSummaryManager,
-        reconOMMetadataManager,
-        nsSummaryFlushToDBMaxThreshold);
+    nSSummaryTaskWithOBS = new NSSummaryTaskWithOBS(getReconNamespaceSummaryManager(),
+        getReconOMMetadataManager(),
+        threshold);
   }
 
   /**
@@ -147,34 +75,16 @@ class TestReprocess {
 
     @BeforeEach
     public void setUp() throws IOException {
-      // write a NSSummary prior to reprocess
-      // verify it got cleaned up after.
-      NSSummary staleNSSummary = new NSSummary();
-      RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
-      reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L,
-          staleNSSummary);
-      reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation);
-
-      // Verify commit
-      assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L));
-
-      // reinit Recon RocksDB's namespace CF.
-      reconNamespaceSummaryManager.clearNSSummaryTable();
-
-      nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager);
-      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
-
-      nsSummaryForBucket1 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
-      nsSummaryForBucket2 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
-      assertNotNull(nsSummaryForBucket1);
-      assertNotNull(nsSummaryForBucket2);
+      List<NSSummary> result = commonSetUpTestReprocess(
+          () -> nSSummaryTaskWithOBS.reprocessWithOBS(getReconOMMetadataManager()),
+          BUCKET_ONE_OBJECT_ID, BUCKET_TWO_OBJECT_ID);
+      nsSummaryForBucket1 = result.get(0);
+      nsSummaryForBucket2 = result.get(1);
     }
 
     @Test
     public void testReprocessNSSummaryNull() throws IOException {
-      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
+      assertNull(getReconNamespaceSummaryManager().getNSSummary(-1L));
     }
 
     @Test
@@ -237,15 +147,15 @@ public class TestProcess {
     @BeforeEach
     public void setUp() throws IOException {
       // reinit Recon RocksDB's namespace CF.
-      reconNamespaceSummaryManager.clearNSSummaryTable();
-      nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager);
+      getReconNamespaceSummaryManager().clearNSSummaryTable();
+      nSSummaryTaskWithOBS.reprocessWithOBS(getReconOMMetadataManager());
       nSSummaryTaskWithOBS.processWithOBS(processEventBatch(), 0);
 
       nsSummaryForBucket1 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_ONE_OBJECT_ID);
       assertNotNull(nsSummaryForBucket1);
       nsSummaryForBucket2 =
-          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
+          getReconNamespaceSummaryManager().getNSSummary(BUCKET_TWO_OBJECT_ID);
       assertNotNull(nsSummaryForBucket2);
     }
 
@@ -262,7 +172,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omPutKey)
           .setValue(omPutKeyInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
           .build();
@@ -277,7 +187,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omPutKey)
           .setValue(omPutKeyInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
           .build();
@@ -293,7 +203,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
       keyEvent3 = new OMDBUpdateEvent.
           OMUpdateEventBuilder<String, OmKeyInfo>()
           .setKey(omDeleteKey)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setValue(omDeleteKeyInfo)
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
@@ -316,7 +226,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException {
           .setKey(omResizeKey)
           .setOldValue(oldOmResizeKeyInfo)
           .setValue(newOmResizeKeyInfo)
-          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+          .setTable(getOmMetadataManager().getKeyTable(getBucketLayout())
               .getName())
           .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE)
           .build();
@@ -381,167 +291,6 @@ public void testProcessFileBucketSize() {
 
   }
 
-  /**
-   * Populate OMDB with the following configs.
-   *                 vol
-   *              /       \
-   *          bucket1     bucket2
-   *        /    \   \        \  \
-   *     key1  key2   key3   key4 key5
-   *
-   * @throws IOException
-   */
-  private void populateOMDB() throws IOException {
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_ONE,
-        BUCKET_ONE,
-        VOL,
-        KEY_ONE,
-        KEY_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_ONE_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_TWO,
-        BUCKET_ONE,
-        VOL,
-        KEY_TWO,
-        KEY_TWO_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_TWO_OLD_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_THREE,
-        BUCKET_ONE,
-        VOL,
-        KEY_THREE,
-        KEY_THREE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        BUCKET_ONE_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_THREE_SIZE,
-        getBucketLayout());
-
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_FOUR,
-        BUCKET_TWO,
-        VOL,
-        KEY_FOUR,
-        KEY_FOUR_OBJECT_ID,
-        BUCKET_TWO_OBJECT_ID,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_FOUR_SIZE,
-        getBucketLayout());
-    writeKeyToOm(reconOMMetadataManager,
-        KEY_FIVE,
-        BUCKET_TWO,
-        VOL,
-        KEY_FIVE,
-        KEY_FIVE_OBJECT_ID,
-        PARENT_OBJECT_ID_ZERO,
-        BUCKET_TWO_OBJECT_ID,
-        VOL_OBJECT_ID,
-        KEY_FIVE_SIZE,
-        getBucketLayout());
-  }
-
-  /**
-   * Create a new OM Metadata manager instance with one user, one vol, and two
-   * buckets.
-   *
-   * @throws IOException ioEx
-   */
-  private void initializeNewOmMetadataManager(
-      File omDbDir)
-      throws IOException {
-    omConfiguration = new OzoneConfiguration();
-    omConfiguration.set(OZONE_OM_DB_DIRS,
-        omDbDir.getAbsolutePath());
-    omConfiguration.set(OmConfig.Keys.ENABLE_FILESYSTEM_PATHS, "true");
-    omConfiguration.set(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, "10");
-    omMetadataManager = new OmMetadataManagerImpl(
-        omConfiguration, null);
-
-    String volumeKey = omMetadataManager.getVolumeKey(VOL);
-    OmVolumeArgs args =
-        OmVolumeArgs.newBuilder()
-            .setObjectID(VOL_OBJECT_ID)
-            .setVolume(VOL)
-            .setAdminName(TEST_USER)
-            .setOwnerName(TEST_USER)
-            .build();
-    omMetadataManager.getVolumeTable().put(volumeKey, args);
-
-    OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_ONE)
-        .setObjectID(BUCKET_ONE_OBJECT_ID)
-        .setBucketLayout(getBucketLayout())
-        .build();
-
-    OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
-        .setVolumeName(VOL)
-        .setBucketName(BUCKET_TWO)
-        .setObjectID(BUCKET_TWO_OBJECT_ID)
-        .setBucketLayout(getBucketLayout())
-        .build();
-
-    String bucketKey = omMetadataManager.getBucketKey(
-        bucketInfo1.getVolumeName(), bucketInfo1.getBucketName());
-    String bucketKey2 = omMetadataManager.getBucketKey(
-        bucketInfo2.getVolumeName(), bucketInfo2.getBucketName());
-
-    omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1);
-    omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
-  }
-
-  /**
-   * Build a key info for put/update action.
-   * @param volume volume name
-   * @param bucket bucket name
-   * @param key key name
-   * @param fileName file name
-   * @param objectID object ID
-   * @param parentObjectId parent object ID
-   * @param dataSize file size
-   * @return the KeyInfo
-   */
-  private static OmKeyInfo buildOmKeyInfo(String volume,
-                                          String bucket,
-                                          String key,
-                                          String fileName,
-                                          long objectID,
-                                          long parentObjectId,
-                                          long dataSize) {
-    return new OmKeyInfo.Builder()
-        .setBucketName(bucket)
-        .setVolumeName(volume)
-        .setKeyName(key)
-        .setFileName(fileName)
-        .setReplicationConfig(
-            StandaloneReplicationConfig.getInstance(
-                HddsProtos.ReplicationFactor.ONE))
-        .setObjectID(objectID)
-        .setParentObjectID(parentObjectId)
-        .setDataSize(dataSize)
-        .build();
-  }
-
-  // Helper method to check if an array contains a specific value
-  private boolean contains(int[] arr, int value) {
-    for (int num : arr) {
-      if (num == value) {
-        return true;
-      }
-    }
-    return false;
-  }
-
   private static BucketLayout getBucketLayout() {
     return BucketLayout.OBJECT_STORE;
   }


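The consolidated setup pattern introduced by AbstractNSSummaryTaskTest can be
summarized with the sketch below. This is a minimal illustration assembled only
from the calls visible in this diff (commonSetup, OMConfigParameter,
commonSetUpTestReprocess, and the getReconNamespaceSummaryManager /
getReconOMMetadataManager accessors); the meaning of the individual boolean
flags passed to OMConfigParameter is an assumption inferred from the OBS test
above, and the subclass name is hypothetical.

    // Hypothetical layout-specific test reusing the shared base class.
    @TestInstance(TestInstance.Lifecycle.PER_CLASS)
    class TestNSSummaryTaskWithExampleLayout extends AbstractNSSummaryTaskTest {

      private NSSummaryTaskWithOBS task;

      @BeforeAll
      void setUp(@TempDir File tmpDir) throws Exception {
        // Delegates OM/Recon metadata manager creation and OMDB population
        // to the base class; the OMConfigParameter flags mirror the OBS
        // arguments used in this commit.
        commonSetup(tmpDir, new OMConfigParameter(true, false,
            BucketLayout.OBJECT_STORE,
            OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD_DEFAULT,
            false, true, false));
        task = new NSSummaryTaskWithOBS(getReconNamespaceSummaryManager(),
            getReconOMMetadataManager(),
            OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD_DEFAULT);
      }
    }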
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
