This is an automated email from the ASF dual-hosted git repository.

sumitagrawal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 155c0284384 HDDS-13805. Ozone Recon - Correct the open keys and delete pending and summary APIs to load OM metadata from new globalStats rocksDB table. (#9167)
155c0284384 is described below

commit 155c0284384b6b44fdb73bf6d000bc8540351044
Author: Devesh Kumar Singh <[email protected]>
AuthorDate: Fri Oct 17 20:53:37 2025 +0530

    HDDS-13805. Ozone Recon - Correct the open keys and delete pending and summary APIs to load OM metadata from new globalStats rocksDB table. (#9167)
---
 .../TestReconInsightsForDeletedDirectories.java    |   8 +-
 .../ozone/recon/api/OMDBInsightEndpoint.java       | 117 ++++++++++++---------
 .../ozone/recon/api/TestOmDBInsightEndPoint.java   |  67 ++++++------
 3 files changed, 107 insertions(+), 85 deletions(-)
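
In short, OMDBInsightEndpoint now obtains its aggregate counters from ReconGlobalStatsManager, which reads GlobalStatsValue entries out of Recon's RocksDB globalStats table, instead of the SQL-backed GlobalStatsDao. The sketch below illustrates the new read path; it assumes the constructor-injected manager shown in the patch and the "openKeyTable"/"openFileTable" stat key names used in the tests, and is an illustration rather than the endpoint code itself.

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager;
    import org.apache.hadoop.ozone.recon.tasks.GlobalStatsValue;
    import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask;

    // Illustrative only: a trimmed-down version of the summary logic in the patch.
    final class OpenKeySummarySketch {

      private final ReconGlobalStatsManager statsManager;

      OpenKeySummarySketch(ReconGlobalStatsManager statsManager) {
        this.statsManager = statsManager;
      }

      Map<String, Long> openKeySummary() {
        Map<String, Long> summary = new HashMap<>();
        try {
          // Stat keys are derived the same way OmTableInsightTask writes them,
          // e.g. "openKeyTable" + "Count" (table names assumed from the tests).
          long keyTableCount = valueOrZero(statsManager.getGlobalStatsValue(
              OmTableInsightTask.getTableCountKeyFromTable("openKeyTable")));
          long fileTableCount = valueOrZero(statsManager.getGlobalStatsValue(
              OmTableInsightTask.getTableCountKeyFromTable("openFileTable")));
          summary.put("totalOpenKeys", keyTableCount + fileTableCount);
        } catch (IOException e) {
          // RocksDB lookup failed; fall back to zero, mirroring the endpoint.
          summary.put("totalOpenKeys", 0L);
        }
        return summary;
      }

      private static long valueOrZero(GlobalStatsValue value) {
        // Missing keys come back as null and count as zero.
        return value != null ? value.getValue() : 0L;
      }
    }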

diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
index 793fe07ea52..deb22736779 100644
--- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
+++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
@@ -64,9 +64,9 @@
 import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
 import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager;
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl;
-import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
 import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.AfterEach;
@@ -247,7 +247,7 @@ public void testGetDeletedDirectoryInfo(ReplicationConfig replicationConfig)
 
     OMDBInsightEndpoint omdbInsightEndpoint =
         new OMDBInsightEndpoint(reconSCM, reconOmMetadataManagerInstance,
-            mock(GlobalStatsDao.class), reconNamespaceSummaryManager);
+            mock(ReconGlobalStatsManager.class), reconNamespaceSummaryManager);
 
     // Fetch the deleted directory info from Recon OmDbInsightEndpoint.
     Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(-1, "");
@@ -335,7 +335,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories(ReplicationConfig re
 
     OMDBInsightEndpoint omdbInsightEndpoint =
         new OMDBInsightEndpoint(reconSCM, reconOmMetadataManagerInstance,
-            mock(GlobalStatsDao.class), namespaceSummaryManager);
+            mock(ReconGlobalStatsManager.class), namespaceSummaryManager);
 
     // Delete the entire root directory dir1.
     fs.delete(new Path("/dir1/dir2/dir3"), true);
@@ -418,7 +418,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories(ReplicationCon
             .getOzoneManagerServiceProvider().getOMMetadataManagerInstance();
     OMDBInsightEndpoint omdbInsightEndpoint =
         new OMDBInsightEndpoint(reconSCM, reconOmMetadataManagerInstance,
-            mock(GlobalStatsDao.class), namespaceSummaryManager);
+            mock(ReconGlobalStatsManager.class), namespaceSummaryManager);
     Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(-1, "");
     KeyInsightInfoResponse entity =
         (KeyInsightInfoResponse) deletedDirInfo.getEntity();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index acaf348f22a..ae8e9bdb0b2 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -77,10 +77,10 @@
 import org.apache.hadoop.ozone.recon.api.types.ReconBasicOmKeyInfo;
 import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
 import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager;
 import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl;
+import org.apache.hadoop.ozone.recon.tasks.GlobalStatsValue;
 import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask;
-import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
-import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -102,18 +102,18 @@ public class OMDBInsightEndpoint {
   private final ReconOMMetadataManager omMetadataManager;
   private static final Logger LOG =
       LoggerFactory.getLogger(OMDBInsightEndpoint.class);
-  private final GlobalStatsDao globalStatsDao;
+  private final ReconGlobalStatsManager reconGlobalStatsManager;
   private ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager;
   private final OzoneStorageContainerManager reconSCM;
 
   @Inject
   public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM,
                              ReconOMMetadataManager omMetadataManager,
-                             GlobalStatsDao globalStatsDao,
+                             ReconGlobalStatsManager reconGlobalStatsManager,
                              ReconNamespaceSummaryManagerImpl
                                  reconNamespaceSummaryManager) {
     this.omMetadataManager = omMetadataManager;
-    this.globalStatsDao = globalStatsDao;
+    this.reconGlobalStatsManager = reconGlobalStatsManager;
     this.reconNamespaceSummaryManager = reconNamespaceSummaryManager;
     this.reconSCM = reconSCM;
   }
@@ -341,31 +341,38 @@ public Response getOpenKeySummary() {
    */
   private void createKeysSummaryForOpenKey(
       Map<String, Long> keysSummary) {
-    Long replicatedSizeOpenKey = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getReplicatedSizeKeyFromTable(OPEN_KEY_TABLE)));
-    Long replicatedSizeOpenFile = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getReplicatedSizeKeyFromTable(OPEN_FILE_TABLE)));
-    Long unreplicatedSizeOpenKey = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getUnReplicatedSizeKeyFromTable(OPEN_KEY_TABLE)));
-    Long unreplicatedSizeOpenFile = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getUnReplicatedSizeKeyFromTable(OPEN_FILE_TABLE)));
-    Long openKeyCountForKeyTable = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getTableCountKeyFromTable(OPEN_KEY_TABLE)));
-    Long openKeyCountForFileTable = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getTableCountKeyFromTable(OPEN_FILE_TABLE)));
-
-    // Calculate the total number of open keys
-    keysSummary.put("totalOpenKeys",
-        openKeyCountForKeyTable + openKeyCountForFileTable);
-    // Calculate the total replicated and unreplicated sizes
-    keysSummary.put("totalReplicatedDataSize",
-        replicatedSizeOpenKey + replicatedSizeOpenFile);
-    keysSummary.put("totalUnreplicatedDataSize",
-        unreplicatedSizeOpenKey + unreplicatedSizeOpenFile);
-
+    try {
+      Long replicatedSizeOpenKey = getValueFromId(reconGlobalStatsManager.getGlobalStatsValue(
+          OmTableInsightTask.getReplicatedSizeKeyFromTable(OPEN_KEY_TABLE)));
+      Long replicatedSizeOpenFile = getValueFromId(reconGlobalStatsManager.getGlobalStatsValue(
+          OmTableInsightTask.getReplicatedSizeKeyFromTable(OPEN_FILE_TABLE)));
+      Long unreplicatedSizeOpenKey = getValueFromId(reconGlobalStatsManager.getGlobalStatsValue(
+          OmTableInsightTask.getUnReplicatedSizeKeyFromTable(OPEN_KEY_TABLE)));
+      Long unreplicatedSizeOpenFile = getValueFromId(reconGlobalStatsManager.getGlobalStatsValue(
+          OmTableInsightTask.getUnReplicatedSizeKeyFromTable(OPEN_FILE_TABLE)));
+      Long openKeyCountForKeyTable = getValueFromId(reconGlobalStatsManager.getGlobalStatsValue(
+          OmTableInsightTask.getTableCountKeyFromTable(OPEN_KEY_TABLE)));
+      Long openKeyCountForFileTable = getValueFromId(reconGlobalStatsManager.getGlobalStatsValue(
+          OmTableInsightTask.getTableCountKeyFromTable(OPEN_FILE_TABLE)));
+
+      // Calculate the total number of open keys
+      keysSummary.put("totalOpenKeys",
+          openKeyCountForKeyTable + openKeyCountForFileTable);
+      // Calculate the total replicated and unreplicated sizes
+      keysSummary.put("totalReplicatedDataSize",
+          replicatedSizeOpenKey + replicatedSizeOpenFile);
+      keysSummary.put("totalUnreplicatedDataSize",
+          unreplicatedSizeOpenKey + unreplicatedSizeOpenFile);
+    } catch (IOException e) {
+      LOG.error("Error retrieving open key summary from RocksDB", e);
+      // Return zeros in case of error
+      keysSummary.put("totalOpenKeys", 0L);
+      keysSummary.put("totalReplicatedDataSize", 0L);
+      keysSummary.put("totalUnreplicatedDataSize", 0L);
+    }
   }
 
-  private Long getValueFromId(GlobalStats record) {
+  private Long getValueFromId(GlobalStatsValue record) {
     // If the record is null, return 0
     return record != null ? record.getValue() : 0L;
   }
@@ -543,19 +550,27 @@ private boolean getPendingForDeletionKeyInfo(
    * @param keysSummary A map to store the keys summary information.
    */
   private void createKeysSummaryForDeletedKey(Map<String, Long> keysSummary) {
-    // Fetch the necessary metrics for deleted keys
-    Long replicatedSizeDeleted = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getReplicatedSizeKeyFromTable(DELETED_TABLE)));
-    Long unreplicatedSizeDeleted = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getUnReplicatedSizeKeyFromTable(DELETED_TABLE)));
-    Long deletedKeyCount = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getTableCountKeyFromTable(DELETED_TABLE)));
-
-    // Calculate the total number of deleted keys
-    keysSummary.put("totalDeletedKeys", deletedKeyCount);
-    // Calculate the total replicated and unreplicated sizes
-    keysSummary.put("totalReplicatedDataSize", replicatedSizeDeleted);
-    keysSummary.put("totalUnreplicatedDataSize", unreplicatedSizeDeleted);
+    try {
+      // Fetch the necessary metrics for deleted keys
+      Long replicatedSizeDeleted = getValueFromId(reconGlobalStatsManager.getGlobalStatsValue(
+          OmTableInsightTask.getReplicatedSizeKeyFromTable(DELETED_TABLE)));
+      Long unreplicatedSizeDeleted = getValueFromId(reconGlobalStatsManager.getGlobalStatsValue(
+          OmTableInsightTask.getUnReplicatedSizeKeyFromTable(DELETED_TABLE)));
+      Long deletedKeyCount = getValueFromId(reconGlobalStatsManager.getGlobalStatsValue(
+          OmTableInsightTask.getTableCountKeyFromTable(DELETED_TABLE)));
+
+      // Calculate the total number of deleted keys
+      keysSummary.put("totalDeletedKeys", deletedKeyCount);
+      // Calculate the total replicated and unreplicated sizes
+      keysSummary.put("totalReplicatedDataSize", replicatedSizeDeleted);
+      keysSummary.put("totalUnreplicatedDataSize", unreplicatedSizeDeleted);
+    } catch (IOException e) {
+      LOG.error("Error retrieving deleted key summary from RocksDB", e);
+      // Return zeros in case of error
+      keysSummary.put("totalDeletedKeys", 0L);
+      keysSummary.put("totalReplicatedDataSize", 0L);
+      keysSummary.put("totalUnreplicatedDataSize", 0L);
+    }
   }
 
   private void getPendingForDeletionDirInfo(
@@ -1308,11 +1323,17 @@ private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey,
 
   private void createSummaryForDeletedDirectories(
       Map<String, Long> dirSummary) {
-    // Fetch the necessary metrics for deleted directories.
-    Long deletedDirCount = getValueFromId(globalStatsDao.findById(
-        OmTableInsightTask.getTableCountKeyFromTable(DELETED_DIR_TABLE)));
-    // Calculate the total number of deleted directories
-    dirSummary.put("totalDeletedDirectories", deletedDirCount);
+    try {
+      // Fetch the necessary metrics for deleted directories.
+      Long deletedDirCount = getValueFromId(reconGlobalStatsManager.getGlobalStatsValue(
+          OmTableInsightTask.getTableCountKeyFromTable(DELETED_DIR_TABLE)));
+      // Calculate the total number of deleted directories
+      dirSummary.put("totalDeletedDirectories", deletedDirCount);
+    } catch (IOException e) {
+      LOG.error("Error retrieving deleted directory summary from RocksDB", e);
+      // Return zero in case of error
+      dirSummary.put("totalDeletedDirectories", 0L);
+    }
   }
 
   private boolean validateStartPrefix(String startPrefix) {
@@ -1335,8 +1356,8 @@ private String createPath(OmKeyInfo omKeyInfo) {
   }
 
   @VisibleForTesting
-  public GlobalStatsDao getDao() {
-    return this.globalStatsDao;
+  public ReconGlobalStatsManager getReconGlobalStatsManager() {
+    return this.reconGlobalStatsManager;
   }
 
   @VisibleForTesting
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
index 1e83f1354a2..c83501bb7a0 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
@@ -82,16 +82,16 @@
 import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager;
 import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
 import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager;
 import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
 import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTaskOBS;
+import org.apache.hadoop.ozone.recon.tasks.GlobalStatsValue;
 import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO;
 import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy;
 import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS;
-import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
-import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -923,22 +923,22 @@ public void testGetOpenKeyInfo() throws Exception {
   }
 
   @Test
-  public void testKeyCountsForValidAndInvalidKeyPrefix() {
+  public void testKeyCountsForValidAndInvalidKeyPrefix() throws IOException {
     Timestamp now = new Timestamp(System.currentTimeMillis());
-    GlobalStatsDao statsDao = omdbInsightEndpoint.getDao();
+    ReconGlobalStatsManager statsManager = omdbInsightEndpoint.getReconGlobalStatsManager();
 
     // Insert valid key count with valid key prefix
-    insertGlobalStatsRecords(statsDao, now,
+    insertGlobalStatsRecords(statsManager, now,
         "openKeyTable" + "Count", 3L);
-    insertGlobalStatsRecords(statsDao, now,
+    insertGlobalStatsRecords(statsManager, now,
         "openFileTable" + "Count", 3L);
-    insertGlobalStatsRecords(statsDao, now,
+    insertGlobalStatsRecords(statsManager, now,
         "openKeyTable" + "ReplicatedDataSize", 150L);
-    insertGlobalStatsRecords(statsDao, now,
+    insertGlobalStatsRecords(statsManager, now,
         "openFileTable" + "ReplicatedDataSize", 150L);
-    insertGlobalStatsRecords(statsDao, now,
+    insertGlobalStatsRecords(statsManager, now,
         "openKeyTable" + "UnReplicatedDataSize", 50L);
-    insertGlobalStatsRecords(statsDao, now,
+    insertGlobalStatsRecords(statsManager, now,
         "openFileTable" + "UnReplicatedDataSize", 50L);
 
     Response openKeyInfoResp =
@@ -956,17 +956,18 @@ public void testKeyCountsForValidAndInvalidKeyPrefix() {
         openKeysSummary.get("totalUnreplicatedDataSize"));
 
     // Delete the previous records and Update the new value for valid key prefix
-    statsDao.deleteById("openKeyTable" + "Count",
-        "openFileTable" + "Count",
-        "openKeyTable" + "ReplicatedDataSize",
-        "openFileTable" + "ReplicatedDataSize",
-        "openKeyTable" + "UnReplicatedDataSize",
-        "openFileTable" + "UnReplicatedDataSize");
+    Table<String, GlobalStatsValue> globalStatsTable = statsManager.getGlobalStatsTable();
+    globalStatsTable.delete("openKeyTable" + "Count");
+    globalStatsTable.delete("openFileTable" + "Count");
+    globalStatsTable.delete("openKeyTable" + "ReplicatedDataSize");
+    globalStatsTable.delete("openFileTable" + "ReplicatedDataSize");
+    globalStatsTable.delete("openKeyTable" + "UnReplicatedDataSize");
+    globalStatsTable.delete("openFileTable" + "UnReplicatedDataSize");
 
     // Insert new record for a key with invalid prefix
-    insertGlobalStatsRecords(statsDao, now, "openKeyTable" + "InvalidPrefix",
+    insertGlobalStatsRecords(statsManager, now, "openKeyTable" + "InvalidPrefix",
         3L);
-    insertGlobalStatsRecords(statsDao, now, "openFileTable" + "InvalidPrefix",
+    insertGlobalStatsRecords(statsManager, now, "openFileTable" + "InvalidPrefix",
         3L);
 
     openKeyInfoResp =
@@ -985,27 +986,27 @@ public void testKeyCountsForValidAndInvalidKeyPrefix() {
   }
 
   @Test
-  public void testKeysSummaryAttribute() {
+  public void testKeysSummaryAttribute() throws IOException {
     Timestamp now = new Timestamp(System.currentTimeMillis());
-    GlobalStatsDao statsDao = omdbInsightEndpoint.getDao();
+    ReconGlobalStatsManager statsManager = omdbInsightEndpoint.getReconGlobalStatsManager();
     // Insert records for replicated and unreplicated data sizes
-    insertGlobalStatsRecords(statsDao, now, "openFileTableReplicatedDataSize",
+    insertGlobalStatsRecords(statsManager, now, 
"openFileTableReplicatedDataSize",
         30L);
-    insertGlobalStatsRecords(statsDao, now, "openKeyTableReplicatedDataSize",
+    insertGlobalStatsRecords(statsManager, now, 
"openKeyTableReplicatedDataSize",
         30L);
-    insertGlobalStatsRecords(statsDao, now, "deletedTableReplicatedDataSize",
+    insertGlobalStatsRecords(statsManager, now, 
"deletedTableReplicatedDataSize",
         30L);
-    insertGlobalStatsRecords(statsDao, now, 
"openFileTableUnReplicatedDataSize",
+    insertGlobalStatsRecords(statsManager, now, 
"openFileTableUnReplicatedDataSize",
         10L);
-    insertGlobalStatsRecords(statsDao, now, "openKeyTableUnReplicatedDataSize",
+    insertGlobalStatsRecords(statsManager, now, 
"openKeyTableUnReplicatedDataSize",
         10L);
-    insertGlobalStatsRecords(statsDao, now, "deletedTableUnReplicatedDataSize",
+    insertGlobalStatsRecords(statsManager, now, 
"deletedTableUnReplicatedDataSize",
         10L);
 
     // Insert records for table counts
-    insertGlobalStatsRecords(statsDao, now, "openKeyTableCount", 3L);
-    insertGlobalStatsRecords(statsDao, now, "openFileTableCount", 3L);
-    insertGlobalStatsRecords(statsDao, now, "deletedTableCount", 3L);
+    insertGlobalStatsRecords(statsManager, now, "openKeyTableCount", 3L);
+    insertGlobalStatsRecords(statsManager, now, "openFileTableCount", 3L);
+    insertGlobalStatsRecords(statsManager, now, "deletedTableCount", 3L);
 
     // Call the API of Open keys to get the response
     Response openKeyInfoResp =
@@ -1038,11 +1039,11 @@ public void testKeysSummaryAttribute() {
         deletedKeysSummary.get("totalDeletedKeys"));
   }
 
-  private void insertGlobalStatsRecords(GlobalStatsDao statsDao,
+  private void insertGlobalStatsRecords(ReconGlobalStatsManager statsManager,
                                         Timestamp timestamp, String key,
-                                        long value) {
-    GlobalStats newRecord = new GlobalStats(key, value, timestamp);
-    statsDao.insert(newRecord);
+                                        long value) throws IOException {
+    GlobalStatsValue newRecord = new GlobalStatsValue(value);
+    statsManager.getGlobalStatsTable().put(key, newRecord);
   }
 
   @Test
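
For completeness, the test helper above switches from GlobalStatsDao.insert(...)/deleteById(...) to direct put/delete calls on the RocksDB-backed table. Below is a minimal sketch of that write path; it assumes the getGlobalStatsTable() accessor from the patch and the HDDS Table interface (org.apache.hadoop.hdds.utils.db.Table, not shown in the diff), and the helper class name is made up for illustration.

    import java.io.IOException;

    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager;
    import org.apache.hadoop.ozone.recon.tasks.GlobalStatsValue;

    // Hypothetical helper mirroring how the updated test seeds and clears stats.
    final class GlobalStatsTestHelper {

      static void seedOpenKeyCounts(ReconGlobalStatsManager statsManager)
          throws IOException {
        Table<String, GlobalStatsValue> table = statsManager.getGlobalStatsTable();
        // Same keys the endpoint later reads via getGlobalStatsValue(...).
        table.put("openKeyTableCount", new GlobalStatsValue(3L));
        table.put("openFileTableCount", new GlobalStatsValue(3L));
      }

      static void clearOpenKeyCounts(ReconGlobalStatsManager statsManager)
          throws IOException {
        Table<String, GlobalStatsValue> table = statsManager.getGlobalStatsTable();
        // Unlike GlobalStatsDao.deleteById(...), each key is removed one by one.
        table.delete("openKeyTableCount");
        table.delete("openFileTableCount");
      }
    }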


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
