This is an automated email from the ASF dual-hosted git repository.

swamirishi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 902e715221 HDDS-13159. Refactor KeyManagerImpl for getting deleted subdirectories and deleted subFiles (#8538)
902e715221 is described below

commit 902e71522186eccac5c2616e290cac9b3acf9f09
Author: Swaminathan Balachandran <[email protected]>
AuthorDate: Tue Jun 3 11:59:03 2025 -0400

    HDDS-13159. Refactor KeyManagerImpl for getting deleted subdirectories and deleted subFiles (#8538)
---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 149 +++++++--------------
 .../ozone/om/request/file/OMFileRequest.java       |  16 +++
 2 files changed, 67 insertions(+), 98 deletions(-)
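
In short, the patch replaces the two near-duplicate scan loops in getPendingDeletionSubDirs and getPendingDeletionSubFiles with one generic helper driven by a transformer function. The sketch below only illustrates that pattern with hypothetical, simplified stand-in types (GatherSketch, Entry); it is not the actual Ozone code or API touched by this commit.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Function;

    // Hypothetical stand-ins, only to show the shape of the refactor.
    final class GatherSketch {

      /** Minimal table entry: a key, a typed value, and its serialized size. */
      interface Entry<T> {
        String key();
        T value();
        long rawSize();
      }

      /**
       * One generic gather loop instead of two copies: the per-type difference
       * (directory vs. file) is pushed into the transformer argument.
       */
      static <T> List<String> gatherSubPaths(Iterable<Entry<T>> table,
          Function<T, String> transformer, long remainingBufLimit) {
        List<String> results = new ArrayList<>();
        for (Entry<T> entry : table) {
          long size = entry.rawSize();
          if (remainingBufLimit - size < 0) {
            break;                      // stop once the buffer budget is spent
          }
          results.add(transformer.apply(entry.value()));
          remainingBufLimit -= size;
        }
        return results;
      }

      private GatherSketch() {
      }
    }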

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index b399d6bb9c..da080be68c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -131,6 +131,7 @@
 import org.apache.hadoop.hdds.utils.BackgroundService;
 import org.apache.hadoop.hdds.utils.db.StringCodec;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -160,6 +161,7 @@
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.WithParentObjectId;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils;
@@ -734,7 +736,7 @@ public ListKeysResult listKeys(String volumeName, String bucketName,
 
   @Override
   public PendingKeysDeletion getPendingDeletionKeys(
-      final CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, final int count)
+      final CheckedFunction<KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, final int count)
       throws IOException {
     return getPendingDeletionKeys(null, null, null, filter, count);
   }
@@ -742,13 +744,13 @@ public PendingKeysDeletion getPendingDeletionKeys(
   @Override
   public PendingKeysDeletion getPendingDeletionKeys(
       String volume, String bucket, String startKey,
-      CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter,
+      CheckedFunction<KeyValue<String, OmKeyInfo>, Boolean, IOException> filter,
       int count) throws IOException {
     List<BlockGroup> keyBlocksList = Lists.newArrayList();
     Map<String, RepeatedOmKeyInfo> keysToModify = new HashMap<>();
     // Bucket prefix would be empty if volume is empty i.e. either null or "".
     Optional<String> bucketPrefix = getBucketPrefix(volume, bucket, false);
-    try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
+    try (TableIterator<String, ? extends KeyValue<String, RepeatedOmKeyInfo>>
             delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) {
 
      /* Seeking to the start key if it not null. The next key picked up would be ensured to start with the bucket
@@ -760,7 +762,7 @@ public PendingKeysDeletion getPendingDeletionKeys(
       int currentCount = 0;
       while (delKeyIter.hasNext() && currentCount < count) {
         RepeatedOmKeyInfo notReclaimableKeyInfo = new RepeatedOmKeyInfo();
-        Table.KeyValue<String, RepeatedOmKeyInfo> kv = delKeyIter.next();
+        KeyValue<String, RepeatedOmKeyInfo> kv = delKeyIter.next();
         if (kv != null) {
           List<BlockGroup> blockGroupList = Lists.newArrayList();
           // Multiple keys with the same path can be queued in one DB entry
@@ -795,12 +797,12 @@ public PendingKeysDeletion getPendingDeletionKeys(
     return new PendingKeysDeletion(keyBlocksList, keysToModify);
   }
 
-  private <V, R> List<Table.KeyValue<String, R>> getTableEntries(String startKey,
-          TableIterator<String, ? extends Table.KeyValue<String, V>> tableIterator,
+  private <V, R> List<KeyValue<String, R>> getTableEntries(String startKey,
+          TableIterator<String, ? extends KeyValue<String, V>> tableIterator,
           Function<V, R> valueFunction,
-          CheckedFunction<Table.KeyValue<String, V>, Boolean, IOException> filter,
+          CheckedFunction<KeyValue<String, V>, Boolean, IOException> filter,
           int size) throws IOException {
-    List<Table.KeyValue<String, R>> entries = new ArrayList<>();
+    List<KeyValue<String, R>> entries = new ArrayList<>();
     /* Seek to the start key if it's not null. The next key in queue is ensured to start with the bucket
          prefix, {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} would ensure this.
     */
@@ -811,7 +813,7 @@ private <V, R> List<Table.KeyValue<String, R>> getTableEntries(String startKey,
     }
     int currentCount = 0;
     while (tableIterator.hasNext() && currentCount < size) {
-      Table.KeyValue<String, V> kv = tableIterator.next();
+      KeyValue<String, V> kv = tableIterator.next();
       if (kv != null && filter.apply(kv)) {
        entries.add(Table.newKeyValue(kv.getKey(), valueFunction.apply(kv.getValue())));
         currentCount++;
@@ -833,11 +835,11 @@ private Optional<String> getBucketPrefix(String volumeName, String bucketName, b
   }
 
   @Override
-  public List<Table.KeyValue<String, String>> getRenamesKeyEntries(
+  public List<KeyValue<String, String>> getRenamesKeyEntries(
       String volume, String bucket, String startKey,
-      CheckedFunction<Table.KeyValue<String, String>, Boolean, IOException> filter, int size) throws IOException {
+      CheckedFunction<KeyValue<String, String>, Boolean, IOException> filter, int size) throws IOException {
     Optional<String> bucketPrefix = getBucketPrefix(volume, bucket, false);
-    try (TableIterator<String, ? extends Table.KeyValue<String, String>>
+    try (TableIterator<String, ? extends KeyValue<String, String>>
             renamedKeyIter = metadataManager.getSnapshotRenamedTable().iterator(bucketPrefix.orElse(""))) {
      return getTableEntries(startKey, renamedKeyIter, Function.identity(), filter, size);
     }
@@ -882,12 +884,12 @@ private <T> CheckedFunction<KeyManager, T, IOException> getPreviousSnapshotOzone
   }
 
   @Override
-  public List<Table.KeyValue<String, List<OmKeyInfo>>> getDeletedKeyEntries(
+  public List<KeyValue<String, List<OmKeyInfo>>> getDeletedKeyEntries(
       String volume, String bucket, String startKey,
-      CheckedFunction<Table.KeyValue<String, RepeatedOmKeyInfo>, Boolean, IOException> filter,
+      CheckedFunction<KeyValue<String, RepeatedOmKeyInfo>, Boolean, IOException> filter,
       int size) throws IOException {
     Optional<String> bucketPrefix = getBucketPrefix(volume, bucket, false);
-    try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
+    try (TableIterator<String, ? extends KeyValue<String, RepeatedOmKeyInfo>>
             delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) {
      return getTableEntries(startKey, delKeyIter, RepeatedOmKeyInfo::cloneOmKeyInfoList, filter, size);
     }
@@ -1537,10 +1539,10 @@ private OmKeyInfo createFakeDirIfShould(String volume, String bucket,
       }
     }
 
-    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+    try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>>
         keyTblItr = keyTable.iterator(targetKey)) {
       while (keyTblItr.hasNext()) {
-        Table.KeyValue<String, OmKeyInfo> keyValue = keyTblItr.next();
+        KeyValue<String, OmKeyInfo> keyValue = keyTblItr.next();
         if (keyValue != null) {
           String key = keyValue.getKey();
           // HDDS-7871: RocksIterator#seek() may position at the key
@@ -1851,7 +1853,7 @@ public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,
     String keyArgs = OzoneFSUtils.addTrailingSlashIfNeeded(
         metadataManager.getOzoneKey(volumeName, bucketName, keyName));
 
-    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator;
+    TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> iterator;
     Table<String, OmKeyInfo> keyTable;
     metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
         bucketName);
@@ -1908,12 +1910,12 @@ public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,
     return fileStatusList;
   }
 
-  private TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+  private TableIterator<String, ? extends KeyValue<String, OmKeyInfo>>
       getIteratorForKeyInTableCache(
       boolean recursive, String startKey, String volumeName, String bucketName,
       TreeMap<String, OzoneFileStatus> cacheKeyMap, String keyArgs,
       Table<String, OmKeyInfo> keyTable) throws IOException {
-    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator;
+    TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> iterator;
     Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
         cacheIter = keyTable.cacheIterator();
     String startCacheKey = metadataManager.getOzoneKey(volumeName, bucketName, startKey);
@@ -1931,12 +1933,12 @@ private void findKeyInDbWithIterator(boolean recursive, String startKey,
       TreeMap<String, OzoneFileStatus> cacheKeyMap, String keyArgs,
       Table<String, OmKeyInfo> keyTable,
       TableIterator<String,
-          ? extends Table.KeyValue<String, OmKeyInfo>> iterator)
+          ? extends KeyValue<String, OmKeyInfo>> iterator)
       throws IOException {
     // Then, find key in DB
     String seekKeyInDb =
         metadataManager.getOzoneKey(volumeName, bucketName, startKey);
-    Table.KeyValue<String, OmKeyInfo> entry = iterator.seek(seekKeyInDb);
+    KeyValue<String, OmKeyInfo> entry = iterator.seek(seekKeyInDb);
     int countEntries = 0;
     if (iterator.hasNext()) {
       if (entry.getKey().equals(keyArgs)) {
@@ -2187,7 +2189,7 @@ private void slimLocationVersion(OmKeyInfo... keyInfos) {
   }
 
   @Override
-  public TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> getDeletedDirEntries(
+  public TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> getDeletedDirEntries(
       String volume, String bucket) throws IOException {
     Optional<String> bucketPrefix = getBucketPrefix(volume, bucket, true);
     return metadataManager.getDeletedDirTable().iterator(bucketPrefix.orElse(""));
@@ -2196,101 +2198,52 @@ private void slimLocationVersion(OmKeyInfo... keyInfos) {
   @Override
   public DeleteKeysResult getPendingDeletionSubDirs(long volumeId, long bucketId,
       OmKeyInfo parentInfo, long remainingBufLimit) throws IOException {
-    String seekDirInDB = metadataManager.getOzonePathKey(volumeId, bucketId,
-        parentInfo.getObjectID(), "");
-    long countEntries = 0;
-
-    Table<String, OmDirectoryInfo> dirTable = metadataManager.getDirectoryTable();
-    try (TableIterator<String,
-        ? extends Table.KeyValue<String, OmDirectoryInfo>>
-        iterator = dirTable.iterator(seekDirInDB)) {
-      return gatherSubDirsWithIterator(parentInfo, countEntries, iterator, remainingBufLimit);
-    }
-
+    return gatherSubPathsWithIterator(volumeId, bucketId, parentInfo, metadataManager.getDirectoryTable(),
+        omDirectoryInfo -> OMFileRequest.getKeyInfoWithFullPath(parentInfo, omDirectoryInfo), remainingBufLimit);
   }
 
-  private DeleteKeysResult gatherSubDirsWithIterator(OmKeyInfo parentInfo,
-      long countEntries,
-      TableIterator<String,
-          ? extends Table.KeyValue<String, OmDirectoryInfo>> iterator, long remainingBufLimit)
-      throws IOException {
-    List<OmKeyInfo> directories = new ArrayList<>();
-    long consumedSize = 0;
-    boolean processedSubDirs = false;
-
-    while (iterator.hasNext() && remainingBufLimit > 0) {
-      Table.KeyValue<String, OmDirectoryInfo> entry = iterator.next();
-      OmDirectoryInfo dirInfo = entry.getValue();
-      long objectSerializedSize = entry.getRawSize();
-      if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
-          parentInfo.getObjectID())) {
-        processedSubDirs = true;
-        break;
-      }
-      if (!metadataManager.getDirectoryTable().isExist(entry.getKey())) {
-        continue;
-      }
-      if (remainingBufLimit - objectSerializedSize < 0) {
-        break;
-      }
-      String dirName = OMFileRequest.getAbsolutePath(parentInfo.getKeyName(),
-          dirInfo.getName());
-      OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(
-          parentInfo.getVolumeName(), parentInfo.getBucketName(), dirInfo,
-          dirName);
-      directories.add(omKeyInfo);
-      countEntries++;
-      remainingBufLimit -= objectSerializedSize;
-      consumedSize += objectSerializedSize;
-    }
-
-    processedSubDirs = processedSubDirs || (!iterator.hasNext());
-
-    return new DeleteKeysResult(directories, consumedSize, processedSubDirs);
-  }
-
-  @Override
-  public DeleteKeysResult getPendingDeletionSubFiles(long volumeId,
-      long bucketId, OmKeyInfo parentInfo, long remainingBufLimit)
-          throws IOException {
-    List<OmKeyInfo> files = new ArrayList<>();
+  private <T extends WithParentObjectId> DeleteKeysResult gatherSubPathsWithIterator(
+      long volumeId, long bucketId, OmKeyInfo parentInfo,
+      Table<String, T> table, Function<T, OmKeyInfo> deleteKeyTransformer,
+      long remainingBufLimit) throws IOException {
+    List<OmKeyInfo> keyInfos = new ArrayList<>();
     String seekFileInDB = metadataManager.getOzonePathKey(volumeId, bucketId,
         parentInfo.getObjectID(), "");
     long consumedSize = 0;
-    boolean processedSubFiles = false;
-
-    Table fileTable = metadataManager.getFileTable();
-    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-        iterator = fileTable.iterator(seekFileInDB)) {
-
+    boolean processedSubPaths = false;
+    try (TableIterator<String, ? extends KeyValue<String, T>> iterator = table.iterator(seekFileInDB)) {
       while (iterator.hasNext() && remainingBufLimit > 0) {
-        Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
-        OmKeyInfo fileInfo = entry.getValue();
+        KeyValue<String, T> entry = iterator.next();
+        T withParentObjectId = entry.getValue();
         long objectSerializedSize = entry.getRawSize();
-        if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(),
+        if (!OMFileRequest.isImmediateChild(withParentObjectId.getParentObjectID(),
             parentInfo.getObjectID())) {
-          processedSubFiles = true;
+          processedSubPaths = true;
           break;
         }
-        if (!metadataManager.getFileTable().isExist(entry.getKey())) {
+        if (!table.isExist(entry.getKey())) {
           continue;
         }
         if (remainingBufLimit - objectSerializedSize < 0) {
           break;
         }
-        fileInfo.setFileName(fileInfo.getKeyName());
-        String fullKeyPath = OMFileRequest.getAbsolutePath(
-            parentInfo.getKeyName(), fileInfo.getKeyName());
-        fileInfo.setKeyName(fullKeyPath);
-
-        files.add(fileInfo);
+        OmKeyInfo keyInfo = deleteKeyTransformer.apply(withParentObjectId);
+        keyInfos.add(keyInfo);
         remainingBufLimit -= objectSerializedSize;
         consumedSize += objectSerializedSize;
       }
-      processedSubFiles = processedSubFiles || (!iterator.hasNext());
+      processedSubPaths = processedSubPaths || (!iterator.hasNext());
+      return new DeleteKeysResult(keyInfos, consumedSize, processedSubPaths);
     }
+  }
 
-    return new DeleteKeysResult(files, consumedSize, processedSubFiles);
+  @Override
+  public DeleteKeysResult getPendingDeletionSubFiles(long volumeId,
+      long bucketId, OmKeyInfo parentInfo, long remainingBufLimit)
+          throws IOException {
+    return gatherSubPathsWithIterator(volumeId, bucketId, parentInfo, metadataManager.getFileTable(),
+        keyInfo -> OMFileRequest.getKeyInfoWithFullPath(parentInfo, keyInfo),
+        remainingBufLimit);
   }
 
   public boolean isBucketFSOptimized(String volName, String buckName)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index f8058bd7a8..75ec1d5b72 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -720,6 +720,22 @@ public static OzoneFileStatus getOMKeyInfoIfExists(
     return null;
   }
 
+  public static OmKeyInfo getKeyInfoWithFullPath(OmKeyInfo parentInfo, OmDirectoryInfo directoryInfo) {
+    String dirName = OMFileRequest.getAbsolutePath(parentInfo.getKeyName(),
+        directoryInfo.getName());
+    return OMFileRequest.getOmKeyInfo(
+        parentInfo.getVolumeName(), parentInfo.getBucketName(), directoryInfo,
+        dirName);
+  }
+
+  public static OmKeyInfo getKeyInfoWithFullPath(OmKeyInfo parentInfo, OmKeyInfo omKeyInfo) {
+    omKeyInfo.setFileName(omKeyInfo.getKeyName());
+    String fullKeyPath = OMFileRequest.getAbsolutePath(
+        parentInfo.getKeyName(), omKeyInfo.getKeyName());
+    omKeyInfo.setKeyName(fullKeyPath);
+    return omKeyInfo;
+  }
+
   /**
    * Prepare OmKeyInfo from OmDirectoryInfo.
    *


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
