This is an automated email from the ASF dual-hosted git repository.
swamirishi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new e4de3ac5302 HDDS-13252. Use deleteRangeWithBatch API to delete keys in snapshot scope from AOS deleted space. (#8964)
e4de3ac5302 is described below
commit e4de3ac5302b20c67d2c84214c09fbf7d975a951
Author: SaketaChalamchala <[email protected]>
AuthorDate: Thu Aug 28 08:08:27 2025 -0700
HDDS-13252. Use deleteRangeWithBatch API to delete keys in snapshot scope from AOS deleted space. (#8964)
Co-authored-by: saketa <[email protected]>
Co-authored-by: Copilot <[email protected]>
---
.../java/org/apache/hadoop/hdds/StringUtils.java | 26 +++
.../org/apache/hadoop/hdds/utils/TestUtils.java | 15 +-
.../apache/hadoop/ozone/om/OmSnapshotManager.java | 114 +++++-------
.../snapshot/OMSnapshotCreateResponse.java | 23 ---
.../snapshot/TestOMSnapshotCreateRequest.java | 192 +++++++++++++++++----
.../snapshot/TestOMSnapshotCreateResponse.java | 48 ++++++
6 files changed, 273 insertions(+), 145 deletions(-)
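The heart of this change: instead of iterating the active DB's deletedTable, deletedDirectoryTable, and snapshotRenamedTable and issuing one deleteWithBatch() per key, the bucket key prefix becomes the inclusive start bound of a single batched range delete, and the exclusive end bound is derived by incrementing the prefix's last character. A minimal standalone sketch of that pattern follows, written against raw RocksJava rather than Ozone's Table wrapper; the DB path, keys, and class name are illustrative assumptions, not part of the patch:

    import java.nio.charset.StandardCharsets;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.WriteBatch;
    import org.rocksdb.WriteOptions;

    public final class PrefixRangeDeleteSketch {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/prefix-delete-demo")) {
          db.put(bytes("/vol1/buck1/key1"), bytes("a"));
          db.put(bytes("/vol1/buck1/key2"), bytes("b"));
          db.put(bytes("/vol1/buck2/key1"), bytes("c")); // other bucket, must survive

          String startKey = "/vol1/buck1/";     // inclusive lower bound
          String endKey = nextPrefix(startKey); // "/vol1/buck10", exclusive upper bound
          // One range tombstone in the batch replaces N point tombstones.
          try (WriteBatch batch = new WriteBatch();
               WriteOptions writeOptions = new WriteOptions()) {
            batch.deleteRange(bytes(startKey), bytes(endKey));
            db.write(writeOptions, batch);
          }

          System.out.println(db.get(bytes("/vol1/buck1/key1")) == null); // true: deleted
          System.out.println(db.get(bytes("/vol1/buck2/key1")) != null); // true: untouched
        }
      }

      // Smallest string strictly greater than every key sharing the prefix;
      // mirrors the StringUtils.getLexicographicallyHigherString helper added below.
      private static String nextPrefix(String prefix) {
        char[] chars = prefix.toCharArray();
        chars[chars.length - 1] += 1;
        return String.valueOf(chars);
      }

      private static byte[] bytes(String s) {
        return s.getBytes(StandardCharsets.UTF_8);
      }
    }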
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
index e2c50a2da74..cfcac1a7712 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
@@ -97,4 +97,30 @@ public static String bytes2String(byte[] bytes) {
public static byte[] string2Bytes(String str) {
return str.getBytes(UTF8);
}
+
+ public static String getLexicographicallyLowerString(String val) {
+ if (val == null || val.isEmpty()) {
+ throw new IllegalArgumentException("Input string must not be null or empty");
+ }
+ char[] charVal = val.toCharArray();
+ int lastIdx = charVal.length - 1;
+ if (charVal[lastIdx] == Character.MIN_VALUE) {
+ throw new IllegalArgumentException("Cannot decrement character below Character.MIN_VALUE");
+ }
+ charVal[lastIdx] -= 1;
+ return String.valueOf(charVal);
+ }
+
+ public static String getLexicographicallyHigherString(String val) {
+ if (val == null || val.isEmpty()) {
+ throw new IllegalArgumentException("Input string must not be null or empty");
+ }
+ char[] charVal = val.toCharArray();
+ int lastIdx = charVal.length - 1;
+ if (charVal[lastIdx] == Character.MAX_VALUE) {
+ throw new IllegalArgumentException("Cannot increment character above Character.MAX_VALUE");
+ }
+ charVal[lastIdx] += 1;
+ return String.valueOf(charVal);
+ }
}
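For reference, a sketch of how the two new helpers behave on an Ozone-style bucket prefix; PrefixBoundsDemo is not part of the patch, and the flat "/volume/bucket/" key layout is an assumption for illustration:

    import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString;
    import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyLowerString;

    public final class PrefixBoundsDemo {
      public static void main(String[] args) {
        // '/' (0x2F) increments to '0' (0x30), so every key that starts with
        // the prefix sorts strictly below the returned bound.
        System.out.println(getLexicographicallyHigherString("/vol/buck/")); // "/vol/buck0"
        System.out.println(getLexicographicallyLowerString("/vol/buck0"));  // "/vol/buck/"
        // Null or empty input, or a last character already at
        // Character.MIN_VALUE / Character.MAX_VALUE, throws IllegalArgumentException.
      }
    }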
diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java
index b2f02261387..0e0d8306759 100644
--- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java
+++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java
@@ -17,6 +17,9 @@
package org.apache.hadoop.hdds.utils;
+import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString;
+import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyLowerString;
+
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
@@ -33,18 +36,6 @@ public final class TestUtils {
private TestUtils() {
}
- public static String getLexicographicallyLowerString(String val) {
- char[] charVal = val.toCharArray();
- charVal[charVal.length - 1] -= 1;
- return String.valueOf(charVal);
- }
-
- public static String getLexicographicallyHigherString(String val) {
- char[] charVal = val.toCharArray();
- charVal[charVal.length - 1] += 1;
- return String.valueOf(charVal);
- }
-
public static List<Optional<String>> getTestingBounds(
SortedMap<String, Integer> keys) {
Set<String> boundary = new HashSet<>();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index 91e5abb217a..30e6c190d56 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.om;
import static org.apache.commons.lang3.StringUtils.isBlank;
+import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString;
import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.DEFAULT_COLUMN_FAMILY_NAME;
import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
@@ -89,15 +90,12 @@
import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
import org.apache.hadoop.hdds.utils.db.RocksDatabase;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.service.SnapshotDiffCleanupService;
@@ -109,7 +107,6 @@
import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone;
import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse;
import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
-import org.apache.ratis.util.function.CheckedFunction;
import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
@@ -126,9 +123,6 @@ public final class OmSnapshotManager implements AutoCloseable {
private static final Logger LOG =
LoggerFactory.getLogger(OmSnapshotManager.class);
- // Threshold for the table iterator loop in nanoseconds.
- private static final long DB_TABLE_ITER_LOOP_THRESHOLD_NS = 100000;
-
private final OzoneManager ozoneManager;
private final SnapshotDiffManager snapshotDiffManager;
// Per-OM instance of snapshot cache map
@@ -507,12 +501,16 @@ public static DBCheckpoint createOmSnapshotCheckpoint(
OmSnapshotManager.createNewOmSnapshotLocalDataFile(omMetadataManager,
snapshotInfo, store);
// Clean up active DB's deletedTable right after checkpoint is taken,
- // There is no need to take any lock as of now, because transactions are flushed sequentially.
+ // Snapshot create is processed as a single transaction and
+ // transactions are flushed sequentially, so no need to take any lock as of now.
deleteKeysFromDelKeyTableInSnapshotScope(omMetadataManager,
snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), batchOperation);
// Clean up deletedDirectoryTable as well
deleteKeysFromDelDirTableInSnapshotScope(omMetadataManager,
snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), batchOperation);
+ // Remove entries from snapshotRenamedTable
+ deleteKeysFromSnapRenamedTableInSnapshotScope(omMetadataManager,
+ snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), batchOperation);
if (dbCheckpoint != null && snapshotDirExist) {
LOG.info("Checkpoint : {} for snapshot {} already exists.",
@@ -526,12 +524,43 @@ public static DBCheckpoint createOmSnapshotCheckpoint(
return dbCheckpoint;
}
+ /**
+ * Helper method to perform batch delete range operation on a given key prefix.
+ * @param prefix prefix of keys to be deleted
+ * @param table table from which keys are to be deleted
+ * @param batchOperation batch operation
+ */
+ private static void deleteKeysFromTableWithPrefix(
+ String prefix, Table<String, ?> table, BatchOperation batchOperation) throws IOException {
+ String endKey = getLexicographicallyHigherString(prefix);
+ LOG.debug("Deleting key range from {} - startKey: {}, endKey: {}",
+ table.getName(), prefix, endKey);
+ table.deleteRangeWithBatch(batchOperation, prefix, endKey);
+ }
+
/**
* Helper method to delete DB keys in the snapshot scope (bucket)
* from active DB's deletedDirectoryTable.
* @param omMetadataManager OMMetadataManager instance
* @param volumeName volume name
* @param bucketName bucket name
+ * @param batchOperation batch operation
+ */
+ private static void deleteKeysFromSnapRenamedTableInSnapshotScope(
+ OMMetadataManager omMetadataManager, String volumeName,
+ String bucketName, BatchOperation batchOperation) throws IOException {
+
+ final String keyPrefix = omMetadataManager.getBucketKeyPrefix(volumeName, bucketName);
+ deleteKeysFromTableWithPrefix(keyPrefix, omMetadataManager.getSnapshotRenamedTable(), batchOperation);
+ }
+
+ /**
+ * Helper method to delete DB keys in the snapshot scope (bucket)
+ * from active DB's deletedDirectoryTable.
+ * @param omMetadataManager OMMetadataManager instance
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @param batchOperation batch operation
*/
private static void deleteKeysFromDelDirTableInSnapshotScope(
OMMetadataManager omMetadataManager, String volumeName,
@@ -539,18 +568,7 @@ private static void deleteKeysFromDelDirTableInSnapshotScope(
// Range delete start key (inclusive)
final String keyPrefix =
omMetadataManager.getBucketKeyPrefixFSO(volumeName, bucketName);
-
- try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- iter = omMetadataManager.getDeletedDirTable().iterator(keyPrefix)) {
- performOperationOnKeys(iter,
- entry -> {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Removing key {} from DeletedDirTable",
entry.getKey());
- }
- omMetadataManager.getDeletedDirTable().deleteWithBatch(batchOperation, entry.getKey());
- return null;
- });
- }
+ deleteKeysFromTableWithPrefix(keyPrefix, omMetadataManager.getDeletedDirTable(), batchOperation);
}
@VisibleForTesting
@@ -563,68 +581,20 @@ public SnapshotDiffCleanupService getSnapshotDiffCleanupService() {
return snapshotDiffCleanupService;
}
- /**
- * Helper method to perform operation on keys with a given iterator.
- * @param keyIter TableIterator
- * @param operationFunction operation to be performed for each key.
- */
- private static void performOperationOnKeys(
- TableIterator<String, ? extends Table.KeyValue<String, ?>> keyIter,
- CheckedFunction<Table.KeyValue<String, ?>,
- Void, IOException> operationFunction) throws IOException {
- // Continue only when there are entries of snapshot (bucket) scope
- // in deletedTable in the first place
- // Loop until prefix matches.
- // Start performance tracking timer
- long startTime = System.nanoTime();
- while (keyIter.hasNext()) {
- Table.KeyValue<String, ?> entry = keyIter.next();
- operationFunction.apply(entry);
- }
- // Time took for the iterator to finish (in ns)
- long timeElapsed = System.nanoTime() - startTime;
- if (timeElapsed >= DB_TABLE_ITER_LOOP_THRESHOLD_NS) {
- // Print time elapsed
- LOG.warn("Took {} ns to find endKey. Caller is {}", timeElapsed,
- new Throwable().fillInStackTrace().getStackTrace()[1]
- .getMethodName());
- }
- }
-
/**
* Helper method to delete DB keys in the snapshot scope (bucket)
* from active DB's deletedTable.
* @param omMetadataManager OMMetadataManager instance
* @param volumeName volume name
* @param bucketName bucket name
+ * @param batchOperation batch operation
*/
private static void deleteKeysFromDelKeyTableInSnapshotScope(
OMMetadataManager omMetadataManager, String volumeName,
String bucketName, BatchOperation batchOperation) throws IOException {
-
- // Range delete start key (inclusive)
- final String keyPrefix =
- omMetadataManager.getBucketKeyPrefix(volumeName, bucketName);
-
- try (TableIterator<String,
- ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
- iter = omMetadataManager.getDeletedTable().iterator(keyPrefix)) {
- performOperationOnKeys(iter, entry -> {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Removing key {} from DeletedTable", entry.getKey());
- }
- omMetadataManager.getDeletedTable().deleteWithBatch(batchOperation, entry.getKey());
- return null;
- });
- }
-
- // No need to invalidate deletedTable (or deletedDirectoryTable) table
- // cache since entries are not added to its table cache in the first place.
- // See OMKeyDeleteRequest and OMKeyPurgeRequest#validateAndUpdateCache.
- //
- // This makes the table clean up efficient as we only need one
- // deleteRange() operation. No need to invalidate cache entries
- // one by one.
+ // Range delete prefix (inclusive)
+ final String keyPrefix = omMetadataManager.getBucketKeyPrefix(volumeName, bucketName);
+ deleteKeysFromTableWithPrefix(keyPrefix, omMetadataManager.getDeletedTable(), batchOperation);
}
/**
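All three cleanups now share one shape: compute the bucket key prefix, derive the exclusive end key, and emit a single range tombstone into the snapshot-create batch. A schematic rendering of that shape follows; TableLike and the surrounding names are hypothetical stand-ins, not the Ozone API:

    // Schematic only: the shared cleanup shape used by the three helpers above.
    interface TableLike {
      void deleteRangeWithBatch(Object batch, String beginKeyInclusive, String endKeyExclusive);
    }

    final class SnapshotScopeCleanup {
      // Deletes every key starting with bucketKeyPrefix in one range operation.
      static void deleteBucketScope(TableLike table, Object batch, String bucketKeyPrefix) {
        char[] chars = bucketKeyPrefix.toCharArray();
        chars[chars.length - 1] += 1; // exclusive end bound
        table.deleteRangeWithBatch(batch, bucketKeyPrefix, String.valueOf(chars));
      }
    }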
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java
index 2037c9ca6e6..db107f0772f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.ozone.om.response.snapshot;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DELETED_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.SNAPSHOT_INFO_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.SNAPSHOT_RENAMED_TABLE;
@@ -25,8 +24,6 @@
import jakarta.annotation.Nonnull;
import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmSnapshotManager;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
@@ -78,25 +75,5 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
// Create the snapshot checkpoint. Also cleans up some tables.
OmSnapshotManager.createOmSnapshotCheckpoint(omMetadataManager,
snapshotInfo, batchOperation);
-
- // TODO: [SNAPSHOT] Move to createOmSnapshotCheckpoint and add table lock
- // Remove all entries from snapshotRenamedTable
- try (TableIterator<String, ? extends Table.KeyValue<String, String>>
- iterator = omMetadataManager.getSnapshotRenamedTable().iterator()) {
-
- String dbSnapshotBucketKey = omMetadataManager.getBucketKey(
- snapshotInfo.getVolumeName(), snapshotInfo.getBucketName())
- + OM_KEY_PREFIX;
- iterator.seek(dbSnapshotBucketKey);
-
- while (iterator.hasNext()) {
- String renameDbKey = iterator.next().getKey();
- if (!renameDbKey.startsWith(dbSnapshotBucketKey)) {
- break;
- }
- omMetadataManager.getSnapshotRenamedTable()
- .deleteWithBatch(batchOperation, renameDbKey);
- }
- }
}
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
index a18ed38e3a4..68307c9dcc0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.OmSnapshotManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.ResolvedBucket;
@@ -45,6 +46,7 @@
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
@@ -162,7 +164,7 @@ public void testValidateAndUpdateCache() throws Exception {
String bucketKey = getOmMetadataManager().getBucketKey(getVolumeName(),
getBucketName());
// Add a 1000-byte key to the bucket
- OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L);
+ OmKeyInfo key1 = addKeyInBucket(getVolumeName(), getBucketName(), "key-testValidateAndUpdateCache", 12345L);
addKeyToTable(key1);
OmBucketInfo omBucketInfo = getOmMetadataManager().getBucketTable().get(
@@ -213,33 +215,51 @@ public void testValidateAndUpdateCache() throws Exception {
@Test
public void testEntryRenamedKeyTable() throws Exception {
when(getOzoneManager().isAdmin(any())).thenReturn(true);
- Table<String, String> snapshotRenamedTable =
- getOmMetadataManager().getSnapshotRenamedTable();
-
- renameKey("key1", "key2", 0);
- renameDir("dir1", "dir2", 5);
- // Rename table should be empty as there is no rename happening in
- // the snapshot scope.
+ Table<String, String> snapshotRenamedTable =
getOmMetadataManager().getSnapshotRenamedTable();
+
+ String bucket1Name = getBucketName();
+ String bucket2Name = getBucketName() + "0";
+ String volumeName = getVolumeName();
+ OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucket2Name, getOmMetadataManager());
+
+ renameKeyInBucket(volumeName, bucket1Name, "key1", "key2", 0);
+ renameDirInBucket(volumeName, bucket1Name, "dir1", "dir2", 5);
+ renameKeyInBucket(volumeName, bucket2Name, "key10", "key20", 0);
+ renameDirInBucket(volumeName, bucket2Name, "dir10", "dir20", 5);
+ // Rename table should be empty as there is no rename happening in the snapshot scope.
assertTrue(snapshotRenamedTable.isEmpty());
// Create snapshot
- createSnapshot(snapshotName1);
- String snapKey = getTableKey(getVolumeName(),
- getBucketName(), snapshotName1);
- SnapshotInfo snapshotInfo =
- getOmMetadataManager().getSnapshotInfoTable().get(snapKey);
- assertNotNull(snapshotInfo);
-
- renameKey("key3", "key4", 10);
- renameDir("dir3", "dir4", 15);
-
- // Rename table should have two entries as rename is within snapshot scope.
- assertEquals(2, getOmMetadataManager()
- .countRowsInTable(snapshotRenamedTable));
-
- // Create snapshot to clear snapshotRenamedTable
- createSnapshot(snapshotName2);
- assertTrue(snapshotRenamedTable.isEmpty());
+ createSnapshotForBucket(volumeName, bucket1Name, snapshotName1);
+ createSnapshotForBucket(volumeName, bucket2Name, snapshotName1 + "0");
+ String bucket1SnapKey = getTableKey(volumeName, bucket1Name, snapshotName1);
+ String bucket2SnapKey = getTableKey(volumeName, bucket2Name, snapshotName1 + "0");
+ SnapshotInfo bucket1SnapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(bucket1SnapKey);
+ SnapshotInfo bucket2SnapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(bucket2SnapKey);
+ assertNotNull(bucket1SnapshotInfo);
+ assertNotNull(bucket2SnapshotInfo);
+
+ renameKeyInBucket(volumeName, bucket1Name, "key3", "key4", 10);
+ renameDirInBucket(volumeName, bucket1Name, "dir3", "dir4", 15);
+ renameKeyInBucket(volumeName, bucket2Name, "key30", "key40", 10);
+ renameDirInBucket(volumeName, bucket2Name, "dir30", "dir40", 15);
+
+ // Rename table should have four entries as rename is within snapshot scope.
+ assertEquals(4, getOmMetadataManager().countRowsInTable(snapshotRenamedTable));
+
+ // Create snapshot to clear snapshotRenamedTable of bucket1 entries.
+ createSnapshotForBucket(volumeName, bucket1Name, snapshotName2);
+ assertEquals(2, getOmMetadataManager().countRowsInTable(snapshotRenamedTable));
+ // Verify the remaining entries are from bucket2
+ try (TableIterator<String, ? extends Table.KeyValue<String, String>> iter =
+ snapshotRenamedTable.iterator()) {
+ iter.seekToFirst();
+ while (iter.hasNext()) {
+ String key = iter.next().getKey();
+ assertTrue(key.startsWith(getOmMetadataManager().getBucketKey(volumeName, bucket2Name)),
+ "Key should be from bucket2: " + key);
+ }
+ }
}
@Test
@@ -313,7 +333,7 @@ public void testSnapshotLimit() throws Exception {
assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(key3));
// Test Case 4: Three snapshots in chain, no in-flight
- // Try to create another snapshot - should fail as we've reached the limit
+ // Try to create another snapshot - should fail as we've reached the limit
OMRequest snapshotRequest5 = createSnapshotRequest(getVolumeName(),
getBucketName(), snapshotName5);
omException = assertThrows(OMException.class, () ->
doPreExecute(snapshotRequest5));
assertEquals(OMException.ResultCodes.TOO_MANY_SNAPSHOTS,
omException.getResult());
@@ -364,10 +384,102 @@ public void testSnapshotLimitWithFailures() throws Exception {
assertEquals(OMException.ResultCodes.TOO_MANY_SNAPSHOTS,
omException.getResult());
}
- private void renameKey(String fromKey, String toKey, long offset)
+ @Test
+ public void testEntryDeletedTable() throws Exception {
+ when(getOzoneManager().isAdmin(any())).thenReturn(true);
+ Table<String, RepeatedOmKeyInfo> deletedTable =
getOmMetadataManager().getDeletedTable();
+
+ // 1. Create a second bucket with lexicographically higher name
+ String bucket1Name = getBucketName();
+ String bucket2Name = getBucketName() + "0";
+ String volumeName = getVolumeName();
+ OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucket2Name, getOmMetadataManager());
+
+ // 2. Add and delete keys from both buckets
+ OmKeyInfo key1 = addKeyInBucket(volumeName, bucket1Name, "key1", 100L);
+ OmKeyInfo key2 = addKeyInBucket(volumeName, bucket2Name, "key2", 200L);
+ deleteKey(key1);
+ deleteKey(key2);
+
+ // 3. Verify deletedTable contains both deleted keys (2 rows)
+ assertEquals(2, getOmMetadataManager().countRowsInTable(deletedTable));
+
+ // 4. Create a snapshot on bucket1
+ createSnapshot(snapshotName1);
+
+ // 5. Verify deletedTable now only contains the key from bucket2 (1 row)
+ assertEquals(1, getOmMetadataManager().countRowsInTable(deletedTable));
+ // Verify the remaining entry is from bucket2
+ try (TableIterator<String, ? extends Table.KeyValue<String,
RepeatedOmKeyInfo>> iter = deletedTable.iterator()) {
+ iter.seekToFirst();
+ while (iter.hasNext()) {
+ String key = iter.next().getKey();
+ assertTrue(key.startsWith(getOmMetadataManager().getBucketKeyPrefix(volumeName, bucket2Name)),
+ "Key should be from bucket2: " + key);
+ }
+ }
+
+ }
+
+ @Test
+ public void testEntryDeletedDirTable() throws Exception {
+ when(getOzoneManager().isAdmin(any())).thenReturn(true);
+ Table<String, OmKeyInfo> deletedDirTable =
getOmMetadataManager().getDeletedDirTable();
+
+ // 1. Create a second bucket with lexicographically higher name
+ String bucket1Name = getBucketName();
+ String bucket2Name = getBucketName() + "0";
+ String volumeName = getVolumeName();
+ OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucket2Name, getOmMetadataManager());
+
+ // 2. Add and delete keys from both buckets
+ OmKeyInfo key1 = addKeyInBucket(volumeName, bucket1Name, "dir2", 100L);
+ OmKeyInfo key2 = addKeyInBucket(volumeName, bucket2Name, "dir20", 200L);
+ deleteDirectory(key1);
+ deleteDirectory(key2);
+
+ // 3. Verify deletedDirTable contains both deleted keys (2 rows)
+ assertEquals(2, getOmMetadataManager().countRowsInTable(deletedDirTable));
+
+ // 4. Create a snapshot on bucket1
+ createSnapshotForBucket(volumeName, bucket1Name, snapshotName1);
+
+ // 5. Verify deletedDirTable now only contains the key from bucket2 (1 row)
+ assertEquals(1, getOmMetadataManager().countRowsInTable(deletedDirTable));
+ // Verify the remaining entry is from bucket2
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
iter = deletedDirTable.iterator()) {
+ while (iter.hasNext()) {
+ String key = iter.next().getKey();
+ assertTrue(key.startsWith(getOmMetadataManager().getBucketKeyPrefixFSO(volumeName, bucket2Name)),
+ "Key should be from bucket2: " + key);
+ }
+ }
+ }
+
+ private void deleteDirectory(OmKeyInfo dirInfo) throws IOException {
+ String dirKey = getOmMetadataManager().getOzonePathKey(
+ getOmMetadataManager().getVolumeId(dirInfo.getVolumeName()),
+ getOmMetadataManager().getBucketId(dirInfo.getVolumeName(), dirInfo.getBucketName()),
+ dirInfo.getParentObjectID(), dirInfo.getKeyName());
+ getOmMetadataManager().getDeletedDirTable().putWithBatch(getBatchOperation(),
+ dirKey, dirInfo);
+ getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation());
+ }
+
+ private void deleteKey(OmKeyInfo keyInfo) throws IOException {
+ String ozoneKey = getOmMetadataManager().getOzoneKey(keyInfo.getVolumeName(),
+ keyInfo.getBucketName(), keyInfo.getKeyName());
+ RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
+ getOmMetadataManager().getDeletedTable().putWithBatch(getBatchOperation(),
+ ozoneKey, repeatedOmKeyInfo);
+ getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation());
+ }
+
+ private void renameKeyInBucket(String volumeName, String bucketName, String fromKey, String toKey, long offset)
throws IOException {
- OmKeyInfo toKeyInfo = addKey(toKey, offset + 1L);
- OmKeyInfo fromKeyInfo = addKey(fromKey, offset + 2L);
+ OmKeyInfo toKeyInfo = addKeyInBucket(volumeName, bucketName, toKey, offset + 1L);
+ OmKeyInfo fromKeyInfo = addKeyInBucket(volumeName, bucketName, fromKey, offset + 2L);
OMResponse omResponse = OMResponse
.newBuilder()
@@ -384,16 +496,16 @@ private void renameKey(String fromKey, String toKey, long offset)
getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation());
}
- private void renameDir(String fromKey, String toKey, long offset)
- throws Exception {
+ private void renameDirInBucket(String volumeName, String bucketName, String fromKey, String toKey, long offset)
+ throws IOException {
String fromKeyParentName = UUID.randomUUID().toString();
- OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(getVolumeName(),
- getBucketName(), fromKeyParentName, RatisReplicationConfig.getInstance(THREE))
+ OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName,
+ bucketName, fromKeyParentName, RatisReplicationConfig.getInstance(THREE))
.setObjectID(100L)
.build();
- OmKeyInfo toKeyInfo = addKey(toKey, offset + 4L);
- OmKeyInfo fromKeyInfo = addKey(fromKey, offset + 5L);
+ OmKeyInfo toKeyInfo = addKeyInBucket(volumeName, bucketName, toKey, offset + 4L);
+ OmKeyInfo fromKeyInfo = addKeyInBucket(volumeName, bucketName, fromKey, offset + 5L);
OMResponse omResponse = OMResponse
.newBuilder()
.setRenameKeyResponse(
@@ -418,9 +530,13 @@ protected String getDBKeyName(OmKeyInfo keyInfo) throws IOException {
}
private void createSnapshot(String snapName) throws Exception {
+ createSnapshotForBucket(getVolumeName(), getBucketName(), snapName);
+ }
+
+ private void createSnapshotForBucket(String volumeName, String bucketName, String snapName) throws Exception {
OMRequest omRequest =
createSnapshotRequest(
- getVolumeName(), getBucketName(), snapName);
+ volumeName, bucketName, snapName);
OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest);
//create entry
OMClientResponse omClientResponse =
@@ -447,8 +563,8 @@ public static OMSnapshotCreateRequest doPreExecute(
return new OMSnapshotCreateRequest(modifiedRequest);
}
- private OmKeyInfo addKey(String keyName, long objectId) {
- return OMRequestTestUtils.createOmKeyInfo(getVolumeName(), getBucketName(), keyName,
+ private OmKeyInfo addKeyInBucket(String volumeName, String bucketName, String keyName, long objectId) {
+ return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId)
.build();
}
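A note on the test setup above: bucket2Name = bucketName + "0" is the tightest possible neighbor, because the exclusive end key of bucket1's range delete is exactly the bucket prefix with its trailing '/' bumped to '0'. A standalone boundary check follows (the "/volume/bucket/" key layout is assumed; names are illustrative):

    public final class RangeBoundaryCheck {
      public static void main(String[] args) {
        String bucket1Prefix = "/vol/buck/"; // inclusive start of the range delete
        char[] c = bucket1Prefix.toCharArray();
        c[c.length - 1] += 1;
        String endKey = String.valueOf(c);   // "/vol/buck0", exclusive end

        String bucket2Prefix = "/vol/buck0/"; // bucket named "buck" + "0"
        // bucket2's prefix sorts at or above the exclusive end key, so its
        // entries survive the range delete, exactly what the tests assert.
        System.out.println(bucket2Prefix.compareTo(endKey) >= 0);   // true
        System.out.println("/vol/buck/key1".compareTo(endKey) < 0); // true: inside range
      }
    }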
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java
index 72a5efb5e09..39887192719 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.om.response.snapshot;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -77,6 +78,7 @@ public void tearDown() {
if (batchOperation != null) {
batchOperation.close();
}
+ omMetadataManager.getStore().close();
}
@ParameterizedTest
@@ -102,6 +104,8 @@ public void testAddToDBBatch(int numberOfKeys) throws Exception {
addTestKeysToDeletedTable(volumeName, bucketName, numberOfKeys);
Set<String> ddtSentinelKeys =
addTestKeysToDeletedDirTable(volumeName, bucketName, numberOfKeys);
+ Set<String> srtSentinelKeys =
+ addTestKeysToSnapshotRenameTable(volumeName, bucketName, numberOfKeys);
// commit to table
OMSnapshotCreateResponse omSnapshotCreateResponse =
@@ -136,6 +140,7 @@ public void testAddToDBBatch(int numberOfKeys) throws Exception {
// Check deletedTable and deletedDirectoryTable clean up work as expected
verifyEntriesLeftInDeletedTable(dtSentinelKeys);
verifyEntriesLeftInDeletedDirTable(ddtSentinelKeys);
+ verifyEntriesLeftInSnapshotRenameTable(srtSentinelKeys);
}
private Set<String> addTestKeysToDeletedTable(String volumeName,
@@ -244,6 +249,43 @@ private Set<String> addTestKeysToDeletedDirTable(String volumeName,
return sentinelKeys;
}
+ private Set<String> addTestKeysToSnapshotRenameTable(String volumeName,
+ String bucketName,
+ int numberOfKeys)
+ throws IOException {
+
+ // Add snapshotRenameTable key entries that "surround" the snapshot scope
+ Set<String> sentinelKeys = new HashSet<>();
+ final String srtKeyPfx = omMetadataManager.getBucketKey(volumeName, bucketName);
+ final String srtBucketKey = omMetadataManager.getBucketKey(volumeName, bucketName) + OM_KEY_PREFIX;
+ final int offset = srtKeyPfx.length() - 1;
+ char bucketIdLastChar = srtKeyPfx.charAt(offset);
+
+ String srtBucketKeyBefore = srtKeyPfx.substring(0, offset) + (char) (bucketIdLastChar - 1) + OM_KEY_PREFIX;
+ for (int i = 0; i < 3; i++) {
+ String srtKey = srtBucketKeyBefore + "srtkey" + i + "a";
+ omMetadataManager.getSnapshotRenamedTable().put(srtKey, srtBucketKeyBefore + "srtkey" + i + "b");
+ sentinelKeys.add(srtKey);
+ }
+
+ String srtBucketKeyAfter = srtKeyPfx.substring(0, offset) + (char) (bucketIdLastChar + 1) + OM_KEY_PREFIX;
+ for (int i = 0; i < 3; i++) {
+ String srtKey = srtBucketKeyAfter + "srtkey" + i + "a";
+ omMetadataManager.getSnapshotRenamedTable().put(srtKey, srtBucketKeyAfter + "srtkey" + i + "b");
+ sentinelKeys.add(srtKey);
+ }
+
+ // Add key entries in the snapshot (bucket) scope
+ for (int i = 0; i < numberOfKeys; i++) {
+ String srtKey = srtBucketKey + "srtkey" + i + "a";
+ omMetadataManager.getSnapshotRenamedTable().put(srtKey, srtBucketKey + "srtkey" + i + "b");
+ // These are the keys that should be deleted.
+ // Thus not added to sentinelKeys list.
+ }
+
+ return sentinelKeys;
+ }
+
private void verifyEntriesLeftInDeletedTable(Set<String> expectedKeys)
throws IOException {
// Only keys inside the snapshot scope would be deleted from deletedTable.
@@ -256,6 +298,12 @@ private void verifyEntriesLeftInDeletedDirTable(Set<String> expectedKeys)
expectedKeys);
}
+ private void verifyEntriesLeftInSnapshotRenameTable(Set<String> expectedKeys)
+ throws IOException {
+ verifyEntriesLeftInTable(omMetadataManager.getSnapshotRenamedTable(),
+ expectedKeys);
+ }
+
private void verifyEntriesLeftInTable(
Table<String, ?> table, Set<String> expectedKeys) throws IOException {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]