This is an automated email from the ASF dual-hosted git repository.
agupta pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 24aab04e5c HDDS-11508. Decouple delete batch limits from Ratis request
size for DirectoryDeletingService. (#7365)
24aab04e5c is described below
commit 24aab04e5c6c3ddd1ec051ce48b8d2a035b4d3ca
Author: Sadanand Shenoy <[email protected]>
AuthorDate: Mon Feb 3 13:20:46 2025 +0530
HDDS-11508. Decouple delete batch limits from Ratis request size for
DirectoryDeletingService. (#7365)
---
.../common/src/main/resources/ozone-default.xml | 8 ---
.../org/apache/hadoop/hdds/utils/db/Table.java | 56 +++++++++++++++++++
.../apache/hadoop/hdds/utils/db/TypedTable.java | 16 ++++--
.../org/apache/hadoop/ozone/om/OMConfigKeys.java | 7 ---
.../ozone/TestDirectoryDeletingServiceWithFSO.java | 21 ++++----
.../hadoop/fs/ozone/TestRootedDDSWithFSO.java | 1 -
...TestSnapshotDeletingServiceIntegrationTest.java | 1 -
.../TestReconInsightsForDeletedDirectories.java | 37 +++++++------
.../apache/hadoop/ozone/om/DeleteKeysResult.java | 53 ++++++++++++++++++
.../org/apache/hadoop/ozone/om/KeyManager.java | 10 ++--
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 49 +++++++++++------
.../java/org/apache/hadoop/ozone/om/OMMetrics.java | 1 -
.../om/service/AbstractKeyDeletingService.java | 60 +++++++++------------
.../ozone/om/service/DirectoryDeletingService.java | 52 ++++++------------
.../om/service/TestDirectoryDeletingService.java | 62 +---------------------
15 files changed, 233 insertions(+), 201 deletions(-)
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 4693392a21..f7ff9089b9 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -3686,14 +3686,6 @@
be defined with postfix (ns,ms,s,m,h,d)
</description>
</property>
- <property>
- <name>ozone.path.deleting.limit.per.task</name>
- <value>6000</value>
- <tag>OZONE, PERFORMANCE, OM</tag>
- <description>A maximum number of paths(dirs/files) to be deleted by
- directory deleting service per time interval.
- </description>
- </property>
<property>
<name>ozone.snapshot.filtering.limit.per.task</name>
<value>2</value>
diff --git
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index c705526705..0c435066b8 100644
---
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -336,6 +336,14 @@ interface KeyValue<KEY, VALUE> {
KEY getKey() throws IOException;
VALUE getValue() throws IOException;
+
+ default byte[] getRawKey() throws IOException {
+ return null;
+ }
+
+ default byte[] getRawValue() throws IOException {
+ return null;
+ }
}
static <K, V> KeyValue<K, V> newKeyValue(K key, V value) {
@@ -375,6 +383,54 @@ public int hashCode() {
};
}
+ static <K, V> KeyValue<K, V> newKeyValue(K key, V value, byte[] rawKey,
byte[] rawValue) {
+ return new KeyValue<K, V>() {
+ @Override
+ public K getKey() {
+ return key;
+ }
+
+ @Override
+ public V getValue() {
+ return value;
+ }
+
+ @Override
+ public byte[] getRawKey() throws IOException {
+ return rawKey;
+ }
+
+ @Override
+ public byte[] getRawValue() throws IOException {
+ return rawValue;
+ }
+
+ @Override
+ public String toString() {
+ return "(key=" + key + ", value=" + value + ")";
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof KeyValue)) {
+ return false;
+ }
+ KeyValue<?, ?> kv = (KeyValue<?, ?>) obj;
+ try {
+ return getKey().equals(kv.getKey()) &&
getValue().equals(kv.getValue());
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(getKey(), getValue());
+ }
+ };
+ }
+
+
/** A {@link TableIterator} to iterate {@link KeyValue}s. */
interface KeyValueIterator<KEY, VALUE>
extends TableIterator<KEY, KeyValue<KEY, VALUE>> {
diff --git
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index 539bf8a29c..9609b5bfc2 100644
---
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -573,6 +573,14 @@ public KEY getKey() throws IOException {
public VALUE getValue() throws IOException {
return decodeValue(rawKeyValue.getValue());
}
+
+ public byte[] getRawKey() throws IOException {
+ return rawKeyValue.getKey();
+ }
+
+ public byte[] getRawValue() throws IOException {
+ return rawKeyValue.getValue();
+ }
}
RawIterator<CodecBuffer> newCodecBufferTableIterator(
@@ -597,9 +605,11 @@ public CodecBuffer get() {
@Override
KeyValue<KEY, VALUE> convert(KeyValue<CodecBuffer, CodecBuffer> raw)
throws IOException {
- final KEY key = keyCodec.fromCodecBuffer(raw.getKey());
- final VALUE value = valueCodec.fromCodecBuffer(raw.getValue());
- return Table.newKeyValue(key, value);
+ CodecBuffer keyCodecBuffer = raw.getKey();
+ final KEY key = keyCodec.fromCodecBuffer(keyCodecBuffer);
+ CodecBuffer valueCodecBuffer = raw.getValue();
+ final VALUE value = valueCodec.fromCodecBuffer(valueCodecBuffer);
+ return Table.newKeyValue(key, value, keyCodecBuffer.getArray(),
valueCodecBuffer.getArray());
}
};
}
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index e274d822b6..0a1fd9f681 100644
---
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -410,13 +410,6 @@ private OMConfigKeys() {
public static final String
OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT = "300s";
- public static final String OZONE_PATH_DELETING_LIMIT_PER_TASK =
- "ozone.path.deleting.limit.per.task";
- // default is 6000 taking account of 32MB buffer size, and assuming
- // 4KB size (considering acls, key/file name, and other meata) * 6000
- // resulting 24MB
- public static final int OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT = 6000;
-
public static final String OZONE_THREAD_NUMBER_DIR_DELETION =
"ozone.thread.number.dir.deletion";
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
index 6cced07848..f31982e0f0 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
@@ -90,7 +90,6 @@
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.eq;
@@ -116,7 +115,6 @@ public class TestDirectoryDeletingServiceWithFSO {
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 2000);
- conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1000,
TimeUnit.MILLISECONDS);
@@ -155,7 +153,7 @@ public static void teardown() {
}
@AfterEach
- public void cleanup() {
+ public void cleanup() throws InterruptedException, TimeoutException {
assertDoesNotThrow(() -> {
Path root = new Path("/");
FileStatus[] fileStatuses = fs.listStatus(root);
@@ -273,8 +271,6 @@ public void testDeleteWithLargeSubPathsThanBatchSize()
throws Exception {
assertTableRowCount(dirTable, 1);
assertSubPathsCount(dirDeletingService::getMovedFilesCount, 15);
- // 15 subDir + 3 parentDir
- assertSubPathsCount(dirDeletingService::getMovedDirsCount, 18);
assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 19);
assertEquals(15, metrics.getNumSubFilesSentForPurge());
@@ -335,7 +331,7 @@ public void testDeleteWithMultiLevels() throws Exception {
assertTableRowCount(dirTable, 0);
assertSubPathsCount(dirDeletingService::getMovedFilesCount, 3);
- assertSubPathsCount(dirDeletingService::getMovedDirsCount, 2);
+ assertSubPathsCount(dirDeletingService::getMovedDirsCount, 0);
assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 5);
assertEquals(5, metrics.getNumDirsSentForPurge());
assertEquals(5, metrics.getNumDirsPurged());
@@ -431,7 +427,8 @@ public void testDeleteWithMultiLevelsBlockDoubleBuffer()
throws Exception {
omDoubleBuffer.stopDaemon();
OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
- OzoneBucket bucket = volume.getBucket(bucketName); long volumeId =
metadataManager.getVolumeId(volumeName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+ long volumeId = metadataManager.getVolumeId(volumeName);
// manually delete dir and add to deleted table. namespace count occupied
"1" as manual deletion do not reduce
long bucketId = metadataManager.getBucketId(volumeName, bucketName);
@@ -629,13 +626,14 @@ public void
testAOSKeyDeletingWithSnapshotCreateParallelExecution()
assertTableRowCount(deletedDirTable, initialDeletedCount + 1);
assertTableRowCount(renameTable, initialRenameCount + 1);
Mockito.doAnswer(i -> {
- List<OzoneManagerProtocolProtos.PurgePathRequest> purgePathRequestList =
i.getArgument(5);
+ List<OzoneManagerProtocolProtos.PurgePathRequest> purgePathRequestList =
i.getArgument(4);
for (OzoneManagerProtocolProtos.PurgePathRequest purgeRequest :
purgePathRequestList) {
Assertions.assertNotEquals(deletePathKey,
purgeRequest.getDeletedDir());
}
- return i.callRealMethod();
- }).when(service).optimizeDirDeletesAndSubmitRequest(anyLong(), anyLong(),
anyLong(),
- anyLong(), anyList(), anyList(), eq(null), anyLong(), anyInt(),
Mockito.any(), any(), anyLong());
+ return null;
+ }).when(service).optimizeDirDeletesAndSubmitRequest(anyLong(), anyLong(),
+ anyLong(), anyList(), anyList(), eq(null), anyLong(), anyLong(),
Mockito.any(), any(),
+ anyLong());
Mockito.doAnswer(i -> {
store.createSnapshot(testVolumeName, testBucketName, snap2);
@@ -783,6 +781,7 @@ public void testDirDeletedTableCleanUpForSnapshot() throws
Exception {
assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 0);
// Manual cleanup deletedDirTable for next tests
+ client.getObjectStore().deleteSnapshot(volumeName, bucketName, "snap1");
cleanupTables();
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java
index 4a3bd85aa3..84d949d394 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java
@@ -85,7 +85,6 @@ public class TestRootedDDSWithFSO {
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 1);
- conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
TimeUnit.MILLISECONDS);
conf.setBoolean(OZONE_ACL_ENABLED, true);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
index 9bafe148ae..5575b3dc6d 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
@@ -134,7 +134,6 @@ public void setup() throws Exception {
conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT,
10000, TimeUnit.MILLISECONDS);
conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 500);
- conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
TimeUnit.MILLISECONDS);
conf.setBoolean(OZONE_ACL_ENABLED, true);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
index 80f06b2ef5..359c6d45e1 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
@@ -52,6 +52,8 @@
import javax.ws.rs.core.Response;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
@@ -60,7 +62,6 @@
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
-import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -86,7 +87,6 @@ public class TestReconInsightsForDeletedDirectories {
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000);
- conf.setInt(OZONE_PATH_DELETING_LIMIT_PER_TASK, 0);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000,
TimeUnit.MILLISECONDS);
conf.setBoolean(OZONE_ACL_ENABLED, true);
@@ -124,7 +124,7 @@ public static void teardown() {
}
@AfterEach
- public void cleanup() {
+ public void cleanup() throws IOException {
assertDoesNotThrow(() -> {
Path root = new Path("/");
FileStatus[] fileStatuses = fs.listStatus(root);
@@ -417,24 +417,31 @@ private void cleanupTables() throws IOException {
OMMetadataManager metadataManager =
cluster.getOzoneManager().getMetadataManager();
- try (TableIterator<?, ?> it = metadataManager.getDeletedDirTable()
- .iterator()) {
- removeAllFromDB(it);
+ Table<String, OmKeyInfo> deletedDirTable =
+ metadataManager.getDeletedDirTable();
+ try (TableIterator<String, ? extends Table.KeyValue<String, ?>> it =
deletedDirTable.iterator()) {
+ removeAllFromDB(it, deletedDirTable);
}
- try (TableIterator<?, ?> it = metadataManager.getFileTable().iterator()) {
- removeAllFromDB(it);
+ Table<String, OmKeyInfo> fileTable = metadataManager.getFileTable();
+ try (TableIterator<String, ? extends Table.KeyValue<String, ?>> it =
fileTable.iterator()) {
+ removeAllFromDB(it, fileTable);
}
- try (TableIterator<?, ?> it = metadataManager.getDirectoryTable()
- .iterator()) {
- removeAllFromDB(it);
+ Table<String, OmDirectoryInfo> directoryTable =
+ metadataManager.getDirectoryTable();
+ try (TableIterator<String, ? extends Table.KeyValue<String, ?>> it =
directoryTable.iterator()) {
+ removeAllFromDB(it, directoryTable);
}
}
- private static void removeAllFromDB(TableIterator<?, ?> iterator)
- throws IOException {
+ private static void removeAllFromDB(
+ TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator,
+ Table<String, ?> table) throws IOException {
+ List<String> keysToDelete = new ArrayList<>();
while (iterator.hasNext()) {
- iterator.next();
- iterator.removeFromDB();
+ keysToDelete.add(iterator.next().getKey());
+ }
+ for (String keyToDelete : keysToDelete) {
+ table.delete(keyToDelete);
}
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java
new file mode 100644
index 0000000000..171525c149
--- /dev/null
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+
+import java.util.List;
+
+/**
+ * Used in {@link org.apache.hadoop.ozone.om.service.DirectoryDeletingService}
+ * to capture the result of each delete task.
+ */
+public class DeleteKeysResult {
+
+ private List<OmKeyInfo> keysToDelete;
+ private long consumedSize;
+
+ private boolean processedKeys;
+
+ public DeleteKeysResult(List<OmKeyInfo> keysToDelete,
+ long consumedSize, boolean processedKeys) {
+ this.keysToDelete = keysToDelete;
+ this.consumedSize = consumedSize;
+ this.processedKeys = processedKeys;
+ }
+
+ public List<OmKeyInfo> getKeysToDelete() {
+ return keysToDelete;
+ }
+
+ public long getConsumedSize() {
+ return consumedSize;
+ }
+
+ public boolean isProcessedKeys() {
+ return processedKeys;
+ }
+}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
index 9f6d8b81c1..db3d47dfcd 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
@@ -277,23 +277,21 @@ default List<Table.KeyValue<String, OmKeyInfo>>
getDeletedDirEntries(String volu
* Returns all sub directories under the given parent directory.
*
* @param parentInfo
- * @param numEntries
* @return list of dirs
* @throws IOException
*/
- List<OmKeyInfo> getPendingDeletionSubDirs(long volumeId, long bucketId,
- OmKeyInfo parentInfo, long numEntries) throws IOException;
+ DeleteKeysResult getPendingDeletionSubDirs(long volumeId, long bucketId,
+ OmKeyInfo parentInfo, long remainingBufLimit) throws IOException;
/**
* Returns all sub files under the given parent directory.
*
* @param parentInfo
- * @param numEntries
* @return list of files
* @throws IOException
*/
- List<OmKeyInfo> getPendingDeletionSubFiles(long volumeId,
- long bucketId, OmKeyInfo parentInfo, long numEntries)
+ DeleteKeysResult getPendingDeletionSubFiles(long volumeId,
+ long bucketId, OmKeyInfo parentInfo, long remainingBufLimit)
throws IOException;
/**
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 8e3bbb47c3..4fe509d7e9 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2072,8 +2072,8 @@ public Table.KeyValue<String, OmKeyInfo>
getPendingDeletionDir()
}
@Override
- public List<OmKeyInfo> getPendingDeletionSubDirs(long volumeId, long
bucketId,
- OmKeyInfo parentInfo, long numEntries) throws IOException {
+ public DeleteKeysResult getPendingDeletionSubDirs(long volumeId, long
bucketId,
+ OmKeyInfo parentInfo, long remainingBufLimit) throws IOException {
String seekDirInDB = metadataManager.getOzonePathKey(volumeId, bucketId,
parentInfo.getObjectID(), "");
long countEntries = 0;
@@ -2082,31 +2082,38 @@ public List<OmKeyInfo> getPendingDeletionSubDirs(long
volumeId, long bucketId,
try (TableIterator<String,
? extends Table.KeyValue<String, OmDirectoryInfo>>
iterator = dirTable.iterator()) {
- return gatherSubDirsWithIterator(parentInfo, numEntries,
- seekDirInDB, countEntries, iterator);
+ return gatherSubDirsWithIterator(parentInfo,
+ seekDirInDB, countEntries, iterator, remainingBufLimit);
}
}
- private List<OmKeyInfo> gatherSubDirsWithIterator(OmKeyInfo parentInfo,
- long numEntries, String seekDirInDB,
+ private DeleteKeysResult gatherSubDirsWithIterator(OmKeyInfo parentInfo,
+ String seekDirInDB,
long countEntries,
TableIterator<String,
- ? extends Table.KeyValue<String, OmDirectoryInfo>> iterator)
+ ? extends Table.KeyValue<String, OmDirectoryInfo>> iterator, long
remainingBufLimit)
throws IOException {
List<OmKeyInfo> directories = new ArrayList<>();
iterator.seek(seekDirInDB);
+ long consumedSize = 0;
+ boolean processedSubDirs = false;
- while (iterator.hasNext() && numEntries - countEntries > 0) {
+ while (iterator.hasNext() && remainingBufLimit > 0) {
Table.KeyValue<String, OmDirectoryInfo> entry = iterator.next();
OmDirectoryInfo dirInfo = entry.getValue();
+ long objectSerializedSize = entry.getRawValue().length;
if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
parentInfo.getObjectID())) {
+ processedSubDirs = true;
break;
}
if (!metadataManager.getDirectoryTable().isExist(entry.getKey())) {
continue;
}
+ if (remainingBufLimit - objectSerializedSize < 0) {
+ break;
+ }
String dirName = OMFileRequest.getAbsolutePath(parentInfo.getKeyName(),
dirInfo.getName());
OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(
@@ -2114,19 +2121,24 @@ private List<OmKeyInfo>
gatherSubDirsWithIterator(OmKeyInfo parentInfo,
dirName);
directories.add(omKeyInfo);
countEntries++;
+ remainingBufLimit -= objectSerializedSize;
+ consumedSize += objectSerializedSize;
}
- return directories;
+ processedSubDirs = processedSubDirs || (!iterator.hasNext());
+
+ return new DeleteKeysResult(directories, consumedSize, processedSubDirs);
}
@Override
- public List<OmKeyInfo> getPendingDeletionSubFiles(long volumeId,
- long bucketId, OmKeyInfo parentInfo, long numEntries)
+ public DeleteKeysResult getPendingDeletionSubFiles(long volumeId,
+ long bucketId, OmKeyInfo parentInfo, long remainingBufLimit)
throws IOException {
List<OmKeyInfo> files = new ArrayList<>();
String seekFileInDB = metadataManager.getOzonePathKey(volumeId, bucketId,
parentInfo.getObjectID(), "");
- long countEntries = 0;
+ long consumedSize = 0;
+ boolean processedSubFiles = false;
Table fileTable = metadataManager.getFileTable();
try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
@@ -2134,27 +2146,34 @@ public List<OmKeyInfo> getPendingDeletionSubFiles(long
volumeId,
iterator.seek(seekFileInDB);
- while (iterator.hasNext() && numEntries - countEntries > 0) {
+ while (iterator.hasNext() && remainingBufLimit > 0) {
Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
OmKeyInfo fileInfo = entry.getValue();
+ long objectSerializedSize = entry.getRawValue().length;
if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(),
parentInfo.getObjectID())) {
+ processedSubFiles = true;
break;
}
if (!metadataManager.getFileTable().isExist(entry.getKey())) {
continue;
}
+ if (remainingBufLimit - objectSerializedSize < 0) {
+ break;
+ }
fileInfo.setFileName(fileInfo.getKeyName());
String fullKeyPath = OMFileRequest.getAbsolutePath(
parentInfo.getKeyName(), fileInfo.getKeyName());
fileInfo.setKeyName(fullKeyPath);
files.add(fileInfo);
- countEntries++;
+ remainingBufLimit -= objectSerializedSize;
+ consumedSize += objectSerializedSize;
}
+ processedSubFiles = processedSubFiles || (!iterator.hasNext());
}
- return files;
+ return new DeleteKeysResult(files, consumedSize, processedSubFiles);
}
public boolean isBucketFSOptimized(String volName, String buckName)
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
index de4241b7ac..394ae02100 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -243,7 +243,6 @@ public class OMMetrics implements OmMetadataReaderMetrics {
private @Metric MutableCounterLong ecKeyCreateFailsTotal;
private @Metric MutableCounterLong ecBucketCreateTotal;
private @Metric MutableCounterLong ecBucketCreateFailsTotal;
-
private final DBCheckpointMetrics dbCheckpointMetrics;
public OMMetrics() {
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
index a3d7ccb661..6369d708a2 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+import org.apache.hadoop.ozone.om.DeleteKeysResult;
import org.apache.hadoop.ozone.om.DeletingServiceMetrics;
import org.apache.hadoop.ozone.om.KeyManager;
import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -348,9 +349,9 @@ private OzoneManagerProtocolProtos.PurgePathRequest
wrapPurgeRequest(
}
protected PurgePathRequest prepareDeleteDirRequest(
- long remainNum, OmKeyInfo pendingDeletedDirInfo, String delDirName,
+ OmKeyInfo pendingDeletedDirInfo, String delDirName,
List<Pair<String, OmKeyInfo>> subDirList,
- KeyManager keyManager) throws IOException {
+ KeyManager keyManager, long remainingBufLimit) throws IOException {
// step-0: Get one pending deleted directory
if (LOG.isDebugEnabled()) {
LOG.debug("Pending deleted dir name: {}",
@@ -362,10 +363,11 @@ protected PurgePathRequest prepareDeleteDirRequest(
final long bucketId = Long.parseLong(keys[2]);
// step-1: get all sub directories under the deletedDir
- List<OmKeyInfo> subDirs = keyManager
- .getPendingDeletionSubDirs(volumeId, bucketId,
- pendingDeletedDirInfo, remainNum);
- remainNum = remainNum - subDirs.size();
+ DeleteKeysResult subDirDeleteResult =
+ keyManager.getPendingDeletionSubDirs(volumeId, bucketId,
+ pendingDeletedDirInfo, remainingBufLimit);
+ List<OmKeyInfo> subDirs = subDirDeleteResult.getKeysToDelete();
+ remainingBufLimit -= subDirDeleteResult.getConsumedSize();
OMMetadataManager omMetadataManager = keyManager.getMetadataManager();
for (OmKeyInfo dirInfo : subDirs) {
@@ -378,10 +380,10 @@ protected PurgePathRequest prepareDeleteDirRequest(
}
// step-2: get all sub files under the deletedDir
- List<OmKeyInfo> subFiles = keyManager
- .getPendingDeletionSubFiles(volumeId, bucketId,
- pendingDeletedDirInfo, remainNum);
- remainNum = remainNum - subFiles.size();
+ DeleteKeysResult subFileDeleteResult =
+ keyManager.getPendingDeletionSubFiles(volumeId, bucketId,
+ pendingDeletedDirInfo, remainingBufLimit);
+ List<OmKeyInfo> subFiles = subFileDeleteResult.getKeysToDelete();
if (LOG.isDebugEnabled()) {
for (OmKeyInfo fileInfo : subFiles) {
@@ -389,50 +391,39 @@ protected PurgePathRequest prepareDeleteDirRequest(
}
}
- // step-3: Since there is a boundary condition of 'numEntries' in
- // each batch, check whether the sub paths count reached batch size
- // limit. If count reached limit then there can be some more child
- // paths to be visited and will keep the parent deleted directory
- // for one more pass.
- String purgeDeletedDir = remainNum > 0 ? delDirName : null;
+ // step-3: If both sub-dirs and sub-files are exhausted under a parent
+ // directory, only then delete the parent.
+ String purgeDeletedDir = subDirDeleteResult.isProcessedKeys() &&
+ subFileDeleteResult.isProcessedKeys() ? delDirName : null;
return wrapPurgeRequest(volumeId, bucketId,
purgeDeletedDir, subFiles, subDirs);
}
@SuppressWarnings("checkstyle:ParameterNumber")
- public long optimizeDirDeletesAndSubmitRequest(long remainNum,
+ public void optimizeDirDeletesAndSubmitRequest(
long dirNum, long subDirNum, long subFileNum,
List<Pair<String, OmKeyInfo>> allSubDirList,
List<PurgePathRequest> purgePathRequestList,
String snapTableKey, long startTime,
- int remainingBufLimit, KeyManager keyManager,
+ long remainingBufLimit, KeyManager keyManager,
UUID expectedPreviousSnapshotId, long rnCnt) {
- long limit = remainNum;
// Optimization to handle delete sub-dir and keys to remove quickly
// This case will be useful to handle when depth of directory is high
int subdirDelNum = 0;
int subDirRecursiveCnt = 0;
int consumedSize = 0;
- while (remainNum > 0 && subDirRecursiveCnt < allSubDirList.size()) {
+ while (subDirRecursiveCnt < allSubDirList.size() && remainingBufLimit > 0)
{
try {
Pair<String, OmKeyInfo> stringOmKeyInfoPair
= allSubDirList.get(subDirRecursiveCnt);
PurgePathRequest request = prepareDeleteDirRequest(
- remainNum, stringOmKeyInfoPair.getValue(),
- stringOmKeyInfoPair.getKey(), allSubDirList,
- keyManager);
- if (isBufferLimitCrossed(remainingBufLimit, consumedSize,
- request.getSerializedSize())) {
- // ignore further add request
- break;
- }
+ stringOmKeyInfoPair.getValue(),
+ stringOmKeyInfoPair.getKey(), allSubDirList, keyManager,
+ remainingBufLimit);
consumedSize += request.getSerializedSize();
+ remainingBufLimit -= consumedSize;
purgePathRequestList.add(request);
- // reduce remain count for self, sub-files, and sub-directories
- remainNum = remainNum - 1;
- remainNum = remainNum - request.getDeletedSubFilesCount();
- remainNum = remainNum - request.getMarkDeletedSubDirsCount();
// Count up the purgeDeletedDir, subDirs and subFiles
if (request.getDeletedDir() != null
&& !request.getDeletedDir().isEmpty()) {
@@ -461,13 +452,12 @@ public long optimizeDirDeletesAndSubmitRequest(long
remainNum,
LOG.info("Number of dirs deleted: {}, Number of sub-dir " +
"deleted: {}, Number of sub-files moved:" +
" {} to DeletedTable, Number of sub-dirs moved {} to " +
- "DeletedDirectoryTable, limit per iteration: {}, iteration
elapsed: {}ms, " +
+ "DeletedDirectoryTable, iteration elapsed: {}ms, " +
" totalRunCount: {}",
- dirNum, subdirDelNum, subFileNum, (subDirNum - subdirDelNum), limit,
+ dirNum, subdirDelNum, subFileNum, (subDirNum - subdirDelNum),
timeTakenInIteration, rnCnt);
metrics.incrementDirectoryDeletionTotalMetrics(dirNum + subdirDelNum,
subDirNum, subFileNum);
}
- return remainNum;
}
/**
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
index 05555439ac..1828ee73c0 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
@@ -52,9 +52,6 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT;
-
/**
* This is a background service to delete orphan directories and its
* sub paths(sub-dirs and sub-files).
@@ -80,11 +77,7 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService {
// from parent directory info from deleted directory table concurrently
// and send deletion requests.
private final int dirDeletingCorePoolSize;
- private static final int MIN_ERR_LIMIT_PER_TASK = 1000;
-
- // Number of items(dirs/files) to be batched in an iteration.
- private final long pathLimitPerTask;
- private final int ratisByteLimit;
+ private int ratisByteLimit;
private final AtomicBoolean suspended;
private AtomicBoolean isRunningOnAOS;
@@ -97,9 +90,6 @@ public DirectoryDeletingService(long interval, TimeUnit unit,
OzoneConfiguration configuration, int dirDeletingServiceCorePoolSize) {
super(DirectoryDeletingService.class.getSimpleName(), interval, unit,
dirDeletingServiceCorePoolSize, serviceTimeout, ozoneManager, null);
- this.pathLimitPerTask = configuration
- .getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK,
- OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT);
int limit = (int) configuration.getStorageSize(
OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT,
OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT,
@@ -145,6 +135,10 @@ public void resume() {
suspended.set(false);
}
+ public void setRatisByteLimit(int ratisByteLimit) {
+ this.ratisByteLimit = ratisByteLimit;
+ }
+
@Override
public BackgroundTaskQueue getTasks() {
BackgroundTaskQueue queue = new BackgroundTaskQueue();
@@ -221,11 +215,11 @@ public BackgroundTaskResult call() {
long dirNum = 0L;
long subDirNum = 0L;
long subFileNum = 0L;
- long remainNum = pathLimitPerTask;
+ long remainingBufLimit = ratisByteLimit;
int consumedSize = 0;
List<PurgePathRequest> purgePathRequestList = new ArrayList<>();
List<Pair<String, OmKeyInfo>> allSubDirList =
- new ArrayList<>((int) remainNum);
+ new ArrayList<>();
Table.KeyValue<String, OmKeyInfo> pendingDeletedDirInfo;
// This is to avoid race condition b/w purge request and snapshot
chain updation. For AOS taking the global
@@ -236,7 +230,7 @@ public BackgroundTaskResult call() {
.getLatestGlobalSnapshotId();
long startTime = Time.monotonicNow();
- while (remainNum > 0) {
+ while (remainingBufLimit > 0) {
pendingDeletedDirInfo = getPendingDeletedDirInfo();
if (pendingDeletedDirInfo == null) {
break;
@@ -247,31 +241,14 @@ public BackgroundTaskResult call() {
continue;
}
- PurgePathRequest request = prepareDeleteDirRequest(remainNum,
+ PurgePathRequest request = prepareDeleteDirRequest(
pendingDeletedDirInfo.getValue(),
pendingDeletedDirInfo.getKey(), allSubDirList,
- getOzoneManager().getKeyManager());
- if (isBufferLimitCrossed(ratisByteLimit, consumedSize,
- request.getSerializedSize())) {
- if (purgePathRequestList.size() != 0) {
- // if message buffer reaches max limit, avoid sending further
- remainNum = 0;
- break;
- }
- // if directory itself is having a lot of keys / files,
- // reduce capacity to minimum level
- remainNum = MIN_ERR_LIMIT_PER_TASK;
- request = prepareDeleteDirRequest(remainNum,
- pendingDeletedDirInfo.getValue(),
- pendingDeletedDirInfo.getKey(), allSubDirList,
- getOzoneManager().getKeyManager());
- }
+ getOzoneManager().getKeyManager(), remainingBufLimit);
+
consumedSize += request.getSerializedSize();
+ remainingBufLimit -= consumedSize;
purgePathRequestList.add(request);
- // reduce remain count for self, sub-files, and sub-directories
- remainNum = remainNum - 1;
- remainNum = remainNum - request.getDeletedSubFilesCount();
- remainNum = remainNum - request.getMarkDeletedSubDirsCount();
// Count up the purgeDeletedDir, subDirs and subFiles
if (request.getDeletedDir() != null && !request.getDeletedDir()
.isEmpty()) {
@@ -280,9 +257,10 @@ public BackgroundTaskResult call() {
subDirNum += request.getMarkDeletedSubDirsCount();
subFileNum += request.getDeletedSubFilesCount();
}
- optimizeDirDeletesAndSubmitRequest(remainNum, dirNum, subDirNum,
+
+ optimizeDirDeletesAndSubmitRequest(dirNum, subDirNum,
subFileNum, allSubDirList, purgePathRequestList, null,
- startTime, ratisByteLimit - consumedSize,
+ startTime, remainingBufLimit,
getOzoneManager().getKeyManager(), expectedPreviousSnapshotId,
rnCnt);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
index 681b24b8e4..de5965d1c2 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
@@ -50,10 +50,6 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -161,64 +157,8 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception {
(DirectoryDeletingService) keyManager.getDirDeletingService();
GenericTestUtils.waitFor(
() -> dirDeletingService.getMovedFilesCount() >= 1000
- && dirDeletingService.getMovedFilesCount() < 2000,
+ && dirDeletingService.getMovedFilesCount() <= 2000,
500, 60000);
assertThat(dirDeletingService.getRunCount().get()).isGreaterThanOrEqualTo(1);
}
-
- @Test
- public void testDeleteDirectoryFlatDirsHavingNoChilds() throws Exception {
- OzoneConfiguration conf = createConfAndInitValues();
- OmTestManagers omTestManagers
- = new OmTestManagers(conf);
- KeyManager keyManager = omTestManagers.getKeyManager();
- writeClient = omTestManagers.getWriteClient();
- om = omTestManagers.getOzoneManager();
-
- OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
- om.getMetadataManager(), BucketLayout.FILE_SYSTEM_OPTIMIZED);
- String bucketKey = om.getMetadataManager().getBucketKey(volumeName,
bucketName);
- OmBucketInfo bucketInfo =
om.getMetadataManager().getBucketTable().get(bucketKey);
-
- int dirCreatesCount = OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT * 2 + 100;
- long parentId = 1;
- OmDirectoryInfo baseDir = new OmDirectoryInfo.Builder().setName("dir_base")
- .setCreationTime(Time.now()).setModificationTime(Time.now())
- .setObjectID(parentId).setParentObjectID(bucketInfo.getObjectID())
- .setUpdateID(0).build();
- OMRequestTestUtils.addDirKeyToDirTable(true, baseDir, volumeName,
bucketName,
- 1L, om.getMetadataManager());
- for (int i = 0; i < dirCreatesCount; ++i) {
- OmDirectoryInfo dir1 = new OmDirectoryInfo.Builder().setName("dir" + i)
-
.setCreationTime(Time.now()).setModificationTime(Time.now()).setParentObjectID(parentId)
- .setObjectID(i + 100).setUpdateID(i).build();
- OMRequestTestUtils.addDirKeyToDirTable(true, dir1, volumeName,
bucketName,
- 1L, om.getMetadataManager());
- }
-
- DirectoryDeletingService dirDeletingService =
keyManager.getDirDeletingService();
- long[] delDirCnt = new long[2];
- delDirCnt[0] = dirDeletingService.getDeletedDirsCount();
-
- OmKeyArgs delArgs = new OmKeyArgs.Builder()
-
.setVolumeName(volumeName).setBucketName(bucketName).setKeyName("dir_base")
- .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
- .setDataSize(0).setRecursive(true).build();
- writeClient.deleteKey(delArgs);
- int pathDelLimit = conf.getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK,
- OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT);
- int numThread = conf.getInt(OZONE_THREAD_NUMBER_DIR_DELETION,
- OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT);
-
- // check if difference between each run should not cross the directory
deletion limit
- // and wait till all dir is removed
- GenericTestUtils.waitFor(() -> {
- delDirCnt[1] = dirDeletingService.getDeletedDirsCount();
- assertTrue(
- delDirCnt[1] - delDirCnt[0] <= ((long) pathDelLimit * numThread),
- "base: " + delDirCnt[0] + ", new: " + delDirCnt[1]);
- delDirCnt[0] = delDirCnt[1];
- return dirDeletingService.getDeletedDirsCount() >= dirCreatesCount;
- }, 500, 300000);
- }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]