This is an automated email from the ASF dual-hosted git repository.
swamirishi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 22fa6f5ea2e HDDS-13036. Bucket Quota usage should be a total of AOS usage + Snapshot Usage (#8587)
22fa6f5ea2e is described below
commit 22fa6f5ea2e0ed8034096147534b4089f320ef87
Author: Swaminathan Balachandran <[email protected]>
AuthorDate: Fri Oct 10 06:03:04 2025 -0400
HDDS-13036. Bucket Quota usage should be a total of AOS usage + Snapshot Usage (#8587)
---
hadoop-hdds/test-utils/pom.xml | 5 ++
.../org/apache/ozone/test/GenericTestUtils.java | 20 +++++
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 8 +-
.../java/org/apache/hadoop/fs/ozone/TestHSync.java | 28 +++---
.../ozone/client/rpc/OzoneRpcClientTests.java | 99 +++++++++++++---------
.../ozone/client/rpc/TestSecureOzoneRpcClient.java | 11 ++-
.../hadoop/ozone/om/BucketUtilizationMetrics.java | 2 +-
.../request/bucket/OMBucketSetPropertyRequest.java | 4 +-
.../hadoop/ozone/om/request/key/OMKeyRequest.java | 23 +----
.../ozone/om/TestBucketUtilizationMetrics.java | 4 +-
.../request/key/TestOMOpenKeysDeleteRequest.java | 2 -
.../ozone/om/service/TestKeyDeletingService.java | 1 -
12 files changed, 118 insertions(+), 89 deletions(-)
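In short, the commit makes the bucket usage reported to clients and used in quota enforcement cover both active object store (AOS) data and data still retained by snapshots. A minimal sketch of the aggregation the new accessors imply, assuming OmBucketInfo tracks snapshot usage via getSnapshotUsedBytes()/getSnapshotUsedNamespace() as referenced in TestKeyDeletingService below (the method bodies here are illustrative, not the actual implementation):

    // Illustrative only: total space charged against the bucket space quota.
    public long getTotalBucketSpace() {
      return getUsedBytes() + getSnapshotUsedBytes();           // AOS bytes + bytes retained by snapshots
    }

    // Illustrative only: total namespace (key/dir count) charged against the namespace quota.
    public long getTotalBucketNamespace() {
      return getUsedNamespace() + getSnapshotUsedNamespace();
    }
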
diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml
index 4a2ded61359..4dd86c7dc1f 100644
--- a/hadoop-hdds/test-utils/pom.xml
+++ b/hadoop-hdds/test-utils/pom.xml
@@ -87,6 +87,11 @@
<artifactId>log4j-api</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.apache.ratis</groupId>
+ <artifactId>ratis-common</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
diff --git a/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/GenericTestUtils.java
index 71d116b55c1..af69f102382 100644
--- a/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/GenericTestUtils.java
+++ b/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/GenericTestUtils.java
@@ -47,6 +47,7 @@
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
+import org.apache.ratis.util.function.CheckedSupplier;
import org.junit.jupiter.api.Assertions;
import org.mockito.Mockito;
import org.slf4j.LoggerFactory;
@@ -79,6 +80,25 @@ public static Instant getTestStartTime() {
return Instant.ofEpochMilli(System.currentTimeMillis());
}
+ /**
+  * Waits for the condition specified by the given {@code check} to return {@code true}.
+  * If the condition throws an exception, the exception is treated as the condition not yet
+  * being satisfied and the check is retried.
+  * The condition is checked initially and then at intervals specified by
+  * {@code checkEveryMillis}, until the total time exceeds {@code waitForMillis}.
+  * If the condition is not satisfied within the allowed time, a {@link TimeoutException} is thrown.
+  * If interrupted while waiting, an {@link InterruptedException} is thrown.
+  */
+ public static <E extends Exception> void waitFor(CheckedSupplier<Boolean, E> check, int checkEveryMillis,
+     int waitForMillis) throws InterruptedException, TimeoutException {
+ waitFor((BooleanSupplier) () -> {
+ try {
+ return check.get();
+ } catch (Exception e) {
+ return false;
+ }
+ }, checkEveryMillis, waitForMillis);
+ }
+
/**
* Wait for the specified test to return true. The test will be performed
* initially and then every {@code checkEveryMillis} until at least
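The new overload above accepts a condition that may throw a checked exception, retrying instead of failing when it does. An illustrative call, mirroring how the updated tests below use it (volume/bucket names and the expected value are placeholders):

    // Poll every second for up to 30 seconds; an IOException from the lookup counts as "not yet satisfied".
    GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () ->
        expectedUsedBytes == store.getVolume(volumeName).getBucket(bucketName).getUsedBytes(),
        1000, 30000);
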
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 77af5a9176b..ee4070c9eba 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1296,8 +1296,8 @@ public OzoneBucket getBucketDetails(
bucketInfo.getEncryptionKeyInfo().getKeyName() : null)
.setSourceVolume(bucketInfo.getSourceVolume())
.setSourceBucket(bucketInfo.getSourceBucket())
- .setUsedBytes(bucketInfo.getUsedBytes())
- .setUsedNamespace(bucketInfo.getUsedNamespace())
+ .setUsedBytes(bucketInfo.getTotalBucketSpace())
+ .setUsedNamespace(bucketInfo.getTotalBucketNamespace())
.setQuotaInBytes(bucketInfo.getQuotaInBytes())
.setQuotaInNamespace(bucketInfo.getQuotaInNamespace())
.setBucketLayout(bucketInfo.getBucketLayout())
@@ -1327,8 +1327,8 @@ public List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
bucket.getEncryptionKeyInfo().getKeyName() : null)
.setSourceVolume(bucket.getSourceVolume())
.setSourceBucket(bucket.getSourceBucket())
- .setUsedBytes(bucket.getUsedBytes())
- .setUsedNamespace(bucket.getUsedNamespace())
+ .setUsedBytes(bucket.getTotalBucketSpace())
+ .setUsedNamespace(bucket.getTotalBucketNamespace())
.setQuotaInBytes(bucket.getQuotaInBytes())
.setQuotaInNamespace(bucket.getQuotaInNamespace())
.setBucketLayout(bucket.getBucketLayout())
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
index 7ed1e1012b0..9643598e7e9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
@@ -126,6 +126,7 @@
import org.apache.ratis.protocol.RaftClientReply;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.util.function.CheckedSupplier;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
@@ -1363,11 +1364,6 @@ public void testNormalKeyOverwriteHSyncKey() throws Exception {
assertArrayEquals(data1.getBytes(UTF_8), readBuffer.array());
}
- // verify bucket info
- ozoneBucket = volume.getBucket(bucket.getName());
- assertEquals(keyInfo.getDataSize() * keyInfo.getReplicationConfig().getRequiredNodes() + usedBytes,
- ozoneBucket.getUsedBytes());
-
// Resume openKeyCleanupService
openKeyCleanupService.resume();
// Verify entry from openKey gets deleted eventually
@@ -1378,6 +1374,11 @@ public void testNormalKeyOverwriteHSyncKey() throws Exception {
throw new RuntimeException(e);
}
}, 100, 5000);
+ // verify bucket info
+ ozoneManager.getKeyManager().getDeletingService().resume();
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () ->
+ keyInfo.getDataSize() * keyInfo.getReplicationConfig().getRequiredNodes() + usedBytes ==
+ volume.getBucket(bucket.getName()).getUsedBytes(), 1000, 30000);
} finally {
cleanupDeletedTable(ozoneManager);
cleanupOpenKeyTable(ozoneManager, BUCKET_LAYOUT);
@@ -1439,7 +1440,8 @@ public void testHSyncKeyOverwriteNormalKey() throws Exception {
assertEquals(0, openKeys.size());
// There should be one key in delete table
assertEquals(1, deletedKeys.size());
-
+ assertTrue(deletedKeys.values().stream().findFirst().get().getOmKeyInfoList().get(0).isDeletedKeyCommitted());
+ ozoneManager.getKeyManager().getDeletingService().resume();
// final file will have data2 content
OzoneKeyDetails keyInfo = bucket.getKey(file.getName());
try (OzoneInputStream is = bucket.readKey(file.getName())) {
@@ -1450,9 +1452,9 @@ public void testHSyncKeyOverwriteNormalKey() throws Exception {
}
// verify bucket info
- ozoneBucket = volume.getBucket(bucket.getName());
- assertEquals(keyInfo.getDataSize() * keyInfo.getReplicationConfig().getRequiredNodes() + usedBytes,
- ozoneBucket.getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () ->
+ keyInfo.getDataSize() * keyInfo.getReplicationConfig().getRequiredNodes() + usedBytes ==
+ volume.getBucket(bucket.getName()).getUsedBytes(), 1000, 30000);
} finally {
cleanupDeletedTable(ozoneManager);
cleanupOpenKeyTable(ozoneManager, BUCKET_LAYOUT);
@@ -1527,11 +1529,11 @@ public void testHSyncKeyOverwriteHSyncKey() throws Exception {
assertEquals(keyInfo.getDataSize(), readLen);
assertArrayEquals(data2.getBytes(UTF_8), readBuffer.array());
}
-
+ ozoneManager.getKeyManager().getDeletingService().resume();
// verify bucket info
- ozoneBucket = volume.getBucket(bucket.getName());
- assertEquals(keyInfo.getDataSize() * keyInfo.getReplicationConfig().getRequiredNodes() + usedBytes,
- ozoneBucket.getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () ->
+ keyInfo.getDataSize() * keyInfo.getReplicationConfig().getRequiredNodes() + usedBytes ==
+ volume.getBucket(bucket.getName()).getUsedBytes(), 100, 30000);
} finally {
cleanupDeletedTable(ozoneManager);
cleanupOpenKeyTable(ozoneManager, BUCKET_LAYOUT);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
index 98c3e22594b..f8b61bec69a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
@@ -30,6 +30,7 @@
import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConsts.DEFAULT_OM_UPDATE_ID;
@@ -37,6 +38,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.GB;
import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME;
@@ -196,6 +198,7 @@
import org.apache.ozone.test.OzoneTestBase;
import org.apache.ozone.test.tag.Flaky;
import org.apache.ozone.test.tag.Unhealthy;
+import org.apache.ratis.util.function.CheckedSupplier;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Test;
@@ -259,6 +262,8 @@ static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder build
// for testZReadKeyWithUnhealthyContainerReplica.
conf.set("ozone.scm.stale.node.interval", "10s");
conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
+ conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS);
+ conf.setTimeDuration(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS);
ClientConfigForTesting.newBuilder(StorageUnit.MB)
.setDataStreamMinPacketSize(1)
@@ -1551,17 +1556,17 @@ public void testCheckUsedBytesQuota() throws IOException {
}
@Test
- public void testBucketUsedBytes() throws IOException {
+ public void testBucketUsedBytes() throws IOException, InterruptedException, TimeoutException {
bucketUsedBytesTestHelper(BucketLayout.OBJECT_STORE);
}
@Test
- public void testFSOBucketUsedBytes() throws IOException {
+ public void testFSOBucketUsedBytes() throws IOException, InterruptedException, TimeoutException {
bucketUsedBytesTestHelper(BucketLayout.FILE_SYSTEM_OPTIMIZED);
}
private void bucketUsedBytesTestHelper(BucketLayout bucketLayout)
- throws IOException {
+ throws IOException, InterruptedException, TimeoutException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
int blockSize = (int) ozoneManager.getConfiguration().getStorageSize(
@@ -1578,22 +1583,22 @@ private void bucketUsedBytesTestHelper(BucketLayout bucketLayout)
String keyName = UUID.randomUUID().toString();
writeKey(bucket, keyName, ONE, value, valueLength);
- assertEquals(valueLength,
- store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> valueLength ==
+ store.getVolume(volumeName).getBucket(bucketName).getUsedBytes(), 1000, 30000);
writeKey(bucket, keyName, ONE, value, valueLength);
- assertEquals(valueLength,
- store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> valueLength ==
+ store.getVolume(volumeName).getBucket(bucketName).getUsedBytes(), 1000, 30000);
// pre-allocate more blocks than needed
int fakeValueLength = valueLength + blockSize;
writeKey(bucket, keyName, ONE, value, fakeValueLength);
- assertEquals(valueLength,
- store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> valueLength ==
+ store.getVolume(volumeName).getBucket(bucketName).getUsedBytes(), 1000, 30000);
bucket.deleteKey(keyName);
- assertEquals(0L,
- store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 0L ==
+ store.getVolume(volumeName).getBucket(bucketName).getUsedBytes(), 1000, 30000);
}
static Stream<BucketLayout> bucketLayouts() {
@@ -1645,7 +1650,7 @@ void bucketUsedBytesOverWrite(BucketLayout bucketLayout)
@ParameterizedTest
@MethodSource("replicationConfigs")
- void testBucketQuota(ReplicationConfig repConfig) throws IOException {
+ void testBucketQuota(ReplicationConfig repConfig) throws IOException, InterruptedException, TimeoutException {
int blockSize = (int) ozoneManager.getConfiguration().getStorageSize(
OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES);
@@ -1656,7 +1661,7 @@ void testBucketQuota(ReplicationConfig repConfig) throws IOException {
}
private void bucketQuotaTestHelper(int keyLength, ReplicationConfig repConfig)
- throws IOException {
+ throws IOException, InterruptedException, TimeoutException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
@@ -1672,33 +1677,34 @@ private void bucketQuotaTestHelper(int keyLength, ReplicationConfig repConfig)
OzoneOutputStream out = bucket.createKey(keyName, keyLength,
repConfig, new HashMap<>());
// Write a new key and do not update Bucket UsedBytes until commit.
- assertEquals(0,
- store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 0 ==
+ store.getVolume(volumeName).getBucket(bucketName).getUsedBytes(), 1000, 30000);
out.write(value);
out.close();
// After committing the new key, the Bucket UsedBytes must be updated to
// keyQuota.
- assertEquals(keyQuota,
- store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> keyQuota ==
+ store.getVolume(volumeName).getBucket(bucketName).getUsedBytes(), 1000, 30000);
out = bucket.createKey(keyName, keyLength, repConfig, new HashMap<>());
// Overwrite an old key. The Bucket UsedBytes are not updated before the
// commit. So the Bucket UsedBytes remain unchanged.
- assertEquals(keyQuota,
- store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> keyQuota ==
+ store.getVolume(volumeName).getBucket(bucketName).getUsedBytes(), 1000, 30000);
out.write(value);
out.close();
- assertEquals(keyQuota,
- store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> keyQuota ==
+ store.getVolume(volumeName).getBucket(bucketName).getUsedBytes(), 1000, 30000);
bucket.deleteKey(keyName);
- assertEquals(0L,
- store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 0L == store.getVolume(volumeName)
+ .getBucket(bucketName).getUsedBytes(), 1000, 30000);
}
@ParameterizedTest
@MethodSource("bucketLayoutsWithEnablePaths")
- public void testBucketUsedNamespace(BucketLayout layout, boolean enablePaths) throws IOException {
+ public void testBucketUsedNamespace(BucketLayout layout, boolean enablePaths)
+ throws IOException, InterruptedException, TimeoutException {
boolean originalEnablePaths =
cluster.getOzoneManager().getConfig().isFileSystemPathEnabled();
cluster.getOzoneManager().getConfig().setFileSystemPathEnabled(enablePaths);
String volumeName = UUID.randomUUID().toString();
@@ -1716,16 +1722,23 @@ public void testBucketUsedNamespace(BucketLayout layout, boolean enablePaths) th
String keyName2 = UUID.randomUUID().toString();
writeKey(bucket, keyName1, ONE, value, valueLength);
- assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 1L == getBucketUsedNamespace(volumeName,
+ bucketName), 1000, 30000);
// Test create a file twice will not increase usedNamespace twice
writeKey(bucket, keyName1, ONE, value, valueLength);
- assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 1L == getBucketUsedNamespace(volumeName,
+ bucketName), 1000, 30000);
writeKey(bucket, keyName2, ONE, value, valueLength);
- assertEquals(2L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 2L == getBucketUsedNamespace(volumeName,
+ bucketName), 1000, 30000);
bucket.deleteKey(keyName1);
- assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor(
+ (CheckedSupplier<Boolean, IOException>) () -> 1L == getBucketUsedNamespace(volumeName, bucketName),
+ 1000, 30000);
bucket.deleteKey(keyName2);
- assertEquals(0L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor(
+ (CheckedSupplier<Boolean, IOException>) () -> 0L == getBucketUsedNamespace(volumeName, bucketName),
+ 1000, 30000);
RpcClient client = new RpcClient(cluster.getConf(), null);
try {
@@ -1733,10 +1746,12 @@ public void testBucketUsedNamespace(BucketLayout layout, boolean enablePaths) th
String directoryName2 = UUID.randomUUID().toString();
client.createDirectory(volumeName, bucketName, directoryName1);
- assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 1L == getBucketUsedNamespace(volumeName,
+ bucketName), 1000, 30000);
// Test create a directory twice will not increase usedNamespace twice
client.createDirectory(volumeName, bucketName, directoryName2);
- assertEquals(2L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 2L == getBucketUsedNamespace(volumeName,
+ bucketName), 1000, 30000);
if (layout == BucketLayout.LEGACY) {
handleLegacyBucketDelete(volumeName, bucketName, directoryName1,
directoryName2);
@@ -1755,7 +1770,7 @@ public void testBucketUsedNamespace(BucketLayout layout, boolean enablePaths) th
}
private void handleLegacyBucketDelete(String volumeName, String bucketName, String dir1, String dir2)
- throws IOException {
+ throws IOException, InterruptedException, TimeoutException {
String rootPath = String.format("%s://%s.%s/",
OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
cluster.getConf().set(FS_DEFAULT_NAME_KEY, rootPath);
FileSystem fs = FileSystem.get(cluster.getConf());
@@ -1764,17 +1779,21 @@ private void handleLegacyBucketDelete(String volumeName, String bucketName, Stri
org.apache.hadoop.fs.Path dir2Path = new
org.apache.hadoop.fs.Path(OZONE_URI_DELIMITER, dir2);
fs.delete(dir1Path, false);
- assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 1L == getBucketUsedNamespace(volumeName,
+ bucketName), 1000, 30000);
fs.delete(dir2Path, false);
- assertEquals(0L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 0L == getBucketUsedNamespace(volumeName,
+ bucketName), 1000, 30000);
}
private void handleNonLegacyBucketDelete(RpcClient client, String volumeName, String bucketName, String dir1,
- String dir2) throws IOException {
+ String dir2) throws IOException, InterruptedException, TimeoutException {
client.deleteKey(volumeName, bucketName,
OzoneFSUtils.addTrailingSlashIfNeeded(dir1), false);
- assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 1L == getBucketUsedNamespace(volumeName,
+ bucketName), 1000, 30000);
client.deleteKey(volumeName, bucketName,
OzoneFSUtils.addTrailingSlashIfNeeded(dir2), false);
- assertEquals(0L, getBucketUsedNamespace(volumeName, bucketName));
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 0L == getBucketUsedNamespace(volumeName,
+ bucketName), 1000, 30000);
}
@ParameterizedTest
@@ -1869,7 +1888,7 @@ public void testVolumeUsedNamespace() throws IOException {
}
@Test
- public void testBucketQuotaInNamespace() throws IOException {
+ public void testBucketQuotaInNamespace() throws IOException, InterruptedException, TimeoutException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String key1 = UUID.randomUUID().toString();
@@ -1907,8 +1926,8 @@ public void testBucketQuotaInNamespace() throws IOException {
store.getVolume(volumeName).getBucket(bucketName).getUsedNamespace());
bucket.deleteKeys(Arrays.asList(key1, key2));
- assertEquals(0L,
- store.getVolume(volumeName).getBucket(bucketName).getUsedNamespace());
+ GenericTestUtils.waitFor((CheckedSupplier<Boolean, IOException>) () -> 0L ==
+ store.getVolume(volumeName).getBucket(bucketName).getUsedNamespace(), 1000, 30000);
}
private void writeKey(OzoneBucket bucket, String keyName,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index 750a4f4eb01..773ea96c3c7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -90,6 +90,7 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.ozone.test.GenericTestUtils;
+import org.apache.ratis.util.function.CheckedSupplier;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -310,10 +311,12 @@ public void testPreallocateFileRecovery(long dataSize) throws Exception {
assertEquals(committedBytes + dataSize,
getCluster().getOzoneManager().getMetrics().getDataCommittedBytes());
// check used quota
- bucket = volume.getBucket(bucketName);
- assertEquals(1, bucket.getUsedNamespace());
- assertEquals(dataSize * ReplicationFactor.THREE.getValue(), bucket.getUsedBytes());
-
+ GenericTestUtils.waitFor(
+ (CheckedSupplier<Boolean, ? extends Exception>) () -> 1 == volume.getBucket(bucketName).getUsedNamespace(),
+ 1000, 30000);
+ GenericTestUtils.waitFor(
+ (CheckedSupplier<Boolean, ? extends Exception>) () -> dataSize * ReplicationFactor.THREE.getValue()
+ == volume.getBucket(bucketName).getUsedBytes(), 1000, 30000);
// check unused pre-allocated blocks are reclaimed
Table<String, RepeatedOmKeyInfo> deletedTable =
getCluster().getOzoneManager().getMetadataManager().getDeletedTable();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java
index f45052de441..270db635701 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java
@@ -76,7 +76,7 @@ public void getMetrics(MetricsCollector collector, boolean all) {
if (quotaInBytes == -1) {
availableSpace = quotaInBytes;
} else {
- availableSpace = Math.max(bucketInfo.getQuotaInBytes() - bucketInfo.getUsedBytes(), 0);
+ availableSpace = Math.max(bucketInfo.getQuotaInBytes() - bucketInfo.getTotalBucketSpace(), 0);
}
collector.addRecord(SOURCE)
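With getTotalBucketSpace() feeding the BucketAvailableBytes gauge, the reported available space now also shrinks by snapshot usage. A worked example with purely hypothetical numbers (quota 1000, AOS usage 300, snapshot usage 500):

    // availableSpace = max(1000 - (300 + 500), 0) = 200
    long availableSpace = Math.max(quotaInBytes - (usedBytes + snapshotUsedBytes), 0);
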
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
index 8ae403c3e8f..270b95d06da 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
@@ -295,7 +295,7 @@ public boolean checkQuotaBytesValid(OMMetadataManager metadataManager,
if (quotaInBytes > OzoneConsts.QUOTA_RESET) {
totalBucketQuota = quotaInBytes;
- if (quotaInBytes < dbBucketInfo.getUsedBytes()) {
+ if (quotaInBytes < dbBucketInfo.getTotalBucketSpace()) {
throw new OMException("Cannot update bucket quota. Requested " +
"spaceQuota less than used spaceQuota.",
OMException.ResultCodes.QUOTA_ERROR);
@@ -344,7 +344,7 @@ public boolean checkQuotaNamespaceValid(OmVolumeArgs omVolumeArgs,
}
if (quotaInNamespace != OzoneConsts.QUOTA_RESET
- && quotaInNamespace < dbBucketInfo.getUsedNamespace()) {
+ && quotaInNamespace < dbBucketInfo.getTotalBucketNamespace()) {
throw new OMException("Cannot update bucket quota. NamespaceQuota " +
"requested is less than used namespaceQuota.",
OMException.ResultCodes.QUOTA_ERROR);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index c267041cc16..2317a481591 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -91,7 +91,6 @@
import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.ozone.om.request.OMClientRequestUtils;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo;
@@ -839,7 +838,7 @@ protected void checkBucketQuotaInBytes(
OMMetadataManager metadataManager, OmBucketInfo omBucketInfo,
long allocateSize) throws IOException {
if (omBucketInfo.getQuotaInBytes() > OzoneConsts.QUOTA_RESET) {
- long usedBytes = omBucketInfo.getUsedBytes();
+ long usedBytes = omBucketInfo.getTotalBucketSpace();
long quotaInBytes = omBucketInfo.getQuotaInBytes();
if (quotaInBytes - usedBytes < allocateSize) {
throw new OMException("The DiskSpace quota of bucket:"
@@ -857,7 +856,7 @@ protected void checkBucketQuotaInBytes(
protected void checkBucketQuotaInNamespace(OmBucketInfo omBucketInfo,
long allocatedNamespace) throws IOException {
if (omBucketInfo.getQuotaInNamespace() > OzoneConsts.QUOTA_RESET) {
- long usedNamespace = omBucketInfo.getUsedNamespace();
+ long usedNamespace = omBucketInfo.getTotalBucketNamespace();
long quotaInNamespace = omBucketInfo.getQuotaInNamespace();
long toUseNamespaceInTotal = usedNamespace + allocatedNamespace;
if (quotaInNamespace < toUseNamespaceInTotal) {
@@ -890,7 +889,7 @@ protected boolean checkDirectoryAlreadyExists(String volumeName,
}
/**
- * @return the number of bytes used by blocks pointed to by {@code omKeyInfo}.
+ * @return the number of bytes (replicated size) used by blocks pointed to by {@code omKeyInfo}.
*/
public static long sumBlockLengths(OmKeyInfo omKeyInfo) {
long bytesUsed = 0;
@@ -904,22 +903,6 @@ public static long sumBlockLengths(OmKeyInfo omKeyInfo) {
return bytesUsed;
}
- /**
- * @return the number of bytes used by blocks pointed to by {@code omKeyInfo}.
- */
- public static long sumBlockLengths(OzoneManagerProtocolProtos.KeyInfo keyInfo) {
- long bytesUsed = 0;
- ReplicationConfig replicationConfig = ReplicationConfig.fromProto(keyInfo.getType(), keyInfo.getFactor(),
- keyInfo.getEcReplicationConfig());
- for (OzoneManagerProtocolProtos.KeyLocationList group: keyInfo.getKeyLocationListList()) {
- for (OzoneManagerProtocolProtos.KeyLocation locationInfo : group.getKeyLocationsList()) {
- bytesUsed += QuotaUtil.getReplicatedSize(locationInfo.getLength(), replicationConfig);
- }
- }
-
- return bytesUsed;
- }
-
/**
* Return bucket info for the specified bucket.
*/
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java
index f0e644b80a2..653df6dd920 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java
@@ -51,7 +51,7 @@ public class TestBucketUtilizationMetrics {
private static final long USED_BYTES_2 = 200;
private static final long SNAPSHOT_USED_BYTES_1 = 400;
private static final long SNAPSHOT_USED_BYTES_2 = 800;
- private static final long QUOTA_IN_BYTES_1 = 200;
+ private static final long QUOTA_IN_BYTES_1 = 600;
private static final long QUOTA_IN_BYTES_2 = QUOTA_RESET;
private static final long QUOTA_IN_NAMESPACE_1 = 1;
private static final long QUOTA_IN_NAMESPACE_2 = 2;
@@ -97,7 +97,7 @@ void testBucketUtilizationMetrics() {
verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaBytes,
QUOTA_IN_BYTES_1);
verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaNamespace,
QUOTA_IN_NAMESPACE_1);
verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketAvailableBytes,
- QUOTA_IN_BYTES_1 - USED_BYTES_1);
+ QUOTA_IN_BYTES_1 - USED_BYTES_1 - SNAPSHOT_USED_BYTES_1);
verify(mb, times(1)).tag(BucketMetricsInfo.VolumeName, VOLUME_NAME_2);
verify(mb, times(1)).tag(BucketMetricsInfo.BucketName, BUCKET_NAME_2);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java
index 424c89828ea..53f384b65ac 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java
@@ -243,7 +243,6 @@ public void testDeleteKeyWithHigherUpdateID(
OMRequest omRequest = doPreExecute(createDeleteOpenKeyRequest(allKeys));
OMOpenKeysDeleteRequest openKeyDeleteRequest =
new OMOpenKeysDeleteRequest(omRequest, getBucketLayout());
-
OMClientResponse omClientResponse =
openKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
transactionId);
@@ -323,7 +322,6 @@ private void deleteOpenKeysFromCache(List<Pair<Long, OmKeyInfo>> openKeys)
OMClientResponse omClientResponse =
openKeyDeleteRequest.validateAndUpdateCache(ozoneManager, 100L);
-
assertEquals(Status.OK,
omClientResponse.getOMResponse().getStatus());
for (OmKeyInfo openKey :
openKeys.stream().map(Pair::getRight).collect(Collectors.toList())) {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
index 9246df12c9c..8c51527b10d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
@@ -317,7 +317,6 @@ void checkDeletedTableCleanUpForSnapshot() throws Exception {
bucketInfo = writeClient.getBucketInfo(volumeName, bucketName1);
assertEquals(key1Size + key3Size, bucketInfo.getSnapshotUsedBytes());
assertEquals(2, bucketInfo.getSnapshotUsedNamespace());
- writeClient.getBucketInfo(volumeName, bucketName1);
keyDeletingService.resume();
// Run KeyDeletingService
GenericTestUtils.waitFor(
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]