This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 61cf1f7c719 HDDS-13178. Include block size in delete request and pass it to SCM. (#9246)
61cf1f7c719 is described below
commit 61cf1f7c7194812888333ab0a6e61c9f1d770c2a
Author: Priyesh Karatha <[email protected]>
AuthorDate: Thu Nov 6 11:53:33 2025 +0530
HDDS-13178. Include block size in delete request and pass it to SCM. (#9246)
---
.../org/apache/hadoop/ozone/common/BlockGroup.java | 49 +++++++++++-------
.../apache/hadoop/ozone/common/DeletedBlock.java | 58 ++++++++++++++++++++++
.../src/main/proto/ScmServerProtocol.proto | 2 +
.../hadoop/hdds/scm/block/BlockManagerImpl.java | 6 ++-
.../hdds/scm/server/SCMBlockProtocolServer.java | 8 +--
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 10 ++--
.../hadoop/ozone/om/OmMetadataManagerImpl.java | 8 +--
.../ozone/om/ScmBlockLocationTestingClient.java | 51 ++++++++++---------
.../ozone/om/service/TestKeyDeletingService.java | 6 ++-
9 files changed, 142 insertions(+), 56 deletions(-)
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
index 5e8b0e1724b..4ef34193aa2 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
@@ -20,7 +20,6 @@
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks;
/**
@@ -29,15 +28,16 @@
public final class BlockGroup {
private String groupID;
- private List<BlockID> blockIDs;
+ private List<DeletedBlock> deletedBlocks;
+ public static final long SIZE_NOT_AVAILABLE = -1;
- private BlockGroup(String groupID, List<BlockID> blockIDs) {
+ private BlockGroup(String groupID, List<DeletedBlock> deletedBlocks) {
this.groupID = groupID;
- this.blockIDs = blockIDs;
+ this.deletedBlocks = deletedBlocks;
}
- public List<BlockID> getBlockIDList() {
- return blockIDs;
+ public List<DeletedBlock> getDeletedBlocks() {
+ return deletedBlocks;
}
public String getGroupID() {
@@ -46,8 +46,10 @@ public String getGroupID() {
public KeyBlocks getProto() {
KeyBlocks.Builder kbb = KeyBlocks.newBuilder();
- for (BlockID block : blockIDs) {
- kbb.addBlocks(block.getProtobuf());
+ for (DeletedBlock deletedBlock : deletedBlocks) {
+ kbb.addBlocks(deletedBlock.getBlockID().getProtobuf());
+ kbb.addSize(deletedBlock.getSize());
+ kbb.addReplicatedSize(deletedBlock.getReplicatedSize());
}
return kbb.setKey(groupID).build();
}
@@ -58,13 +60,23 @@ public KeyBlocks getProto() {
* @return a group of blocks.
*/
public static BlockGroup getFromProto(KeyBlocks proto) {
- List<BlockID> blockIDs = new ArrayList<>();
- for (HddsProtos.BlockID block : proto.getBlocksList()) {
- blockIDs.add(new BlockID(block.getContainerBlockID().getContainerID(),
- block.getContainerBlockID().getLocalID()));
+ List<DeletedBlock> deletedBlocksList = new ArrayList<>();
+ for (int i = 0; i < proto.getBlocksCount(); i++) {
+ long repSize = SIZE_NOT_AVAILABLE;
+ long size = SIZE_NOT_AVAILABLE;
+ if (proto.getSizeCount() > i) {
+ size = proto.getSize(i);
+ }
+ if (proto.getReplicatedSizeCount() > i) {
+ repSize = proto.getReplicatedSize(i);
+ }
+      BlockID block = new BlockID(proto.getBlocks(i).getContainerBlockID().getContainerID(),
+          proto.getBlocks(i).getContainerBlockID().getLocalID());
+ deletedBlocksList.add(new DeletedBlock(block, size, repSize));
}
return BlockGroup.newBuilder().setKeyName(proto.getKey())
- .addAllBlockIDs(blockIDs).build();
+ .addAllDeletedBlocks(deletedBlocksList)
+ .build();
}
public static Builder newBuilder() {
@@ -75,7 +87,7 @@ public static Builder newBuilder() {
public String toString() {
return "BlockGroup[" +
"groupID='" + groupID + '\'' +
- ", blockIDs=" + blockIDs +
+ ", deletedBlocks=" + deletedBlocks +
']';
}
@@ -85,21 +97,20 @@ public String toString() {
public static class Builder {
private String groupID;
- private List<BlockID> blockIDs;
+ private List<DeletedBlock> deletedBlocks;
public Builder setKeyName(String blockGroupID) {
this.groupID = blockGroupID;
return this;
}
- public Builder addAllBlockIDs(List<BlockID> keyBlocks) {
- this.blockIDs = keyBlocks;
+ public Builder addAllDeletedBlocks(List<DeletedBlock> deletedBlockList) {
+ this.deletedBlocks = deletedBlockList;
return this;
}
public BlockGroup build() {
- return new BlockGroup(groupID, blockIDs);
+ return new BlockGroup(groupID, deletedBlocks);
}
}
-
}
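[Illustration, not part of the patch] A minimal sketch of how the reworked
BlockGroup API above could be exercised, assuming the classes from this commit
are on the classpath. The getProto()/getFromProto() round trip is expected to
carry the per-block size and replicatedSize along with the BlockID:

    // Sketch only; not included in this commit.
    import java.util.Collections;
    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks;
    import org.apache.hadoop.ozone.common.BlockGroup;
    import org.apache.hadoop.ozone.common.DeletedBlock;

    public final class BlockGroupRoundTripSketch {
      public static void main(String[] args) {
        // Block 42 in container 7: 1 MB of data, 3 MB once replicated 3x.
        DeletedBlock block = new DeletedBlock(new BlockID(7L, 42L), 1L << 20, 3L << 20);
        BlockGroup group = BlockGroup.newBuilder()
            .setKeyName("/vol/bucket/key/1")
            .addAllDeletedBlocks(Collections.singletonList(block))
            .build();

        // Serialize as the OM side would, then parse as the SCM side would.
        KeyBlocks proto = group.getProto();
        DeletedBlock parsed = BlockGroup.getFromProto(proto).getDeletedBlocks().get(0);
        System.out.println(parsed.getSize());            // 1048576
        System.out.println(parsed.getReplicatedSize());  // 3145728
      }
    }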
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/DeletedBlock.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/DeletedBlock.java
new file mode 100644
index 00000000000..b611541578e
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/DeletedBlock.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.common;
+
+import org.apache.hadoop.hdds.client.BlockID;
+
+/**
+ * DeletedBlock of Ozone (BlockID + usedBytes).
+ */
+public class DeletedBlock {
+
+ private BlockID blockID;
+ private long size;
+ private long replicatedSize;
+
+ public DeletedBlock(BlockID blockID, long size, long replicatedSize) {
+ this.blockID = blockID;
+ this.size = size;
+ this.replicatedSize = replicatedSize;
+ }
+
+ public BlockID getBlockID() {
+ return this.blockID;
+ }
+
+ public long getSize() {
+ return this.size;
+ }
+
+ public long getReplicatedSize() {
+ return this.replicatedSize;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder(64);
+ sb.append(" localID: ").append(blockID.getContainerBlockID().getLocalID());
+ sb.append(" containerID:
").append(blockID.getContainerBlockID().getContainerID());
+ sb.append(" size: ").append(size);
+ sb.append(" replicatedSize: ").append(replicatedSize);
+ return sb.toString();
+ }
+}
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
index fc24d2562f9..4c794fe7dc1 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
@@ -181,6 +181,8 @@ message DeleteScmKeyBlocksRequestProto {
message KeyBlocks {
required string key = 1;
repeated BlockID blocks = 2;
+ repeated uint64 size = 3;
+ repeated uint64 replicatedSize = 4;
}
/**
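[Illustration, not part of the patch] Because size and replicatedSize are plain
repeated fields running parallel to blocks, a KeyBlocks message from an older
sender that sets no size entries should still parse. A hedged sketch of that
fallback, relying on the getSizeCount()/getReplicatedSizeCount() guards added
to BlockGroup.getFromProto() above:

    // Sketch only; not included in this commit.
    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks;
    import org.apache.hadoop.ozone.common.BlockGroup;
    import org.apache.hadoop.ozone.common.DeletedBlock;

    public final class KeyBlocksCompatSketch {
      public static void main(String[] args) {
        // Simulate an older sender: blocks only, no size/replicatedSize entries.
        KeyBlocks legacy = KeyBlocks.newBuilder()
            .setKey("/vol/bucket/legacyKey/0")
            .addBlocks(new BlockID(7L, 42L).getProtobuf())
            .build();

        DeletedBlock parsed = BlockGroup.getFromProto(legacy).getDeletedBlocks().get(0);
        // Missing entries fall back to the sentinel instead of failing the parse.
        System.out.println(parsed.getSize() == BlockGroup.SIZE_NOT_AVAILABLE);            // true
        System.out.println(parsed.getReplicatedSize() == BlockGroup.SIZE_NOT_AVAILABLE);  // true
      }
    }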
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 9b46968424c..3bf60b06e7c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -46,6 +46,7 @@
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.common.DeletedBlock;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -225,9 +226,10 @@ public void deleteBlocks(List<BlockGroup> keyBlocksInfoList)
for (BlockGroup bg : keyBlocksInfoList) {
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting blocks {}",
- StringUtils.join(",", bg.getBlockIDList()));
+ StringUtils.join(",", bg.getDeletedBlocks()));
}
- for (BlockID block : bg.getBlockIDList()) {
+ for (DeletedBlock deletedBlock : bg.getDeletedBlocks()) {
+ BlockID block = deletedBlock.getBlockID();
long containerID = block.getContainerID();
if (containerBlocks.containsKey(containerID)) {
containerBlocks.get(containerID).add(block.getLocalID());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index 60c6384ba82..22674042d20 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -45,7 +45,6 @@
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -80,6 +79,7 @@
import org.apache.hadoop.ozone.audit.SCMAction;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+import org.apache.hadoop.ozone.common.DeletedBlock;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -268,7 +268,7 @@ public List<DeleteBlockGroupResult> deleteKeyBlocks(
List<BlockGroup> keyBlocksInfoList) throws IOException {
long totalBlocks = 0;
for (BlockGroup bg : keyBlocksInfoList) {
- totalBlocks += bg.getBlockIDList().size();
+ totalBlocks += bg.getDeletedBlocks().size();
}
if (LOG.isDebugEnabled()) {
LOG.debug("SCM is informed by OM to delete {} keys. Total blocks to
deleted {}.",
@@ -312,8 +312,8 @@ public List<DeleteBlockGroupResult> deleteKeyBlocks(
}
for (BlockGroup bg : keyBlocksInfoList) {
List<DeleteBlockResult> blockResult = new ArrayList<>();
- for (BlockID b : bg.getBlockIDList()) {
- blockResult.add(new DeleteBlockResult(b, resultCode));
+ for (DeletedBlock b : bg.getDeletedBlocks()) {
+ blockResult.add(new DeleteBlockResult(b.getBlockID(), resultCode));
}
results.add(new DeleteBlockGroupResult(bg.getGroupID(), blockResult));
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index e458fa73236..cf6694480c0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -142,6 +142,7 @@
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.common.DeletedBlock;
import org.apache.hadoop.ozone.om.PendingKeysDeletion.PurgedKey;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
@@ -832,12 +833,15 @@ public PendingKeysDeletion getPendingDeletionKeys(
// Skip the key if the filter doesn't allow the file to be deleted.
          if (filter == null || filter.apply(Table.newKeyValue(kv.getKey(), info))) {
-          List<BlockID> blockIDS = info.getKeyLocationVersions().stream()
+          List<DeletedBlock> deletedBlocks = info.getKeyLocationVersions().stream()
              .flatMap(versionLocations -> versionLocations.getLocationList().stream()
-              .map(b -> new BlockID(b.getContainerID(), b.getLocalID()))).collect(Collectors.toList());
+              .map(b -> new DeletedBlock(new BlockID(b.getContainerID(),
+                  b.getLocalID()), info.getDataSize(), info.getReplicatedSize()))).collect(Collectors.toList());
          String blockGroupName = kv.getKey() + "/" + reclaimableKeyCount++;
+
          BlockGroup keyBlocks = BlockGroup.newBuilder().setKeyName(blockGroupName)
- .addAllBlockIDs(blockIDS).build();
+ .addAllDeletedBlocks(deletedBlocks)
+ .build();
reclaimableKeys.put(blockGroupName,
              new PurgedKey(info.getVolumeName(), info.getBucketName(), bucketId,
keyBlocks, kv.getKey(), OMKeyRequest.sumBlockLengths(info),
info.isDeletedKeyCommitted()));
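[Illustration, not part of the patch] The KeyManagerImpl change above amounts to
tagging every block of a reclaimable key with that key's dataSize and
replicatedSize (the key-level totals, not a per-block length). A hedged
restatement of that mapping as a standalone helper; the helper name is
hypothetical, only the OmKeyInfo getters come from the code above:

    // Sketch only; not included in this commit. Helper name is hypothetical.
    import java.util.List;
    import java.util.stream.Collectors;
    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.ozone.common.DeletedBlock;
    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

    final class DeletedBlockMappingSketch {
      // Mirrors the stream pipeline in getPendingDeletionKeys(): every block of
      // the key is paired with the key's dataSize and replicatedSize.
      static List<DeletedBlock> toDeletedBlocks(OmKeyInfo info) {
        return info.getKeyLocationVersions().stream()
            .flatMap(version -> version.getLocationList().stream())
            .map(loc -> new DeletedBlock(
                new BlockID(loc.getContainerID(), loc.getLocalID()),
                info.getDataSize(), info.getReplicatedSize()))
            .collect(Collectors.toList());
      }
    }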
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index c7b071a6e8d..4ca647e4ea6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -80,6 +80,7 @@
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.common.DeletedBlock;
import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
@@ -1814,12 +1815,13 @@ public List<BlockGroup> getBlocksForKeyDelete(String deletedKey)
for (OmKeyInfo info : omKeyInfo.cloneOmKeyInfoList()) {
for (OmKeyLocationInfoGroup keyLocations :
info.getKeyLocationVersions()) {
- List<BlockID> item = keyLocations.getLocationList().stream()
- .map(b -> new BlockID(b.getContainerID(), b.getLocalID()))
+ List<DeletedBlock> item = keyLocations.getLocationList().stream()
+ .map(b -> new DeletedBlock(
+                new BlockID(b.getContainerID(), b.getLocalID()), info.getDataSize(), info.getReplicatedSize()))
.collect(Collectors.toList());
BlockGroup keyBlocks = BlockGroup.newBuilder()
.setKeyName(deletedKey)
- .addAllBlockIDs(item)
+ .addAllDeletedBlocks(item)
.build();
result.add(keyBlocks);
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
index 5edd683a43d..823a6405257 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
@@ -47,6 +47,7 @@
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+import org.apache.hadoop.ozone.common.DeletedBlock;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -148,35 +149,39 @@ private Pipeline createPipeline(DatanodeDetails datanode) {
public List<DeleteBlockGroupResult> deleteKeyBlocks(
List<BlockGroup> keyBlocksInfoList) throws IOException {
List<DeleteBlockGroupResult> results = new ArrayList<>();
- List<DeleteBlockResult> blockResultList = new ArrayList<>();
- Result result;
for (BlockGroup keyBlocks : keyBlocksInfoList) {
- for (BlockID blockKey : keyBlocks.getBlockIDList()) {
- currentCall++;
- switch (this.failCallsFrequency) {
- case 0:
- result = success;
- numBlocksDeleted++;
- break;
- case 1:
- result = unknownFailure;
- break;
- default:
- if (currentCall % this.failCallsFrequency == 0) {
- result = unknownFailure;
- } else {
- result = success;
- numBlocksDeleted++;
- }
- }
- blockResultList.add(new DeleteBlockResult(blockKey, result));
+ List<DeleteBlockResult> blockResultList = new ArrayList<>();
+ // Process BlockIDs directly if present
+ for (DeletedBlock deletedBlock : keyBlocks.getDeletedBlocks()) {
+ blockResultList.add(processBlock(deletedBlock.getBlockID()));
}
- results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(),
- blockResultList));
+      results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(), blockResultList));
}
return results;
}
+ private DeleteBlockResult processBlock(BlockID blockID) {
+ currentCall++;
+ Result result;
+ switch (failCallsFrequency) {
+ case 0:
+ result = success;
+ numBlocksDeleted++;
+ break;
+ case 1:
+ result = unknownFailure;
+ break;
+ default:
+ if (currentCall % failCallsFrequency == 0) {
+ result = unknownFailure;
+ } else {
+ result = success;
+ numBlocksDeleted++;
+ }
+ }
+ return new DeleteBlockResult(blockID, result);
+ }
+
@Override
public ScmInfo getScmInfo() throws IOException {
ScmInfo.Builder builder =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
index 8c51527b10d..05cfca2fe55 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
@@ -75,6 +75,7 @@
import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.common.DeletedBlock;
import org.apache.hadoop.ozone.om.DeletingServiceMetrics;
import org.apache.hadoop.ozone.om.KeyManager;
import org.apache.hadoop.ozone.om.KeyManagerImpl;
@@ -826,7 +827,8 @@ public void testFailingModifiedKeyPurge() throws IOException, InterruptedExcepti
.setStatus(OzoneManagerProtocolProtos.Status.TIMEOUT).build();
});
BlockGroup blockGroup = BlockGroup.newBuilder().setKeyName("key1/1")
-        .addAllBlockIDs(Collections.singletonList(new BlockID(1, 1))).build();
+ .addAllDeletedBlocks(Collections.singletonList(new DeletedBlock(
+ new BlockID(1, 1), 1, 3))).build();
    Map<String, PurgedKey> blockGroups = Collections.singletonMap(blockGroup.getGroupID(), new PurgedKey("vol",
"buck", 1, blockGroup, "key1", 30, true));
    List<String> renameEntriesToBeDeleted = Collections.singletonList("key2");
@@ -1393,7 +1395,7 @@ private long countBlocksPendingDeletion() {
.getPurgedKeys().values()
.stream()
.map(PurgedKey::getBlockGroup)
- .map(BlockGroup::getBlockIDList)
+ .map(BlockGroup::getDeletedBlocks)
.mapToLong(Collection::size)
.sum();
} catch (IOException e) {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]