This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new b32b54b96f9 HDDS-13305. Create wrapper object for container checksums
(#8789)
b32b54b96f9 is described below
commit b32b54b96f9f079704c86f3cad17d4487344db45
Author: Eric C. Ho <[email protected]>
AuthorDate: Fri Dec 19 19:24:51 2025 +0800
HDDS-13305. Create wrapper object for container checksums (#8789)
---
.../hdds/scm/container/ContainerChecksums.java | 87 ++++++++++++++++++++++
.../hdds/scm/container/TestContainerChecksums.java | 55 ++++++++++++++
.../container/AbstractContainerReportHandler.java | 2 +-
.../hdds/scm/container/ContainerReplica.java | 24 ++++--
...tBackgroundContainerDataScannerIntegration.java | 18 +++--
.../ozone/recon/fsck/ContainerHealthStatus.java | 4 +-
.../ozone/recon/fsck/ContainerHealthTask.java | 8 +-
.../ozone/recon/scm/ContainerReplicaHistory.java | 23 ++++--
.../ozone/recon/scm/ReconContainerManager.java | 19 ++---
.../ozone/recon/api/TestContainerEndpoint.java | 19 ++---
.../recon/fsck/TestContainerHealthStatus.java | 9 ++-
.../ozone/recon/fsck/TestContainerHealthTask.java | 5 +-
.../TestContainerHealthTaskRecordGenerator.java | 5 +-
.../ozone/recon/scm/TestReconContainerManager.java | 7 +-
14 files changed, 226 insertions(+), 59 deletions(-)
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerChecksums.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerChecksums.java
new file mode 100644
index 00000000000..ef089a87d17
--- /dev/null
+++
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerChecksums.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container;
+
+import java.util.Objects;
+import net.jcip.annotations.Immutable;
+
+/**
+ * Wrapper for container checksums (data, metadata, etc.).
+ * Provides equality, hash, and hex string rendering.
+ * A value of 0 indicates an unknown or unset checksum.
+ */
+@Immutable
+public final class ContainerChecksums {
+ // Checksum of the container's data; 0 indicates unknown or unset.
+ private final long dataChecksum;
+
+ // Checksum of the container's metadata; 0 indicates unknown or unset.
+ private final long metadataChecksum;
+
+ private static final ContainerChecksums UNKNOWN =
+ new ContainerChecksums(0L, 0L);
+
+ private ContainerChecksums(long dataChecksum, long metadataChecksum) {
+ this.dataChecksum = dataChecksum;
+ this.metadataChecksum = metadataChecksum;
+ }
+
+ public static ContainerChecksums unknown() {
+ return UNKNOWN;
+ }
+
+ public static ContainerChecksums of(long dataChecksum) {
+ return new ContainerChecksums(dataChecksum, 0L);
+ }
+
+ public static ContainerChecksums of(long dataChecksum, long
metadataChecksum) {
+ return new ContainerChecksums(dataChecksum, metadataChecksum);
+ }
+
+ public long getDataChecksum() {
+ return dataChecksum;
+ }
+
+ public long getMetadataChecksum() {
+ return metadataChecksum;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof ContainerChecksums)) {
+ return false;
+ }
+ ContainerChecksums that = (ContainerChecksums) obj;
+ return dataChecksum == that.dataChecksum &&
+ metadataChecksum == that.metadataChecksum;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(dataChecksum, metadataChecksum);
+ }
+
+ @Override
+ public String toString() {
+ return "data=" + Long.toHexString(getDataChecksum()) +
+ ", metadata=" + Long.toHexString(getMetadataChecksum());
+ }
+}
diff --git
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerChecksums.java
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerChecksums.java
new file mode 100644
index 00000000000..6e9d34c5562
--- /dev/null
+++
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerChecksums.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+
+import org.junit.jupiter.api.Test;
+
+class TestContainerChecksums {
+ @Test
+ void testEqualsAndHashCode() {
+ ContainerChecksums c1 = ContainerChecksums.of(123L, 0L);
+ ContainerChecksums c2 = ContainerChecksums.of(123L, 0L);
+ ContainerChecksums c3 = ContainerChecksums.of(456L, 0L);
+ ContainerChecksums c4 = ContainerChecksums.of(123L, 789L);
+ ContainerChecksums c5 = ContainerChecksums.of(123L, 789L);
+ ContainerChecksums c6 = ContainerChecksums.of(123L, 790L);
+
+ assertEquals(c1, c2);
+ assertEquals(c1.hashCode(), c2.hashCode());
+ assertNotEquals(c1, c3);
+ assertNotEquals(c1, c4);
+ assertEquals(c4, c5);
+ assertNotEquals(c4, c6);
+ }
+
+ @Test
+ void testToString() {
+ ContainerChecksums c1 = ContainerChecksums.of(0x1234ABCDL, 0L);
+ assertThat(c1.toString()).contains("data=1234abcd", "metadata=0");
+
+ ContainerChecksums c2 = ContainerChecksums.of(0x1234ABCDL, 0xDEADBEEFL);
+
assertThat(c2.toString()).contains("data=1234abcd").contains("metadata=deadbeef");
+
+ ContainerChecksums c3 = ContainerChecksums.unknown();
+ assertThat(c3.toString()).contains("data=0").contains("metadata=0");
+ }
+}
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
index 7f2030f6e74..35908afff87 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
@@ -362,7 +362,7 @@ private void updateContainerReplica(final DatanodeDetails
datanodeDetails,
.setReplicaIndex(replicaProto.getReplicaIndex())
.setBytesUsed(replicaProto.getUsed())
.setEmpty(replicaProto.getIsEmpty())
- .setDataChecksum(replicaProto.getDataChecksum())
+ .setChecksums(ContainerChecksums.of(replicaProto.getDataChecksum()))
.build();
if (replica.getState().equals(State.DELETED)) {
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
index 6cba1e8f1c3..5c9bd57cd88 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
@@ -45,7 +45,7 @@ public final class ContainerReplica implements
Comparable<ContainerReplica> {
private final long keyCount;
private final long bytesUsed;
private final boolean isEmpty;
- private final long dataChecksum;
+ private final ContainerChecksums checksums;
private ContainerReplica(ContainerReplicaBuilder b) {
this.containerID = Objects.requireNonNull(b.containerID, "containerID ==
null");
@@ -57,7 +57,7 @@ private ContainerReplica(ContainerReplicaBuilder b) {
this.replicaIndex = b.replicaIndex;
this.isEmpty = b.isEmpty;
this.sequenceId = b.sequenceId;
- this.dataChecksum = b.dataChecksum;
+ this.checksums = Objects.requireNonNull(b.checksums, "checksums == null");
}
public ContainerID getContainerID() {
@@ -122,8 +122,12 @@ public boolean isEmpty() {
return isEmpty;
}
+ public ContainerChecksums getChecksums() {
+ return checksums;
+ }
+
public long getDataChecksum() {
- return dataChecksum;
+ return checksums.getDataChecksum();
}
@Override
@@ -180,7 +184,8 @@ public ContainerReplicaBuilder toBuilder() {
.setOriginNodeId(originDatanodeId)
.setReplicaIndex(replicaIndex)
.setSequenceId(sequenceId)
- .setEmpty(isEmpty);
+ .setEmpty(isEmpty)
+ .setChecksums(checksums);
}
@Override
@@ -194,7 +199,7 @@ public String toString() {
+ ", keyCount=" + keyCount
+ ", bytesUsed=" + bytesUsed
+ ", " + (isEmpty ? "empty" : "non-empty")
- + ", dataChecksum=" + dataChecksum
+ + ", checksums=" + checksums
+ '}';
}
@@ -212,7 +217,7 @@ public static class ContainerReplicaBuilder {
private long keyCount;
private int replicaIndex;
private boolean isEmpty;
- private long dataChecksum;
+ private ContainerChecksums checksums;
/**
* Set Container Id.
@@ -287,8 +292,8 @@ public ContainerReplicaBuilder setEmpty(boolean empty) {
return this;
}
- public ContainerReplicaBuilder setDataChecksum(long dataChecksum) {
- this.dataChecksum = dataChecksum;
+ public ContainerReplicaBuilder setChecksums(ContainerChecksums checksums) {
+ this.checksums = checksums;
return this;
}
@@ -298,6 +303,9 @@ public ContainerReplicaBuilder setDataChecksum(long
dataChecksum) {
* @return ContainerReplicaBuilder
*/
public ContainerReplica build() {
+ if (this.checksums == null) {
+ this.checksums = ContainerChecksums.unknown();
+ }
return new ContainerReplica(this);
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java
index 87c5a719cd7..5e53eec00d3 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java
@@ -30,6 +30,7 @@
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
+import org.apache.hadoop.hdds.scm.container.ContainerChecksums;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.TestContainerCorruptions;
@@ -84,8 +85,8 @@ void testCorruptionDetected(TestContainerCorruptions
corruption)
assertNotEquals(0, container.getContainerData().getDataChecksum());
waitForScmToSeeReplicaState(containerID, CLOSED);
- long initialReportedDataChecksum =
getContainerReplica(containerID).getDataChecksum();
- assertNotEquals(0, initialReportedDataChecksum);
+ ContainerChecksums initialReportedChecksum =
getContainerReplica(containerID).getChecksums();
+ assertNotEquals(ContainerChecksums.unknown(), initialReportedChecksum);
corruption.applyTo(container);
resumeScanner();
@@ -97,16 +98,16 @@ void testCorruptionDetected(TestContainerCorruptions
corruption)
// Wait for SCM to get a report of the unhealthy replica with a different
checksum than before.
waitForScmToSeeReplicaState(containerID, UNHEALTHY);
- long newReportedDataChecksum =
getContainerReplica(containerID).getDataChecksum();
+ ContainerChecksums newReportedChecksum =
getContainerReplica(containerID).getChecksums();
if (corruption == TestContainerCorruptions.MISSING_METADATA_DIR ||
corruption == TestContainerCorruptions.MISSING_CONTAINER_DIR) {
// In these cases, the new tree will not be able to be written since it
exists in the metadata directory.
// When the tree write fails, the in-memory checksum should remain at
its original value.
- assertEquals(checksumToString(initialReportedDataChecksum),
checksumToString(newReportedDataChecksum));
+ assertEquals(initialReportedChecksum, newReportedChecksum);
} else {
- assertNotEquals(checksumToString(initialReportedDataChecksum),
checksumToString(newReportedDataChecksum));
+ assertNotEquals(initialReportedChecksum, newReportedChecksum);
// Test that the scanner wrote updated checksum info to the disk.
- assertReplicaChecksumMatches(container, newReportedDataChecksum);
+ assertReplicaChecksumMatches(container, newReportedChecksum);
assertFalse(container.getContainerData().needsDataChecksum());
KeyValueContainerData containerData = (KeyValueContainerData)
container.getContainerData();
verifyAllDataChecksumsMatch(containerData, getConf());
@@ -122,10 +123,11 @@ void testCorruptionDetected(TestContainerCorruptions
corruption)
}
}
- private void assertReplicaChecksumMatches(Container<?> container, long
expectedChecksum) throws Exception {
+ private void assertReplicaChecksumMatches(
+ Container<?> container, ContainerChecksums expectedChecksum) throws
Exception {
assertTrue(containerChecksumFileExists(container.getContainerData().getContainerID()));
long dataChecksumFromFile = readChecksumFile(container.getContainerData())
.getContainerMerkleTree().getDataChecksum();
- assertEquals(checksumToString(expectedChecksum),
checksumToString(dataChecksumFromFile));
+ assertEquals(checksumToString(expectedChecksum.getDataChecksum()),
checksumToString(dataChecksumFromFile));
}
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java
index 4c28806dfa8..7a69c403050 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java
@@ -160,9 +160,9 @@ public boolean isEmpty() {
return numKeys == 0;
}
- public boolean isDataChecksumMismatched() {
+ public boolean areChecksumsMismatched() {
return !replicas.isEmpty() && replicas.stream()
- .map(ContainerReplica::getDataChecksum)
+ .map(ContainerReplica::getChecksums)
.distinct()
.count() != 1;
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
index 56fc47132b2..a6b6f3a8c30 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
@@ -385,7 +385,7 @@ private void processContainer(ContainerInfo container, long
currentTime,
containerReplicas, placementPolicy,
reconContainerMetadataManager, conf);
- if ((h.isHealthilyReplicated() && !h.isDataChecksumMismatched()) ||
h.isDeleted()) {
+ if ((h.isHealthilyReplicated() && !h.areChecksumsMismatched()) ||
h.isDeleted()) {
return;
}
// For containers deleted in SCM, we sync the container state here.
@@ -563,7 +563,7 @@ public static List<UnhealthyContainers>
generateUnhealthyRecords(
Map<UnHealthyContainerStates, Map<String, Long>>
unhealthyContainerStateStatsMap) {
List<UnhealthyContainers> records = new ArrayList<>();
- if ((container.isHealthilyReplicated() &&
!container.isDataChecksumMismatched()) || container.isDeleted()) {
+ if ((container.isHealthilyReplicated() &&
!container.areChecksumsMismatched()) || container.isDeleted()) {
return records;
}
@@ -610,7 +610,7 @@ public static List<UnhealthyContainers>
generateUnhealthyRecords(
populateContainerStats(container,
UnHealthyContainerStates.OVER_REPLICATED, unhealthyContainerStateStatsMap);
}
- if (container.isDataChecksumMismatched()
+ if (container.areChecksumsMismatched()
&& !recordForStateExists.contains(
UnHealthyContainerStates.REPLICA_MISMATCH.toString())) {
records.add(recordForState(
@@ -686,7 +686,7 @@ private static boolean keepMisReplicatedRecord(
private static boolean keepReplicaMismatchRecord(
ContainerHealthStatus container, UnhealthyContainersRecord rec) {
- if (container.isDataChecksumMismatched()) {
+ if (container.areChecksumsMismatched()) {
updateExpectedReplicaCount(rec, container.getReplicationFactor());
updateActualReplicaCount(rec, container.getReplicaCount());
updateReplicaDelta(rec, container.replicaDelta());
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistory.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistory.java
index d47c7010a2a..971bc2d2725 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistory.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistory.java
@@ -19,6 +19,7 @@
import java.util.UUID;
import
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerReplicaHistoryProto;
+import org.apache.hadoop.hdds.scm.container.ContainerChecksums;
/**
* A ContainerReplica timestamp class that tracks first and last seen time.
@@ -39,16 +40,16 @@ public class ContainerReplicaHistory {
private long bcsId;
private String state;
- private long dataChecksum;
+ private ContainerChecksums checksums;
public ContainerReplicaHistory(UUID id, Long firstSeenTime,
- Long lastSeenTime, long bcsId, String state, long dataChecksum) {
+ Long lastSeenTime, long bcsId, String state, ContainerChecksums
checksums) {
this.uuid = id;
this.firstSeenTime = firstSeenTime;
this.lastSeenTime = lastSeenTime;
this.bcsId = bcsId;
this.state = state;
- this.dataChecksum = dataChecksum;
+ setChecksums(checksums);
}
public long getBcsId() {
@@ -84,23 +85,29 @@ public void setState(String state) {
}
public long getDataChecksum() {
- return dataChecksum;
+ return getChecksums().getDataChecksum();
}
- public void setDataChecksum(long dataChecksum) {
- this.dataChecksum = dataChecksum;
+ public ContainerChecksums getChecksums() {
+ return checksums;
+ }
+
+ public void setChecksums(ContainerChecksums checksums) {
+ this.checksums = checksums != null ? checksums :
ContainerChecksums.unknown();
}
public static ContainerReplicaHistory fromProto(
ContainerReplicaHistoryProto proto) {
return new ContainerReplicaHistory(UUID.fromString(proto.getUuid()),
proto.getFirstSeenTime(), proto.getLastSeenTime(), proto.getBcsId(),
- proto.getState(), proto.getDataChecksum());
+ proto.getState(), ContainerChecksums.of(proto.getDataChecksum()));
}
public ContainerReplicaHistoryProto toProto() {
return ContainerReplicaHistoryProto.newBuilder().setUuid(uuid.toString())
.setFirstSeenTime(firstSeenTime).setLastSeenTime(lastSeenTime)
- .setBcsId(bcsId).setState(state).setDataChecksum(dataChecksum).build();
+ .setBcsId(bcsId).setState(state)
+ .setDataChecksum(checksums.getDataChecksum())
+ .build();
}
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
index 59436cb72b2..586aad5fd68 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
+import org.apache.hadoop.hdds.scm.container.ContainerChecksums;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl;
@@ -278,7 +279,7 @@ public void updateContainerReplica(ContainerID containerID,
boolean flushToDB = false;
long bcsId = replica.getSequenceId() != null ? replica.getSequenceId() :
-1;
String state = replica.getState().toString();
- long dataChecksum = replica.getDataChecksum();
+ ContainerChecksums checksums = replica.getChecksums();
// If replica doesn't exist in in-memory map, add to DB and add to map
if (replicaLastSeenMap == null) {
@@ -286,7 +287,7 @@ public void updateContainerReplica(ContainerID containerID,
replicaHistoryMap.putIfAbsent(id,
new ConcurrentHashMap<UUID, ContainerReplicaHistory>() {{
put(uuid, new ContainerReplicaHistory(uuid, currTime, currTime,
- bcsId, state, dataChecksum));
+ bcsId, state, checksums));
}});
flushToDB = true;
} else {
@@ -296,18 +297,19 @@ public void updateContainerReplica(ContainerID
containerID,
// New Datanode
replicaLastSeenMap.put(uuid,
new ContainerReplicaHistory(uuid, currTime, currTime, bcsId,
- state, dataChecksum));
+ state, checksums));
flushToDB = true;
} else {
// if the object exists, only update the last seen time & bcsId fields
ts.setLastSeenTime(currTime);
ts.setBcsId(bcsId);
ts.setState(state);
+ ts.setChecksums(checksums);
}
}
if (flushToDB) {
- upsertContainerHistory(id, uuid, currTime, bcsId, state, dataChecksum);
+ upsertContainerHistory(id, uuid, currTime, bcsId, state, checksums);
}
}
@@ -324,7 +326,6 @@ public void removeContainerReplica(ContainerID containerID,
final DatanodeDetails dnInfo = replica.getDatanodeDetails();
final UUID uuid = dnInfo.getUuid();
String state = replica.getState().toString();
- long dataChecksum = replica.getDataChecksum();
final Map<UUID, ContainerReplicaHistory> replicaLastSeenMap =
replicaHistoryMap.get(id);
@@ -333,7 +334,7 @@ public void removeContainerReplica(ContainerID containerID,
if (ts != null) {
// Flush to DB, then remove from in-memory map
upsertContainerHistory(id, uuid, ts.getLastSeenTime(), ts.getBcsId(),
- state, dataChecksum);
+ state, ts.getChecksums());
replicaLastSeenMap.remove(uuid);
}
}
@@ -430,7 +431,7 @@ public void flushReplicaHistoryMapToDB(boolean clearMap) {
}
public void upsertContainerHistory(long containerID, UUID uuid, long time,
- long bcsId, String state, long
dataChecksum) {
+ long bcsId, String state,
ContainerChecksums checksums) {
Map<UUID, ContainerReplicaHistory> tsMap;
try {
tsMap = cdbServiceProvider.getContainerReplicaHistory(containerID);
@@ -438,12 +439,12 @@ public void upsertContainerHistory(long containerID, UUID
uuid, long time,
if (ts == null) {
// New entry
tsMap.put(uuid, new ContainerReplicaHistory(uuid, time, time, bcsId,
- state, dataChecksum));
+ state, checksums));
} else {
// Entry exists, update last seen time and put it back to DB.
ts.setLastSeenTime(time);
ts.setState(state);
- ts.setDataChecksum(dataChecksum);
+ ts.setChecksums(checksums);
}
cdbServiceProvider.storeContainerReplicaHistory(containerID, tsMap);
} catch (IOException e) {
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
index 7df56a57be6..801177d25c4 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
@@ -64,6 +64,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.scm.container.ContainerChecksums;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
@@ -1074,12 +1075,12 @@ public void testGetReplicaHistoryForContainer() throws
IOException {
final UUID u2 = newDatanode("host2", "127.0.0.2");
final UUID u3 = newDatanode("host3", "127.0.0.3");
final UUID u4 = newDatanode("host4", "127.0.0.4");
- reconContainerManager.upsertContainerHistory(1L, u1, 1L, 1L, "OPEN",
1234L);
- reconContainerManager.upsertContainerHistory(1L, u2, 2L, 1L, "OPEN",
1234L);
- reconContainerManager.upsertContainerHistory(1L, u3, 3L, 1L, "OPEN",
1234L);
- reconContainerManager.upsertContainerHistory(1L, u4, 4L, 1L, "OPEN",
1234L);
+ reconContainerManager.upsertContainerHistory(1L, u1, 1L, 1L, "OPEN",
ContainerChecksums.of(1234L, 0L));
+ reconContainerManager.upsertContainerHistory(1L, u2, 2L, 1L, "OPEN",
ContainerChecksums.of(1234L, 0L));
+ reconContainerManager.upsertContainerHistory(1L, u3, 3L, 1L, "OPEN",
ContainerChecksums.of(1234L, 0L));
+ reconContainerManager.upsertContainerHistory(1L, u4, 4L, 1L, "OPEN",
ContainerChecksums.of(1234L, 0L));
- reconContainerManager.upsertContainerHistory(1L, u1, 5L, 1L, "OPEN",
1234L);
+ reconContainerManager.upsertContainerHistory(1L, u1, 5L, 1L, "OPEN",
ContainerChecksums.of(1234L, 0L));
Response response = containerEndpoint.getReplicaHistoryForContainer(1L);
List<ContainerHistory> histories =
@@ -1189,13 +1190,13 @@ private void createUnhealthyRecord(int id, String
state, int expected,
long differentChecksum = dataChecksumMismatch ? 2345L : 1234L;
reconContainerManager.upsertContainerHistory(cID, uuid1, 1L, 1L,
- "UNHEALTHY", differentChecksum);
+ "UNHEALTHY", ContainerChecksums.of(differentChecksum, 0L));
reconContainerManager.upsertContainerHistory(cID, uuid2, 2L, 1L,
- "UNHEALTHY", differentChecksum);
+ "UNHEALTHY", ContainerChecksums.of(differentChecksum, 0L));
reconContainerManager.upsertContainerHistory(cID, uuid3, 3L, 1L,
- "UNHEALTHY", 1234L);
+ "UNHEALTHY", ContainerChecksums.of(1234L, 0L));
reconContainerManager.upsertContainerHistory(cID, uuid4, 4L, 1L,
- "UNHEALTHY", 1234L);
+ "UNHEALTHY", ContainerChecksums.of(1234L, 0L));
}
protected ContainerWithPipeline getTestContainer(
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
index dc488ea303c..6fcf5c5fb4f 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.ContainerChecksums;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
@@ -189,7 +190,7 @@ public void testSameDataChecksumContainer() {
assertFalse(status.isUnderReplicated());
assertFalse(status.isOverReplicated());
assertFalse(status.isMisReplicated());
- assertFalse(status.isDataChecksumMismatched());
+ assertFalse(status.areChecksumsMismatched());
}
@Test
@@ -206,7 +207,7 @@ public void testDataChecksumMismatchContainer() {
assertFalse(status.isUnderReplicated());
assertFalse(status.isOverReplicated());
assertFalse(status.isMisReplicated());
- assertTrue(status.isDataChecksumMismatched());
+ assertTrue(status.areChecksumsMismatched());
}
/**
@@ -416,7 +417,7 @@ private Set<ContainerReplica>
generateReplicas(ContainerInfo cont,
replicas.add(new ContainerReplica.ContainerReplicaBuilder()
.setContainerID(cont.containerID())
.setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
- .setDataChecksum(1234L)
+ .setChecksums(ContainerChecksums.of(1234L, 0L))
.setContainerState(s)
.build());
}
@@ -432,7 +433,7 @@ private Set<ContainerReplica>
generateMismatchedReplicas(ContainerInfo cont,
.setContainerID(cont.containerID())
.setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
.setContainerState(s)
- .setDataChecksum(checksum)
+ .setChecksums(ContainerChecksums.of(checksum, 0L))
.build());
checksum++;
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
index ca902ea3782..4210756d1cd 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
@@ -53,6 +53,7 @@
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.ContainerChecksums;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -691,7 +692,7 @@ private Set<ContainerReplica> getMockReplicas(
.setContainerState(s)
.setContainerID(ContainerID.valueOf(containerId))
.setSequenceId(1)
- .setDataChecksum(1234L)
+ .setChecksums(ContainerChecksums.of(1234L, 0L))
.build());
}
return replicas;
@@ -707,7 +708,7 @@ private Set<ContainerReplica>
getMockReplicasChecksumMismatch(
.setContainerState(s)
.setContainerID(ContainerID.valueOf(containerId))
.setSequenceId(1)
- .setDataChecksum(checksum)
+ .setChecksums(ContainerChecksums.of(checksum, 0L))
.build());
checksum++;
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
index d020a548fc8..9e8b3905a58 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.ContainerChecksums;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
@@ -642,8 +643,8 @@ private Set<ContainerReplica>
generateReplicas(ContainerInfo cont,
replicas.add(new ContainerReplica.ContainerReplicaBuilder()
.setContainerID(cont.containerID())
.setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
+ .setChecksums(ContainerChecksums.of(1234L, 0L))
.setContainerState(s)
- .setDataChecksum(1234L)
.build());
}
return replicas;
@@ -658,7 +659,7 @@ private Set<ContainerReplica>
generateMismatchedReplicas(ContainerInfo cont,
.setContainerID(cont.containerID())
.setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
.setContainerState(s)
- .setDataChecksum(checksum)
+ .setChecksums(ContainerChecksums.of(checksum, 0L))
.build());
checksum++;
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
index 81c9166b13a..1d871b9974b 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
@@ -40,6 +40,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
+import org.apache.hadoop.hdds.scm.container.ContainerChecksums;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
@@ -209,7 +210,8 @@ public void testUpdateAndRemoveContainerReplica()
.setUuid(uuid1).setHostName("host1").setIpAddress("127.0.0.1").build();
ContainerReplica containerReplica1 = ContainerReplica.newBuilder()
.setContainerID(containerID1).setContainerState(State.OPEN)
-
.setDatanodeDetails(datanodeDetails1).setSequenceId(1001L).setDataChecksum(1234L).build();
+ .setDatanodeDetails(datanodeDetails1).setSequenceId(1001L)
+ .setChecksums(ContainerChecksums.of(1234L, 0L)).build();
final ReconContainerManager containerManager = getContainerManager();
final Map<Long, Map<UUID, ContainerReplicaHistory>> repHistMap =
@@ -256,7 +258,8 @@ public void testUpdateAndRemoveContainerReplica()
.setUuid(uuid2).setHostName("host2").setIpAddress("127.0.0.2").build();
final ContainerReplica containerReplica2 = ContainerReplica.newBuilder()
.setContainerID(containerID1).setContainerState(State.OPEN)
-
.setDatanodeDetails(datanodeDetails2).setSequenceId(1051L).setDataChecksum(1234L).build();
+ .setDatanodeDetails(datanodeDetails2).setSequenceId(1051L)
+ .setChecksums(ContainerChecksums.of(1234L, 0L)).build();
// Add replica to DN02
containerManager.updateContainerReplica(containerID1, containerReplica2);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]