This is an automated email from the ASF dual-hosted git repository.
erose pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 5e03af12aa HDDS-12824. Optimize container checksum read during datanode startup (#8604)
5e03af12aa is described below
commit 5e03af12aa872468871a16f152a4f4150d3402a3
Author: Aswin Shakil Balasubramanian <[email protected]>
AuthorDate: Wed Aug 6 10:53:39 2025 -0700
HDDS-12824. Optimize container checksum read during datanode startup (#8604)
---
.../java/org/apache/hadoop/ozone/OzoneConsts.java | 2 +
.../checksum/ContainerChecksumTreeManager.java | 17 +--
.../container/keyvalue/KeyValueContainerData.java | 5 +
.../ozone/container/keyvalue/KeyValueHandler.java | 16 +-
.../container/keyvalue/TarContainerPacker.java | 35 ++++-
.../keyvalue/helpers/KeyValueContainerUtil.java | 32 ++--
.../checksum/ContainerMerkleTreeTestUtils.java | 39 +++++
.../checksum/TestContainerChecksumTreeManager.java | 2 +
...stContainerReconciliationWithMockDatanodes.java | 6 +-
.../container/keyvalue/TestKeyValueContainer.java | 65 ++++++++
.../container/keyvalue/TestKeyValueHandler.java | 5 +-
.../container/keyvalue/TestTarContainerPacker.java | 86 ++++++-----
.../keyvalue/impl/TestFilePerBlockStrategy.java | 27 ++--
.../container/ozoneimpl/TestContainerReader.java | 164 +++++++++++++++++++++
...tBackgroundContainerDataScannerIntegration.java | 4 +
.../TestContainerScannerIntegrationAbstract.java | 4 +
.../TestOnDemandContainerScannerIntegration.java | 4 +
17 files changed, 424 insertions(+), 89 deletions(-)
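
For orientation, the core of this optimization: the aggregate data checksum of each
container is now cached under the new #DATACHECKSUM key in the container's RocksDB
metadata table, so datanode startup no longer deserializes every per-container .tree
file; the file is read (and the cache backfilled) only when the key is missing. A
minimal sketch of that lookup order, assuming simplified signatures
(DataChecksumLookupSketch and loadDataChecksum are illustrative names, not code in
this patch):

    // Sketch only: mirrors loadAndSetContainerDataChecksum below, with the
    // surrounding error handling and open-container checks omitted.
    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

    final class DataChecksumLookupSketch {
      static long loadDataChecksum(KeyValueContainerData data,
          Table<String, Long> metadataTable) throws IOException {
        // Fast path: checksum already cached in RocksDB, no file read needed.
        Long cached = metadataTable.get(data.getContainerDataChecksumKey());
        if (cached != null) {
          return cached;
        }
        // Slow path: deserialize the .tree file once, then backfill the cache
        // so the next startup takes the fast path.
        ContainerProtos.ContainerChecksumInfo info =
            ContainerChecksumTreeManager.readChecksumInfo(data);
        if (info.hasContainerMerkleTree()) {
          long checksum = info.getContainerMerkleTree().getDataChecksum();
          metadataTable.put(data.getContainerDataChecksumKey(), checksum);
          return checksum;
        }
        // No merkle tree yet: 0 until the scanner or reconciliation builds one.
        return 0L;
      }
    }
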
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 581d4d1071..1aab4fc28d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -88,6 +88,7 @@ public final class OzoneConsts {
public static final Path ROOT_PATH = Paths.get(OZONE_ROOT);
public static final String CONTAINER_EXTENSION = ".container";
+ public static final String CONTAINER_DATA_CHECKSUM_EXTENSION = ".tree";
public static final String CONTAINER_META_PATH = "metadata";
public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp";
public static final String CONTAINER_CHUNK_NAME_DELIMITER = ".";
@@ -141,6 +142,7 @@ public final class OzoneConsts {
public static final String CONTAINER_BYTES_USED = "#BYTESUSED";
public static final String PENDING_DELETE_BLOCK_COUNT =
"#PENDINGDELETEBLOCKCOUNT";
+ public static final String CONTAINER_DATA_CHECKSUM = "#DATACHECKSUM";
/**
* OM LevelDB prefixes.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ContainerChecksumTreeManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ContainerChecksumTreeManager.java
index 8a1afc5a6b..0f14a17249 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ContainerChecksumTreeManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ContainerChecksumTreeManager.java
@@ -19,6 +19,7 @@
import static java.nio.file.StandardCopyOption.ATOMIC_MOVE;
import static org.apache.hadoop.hdds.HddsUtils.checksumToString;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DATA_CHECKSUM_EXTENSION;
import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs;
import com.google.common.annotations.VisibleForTesting;
@@ -33,7 +34,6 @@
import java.util.Collection;
import java.util.List;
import java.util.Map;
-import java.util.Optional;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
@@ -321,14 +321,13 @@ public static long getDataChecksum(ContainerProtos.ContainerChecksumInfo checksu
/**
* Returns the container checksum tree file for the specified container without deserializing it.
*/
- @VisibleForTesting
public static File getContainerChecksumFile(ContainerData data) {
- return new File(data.getMetadataPath(), data.getContainerID() + ".tree");
+ return new File(data.getMetadataPath(), data.getContainerID() + CONTAINER_DATA_CHECKSUM_EXTENSION);
}
@VisibleForTesting
public static File getTmpContainerChecksumFile(ContainerData data) {
- return new File(data.getMetadataPath(), data.getContainerID() + ".tree.tmp");
+ return new File(data.getMetadataPath(), data.getContainerID() + CONTAINER_DATA_CHECKSUM_EXTENSION + ".tmp");
}
private Lock getLock(long containerID) {
@@ -343,8 +342,7 @@ private Lock getLock(long containerID) {
*/
public ContainerProtos.ContainerChecksumInfo read(ContainerData data) throws IOException {
try {
- return captureLatencyNs(metrics.getReadContainerMerkleTreeLatencyNS(), () ->
- readChecksumInfo(data).orElse(ContainerProtos.ContainerChecksumInfo.newBuilder().build()));
+ return captureLatencyNs(metrics.getReadContainerMerkleTreeLatencyNS(), () -> readChecksumInfo(data));
} catch (IOException ex) {
metrics.incrementMerkleTreeReadFailures();
throw ex;
@@ -422,17 +420,18 @@ public ByteString getContainerChecksumInfo(KeyValueContainerData data) throws IO
* Callers are not required to hold a lock while calling this since writes are done to a tmp file and atomically
* swapped into place.
*/
- public static Optional&lt;ContainerProtos.ContainerChecksumInfo&gt; readChecksumInfo(ContainerData data)
+ public static ContainerProtos.ContainerChecksumInfo readChecksumInfo(ContainerData data)
throws IOException {
long containerID = data.getContainerID();
File checksumFile = getContainerChecksumFile(data);
try {
if (!checksumFile.exists()) {
LOG.debug("No checksum file currently exists for container {} at the path {}", containerID, checksumFile);
- return Optional.empty();
+ return ContainerProtos.ContainerChecksumInfo.newBuilder().build();
}
+
try (InputStream inStream = Files.newInputStream(checksumFile.toPath())) {
- return Optional.of(ContainerProtos.ContainerChecksumInfo.parseFrom(inStream));
+ return ContainerProtos.ContainerChecksumInfo.parseFrom(inStream);
}
} catch (IOException ex) {
throw new IOException("Error occurred when reading container merkle tree for containerID "
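
A usage note on the API change above: readChecksumInfo no longer returns Optional; a
missing .tree file now yields an empty ContainerChecksumInfo, so callers test
hasContainerMerkleTree() instead of isPresent(). A hedged sketch of the post-change
caller pattern (readDataChecksumOrZero is an illustrative name, not code from this
patch):

    // Sketch only; assumes the post-change readChecksumInfo signature.
    static long readDataChecksumOrZero(ContainerData data) throws IOException {
      // An absent file is represented by an empty proto, not Optional.empty().
      ContainerProtos.ContainerChecksumInfo info =
          ContainerChecksumTreeManager.readChecksumInfo(data);
      // hasContainerMerkleTree() takes over the role of Optional.isPresent().
      return info.hasContainerMerkleTree()
          ? info.getContainerMerkleTree().getDataChecksum()
          : 0L;
    }
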
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 022764c227..0c398d2449 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -23,6 +23,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_COUNT;
import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_BYTES_USED;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DATA_CHECKSUM;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB;
import static org.apache.hadoop.ozone.OzoneConsts.DELETE_TRANSACTION_KEY;
@@ -424,6 +425,10 @@ public String getPendingDeleteBlockCountKey() {
return formatKey(PENDING_DELETE_BLOCK_COUNT);
}
+ public String getContainerDataChecksumKey() {
+ return formatKey(CONTAINER_DATA_CHECKSUM);
+ }
+
public String getDeletingBlockKeyPrefix() {
return formatKey(DELETING_KEY_PREFIX);
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index b9c80ce213..9963a2f7f6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -1407,7 +1407,8 @@ private void updateContainerChecksumFromMetadataIfNeeded(Container container) {
* This method does not send an ICR with the updated checksum info.
* @param container - Container for which the container merkle tree needs to be updated.
*/
- private ContainerProtos.ContainerChecksumInfo updateAndGetContainerChecksumFromMetadata(
+ @VisibleForTesting
+ public ContainerProtos.ContainerChecksumInfo updateAndGetContainerChecksumFromMetadata(
KeyValueContainer container) throws IOException {
ContainerMerkleTreeWriter merkleTree = new ContainerMerkleTreeWriter();
try (DBHandle dbHandle = BlockUtils.getDB(container.getContainerData(), conf);
@@ -1427,7 +1428,7 @@ private ContainerProtos.ContainerChecksumInfo updateAndGetContainerChecksumFromM
private ContainerProtos.ContainerChecksumInfo updateAndGetContainerChecksum(Container container,
ContainerMerkleTreeWriter treeWriter, boolean sendICR) throws IOException {
- ContainerData containerData = container.getContainerData();
+ KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
// Attempt to write the new data checksum to disk. If persisting this fails, keep using the original data
// checksum to prevent divergence from what SCM sees in the ICR vs what datanode peers will see when pulling the
@@ -1440,6 +1441,17 @@ private ContainerProtos.ContainerChecksumInfo updateAndGetContainerChecksum(Cont
if (updatedDataChecksum != originalDataChecksum) {
containerData.setDataChecksum(updatedDataChecksum);
+ try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
+ // This value is only used during the datanode startup. If the update fails, then it's okay as the merkle tree
+ // and in-memory checksum will still be the same. This will be updated the next time we update the tree.
+ // Either scanner or reconciliation will update the checksum.
+ dbHandle.getStore().getMetadataTable().put(containerData.getContainerDataChecksumKey(), updatedDataChecksum);
+ } catch (IOException e) {
+ LOG.error("Failed to update container data checksum in RocksDB for container {}. " +
+ "Leaving the original checksum in RocksDB: {}", containerData.getContainerID(),
+ checksumToString(originalDataChecksum), e);
+ }
+
if (sendICR) {
sendICR(container);
}
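
The hunk above makes the RocksDB put deliberately best-effort: if it fails, the
merkle tree file and the in-memory checksum still agree, and the key is refreshed on
the next tree update by the scanner or reconciliation. A hedged sketch of that
pattern (names follow the hunk; the surrounding method is simplified):

    // Best-effort cache refresh; a failure only delays the startup fast path.
    try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
      dbHandle.getStore().getMetadataTable()
          .put(containerData.getContainerDataChecksumKey(), updatedDataChecksum);
    } catch (IOException e) {
      // Log and continue: the .tree file remains the source of truth.
      LOG.error("Failed to update container data checksum in RocksDB for container {}",
          containerData.getContainerID(), e);
    }
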
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
index d42eb208e1..9ab8a8c580 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
@@ -24,6 +24,7 @@
import static org.apache.hadoop.hdds.utils.Archiver.readEntry;
import static org.apache.hadoop.hdds.utils.Archiver.tar;
import static org.apache.hadoop.hdds.utils.Archiver.untar;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DATA_CHECKSUM_EXTENSION;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3;
import java.io.File;
@@ -44,7 +45,9 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker;
@@ -92,8 +95,9 @@ public byte[] unpackContainerData(Container&lt;KeyValueContainerData&gt; container,
}
Path dbRoot = getDbPath(containerUntarDir, containerData);
- Path chunksRoot = getChunkPath(containerUntarDir, containerData);
- byte[] descriptorFileContent = innerUnpack(input, dbRoot, chunksRoot);
+ Path chunksRoot = getChunkPath(containerUntarDir);
+ Path tempContainerMetadataPath = getTempContainerMetadataPath(containerUntarDir, containerData);
+ byte[] descriptorFileContent = innerUnpack(input, dbRoot, chunksRoot, tempContainerMetadataPath);
if (!Files.exists(destContainerDir)) {
Files.createDirectories(destContainerDir);
@@ -111,9 +115,6 @@ public byte[] unpackContainerData(Container&lt;KeyValueContainerData&gt; container,
// Before the atomic move, the destination dir is empty and doesn't have a metadata directory.
// Writing the .container file will fail as the metadata dir doesn't exist.
// So we instead save the container file to the containerUntarDir.
- Path containerMetadataPath = Paths.get(container.getContainerData().getMetadataPath());
- Path tempContainerMetadataPath = Paths.get(containerUntarDir.toString(),
- containerMetadataPath.getName(containerMetadataPath.getNameCount() - 1).toString());
persistCustomContainerState(container, descriptorFileContent, State.RECOVERING, tempContainerMetadataPath);
Files.move(containerUntarDir, destContainerDir, StandardCopyOption.ATOMIC_MOVE,
@@ -146,6 +147,11 @@ public void pack(Container&lt;KeyValueContainerData&gt; container,
includeFile(container.getContainerFile(), CONTAINER_FILE_NAME,
archiveOutput);
+ File containerChecksumFile = ContainerChecksumTreeManager.getContainerChecksumFile(containerData);
+ if (containerChecksumFile.exists()) {
+ includeFile(containerChecksumFile, containerChecksumFile.getName(), archiveOutput);
+ }
+
includePath(getDbPath(containerData), DB_DIR_NAME,
archiveOutput);
@@ -202,11 +208,20 @@ public static Path getDbPath(Path baseDir,
}
}
- public static Path getChunkPath(Path baseDir,
- KeyValueContainerData containerData) {
+ public static Path getChunkPath(Path baseDir) {
return KeyValueContainerLocationUtil.getChunksLocationPath(baseDir.toString()).toPath();
}
+ private Path getContainerMetadataPath(ContainerData containerData) {
+ return Paths.get(containerData.getMetadataPath());
+ }
+
+ private Path getTempContainerMetadataPath(Path containerUntarDir, ContainerData containerData) {
+ Path containerMetadataPath = getContainerMetadataPath(containerData);
+ return Paths.get(containerUntarDir.toString(),
+ containerMetadataPath.getName(containerMetadataPath.getNameCount() - 1).toString());
+ }
+
InputStream decompress(InputStream input) throws IOException {
return compression.wrap(input);
}
@@ -215,7 +230,7 @@ OutputStream compress(OutputStream output) throws IOException {
return compression.wrap(output);
}
- private byte[] innerUnpack(InputStream input, Path dbRoot, Path chunksRoot)
+ private byte[] innerUnpack(InputStream input, Path dbRoot, Path chunksRoot, Path metadataRoot)
throws IOException {
byte[] descriptorFileContent = null;
try (ArchiveInputStream&lt;TarArchiveEntry&gt; archiveInput = untar(decompress(input))) {
@@ -233,6 +248,10 @@ private byte[] innerUnpack(InputStream input, Path dbRoot, Path chunksRoot)
.resolve(name.substring(CHUNKS_DIR_NAME.length() + 1));
extractEntry(entry, archiveInput, size, chunksRoot,
destinationPath);
+ } else if (name.endsWith(CONTAINER_DATA_CHECKSUM_EXTENSION)) {
+ Path destinationPath = metadataRoot.resolve(name);
+ extractEntry(entry, archiveInput, size, metadataRoot,
+ destinationPath);
} else if (CONTAINER_FILE_NAME.equals(name)) {
//Don't do anything. Container file should be unpacked in a
//separated step by unpackContainerDescriptor call.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 1b2a8b9359..28b1711e98 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -26,7 +26,6 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
-import java.util.Optional;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -260,10 +259,10 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData,
DatanodeStore store = null;
try {
try {
- boolean readOnly = ContainerInspectorUtil.isReadOnly(
- ContainerProtos.ContainerType.KeyValueContainer);
- store = BlockUtils.getUncachedDatanodeStore(
- kvContainerData, config, readOnly);
+ // Open RocksDB in write mode, as it is required for container checksum updates and inspector repair operations.
+ // The method KeyValueContainerMetadataInspector.buildErrorAndRepair will determine if write access to the DB
+ // is permitted based on the mode.
+ store = BlockUtils.getUncachedDatanodeStore(kvContainerData, config, false);
} catch (IOException e) {
// If an exception is thrown, then it may indicate the RocksDB is
// already open in the container cache. As this code is only executed at
@@ -288,17 +287,25 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData,
}
}
- private static void populateContainerDataChecksum(KeyValueContainerData kvContainerData) {
+ private static void loadAndSetContainerDataChecksum(KeyValueContainerData kvContainerData,
+ Table&lt;String, Long&gt; metadataTable) {
if (kvContainerData.isOpen()) {
return;
}
try {
- Optional&lt;ContainerChecksumInfo&gt; optionalContainerChecksumInfo = ContainerChecksumTreeManager
- .readChecksumInfo(kvContainerData);
- if (optionalContainerChecksumInfo.isPresent()) {
- ContainerChecksumInfo containerChecksumInfo = optionalContainerChecksumInfo.get();
- kvContainerData.setDataChecksum(containerChecksumInfo.getContainerMerkleTree().getDataChecksum());
+ Long containerDataChecksum = metadataTable.get(kvContainerData.getContainerDataChecksumKey());
+ if (containerDataChecksum != null &amp;&amp; kvContainerData.needsDataChecksum()) {
+ kvContainerData.setDataChecksum(containerDataChecksum);
+ return;
+ }
+
+ ContainerChecksumInfo containerChecksumInfo = ContainerChecksumTreeManager.readChecksumInfo(kvContainerData);
+ if (containerChecksumInfo != null &amp;&amp; containerChecksumInfo.hasContainerMerkleTree()
+ &amp;&amp; kvContainerData.needsDataChecksum()) {
+ containerDataChecksum = containerChecksumInfo.getContainerMerkleTree().getDataChecksum();
+ kvContainerData.setDataChecksum(containerDataChecksum);
+ metadataTable.put(kvContainerData.getContainerDataChecksumKey(), containerDataChecksum);
}
} catch (IOException ex) {
LOG.warn("Failed to read checksum info for container {}", kvContainerData.getContainerID(), ex);
@@ -375,6 +382,8 @@ private static void populateContainerMetadata(
kvContainerData.markAsEmpty();
}
+ loadAndSetContainerDataChecksum(kvContainerData, metadataTable);
+
// Run advanced container inspection/repair operations if specified on
// startup. If this method is called but not as a part of startup,
// The inspectors will be unloaded and this will be a no-op.
@@ -382,7 +391,6 @@ private static void populateContainerMetadata(
// Load finalizeBlockLocalIds for container in memory.
populateContainerFinalizeBlock(kvContainerData, store);
- populateContainerDataChecksum(kvContainerData);
}
/**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/checksum/ContainerMerkleTreeTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/checksum/ContainerMerkleTreeTestUtils.java
index 47842ab23a..0f7794bac4 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/checksum/ContainerMerkleTreeTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/checksum/ContainerMerkleTreeTestUtils.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.checksum;
import static org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager.getContainerChecksumFile;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -36,6 +37,7 @@
import java.util.stream.Collectors;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
@@ -43,6 +45,9 @@
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
@@ -346,4 +351,38 @@ public static void writeContainerDataTreeProto(ContainerData data, ContainerProt
}
data.setDataChecksum(checksumInfo.getContainerMerkleTree().getDataChecksum());
}
+
+ /**
+ * This function verifies that the in-memory data checksum matches the one stored in the container data and
+ * the RocksDB.
+ *
+ * @param containerData The container data to verify.
+ * @param conf The Ozone configuration.
+ * @throws IOException If an error occurs while reading the checksum info or RocksDB.
+ */
+ public static void verifyAllDataChecksumsMatch(KeyValueContainerData containerData, OzoneConfiguration conf)
+ throws IOException {
+ assertNotNull(containerData, "Container data should not be null");
+ ContainerProtos.ContainerChecksumInfo containerChecksumInfo = ContainerChecksumTreeManager
+ .readChecksumInfo(containerData);
+ assertNotNull(containerChecksumInfo);
+ long dataChecksum = containerChecksumInfo.getContainerMerkleTree().getDataChecksum();
+ Long dbDataChecksum;
+ try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
+ dbDataChecksum = dbHandle.getStore().getMetadataTable().get(containerData.getContainerDataChecksumKey());
+ }
+
+ if (containerData.getDataChecksum() == 0) {
+ assertEquals(containerData.getDataChecksum(), dataChecksum);
+ // RocksDB checksum can be null if the file doesn't exist or when the file is created by
+ // the block deleting service. 0 checksum will be stored when the container is loaded without
+ // merkle tree.
+ assertThat(dbDataChecksum).isIn(0L, null);
+ } else {
+ // In-Memory, Container Merkle Tree file, RocksDB checksum should be equal
+ assertEquals(containerData.getDataChecksum(), dataChecksum, "In-memory data checksum should match " +
+ "the one in the checksum file.");
+ assertEquals(dbDataChecksum, dataChecksum);
+ }
+ }
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/checksum/TestContainerChecksumTreeManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/checksum/TestContainerChecksumTreeManager.java
index 56592efe1a..65478afa30 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/checksum/TestContainerChecksumTreeManager.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/checksum/TestContainerChecksumTreeManager.java
@@ -97,6 +97,8 @@ public void init() {
container = mock(KeyValueContainerData.class);
when(container.getContainerID()).thenReturn(CONTAINER_ID);
when(container.getMetadataPath()).thenReturn(testDir.getAbsolutePath());
+ // File name is hardcoded here to check if the file name has been changed, since this would
+ // need additional compatibility handling.
checksumFile = new File(testDir, CONTAINER_ID + ".tree");
checksumManager = new ContainerChecksumTreeManager(new OzoneConfiguration());
config = new OzoneConfiguration();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestContainerReconciliationWithMockDatanodes.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestContainerReconciliationWithMockDatanodes.java
index d567e209d4..b90df135f0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestContainerReconciliationWithMockDatanodes.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestContainerReconciliationWithMockDatanodes.java
@@ -20,6 +20,7 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.verifyAllDataChecksumsMatch;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded;
import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet;
@@ -357,12 +358,13 @@ public ContainerProtos.GetContainerChecksumInfoResponseProto getChecksumInfo(lon
*/
public long checkAndGetDataChecksum(long containerID) {
KeyValueContainer container = getContainer(containerID);
+ KeyValueContainerData containerData = container.getContainerData();
long dataChecksum = 0;
try {
ContainerProtos.ContainerChecksumInfo containerChecksumInfo = handler.getChecksumManager()
- .read(container.getContainerData());
+ .read(containerData);
dataChecksum = containerChecksumInfo.getContainerMerkleTree().getDataChecksum();
- assertEquals(container.getContainerData().getDataChecksum(), dataChecksum);
+ verifyAllDataChecksumsMatch(containerData, conf);
} catch (IOException ex) {
fail("Failed to read container checksum from disk", ex);
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 7ba72f937e..28c118b517 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -20,6 +20,8 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3;
+import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.buildTestTree;
+import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.verifyAllDataChecksumsMatch;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED;
import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion;
import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION;
@@ -73,6 +75,7 @@
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
@@ -291,6 +294,53 @@ public void testEmptyContainerImportExport(
checkContainerFilesPresent(data, 0);
}
+ @ContainerTestVersionInfo.ContainerTest
+ public void testEmptyMerkleTreeImportExport(ContainerTestVersionInfo versionInfo) throws Exception {
+ init(versionInfo);
+ createContainer();
+ closeContainer();
+
+ KeyValueContainerData data = keyValueContainer.getContainerData();
+ // Create an empty checksum file that exists but has no valid merkle tree
+ File checksumFile = ContainerChecksumTreeManager.getContainerChecksumFile(data);
+ ContainerProtos.ContainerChecksumInfo emptyContainerInfo = ContainerProtos.ContainerChecksumInfo
+ .newBuilder().build();
+ try (OutputStream tmpOutputStream = Files.newOutputStream(checksumFile.toPath())) {
+ emptyContainerInfo.writeTo(tmpOutputStream);
+ }
+
+ // Check state of original container.
+ checkContainerFilesPresent(data, 0);
+
+ //destination path
+ File exportTar = Files.createFile(
+ folder.toPath().resolve("export.tar")).toFile();
+ TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
+ //export the container
+ try (OutputStream fos = Files.newOutputStream(exportTar.toPath())) {
+ keyValueContainer.exportContainerData(fos, packer);
+ }
+
+ KeyValueContainerUtil.removeContainer(
+ keyValueContainer.getContainerData(), CONF);
+ keyValueContainer.delete();
+
+ // import container.
+ try (InputStream fis = Files.newInputStream(exportTar.toPath())) {
+ keyValueContainer.importContainerData(fis, packer);
+ }
+
+ // Make sure empty chunks dir was unpacked.
+ checkContainerFilesPresent(data, 0);
+ data = keyValueContainer.getContainerData();
+ ContainerProtos.ContainerChecksumInfo checksumInfo = ContainerChecksumTreeManager.readChecksumInfo(data);
+ assertFalse(checksumInfo.hasContainerMerkleTree());
+ // The import should not fail and the checksum should be 0
+ assertEquals(0, data.getDataChecksum());
+ // The checksum is not stored in rocksDB as the container merkle tree doesn't exist.
+ verifyAllDataChecksumsMatch(data, CONF);
+ }
+
@ContainerTestVersionInfo.ContainerTest
public void testUnhealthyContainerImportExport(
ContainerTestVersionInfo versionInfo) throws Exception {
@@ -340,6 +390,18 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo)
closeContainer();
populate(numberOfKeysToWrite);
+ // Create merkle tree and set data checksum to simulate actual key value container.
+ File checksumFile = ContainerChecksumTreeManager.getContainerChecksumFile(
+ keyValueContainer.getContainerData());
+ ContainerProtos.ContainerMerkleTree containerMerkleTreeWriterProto = buildTestTree(CONF).toProto();
+ keyValueContainerData.setDataChecksum(containerMerkleTreeWriterProto.getDataChecksum());
+ ContainerProtos.ContainerChecksumInfo containerInfo = ContainerProtos.ContainerChecksumInfo.newBuilder()
+ .setContainerID(containerId)
+ .setContainerMerkleTree(containerMerkleTreeWriterProto).build();
+ try (OutputStream tmpOutputStream = Files.newOutputStream(checksumFile.toPath())) {
+ containerInfo.writeTo(tmpOutputStream);
+ }
+
//destination path
File folderToExport = Files.createFile(
folder.toPath().resolve("export.tar")).toFile();
@@ -387,6 +449,9 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo)
containerData.getMaxSize());
assertEquals(keyValueContainerData.getBytesUsed(),
containerData.getBytesUsed());
+ assertEquals(keyValueContainerData.getDataChecksum(), containerData.getDataChecksum());
+ verifyAllDataChecksumsMatch(containerData, CONF);
+
assertNotNull(containerData.getContainerFileChecksum());
assertNotEquals(containerData.ZERO_CHECKSUM,
container.getContainerData().getContainerFileChecksum());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index c5c4597ed4..17efe57e59 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -27,6 +27,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.GB;
import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.assertTreesSortedAndMatch;
import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.buildTestTree;
+import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.verifyAllDataChecksumsMatch;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createBlockMetaData;
import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet;
import static org.assertj.core.api.Assertions.assertThat;
@@ -688,8 +689,8 @@ public void testUpdateContainerChecksum(ContainerLayoutVersion layoutVersion) th
keyValueHandler.updateContainerChecksum(container, treeWriter);
// Check ICR sent. The ICR sender verifies that the expected checksum is present in the report.
assertEquals(1, icrCount.get());
- // Check checksum in memory.
- assertEquals(updatedDataChecksum, containerData.getDataChecksum());
+ // Check all data checksums are updated correctly.
+ verifyAllDataChecksumsMatch(containerData, conf);
// Check disk content.
ContainerProtos.ContainerChecksumInfo checksumInfo = checksumManager.read(containerData);
assertTreesSortedAndMatch(treeWriter.toProto(), checksumInfo.getContainerMerkleTree());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
index 7f5c4c6b27..8452893475 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
@@ -20,6 +20,8 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.nio.file.Files.newInputStream;
import static java.nio.file.Files.newOutputStream;
+import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.assertTreesSortedAndMatch;
+import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.buildTestTree;
import static org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker.CONTAINER_FILE_NAME;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -44,16 +46,16 @@
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
-import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.Archiver;
+import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
+import org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeWriter;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.apache.hadoop.ozone.container.replication.CopyContainerCompression;
import org.apache.ozone.test.SpyInputStream;
import org.apache.ozone.test.SpyOutputStream;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@@ -90,20 +92,21 @@ public class TestTarContainerPacker {
private TarContainerPacker packer;
- private static final Path SOURCE_CONTAINER_ROOT =
- Paths.get("target/test/data/packer-source-dir");
+ @TempDir
+ private Path sourceContainerRoot;
- private static final Path DEST_CONTAINER_ROOT =
- Paths.get("target/test/data/packer-dest-dir");
+ @TempDir
+ private Path destContainerRoot;
- private static final Path TEMP_DIR =
- Paths.get("target/test/data/packer-tmp-dir");
+ @TempDir
+ private Path tempDir;
private static final AtomicInteger CONTAINER_ID = new AtomicInteger(1);
private ContainerLayoutVersion layout;
private String schemaVersion;
private OzoneConfiguration conf;
+ private ContainerChecksumTreeManager checksumTreeManager;
private void initTests(ContainerTestVersionInfo versionInfo,
CopyContainerCompression compression) {
@@ -112,6 +115,7 @@ private void initTests(ContainerTestVersionInfo versionInfo,
this.conf = new OzoneConfiguration();
ContainerTestVersionInfo.setTestSchemaVersion(schemaVersion, conf);
packer = new TarContainerPacker(compression);
+ checksumTreeManager = new ContainerChecksumTreeManager(conf);
}
public static List<Arguments> getLayoutAndCompression() {
@@ -126,34 +130,16 @@ public static List<Arguments> getLayoutAndCompression() {
return parameterList;
}
- @BeforeAll
- public static void init() throws IOException {
- initDir(SOURCE_CONTAINER_ROOT);
- initDir(DEST_CONTAINER_ROOT);
- initDir(TEMP_DIR);
- }
-
- @AfterAll
- public static void cleanup() throws IOException {
- FileUtils.deleteDirectory(SOURCE_CONTAINER_ROOT.toFile());
- FileUtils.deleteDirectory(DEST_CONTAINER_ROOT.toFile());
- FileUtils.deleteDirectory(TEMP_DIR.toFile());
- }
-
- private static void initDir(Path path) throws IOException {
- if (path.toFile().exists()) {
- FileUtils.deleteDirectory(path.toFile());
- }
- Files.createDirectories(path);
- }
-
private KeyValueContainerData createContainer(Path dir) throws IOException {
- return createContainer(dir, true);
+ return createContainer(dir, true, true);
}
- private KeyValueContainerData createContainer(Path dir, boolean createDir)
+ private KeyValueContainerData createContainer(Path dir, boolean createDir, boolean incrementId)
throws IOException {
- long id = CONTAINER_ID.getAndIncrement();
+ long id = CONTAINER_ID.get();
+ if (incrementId) {
+ id = CONTAINER_ID.getAndIncrement();
+ }
Path containerDir = dir.resolve(String.valueOf(id));
Path dataDir = containerDir.resolve("chunks");
@@ -183,7 +169,7 @@ public void pack(ContainerTestVersionInfo versionInfo,
initTests(versionInfo, compression);
//GIVEN
KeyValueContainerData sourceContainerData =
- createContainer(SOURCE_CONTAINER_ROOT);
+ createContainer(sourceContainerRoot, true, false);
KeyValueContainer sourceContainer =
new KeyValueContainer(sourceContainerData, conf);
@@ -194,10 +180,15 @@ public void pack(ContainerTestVersionInfo versionInfo,
//sample chunk file in the chunk directory
writeChunkFile(sourceContainerData, TEST_CHUNK_FILE_NAME);
+ //write container checksum file in the metadata directory
+ ContainerMerkleTreeWriter treeWriter = buildTestTree(conf);
+ checksumTreeManager.writeContainerDataTree(sourceContainerData, treeWriter);
+ assertTrue(ContainerChecksumTreeManager.getContainerChecksumFile(sourceContainerData).exists());
+
//sample container descriptor file
writeDescriptor(sourceContainer);
- Path targetFile = TEMP_DIR.resolve("container.tar");
+ Path targetFile = tempDir.resolve("container.tar");
//WHEN: pack it
SpyOutputStream outputForPack =
@@ -239,7 +230,7 @@ public void pack(ContainerTestVersionInfo versionInfo,
inputForUnpackDescriptor.assertClosedExactlyOnce();
KeyValueContainerData destinationContainerData =
- createContainer(DEST_CONTAINER_ROOT, false);
+ createContainer(destContainerRoot, false, false);
KeyValueContainer destinationContainer =
new KeyValueContainer(destinationContainerData, conf);
@@ -249,7 +240,7 @@ public void pack(ContainerTestVersionInfo versionInfo,
new SpyInputStream(newInputStream(targetFile));
String descriptor = new String(
packer.unpackContainerData(destinationContainer, inputForUnpackData,
- TEMP_DIR, DEST_CONTAINER_ROOT.resolve(String.valueOf(
+ tempDir, destContainerRoot.resolve(String.valueOf(
destinationContainer.getContainerData().getContainerID()))),
UTF_8);
@@ -260,6 +251,11 @@ public void pack(ContainerTestVersionInfo versionInfo,
Paths.get(destinationContainerData.getChunksPath()),
TEST_CHUNK_FILE_NAME);
+ assertEquals(sourceContainerData.getContainerID(), destinationContainerData.getContainerID());
+ assertTrue(ContainerChecksumTreeManager.getContainerChecksumFile(destinationContainerData).exists());
+ assertTreesSortedAndMatch(checksumTreeManager.read(sourceContainerData).getContainerMerkleTree(),
+ checksumTreeManager.read(destinationContainerData).getContainerMerkleTree());
+
String containerFileData = new String(Files.readAllBytes(destinationContainer.getContainerFile().toPath()), UTF_8);
assertTrue(containerFileData.contains("RECOVERING"),
"The state of the container is not 'RECOVERING' in the container
file");
@@ -276,7 +272,7 @@ public void unpackContainerDataWithValidRelativeDbFilePath(
initTests(versionInfo, compression);
//GIVEN
KeyValueContainerData sourceContainerData =
- createContainer(SOURCE_CONTAINER_ROOT);
+ createContainer(sourceContainerRoot);
String fileName = "sub/dir/" + TEST_DB_FILE_NAME;
File file = writeDbFile(sourceContainerData, fileName);
@@ -301,7 +297,7 @@ public void unpackContainerDataWithValidRelativeChunkFilePath(
initTests(versionInfo, compression);
//GIVEN
KeyValueContainerData sourceContainerData =
- createContainer(SOURCE_CONTAINER_ROOT);
+ createContainer(sourceContainerRoot);
String fileName = "sub/dir/" + TEST_CHUNK_FILE_NAME;
File file = writeChunkFile(sourceContainerData, fileName);
@@ -325,7 +321,7 @@ public void unpackContainerDataWithInvalidRelativeDbFilePath(
initTests(versionInfo, compression);
//GIVEN
KeyValueContainerData sourceContainerData =
- createContainer(SOURCE_CONTAINER_ROOT);
+ createContainer(sourceContainerRoot);
String fileName = "../db_file";
File file = writeDbFile(sourceContainerData, fileName);
@@ -346,7 +342,7 @@ public void unpackContainerDataWithInvalidRelativeChunkFilePath(
initTests(versionInfo, compression);
//GIVEN
KeyValueContainerData sourceContainerData =
- createContainer(SOURCE_CONTAINER_ROOT);
+ createContainer(sourceContainerRoot);
String fileName = "../chunk_file";
File file = writeChunkFile(sourceContainerData, fileName);
@@ -361,10 +357,10 @@ public void unpackContainerDataWithInvalidRelativeChunkFilePath(
private KeyValueContainerData unpackContainerData(File containerFile)
throws IOException {
try (InputStream input = newInputStream(containerFile.toPath())) {
- KeyValueContainerData data = createContainer(DEST_CONTAINER_ROOT, false);
+ KeyValueContainerData data = createContainer(destContainerRoot, false, true);
KeyValueContainer container = new KeyValueContainer(data, conf);
- packer.unpackContainerData(container, input, TEMP_DIR,
- DEST_CONTAINER_ROOT.resolve(String.valueOf(data.getContainerID())));
+ packer.unpackContainerData(container, input, tempDir,
+ destContainerRoot.resolve(String.valueOf(data.getContainerID())));
return data;
}
}
@@ -406,7 +402,7 @@ private File writeSingleFile(Path parentPath, String fileName,
private File packContainerWithSingleFile(File file, String entryName)
throws Exception {
- File targetFile = TEMP_DIR.resolve("container.tar").toFile();
+ File targetFile = tempDir.resolve("container.tar").toFile();
Path path = targetFile.toPath();
try (TarArchiveOutputStream archive = new TarArchiveOutputStream(packer.compress(newOutputStream(path)))) {
archive.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_POSIX);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
index 3a95af7f70..95475651d0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
@@ -22,9 +22,11 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum;
+import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.verifyAllDataChecksumsMatch;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.io.File;
@@ -184,10 +186,11 @@ public void testWriteChunkForClosedContainer()
ChunkBuffer writeChunkData = ChunkBuffer.wrap(getData());
KeyValueContainer kvContainer = getKeyValueContainer();
KeyValueContainerData containerData = kvContainer.getContainerData();
- closedKeyValueContainer();
ContainerSet containerSet = newContainerSet();
containerSet.addContainer(kvContainer);
KeyValueHandler keyValueHandler = createKeyValueHandler(containerSet);
+ keyValueHandler.markContainerForClose(kvContainer);
+ keyValueHandler.closeContainer(kvContainer);
keyValueHandler.writeChunkForClosedContainer(getChunkInfo(), getBlockID(), writeChunkData, kvContainer);
ChunkBufferToByteString readChunkData =
keyValueHandler.getChunkManager().readChunk(kvContainer,
getBlockID(), getChunkInfo(), WRITE_STAGE);
@@ -228,12 +231,16 @@ public void testWriteChunkForClosedContainer()
@Test
public void testPutBlockForClosedContainer() throws IOException {
+ OzoneConfiguration conf = new OzoneConfiguration();
KeyValueContainer kvContainer = getKeyValueContainer();
KeyValueContainerData containerData = kvContainer.getContainerData();
- closedKeyValueContainer();
ContainerSet containerSet = newContainerSet();
containerSet.addContainer(kvContainer);
KeyValueHandler keyValueHandler = createKeyValueHandler(containerSet);
+ keyValueHandler.markContainerForClose(kvContainer);
+ keyValueHandler.closeContainer(kvContainer);
+ assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, containerData.getState());
+ assertEquals(0L, containerData.getDataChecksum());
List<ContainerProtos.ChunkInfo> chunkInfoList = new ArrayList<>();
ChunkInfo info = new ChunkInfo(String.format("%d.data.%d",
getBlockID().getLocalID(), 0), 0L, 20L);
@@ -244,11 +251,13 @@ public void testPutBlockForClosedContainer() throws IOException {
ChunkBuffer chunkData = ContainerTestHelper.getData(20);
keyValueHandler.writeChunkForClosedContainer(info, getBlockID(), chunkData, kvContainer);
keyValueHandler.putBlockForClosedContainer(kvContainer, putBlockData, 1L, true);
+ keyValueHandler.updateAndGetContainerChecksumFromMetadata(kvContainer);
assertEquals(1L, containerData.getBlockCommitSequenceId());
assertEquals(1L, containerData.getBlockCount());
assertEquals(20L, containerData.getBytesUsed());
+ verifyAllDataChecksumsMatch(containerData, conf);
- try (DBHandle dbHandle = BlockUtils.getDB(containerData, new OzoneConfiguration())) {
+ try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
long localID = putBlockData.getLocalID();
BlockData getBlockData = dbHandle.getStore().getBlockDataTable()
.get(containerData.getBlockKey(localID));
@@ -264,11 +273,15 @@ public void testPutBlockForClosedContainer() throws IOException {
chunkData = ContainerTestHelper.getData(20);
keyValueHandler.writeChunkForClosedContainer(newChunkInfo, getBlockID(), chunkData, kvContainer);
keyValueHandler.putBlockForClosedContainer(kvContainer, putBlockData, 2L, true);
+ long previousDataChecksum = containerData.getDataChecksum();
+ keyValueHandler.updateAndGetContainerChecksumFromMetadata(kvContainer);
assertEquals(2L, containerData.getBlockCommitSequenceId());
assertEquals(1L, containerData.getBlockCount());
assertEquals(40L, containerData.getBytesUsed());
+ assertNotEquals(previousDataChecksum, containerData.getDataChecksum());
+ verifyAllDataChecksumsMatch(containerData, conf);
- try (DBHandle dbHandle = BlockUtils.getDB(containerData, new OzoneConfiguration())) {
+ try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
long localID = putBlockData.getLocalID();
BlockData getBlockData = dbHandle.getStore().getBlockDataTable()
.get(containerData.getBlockKey(localID));
@@ -291,7 +304,7 @@ public void testPutBlockForClosedContainer() throws IOException {
// Old chunk size 20, new chunk size 30, difference 10. So bytesUsed should be 40 + 10 = 50
assertEquals(50L, containerData.getBytesUsed());
- try (DBHandle dbHandle = BlockUtils.getDB(containerData, new OzoneConfiguration())) {
+ try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
long localID = putBlockData.getLocalID();
BlockData getBlockData = dbHandle.getStore().getBlockDataTable()
.get(containerData.getBlockKey(localID));
@@ -331,10 +344,6 @@ public KeyValueHandler createKeyValueHandler(ContainerSet containerSet)
return ContainerTestUtils.getKeyValueHandler(conf, dnUuid, containerSet, volumeSet);
}
- public void closedKeyValueContainer() {
- getKeyValueContainer().getContainerData().setState(ContainerProtos.ContainerDataProto.State.CLOSED);
- }
-
@Override
protected ContainerLayoutTestInfo getStrategy() {
return ContainerLayoutTestInfo.FILE_PER_BLOCK;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index ec5c6743e7..69415972d0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -20,12 +20,15 @@
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.DELETED;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.RECOVERING;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY;
+import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.verifyAllDataChecksumsMatch;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.getKeyValueHandler;
import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.anyList;
@@ -39,8 +42,10 @@
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.UUID;
+import java.util.stream.Collectors;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -49,6 +54,9 @@
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
+import org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils;
+import org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeWriter;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
@@ -66,6 +74,7 @@
import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
import org.apache.hadoop.util.Time;
@@ -92,6 +101,7 @@ public class TestContainerReader {
private ContainerLayoutVersion layout;
private String schemaVersion;
+ private KeyValueHandler keyValueHandler;
@TempDir
private Path tempDir;
@@ -140,6 +150,7 @@ private void setup(ContainerTestVersionInfo versionInfo) throws Exception {
// so it does not affect the ContainerReader, which avoids using the cache
// at startup for performance reasons.
ContainerCache.getInstance(conf).shutdownCache();
+ keyValueHandler = getKeyValueHandler(conf, UUID.randomUUID().toString(), containerSet, volumeSet);
}
@AfterEach
@@ -609,6 +620,159 @@ public void testMarkedDeletedContainerCleared(
}
}
+ @ContainerTestVersionInfo.ContainerTest
+ public void testContainerLoadingWithMerkleTreePresent(ContainerTestVersionInfo versionInfo)
+ throws Exception {
+ setLayoutAndSchemaVersion(versionInfo);
+ setup(versionInfo);
+
+ // Create a container with blocks and write MerkleTree
+ KeyValueContainer container = createContainer(10L);
+ KeyValueContainerData containerData = container.getContainerData();
+ ContainerMerkleTreeWriter treeWriter = ContainerMerkleTreeTestUtils.buildTestTree(conf);
+ ContainerChecksumTreeManager checksumManager = keyValueHandler.getChecksumManager();
+ List<Long> deletedBlockIds = Arrays.asList(1L, 2L, 3L);
+ checksumManager.markBlocksAsDeleted(containerData, deletedBlockIds);
+ keyValueHandler.updateContainerChecksum(container, treeWriter);
+ long expectedDataChecksum = checksumManager.read(containerData).getContainerMerkleTree().getDataChecksum();
+
+ // Test container loading
+ ContainerCache.getInstance(conf).shutdownCache();
+ ContainerReader containerReader = new ContainerReader(volumeSet, hddsVolume, containerSet, conf, true);
+ containerReader.run();
+
+ // Verify container was loaded successfully and data checksum is set
+ Container<?> loadedContainer = containerSet.getContainer(10L);
+ assertNotNull(loadedContainer);
+ KeyValueContainerData loadedData = (KeyValueContainerData) loadedContainer.getContainerData();
+ assertNotSame(containerData, loadedData);
+ assertEquals(expectedDataChecksum, loadedData.getDataChecksum());
+ ContainerProtos.ContainerChecksumInfo loadedChecksumInfo =
+ ContainerChecksumTreeManager.readChecksumInfo(loadedData);
+ verifyAllDataChecksumsMatch(loadedData, conf);
+
+ // Verify the deleted block IDs match what we set
+ List&lt;Long&gt; loadedDeletedBlockIds = loadedChecksumInfo.getDeletedBlocksList().stream()
+ .map(ContainerProtos.BlockMerkleTree::getBlockID)
+ .sorted()
+ .collect(Collectors.toList());
+ assertEquals(3, loadedChecksumInfo.getDeletedBlocksCount());
+ assertEquals(deletedBlockIds, loadedDeletedBlockIds);
+ }
+
+ @ContainerTestVersionInfo.ContainerTest
+ public void testContainerLoadingWithMerkleTreeFallbackToRocksDB(ContainerTestVersionInfo versionInfo)
+ throws Exception {
+ setLayoutAndSchemaVersion(versionInfo);
+ setup(versionInfo);
+
+ KeyValueContainer container = createContainer(11L);
+ KeyValueContainerData containerData = container.getContainerData();
+ ContainerMerkleTreeWriter treeWriter = ContainerMerkleTreeTestUtils.buildTestTree(conf);
+ ContainerChecksumTreeManager checksumManager = new ContainerChecksumTreeManager(conf);
+ ContainerProtos.ContainerChecksumInfo checksumInfo =
+ checksumManager.writeContainerDataTree(containerData, treeWriter);
+ long dataChecksum = checksumInfo.getContainerMerkleTree().getDataChecksum();
+
+ // Verify no checksum in RocksDB initially
+ try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
+ Long dbDataChecksum = dbHandle.getStore().getMetadataTable().get(containerData.getContainerDataChecksumKey());
+ assertNull(dbDataChecksum);
+ }
+ ContainerCache.getInstance(conf).shutdownCache();
+
+ // Test container loading - should read from MerkleTree and store in RocksDB
+ ContainerReader containerReader = new ContainerReader(volumeSet, hddsVolume, containerSet, conf, true);
+ containerReader.run();
+
+ // Verify container uses checksum from MerkleTree
+ Container<?> loadedContainer = containerSet.getContainer(11L);
+ assertNotNull(loadedContainer);
+ KeyValueContainerData loadedData = (KeyValueContainerData) loadedContainer.getContainerData();
+ assertNotSame(containerData, loadedData);
+ assertEquals(dataChecksum, loadedData.getDataChecksum());
+
+ // Verify checksum was stored in RocksDB as fallback
+ verifyAllDataChecksumsMatch(loadedData, conf);
+ }
+
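
Taken together, the assertions in this test imply the load-time resolution
order: prefer the checksum cached in the container's RocksDB metadata, fall
back to parsing the merkle tree file, and write the parsed value back so the
next startup takes the fast path. A condensed sketch of that flow; the
resolveDataChecksum helper, its null handling, and its placement are
illustrative assumptions, not the production method (the diffstat puts the
real logic in KeyValueContainerUtil):

    // Sketch only - helper name and structure are assumptions.
    private long resolveDataChecksum(KeyValueContainerData data,
        OzoneConfiguration conf) throws IOException {
      try (DBHandle db = BlockUtils.getDB(data, conf)) {
        // Fast path: checksum cached in the container's RocksDB metadata.
        Long cached = db.getStore().getMetadataTable()
            .get(data.getContainerDataChecksumKey());
        if (cached != null) {
          return cached;
        }
        // Fallback: parse the persisted merkle tree file.
        ContainerProtos.ContainerChecksumInfo info =
            ContainerChecksumTreeManager.readChecksumInfo(data);
        if (info != null && info.hasContainerMerkleTree()) {
          long checksum = info.getContainerMerkleTree().getDataChecksum();
          // Cache it so the next startup avoids parsing the tree file.
          db.getStore().getMetadataTable()
              .put(data.getContainerDataChecksumKey(), checksum);
          return checksum;
        }
      }
      return 0L; // no checksum recorded anywhere yet
    }
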
+ @ContainerTestVersionInfo.ContainerTest
+ public void testContainerLoadingWithNoChecksumAnywhere(ContainerTestVersionInfo versionInfo)
+ throws Exception {
+ setLayoutAndSchemaVersion(versionInfo);
+ setup(versionInfo);
+
+ KeyValueContainer container = createContainer(12L);
+ KeyValueContainerData containerData = container.getContainerData();
+ // Verify no checksum in RocksDB
+ try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
+ Long dbDataChecksum = dbHandle.getStore().getMetadataTable().get(containerData.getContainerDataChecksumKey());
+ assertNull(dbDataChecksum);
+ }
+
+ File checksumFile = ContainerChecksumTreeManager.getContainerChecksumFile(containerData);
+ assertFalse(checksumFile.exists());
+
+ // Test container loading - should default to 0
+ ContainerCache.getInstance(conf).shutdownCache();
+ ContainerReader containerReader = new ContainerReader(volumeSet, hddsVolume, containerSet, conf, true);
+ containerReader.run();
+
+ // Verify container loads with default checksum of 0
+ Container<?> loadedContainer = containerSet.getContainer(12L);
+ assertNotNull(loadedContainer);
+ KeyValueContainerData loadedData = (KeyValueContainerData) loadedContainer.getContainerData();
+ assertNotSame(containerData, loadedData);
+ assertEquals(0L, loadedData.getDataChecksum());
+
+ // The checksum is not stored in RocksDB because the checksum file doesn't exist.
+ verifyAllDataChecksumsMatch(loadedData, conf);
+ }
+
+ @ContainerTestVersionInfo.ContainerTest
+ public void testContainerLoadingWithoutMerkleTree(ContainerTestVersionInfo versionInfo)
+ throws Exception {
+ setLayoutAndSchemaVersion(versionInfo);
+ setup(versionInfo);
+
+ KeyValueContainer container = createContainer(13L);
+ KeyValueContainerData containerData = container.getContainerData();
+ ContainerMerkleTreeWriter treeWriter = new ContainerMerkleTreeWriter();
+ keyValueHandler.updateContainerChecksum(container, treeWriter);
+ // Create an empty checksum file that exists but has no valid merkle tree
+ assertTrue(ContainerChecksumTreeManager.getContainerChecksumFile(containerData).exists());
+
+ // Verify no checksum in RocksDB initially
+ try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
+ Long dbDataChecksum = dbHandle.getStore().getMetadataTable().get(containerData.getContainerDataChecksumKey());
+ assertNull(dbDataChecksum);
+ }
+
+ ContainerCache.getInstance(conf).shutdownCache();
+
+ // Test container loading - should handle a checksum file that is present without a container merkle tree
+ // and default to 0.
+ ContainerReader containerReader = new ContainerReader(volumeSet, hddsVolume, containerSet, conf, true);
+ containerReader.run();
+
+ // Verify container loads with default checksum of 0 when checksum file doesn't have merkle tree
+ Container<?> loadedContainer = containerSet.getContainer(13L);
+ assertNotNull(loadedContainer);
+ KeyValueContainerData loadedData = (KeyValueContainerData) loadedContainer.getContainerData();
+ assertNotSame(containerData, loadedData);
+ assertEquals(0L, loadedData.getDataChecksum());
+ verifyAllDataChecksumsMatch(loadedData, conf);
+ }
+
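
verifyAllDataChecksumsMatch is invoked in all four loading tests above,
across states where the tree file, the RocksDB entry, both, or neither
exist - so the helper necessarily compares only the sources that are
actually present against the in-memory value. A plausible shape, purely
illustrative (the real implementation is the new utility in
ContainerMerkleTreeTestUtils listed in the diffstat):

    // Illustrative sketch; everything beyond the call signature used by the
    // tests above is an assumption about the helper's internals.
    public static void verifyAllDataChecksumsMatch(KeyValueContainerData data,
        OzoneConfiguration conf) throws IOException {
      long inMemory = data.getDataChecksum();
      File treeFile = ContainerChecksumTreeManager.getContainerChecksumFile(data);
      if (treeFile.exists()) {
        ContainerProtos.ContainerChecksumInfo info =
            ContainerChecksumTreeManager.readChecksumInfo(data);
        if (info != null && info.hasContainerMerkleTree()) {
          assertEquals(info.getContainerMerkleTree().getDataChecksum(), inMemory);
        }
      }
      try (DBHandle db = BlockUtils.getDB(data, conf)) {
        Long cached = db.getStore().getMetadataTable()
            .get(data.getContainerDataChecksumKey());
        if (cached != null) {
          assertEquals((long) cached, inMemory);
        }
      }
    }
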
+ private KeyValueContainer createContainer(long containerId) throws Exception {
+ KeyValueContainerData containerData = new KeyValueContainerData(containerId, layout,
+ (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString());
+ containerData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
+ KeyValueContainer container = new KeyValueContainer(containerData, conf);
+ container.create(volumeSet, volumeChoosingPolicy, clusterId);
+ return container;
+ }
+
private long addDbEntry(KeyValueContainerData containerData)
throws Exception {
try (DBHandle dbHandle = BlockUtils.getDB(containerData, conf)) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java
index 3c761f532f..87c5a719cd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java
@@ -21,6 +21,7 @@
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.UNHEALTHY;
import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.readChecksumFile;
+import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.verifyAllDataChecksumsMatch;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -30,6 +31,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.TestContainerCorruptions;
import org.apache.hadoop.ozone.container.ozoneimpl.BackgroundContainerDataScanner;
import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration;
@@ -106,6 +108,8 @@ void testCorruptionDetected(TestContainerCorruptions corruption)
// Test that the scanner wrote updated checksum info to the disk.
assertReplicaChecksumMatches(container, newReportedDataChecksum);
assertFalse(container.getContainerData().needsDataChecksum());
+ KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
+ verifyAllDataChecksumsMatch(containerData, getConf());
}
if (corruption == TestContainerCorruptions.TRUNCATED_BLOCK ||
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestContainerScannerIntegrationAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestContainerScannerIntegrationAbstract.java
index 3133a81b5c..086d4b41ef 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestContainerScannerIntegrationAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestContainerScannerIntegrationAbstract.java
@@ -220,4 +220,8 @@ private OzoneOutputStream createKey(String keyName) throws Exception {
return TestHelper.createKey(
keyName, RATIS, ONE, 0, store, volumeName, bucketName);
}
+
+ protected OzoneConfiguration getConf() {
+ return cluster.getConf();
+ }
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestOnDemandContainerScannerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestOnDemandContainerScannerIntegration.java
index 136a307e0a..d27b78aa59 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestOnDemandContainerScannerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestOnDemandContainerScannerIntegration.java
@@ -20,6 +20,7 @@
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.UNHEALTHY;
import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.readChecksumFile;
+import static org.apache.hadoop.ozone.container.checksum.ContainerMerkleTreeTestUtils.verifyAllDataChecksumsMatch;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -33,6 +34,7 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.utils.ContainerLogger;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.TestContainerCorruptions;
import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration;
import org.apache.hadoop.ozone.container.ozoneimpl.OnDemandContainerScanner;
@@ -144,6 +146,8 @@ void testCorruptionDetected(TestContainerCorruptions corruption)
assertTrue(containerChecksumFileExists(containerID));
ContainerProtos.ContainerChecksumInfo updatedChecksumInfo = readChecksumFile(container.getContainerData());
assertEquals(newReportedDataChecksum, updatedChecksumInfo.getContainerMerkleTree().getDataChecksum());
+ KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
+ verifyAllDataChecksumsMatch(containerData, getConf());
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]