This is an automated email from the ASF dual-hosted git repository.
swamirishi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new a8b86079a8d HDDS-13856. Change SstFileInfo to track fileName as the name of the file without sst extension (#9221)
a8b86079a8d is described below
commit a8b86079a8dd9e9e535a48a2ad05355fa4d98207
Author: Swaminathan Balachandran <[email protected]>
AuthorDate: Fri Oct 31 08:44:41 2025 -0400
HDDS-13856. Change SstFileInfo to track fileName as the name of the file without sst extension (#9221)
---
.../ozone/compaction/log/CompactionFileInfo.java | 1 +
.../log => rocksdb/util}/SstFileInfo.java | 6 +-
.../org/apache/ozone/rocksdiff/CompactionNode.java | 44 +++++-------
.../org/apache/ozone/rocksdiff/RocksDiffUtils.java | 3 +-
.../ozone/compaction/log/TestSstFileInfo.java | 47 +++++++++++++
.../hadoop/ozone/om/OmSnapshotLocalData.java | 2 +-
.../hadoop/ozone/om/OmSnapshotLocalDataYaml.java | 2 +-
.../om/snapshot/OmSnapshotLocalDataManager.java | 4 +-
.../ozone/om/TestOmSnapshotLocalDataYaml.java | 2 +-
.../hadoop/ozone/om/TestOmSnapshotManager.java | 78 ----------------------
.../snapshot/TestOmSnapshotLocalDataManager.java | 70 ++++++++++++++++---
11 files changed, 138 insertions(+), 121 deletions(-)
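The substance of the patch is in the SstFileInfo hunk below: the constructor that takes a RocksDB LiveFileMetaData now runs the reported file name through Apache Commons IO's FilenameUtils.getBaseName, so SstFileInfo tracks "000123" rather than "/000123.sst". A minimal sketch of that call, assuming only that commons-io is on the classpath (the demo class name and sample path are illustrative, not part of the patch):

    import org.apache.commons.io.FilenameUtils;

    public class SstBaseNameSketch {
      public static void main(String[] args) {
        // LiveFileMetaData#fileName() typically reports a path such as "/000123.sst".
        String reported = "/000123.sst";
        // getBaseName drops both the directory prefix and the ".sst" extension.
        String tracked = FilenameUtils.getBaseName(reported);
        System.out.println(tracked); // prints "000123"
      }
    }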
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java
index e44c2e8522e..535bf115ea8 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java
@@ -22,6 +22,7 @@
import java.util.Objects;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.rocksdb.LiveFileMetaData;
/**
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/SstFileInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java
similarity index 93%
rename from hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/SstFileInfo.java
rename to hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java
index b1887ec3d1e..50f8c4c54d0 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/SstFileInfo.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java
@@ -15,7 +15,9 @@
* limitations under the License.
*/
-package org.apache.ozone.compaction.log;
+package org.apache.ozone.rocksdb.util;
+
+import static org.apache.commons.io.FilenameUtils.getBaseName;
import java.util.Objects;
import org.apache.hadoop.hdds.StringUtils;
@@ -39,7 +41,7 @@ public SstFileInfo(String fileName, String startRange, String endRange, String c
}
public SstFileInfo(LiveFileMetaData fileMetaData) {
- this(fileMetaData.fileName(), StringUtils.bytes2String(fileMetaData.smallestKey()),
+ this(getBaseName(fileMetaData.fileName()), StringUtils.bytes2String(fileMetaData.smallestKey()),
StringUtils.bytes2String(fileMetaData.largestKey()),
StringUtils.bytes2String(fileMetaData.columnFamilyName()));
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java
index 7dddb6a3b77..969c0e0b00e 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java
@@ -17,20 +17,17 @@
package org.apache.ozone.rocksdiff;
+import java.util.Objects;
import org.apache.ozone.compaction.log.CompactionFileInfo;
+import org.apache.ozone.rocksdb.util.SstFileInfo;
/**
* Node in the compaction DAG that represents an SST file.
*/
-public class CompactionNode {
- // Name of the SST file
- private final String fileName;
+public class CompactionNode extends SstFileInfo {
private final long snapshotGeneration;
private final long totalNumberOfKeys;
private long cumulativeKeysReverseTraversal;
- private final String startKey;
- private final String endKey;
- private final String columnFamily;
/**
* CompactionNode constructor.
@@ -38,13 +35,10 @@ public class CompactionNode {
* @param seqNum Snapshot generation (sequence number)
*/
public CompactionNode(String file, long seqNum, String startKey, String endKey, String columnFamily) {
- fileName = file;
+ super(file, startKey, endKey, columnFamily);
totalNumberOfKeys = 0L;
snapshotGeneration = seqNum;
cumulativeKeysReverseTraversal = 0L;
- this.startKey = startKey;
- this.endKey = endKey;
- this.columnFamily = columnFamily;
}
public CompactionNode(CompactionFileInfo compactionFileInfo) {
@@ -54,11 +48,7 @@ public CompactionNode(CompactionFileInfo compactionFileInfo) {
@Override
public String toString() {
- return String.format("Node{%s}", fileName);
- }
-
- public String getFileName() {
- return fileName;
+ return String.format("Node{%s}", getFileName());
}
public long getSnapshotGeneration() {
@@ -73,18 +63,6 @@ public long getCumulativeKeysReverseTraversal() {
return cumulativeKeysReverseTraversal;
}
- public String getStartKey() {
- return startKey;
- }
-
- public String getEndKey() {
- return endKey;
- }
-
- public String getColumnFamily() {
- return columnFamily;
- }
-
public void setCumulativeKeysReverseTraversal(
long cumulativeKeysReverseTraversal) {
this.cumulativeKeysReverseTraversal = cumulativeKeysReverseTraversal;
@@ -93,4 +71,16 @@ public void setCumulativeKeysReverseTraversal(
public void addCumulativeKeysReverseTraversal(long diff) {
this.cumulativeKeysReverseTraversal += diff;
}
+
+ // Not changing previous behaviour.
+ @Override
+ public final boolean equals(Object o) {
+ return this == o;
+ }
+
+ // Having hashcode only on the basis of the filename.
+ @Override
+ public int hashCode() {
+ return Objects.hash(getFileName());
+ }
}
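With this change CompactionNode inherits fileName, startKey, endKey and columnFamily from SstFileInfo, while its previous identity semantics are preserved: equals stays reference-based and hashCode is derived from the file name alone. A short sketch of the resulting contract, using placeholder constructor arguments that are not taken from the patch:

    import org.apache.ozone.rocksdiff.CompactionNode;

    public class CompactionNodeIdentitySketch {
      public static void main(String[] args) {
        // Two nodes for the same SST file share a hash bucket but never compare equal.
        CompactionNode a = new CompactionNode("000123", 10L, "k1", "k9", "keyTable");
        CompactionNode b = new CompactionNode("000123", 10L, "k1", "k9", "keyTable");
        System.out.println(a.hashCode() == b.hashCode()); // true: hashCode uses getFileName() only
        System.out.println(a.equals(b));                  // false: equals keeps the old this == o check
      }
    }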
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java
index 5bc14b1d949..86577147b62 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java
@@ -30,6 +30,7 @@
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.ozone.compaction.log.CompactionFileInfo;
+import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.rocksdb.LiveFileMetaData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -106,7 +107,7 @@ public static void filterRelevantSstFiles(Set<String> inputFiles,
}
@VisibleForTesting
- static boolean shouldSkipNode(CompactionNode node,
+ static boolean shouldSkipNode(SstFileInfo node,
Map<String, String> columnFamilyToPrefixMap) {
// This is for backward compatibility. Before the compaction log table
// migration, startKey, endKey and columnFamily information is not persisted
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/compaction/log/TestSstFileInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/compaction/log/TestSstFileInfo.java
new file mode 100644
index 00000000000..660e3e75a1d
--- /dev/null
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/compaction/log/TestSstFileInfo.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ozone.compaction.log;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.hdds.StringUtils;
+import org.apache.ozone.rocksdb.util.SstFileInfo;
+import org.junit.jupiter.api.Test;
+import org.rocksdb.LiveFileMetaData;
+
+/**
+ * Test class for Base SstFileInfo class.
+ */
+public class TestSstFileInfo {
+
+ @Test
+ public void testSstFileInfo() {
+ String smallestKey = "/smallestKey/1";
+ String largestKey = "/largestKey/2";
+ String columnFamily = "columnFamily/123";
+ LiveFileMetaData lfm = mock(LiveFileMetaData.class);
+ when(lfm.fileName()).thenReturn("/1.sst");
+ when(lfm.columnFamilyName()).thenReturn(StringUtils.string2Bytes(columnFamily));
+ when(lfm.smallestKey()).thenReturn(StringUtils.string2Bytes(smallestKey));
+ when(lfm.largestKey()).thenReturn(StringUtils.string2Bytes(largestKey));
+ SstFileInfo expectedSstFileInfo = new SstFileInfo("1", smallestKey, largestKey, columnFamily);
+ assertEquals(expectedSstFileInfo, new SstFileInfo(lfm));
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java
index 02e07914b31..91ec8b673a8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.CopyObject;
import org.apache.hadoop.ozone.util.WithChecksum;
-import org.apache.ozone.compaction.log.SstFileInfo;
+import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.rocksdb.LiveFileMetaData;
import org.yaml.snakeyaml.Yaml;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java
index 344d7305db4..ad8046d719e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java
@@ -27,7 +27,7 @@
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta;
-import org.apache.ozone.compaction.log.SstFileInfo;
+import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.LoaderOptions;
import org.yaml.snakeyaml.TypeDescription;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
index 3411c4879dd..38ecdeca09e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
@@ -140,10 +140,12 @@ public static String getSnapshotLocalPropertyYamlPath(Path snapshotPath) {
* @param snapshotInfo snapshot metadata
* @return the path to the snapshot's local property YAML file
*/
+ @VisibleForTesting
public String getSnapshotLocalPropertyYamlPath(SnapshotInfo snapshotInfo) {
return getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId());
}
+ @VisibleForTesting
public String getSnapshotLocalPropertyYamlPath(UUID snapshotId) {
Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId);
return getSnapshotLocalPropertyYamlPath(snapshotPath);
@@ -193,7 +195,7 @@ public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(UUID s
return new WritableOmSnapshotLocalDataProvider(snapshotId);
}
- public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException {
+ OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException {
return snapshotLocalDataSerializer.load(snapshotDataPath);
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java
index 2f8b7be9a19..81f111e8464 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java
@@ -48,7 +48,7 @@
import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta;
import org.apache.hadoop.ozone.util.ObjectSerializer;
import org.apache.hadoop.ozone.util.YamlSerializer;
-import org.apache.ozone.compaction.log.SstFileInfo;
+import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
index 6ec49935b35..8f9375f3962 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.om;
import static org.apache.commons.io.file.PathUtils.copyDirectory;
-import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
import static org.apache.hadoop.hdds.utils.HAUtils.getExistingFiles;
import static org.apache.hadoop.ozone.OzoneConsts.OM_CHECKPOINT_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
@@ -28,11 +27,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.SNAPSHOT_INFO_TABLE;
import static org.apache.hadoop.ozone.om.OMDBCheckpointServlet.processFile;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE;
-import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.BUCKET_TABLE;
-import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE;
-import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
-import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.VOLUME_TABLE;
import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.getINode;
import static org.assertj.core.api.Assertions.assertThat;
@@ -48,7 +43,6 @@
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
-import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.io.File;
import java.io.IOException;
@@ -56,7 +50,6 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
-import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
@@ -64,18 +57,14 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
import org.apache.hadoop.hdds.utils.db.RDBStore;
-import org.apache.hadoop.hdds.utils.db.RocksDatabase;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TypedTable;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -85,7 +74,6 @@
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager;
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
import org.apache.hadoop.util.Time;
-import org.apache.ozone.compaction.log.SstFileInfo;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
import org.junit.jupiter.api.AfterAll;
@@ -95,7 +83,6 @@
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.io.TempDir;
-import org.rocksdb.LiveFileMetaData;
import org.slf4j.event.Level;
/**
@@ -272,71 +259,6 @@ public void testCloseOnEviction() throws IOException,
}, 100, 30_000);
}
- private LiveFileMetaData createMockLiveFileMetadata(String cfname, String fileName) {
- LiveFileMetaData lfm = mock(LiveFileMetaData.class);
- when(lfm.columnFamilyName()).thenReturn(cfname.getBytes(StandardCharsets.UTF_8));
- when(lfm.fileName()).thenReturn(fileName);
- when(lfm.smallestKey()).thenReturn(string2Bytes("k1"));
- when(lfm.largestKey()).thenReturn(string2Bytes("k2"));
- return lfm;
- }
-
- @Test
- public void testCreateNewSnapshotLocalYaml() throws IOException {
- SnapshotInfo snapshotInfo = createSnapshotInfo("vol1", "buck1");
-
- Map<String, List<String>> expNotDefraggedSSTFileList = new TreeMap<>();
- OmSnapshotLocalData.VersionMeta notDefraggedVersionMeta = new OmSnapshotLocalData.VersionMeta(0,
- ImmutableList.of(new SstFileInfo("dt1.sst", "k1", "k2", DIRECTORY_TABLE),
- new SstFileInfo("dt2.sst", "k1", "k2", DIRECTORY_TABLE),
- new SstFileInfo("ft1.sst", "k1", "k2", FILE_TABLE),
- new SstFileInfo("ft2.sst", "k1", "k2", FILE_TABLE),
- new SstFileInfo("kt1.sst", "k1", "k2", KEY_TABLE),
- new SstFileInfo("kt2.sst", "k1", "k2", KEY_TABLE)));
- expNotDefraggedSSTFileList.put(KEY_TABLE, Stream.of("kt1.sst", "kt2.sst").collect(Collectors.toList()));
- expNotDefraggedSSTFileList.put(FILE_TABLE, Stream.of("ft1.sst", "ft2.sst").collect(Collectors.toList()));
- expNotDefraggedSSTFileList.put(DIRECTORY_TABLE, Stream.of("dt1.sst", "dt2.sst").collect(Collectors.toList()));
-
- List<LiveFileMetaData> mockedLiveFiles = new ArrayList<>();
- for (Map.Entry<String, List<String>> entry : expNotDefraggedSSTFileList.entrySet()) {
- String cfname = entry.getKey();
- for (String fname : entry.getValue()) {
- mockedLiveFiles.add(createMockLiveFileMetadata(cfname, fname));
- }
- }
- // Add some other column families and files that should be ignored
- mockedLiveFiles.add(createMockLiveFileMetadata("otherTable", "ot1.sst"));
- mockedLiveFiles.add(createMockLiveFileMetadata("otherTable", "ot2.sst"));
-
- RDBStore mockedStore = mock(RDBStore.class);
- RocksDatabase mockedDb = mock(RocksDatabase.class);
- when(mockedStore.getDb()).thenReturn(mockedDb);
- when(mockedDb.getLiveFilesMetaData()).thenReturn(mockedLiveFiles);
-
- Path snapshotYaml = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo));
- when(mockedStore.getDbLocation()).thenReturn(getSnapshotPath(omMetadataManager, snapshotInfo).toFile());
- // Create an existing YAML file for the snapshot
- assertTrue(snapshotYaml.toFile().createNewFile());
- assertEquals(0, Files.size(snapshotYaml));
- // Create a new YAML file for the snapshot
- snapshotLocalDataManager.createNewOmSnapshotLocalDataFile(mockedStore, snapshotInfo);
- // Verify that previous file was overwritten
- assertTrue(Files.exists(snapshotYaml));
- assertTrue(Files.size(snapshotYaml) > 0);
- // Verify the contents of the YAML file
- OmSnapshotLocalData localData = snapshotLocalDataManager.getOmSnapshotLocalData(snapshotYaml.toFile());
- assertNotNull(localData);
- assertEquals(0, localData.getVersion());
- assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0));
- assertFalse(localData.getSstFiltered());
- assertEquals(0L, localData.getLastDefragTime());
- assertFalse(localData.getNeedsDefrag());
- assertEquals(1, localData.getVersionSstFileInfos().size());
-
- // Cleanup
- Files.delete(snapshotYaml);
- }
-
@Test
public void testValidateSnapshotLimit() throws IOException {
TypedTable<String, SnapshotInfo> snapshotInfoTable = mock(TypedTable.class);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
index bfaa48c04fe..152e0054f07 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
@@ -17,12 +17,14 @@
package org.apache.hadoop.ozone.om.snapshot;
+import static org.apache.hadoop.hdds.StringUtils.bytes2String;
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR;
import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -37,6 +39,7 @@
import com.google.common.collect.ImmutableMap;
import java.io.File;
import java.io.IOException;
+import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
@@ -48,6 +51,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
@@ -72,7 +76,7 @@
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider;
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.WritableOmSnapshotLocalDataProvider;
import org.apache.hadoop.ozone.util.YamlSerializer;
-import org.apache.ozone.compaction.log.SstFileInfo;
+import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.assertj.core.util.Lists;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
@@ -595,14 +599,61 @@ public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOExce
assertEquals(expectedYamlPath.toAbsolutePath().toString(), yamlPath.getAbsolutePath());
}
+ @Test
+ public void testCreateNewSnapshotLocalYaml() throws IOException {
+ UUID snapshotId = UUID.randomUUID();
+ SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null);
+
+ Map<String, List<String>> expNotDefraggedSSTFileList = new TreeMap<>();
+ OmSnapshotLocalData.VersionMeta notDefraggedVersionMeta = new OmSnapshotLocalData.VersionMeta(0,
+ ImmutableList.of(new SstFileInfo("dt1", "k1", "k2", DIRECTORY_TABLE),
+ new SstFileInfo("dt2", "k1", "k2", DIRECTORY_TABLE),
+ new SstFileInfo("ft1", "k1", "k2", FILE_TABLE),
+ new SstFileInfo("ft2", "k1", "k2", FILE_TABLE),
+ new SstFileInfo("kt1", "k1", "k2", KEY_TABLE),
+ new SstFileInfo("kt2", "k1", "k2", KEY_TABLE)));
+ expNotDefraggedSSTFileList.put(KEY_TABLE, Stream.of("kt1", "kt2").collect(Collectors.toList()));
+ expNotDefraggedSSTFileList.put(FILE_TABLE, Stream.of("ft1", "ft2").collect(Collectors.toList()));
+ expNotDefraggedSSTFileList.put(DIRECTORY_TABLE, Stream.of("dt1", "dt2").collect(Collectors.toList()));
+
+ List<LiveFileMetaData> mockedLiveFiles = new ArrayList<>();
+ for (Map.Entry<String, List<String>> entry : expNotDefraggedSSTFileList.entrySet()) {
+ String cfname = entry.getKey();
+ for (String fname : entry.getValue()) {
+ mockedLiveFiles.add(createMockLiveFileMetaData("/" + fname + ".sst",
cfname, "k1", "k2"));
+ }
+ }
+ // Add some other column families and files that should be ignored
+ mockedLiveFiles.add(createMockLiveFileMetaData("ot1.sst", "otherTable",
"k1", "k2"));
+ mockedLiveFiles.add(createMockLiveFileMetaData("ot2.sst", "otherTable",
"k1", "k2"));
+
+ mockSnapshotStore(snapshotId, mockedLiveFiles);
+ localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+ Path snapshotYaml = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo));
+ // Create an existing YAML file for the snapshot
+ assertTrue(snapshotYaml.toFile().createNewFile());
+ assertEquals(0, Files.size(snapshotYaml));
+ // Create a new YAML file for the snapshot
+ localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo);
+ // Verify that previous file was overwritten
+ assertTrue(Files.exists(snapshotYaml));
+ assertTrue(Files.size(snapshotYaml) > 0);
+ // Verify the contents of the YAML file
+ OmSnapshotLocalData localData = localDataManager.getOmSnapshotLocalData(snapshotYaml.toFile());
+ assertNotNull(localData);
+ assertEquals(0, localData.getVersion());
+ assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0));
+ assertFalse(localData.getSstFiltered());
+ assertEquals(0L, localData.getLastDefragTime());
+ assertFalse(localData.getNeedsDefrag());
+ assertEquals(1, localData.getVersionSstFileInfos().size());
+ }
+
@Test
public void testCreateNewOmSnapshotLocalDataFile() throws IOException {
UUID snapshotId = UUID.randomUUID();
SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null);
-
// Setup snapshot store mock
- File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile();
- assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs());
List<LiveFileMetaData> sstFiles = new ArrayList<>();
sstFiles.add(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1",
"key7"));
@@ -612,11 +663,12 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException {
sstFiles.add(createMockLiveFileMetaData("file5.sst", DIRECTORY_TABLE, "key1", "key7"));
sstFiles.add(createMockLiveFileMetaData("file6.sst", "colFamily1", "key1", "key7"));
List<SstFileInfo> sstFileInfos = IntStream.range(0, sstFiles.size() - 1)
- .mapToObj(sstFiles::get).map(SstFileInfo::new).collect(Collectors.toList());
- when(snapshotStore.getDbLocation()).thenReturn(snapshotDbLocation);
- RocksDatabase rocksDatabase = mock(RocksDatabase.class);
- when(snapshotStore.getDb()).thenReturn(rocksDatabase);
- when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles);
+ .mapToObj(sstFiles::get).map(lfm ->
+ new SstFileInfo(lfm.fileName().replace(".sst", ""),
+ bytes2String(lfm.smallestKey()),
+ bytes2String(lfm.largestKey()), bytes2String(lfm.columnFamilyName()))).collect(Collectors.toList());
+ mockSnapshotStore(snapshotId, sstFiles);
+
localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]