This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 7e675d743a HDDS-12053. Make print-log-dag command run locally and offline (#8016)
7e675d743a is described below
commit 7e675d743ab3d22a7f722e9b3c3c425c82db8bb1
Author: Tejaskriya <[email protected]>
AuthorDate: Tue May 6 01:39:52 2025 +0530
HDDS-12053. Make print-log-dag command run locally and offline (#8016)
---
.../org/apache/ozone/rocksdiff/CompactionDag.java | 154 +++++++++++
.../org/apache/ozone/rocksdiff/CompactionNode.java | 14 +-
.../ozone/rocksdiff/RocksDBCheckpointDiffer.java | 286 ++++-----------------
.../rocksdiff/TestRocksDBCheckpointDiffer.java | 38 ++-
.../apache/ozone/rocksdiff/TestRocksDiffUtils.java | 4 +-
.../ozone/client/protocol/ClientProtocol.java | 1 +
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 1 +
.../main/java/org/apache/hadoop/ozone/OmUtils.java | 2 +
.../ozone/om/protocol/OzoneManagerProtocol.java | 4 +-
...OzoneManagerProtocolClientSideTranslatorPB.java | 25 --
.../src/main/proto/OmClientProtocol.proto | 6 +-
.../org/apache/hadoop/ozone/om/OzoneManager.java | 32 ---
.../hadoop/ozone/client/ClientProtocolStub.java | 1 +
.../ozone/debug/CompactionLogDagPrinter.java | 63 -----
.../ozone/debug/om/CompactionLogDagPrinter.java | 111 ++++++++
.../org/apache/hadoop/ozone/debug/om/OMDebug.java | 11 +
.../apache/hadoop/ozone/debug/om/PrefixParser.java | 16 +-
.../src/main/java/org/apache/ozone/graph/Edge.java | 0
.../org/apache/ozone/graph/PrintableGraph.java | 0
.../java/org/apache/ozone/graph/package-info.java | 0
.../org/apache/ozone/graph/TestPrintableGraph.java | 12 +-
21 files changed, 367 insertions(+), 414 deletions(-)
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionDag.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionDag.java
new file mode 100644
index 0000000000..a7d78d16a8
--- /dev/null
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionDag.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ozone.rocksdiff;
+
+import com.google.common.graph.GraphBuilder;
+import com.google.common.graph.MutableGraph;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import org.apache.ozone.compaction.log.CompactionFileInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Wrapper class storing DAGs of SST files for tracking compactions.
+ */
+public class CompactionDag {
+ private static final Logger LOG = LoggerFactory.getLogger(CompactionDag.class);
+
+ private final ConcurrentMap<String, CompactionNode> compactionNodeMap = new ConcurrentHashMap<>();
+ private final MutableGraph<CompactionNode> forwardCompactionDAG = GraphBuilder.directed().build();
+ private final MutableGraph<CompactionNode> backwardCompactionDAG = GraphBuilder.directed().build();
+
+ private CompactionNode addNodeToDAG(String file, long seqNum, String startKey, String endKey, String columnFamily) {
+ CompactionNode fileNode = new CompactionNode(file, seqNum, startKey, endKey, columnFamily);
+ backwardCompactionDAG.addNode(fileNode);
+ forwardCompactionDAG.addNode(fileNode);
+ return fileNode;
+ }
+
+ /**
+ * Populate the compaction DAG with input and output SST files lists.
+ *
+ * @param inputFiles List of compaction input files.
+ * @param outputFiles List of compaction output files.
+ * @param seqNum DB transaction sequence number.
+ */
+ public void populateCompactionDAG(List<CompactionFileInfo> inputFiles,
+ List<CompactionFileInfo> outputFiles,
+ long seqNum) {
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Input files: {} -> Output files: {}", inputFiles, outputFiles);
+ }
+
+ for (CompactionFileInfo outfile : outputFiles) {
+ final CompactionNode outfileNode = compactionNodeMap.computeIfAbsent(outfile.getFileName(),
+ file -> addNodeToDAG(file, seqNum, outfile.getStartKey(), outfile.getEndKey(), outfile.getColumnFamily()));
+
+ for (CompactionFileInfo infile : inputFiles) {
+ final CompactionNode infileNode = compactionNodeMap.computeIfAbsent(infile.getFileName(),
+ file -> addNodeToDAG(file, seqNum, infile.getStartKey(), infile.getEndKey(), infile.getColumnFamily()));
+
+ // Draw the edges
+ if (!Objects.equals(outfileNode.getFileName(), infileNode.getFileName())) {
+ forwardCompactionDAG.putEdge(outfileNode, infileNode);
+ backwardCompactionDAG.putEdge(infileNode, outfileNode);
+ }
+ }
+ }
+ }
+
+ public Set<String> pruneNodesFromDag(Set<CompactionNode> nodesToRemove) {
+ pruneBackwardDag(backwardCompactionDAG, nodesToRemove);
+ Set<String> sstFilesPruned = pruneForwardDag(forwardCompactionDAG, nodesToRemove);
+ // Remove SST file nodes from compactionNodeMap too,
+ // since those nodes won't be needed after clean up.
+ nodesToRemove.forEach(compactionNodeMap::remove);
+ return sstFilesPruned;
+ }
+
+ /**
+ * Prunes the backward DAG upstream from the level that needs to be removed.
+ */
+ Set<String> pruneBackwardDag(MutableGraph<CompactionNode> backwardDag, Set<CompactionNode> startNodes) {
+ Set<String> removedFiles = new HashSet<>();
+ Set<CompactionNode> currentLevel = startNodes;
+
+ while (!currentLevel.isEmpty()) {
+ Set<CompactionNode> nextLevel = new HashSet<>();
+ for (CompactionNode current : currentLevel) {
+ if (!backwardDag.nodes().contains(current)) {
+ continue;
+ }
+
+ nextLevel.addAll(backwardDag.predecessors(current));
+ backwardDag.removeNode(current);
+ removedFiles.add(current.getFileName());
+ }
+ currentLevel = nextLevel;
+ }
+
+ return removedFiles;
+ }
+
+ /**
+ * Prunes the forward DAG downstream from the level that needs to be removed.
+ */
+ Set<String> pruneForwardDag(MutableGraph<CompactionNode> forwardDag, Set<CompactionNode> startNodes) {
+ Set<String> removedFiles = new HashSet<>();
+ Set<CompactionNode> currentLevel = new HashSet<>(startNodes);
+
+ while (!currentLevel.isEmpty()) {
+ Set<CompactionNode> nextLevel = new HashSet<>();
+ for (CompactionNode current : currentLevel) {
+ if (!forwardDag.nodes().contains(current)) {
+ continue;
+ }
+
+ nextLevel.addAll(forwardDag.successors(current));
+ forwardDag.removeNode(current);
+ removedFiles.add(current.getFileName());
+ }
+
+ currentLevel = nextLevel;
+ }
+
+ return removedFiles;
+ }
+
+ public MutableGraph<CompactionNode> getForwardCompactionDAG() {
+ return forwardCompactionDAG;
+ }
+
+ public MutableGraph<CompactionNode> getBackwardCompactionDAG() {
+ return backwardCompactionDAG;
+ }
+
+ public ConcurrentMap<String, CompactionNode> getCompactionMap() {
+ return compactionNodeMap;
+ }
+
+ public CompactionNode getCompactionNode(String fileName) {
+ return compactionNodeMap.get(fileName);
+ }
+}
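The class above is a thin wrapper around two mirrored Guava graphs: the forward DAG points from each compaction output file to its inputs, and the backward DAG holds the reverse edges, so the differ can walk either direction and prune level by level. A minimal, self-contained sketch of that pruning pattern, using plain String nodes instead of CompactionNode (the TwoWayDagSketch class and file names are illustrative, not part of this patch):

import com.google.common.graph.GraphBuilder;
import com.google.common.graph.MutableGraph;
import java.util.HashSet;
import java.util.Set;

public class TwoWayDagSketch {
  public static void main(String[] args) {
    // Forward DAG: compaction output file -> the input files it replaced.
    MutableGraph<String> forward = GraphBuilder.directed().build();
    forward.putEdge("000030", "000010"); // 000030 was compacted from 000010 and 000020
    forward.putEdge("000030", "000020");

    // Level-order removal, as in pruneForwardDag: remove a node, then visit its successors.
    Set<String> level = new HashSet<>();
    level.add("000030");
    Set<String> removed = new HashSet<>();
    while (!level.isEmpty()) {
      Set<String> next = new HashSet<>();
      for (String node : level) {
        if (!forward.nodes().contains(node)) {
          continue; // already pruned via another parent
        }
        next.addAll(forward.successors(node));
        forward.removeNode(node);
        removed.add(node);
      }
      level = next;
    }
    System.out.println(removed); // prints 000030, 000010 and 000020 in some order
  }
}

The nodes().contains guard mirrors the check in pruneForwardDag above: a node reachable through two parents may already have been removed in an earlier level.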
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java
index 4e7c38c62c..91c7272d23 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java
@@ -32,17 +32,9 @@ public class CompactionNode {
private final String endKey;
private final String columnFamily;
- /**
- * CompactionNode constructor.
- * @param file SST file (filename without extension)
- * @param numKeys Number of keys in the SST
- * @param seqNum Snapshot generation (sequence number)
- */
-
- public CompactionNode(String file, long numKeys, long seqNum,
- String startKey, String endKey, String columnFamily) {
+ public CompactionNode(String file, long seqNum, String startKey, String endKey, String columnFamily) {
fileName = file;
- totalNumberOfKeys = numKeys;
+ totalNumberOfKeys = 0L;
snapshotGeneration = seqNum;
cumulativeKeysReverseTraversal = 0L;
this.startKey = startKey;
@@ -51,7 +43,7 @@ public CompactionNode(String file, long numKeys, long seqNum,
}
public CompactionNode(CompactionFileInfo compactionFileInfo) {
- this(compactionFileInfo.getFileName(), -1, -1, compactionFileInfo.getStartKey(),
+ this(compactionFileInfo.getFileName(), -1, compactionFileInfo.getStartKey(),
compactionFileInfo.getEndKey(), compactionFileInfo.getColumnFamily());
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index c9f8c726d2..3dae2bc698 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -26,12 +26,10 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
-import com.google.common.graph.GraphBuilder;
import com.google.common.graph.MutableGraph;
import com.google.protobuf.InvalidProtocolBufferException;
import java.io.BufferedWriter;
import java.io.File;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.nio.file.FileAlreadyExistsException;
@@ -47,7 +45,6 @@
import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@@ -65,15 +62,11 @@
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.Scheduler;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
import org.apache.ozone.compaction.log.CompactionFileInfo;
import org.apache.ozone.compaction.log.CompactionLogEntry;
-import org.apache.ozone.graph.PrintableGraph;
-import org.apache.ozone.graph.PrintableGraph.GraphType;
import org.apache.ozone.rocksdb.util.RdbUtil;
import org.rocksdb.AbstractEventListener;
import org.rocksdb.ColumnFamilyHandle;
@@ -81,7 +74,6 @@
import org.rocksdb.LiveFileMetaData;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
-import org.rocksdb.TableProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -103,8 +95,6 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
private final String metadataDir;
private final String sstBackupDir;
- private final String activeDBLocationStr;
-
private final String compactionLogDir;
public static final String COMPACTION_LOG_FILE_NAME_SUFFIX = ".log";
@@ -170,7 +160,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
private ColumnFamilyHandle compactionLogTableCFHandle;
private ManagedRocksDB activeRocksDB;
- private ConcurrentMap<String, CompactionFileInfo> inflightCompactions;
+ private final ConcurrentMap<String, CompactionFileInfo> inflightCompactions;
/**
* For snapshot diff calculation we only need to track following column
@@ -179,30 +169,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
public static final Set<String> COLUMN_FAMILIES_TO_TRACK_IN_DAG =
ImmutableSet.of("keyTable", "directoryTable", "fileTable");
- // Hash table to track CompactionNode for a given SST File.
- private final ConcurrentHashMap<String, CompactionNode> compactionNodeMap =
- new ConcurrentHashMap<>();
-
- // We are maintaining a two way DAG. This allows easy traversal from
- // source snapshot to destination snapshot as well as the other direction.
-
- private final MutableGraph<CompactionNode> forwardCompactionDAG =
- GraphBuilder.directed().build();
-
- private final MutableGraph<CompactionNode> backwardCompactionDAG =
- GraphBuilder.directed().build();
-
- public static final Integer DEBUG_DAG_BUILD_UP = 2;
- public static final Integer DEBUG_DAG_TRAVERSAL = 3;
- public static final Integer DEBUG_DAG_LIVE_NODES = 4;
- public static final Integer DEBUG_READ_ALL_DB_KEYS = 5;
- private static final HashSet<Integer> DEBUG_LEVEL = new HashSet<>();
-
- static {
- addDebugLevel(DEBUG_DAG_BUILD_UP);
- addDebugLevel(DEBUG_DAG_TRAVERSAL);
- addDebugLevel(DEBUG_DAG_LIVE_NODES);
- }
+ private final CompactionDag compactionDag;
static {
RocksDB.loadLibrary();
@@ -238,8 +205,6 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
this.sstBackupDir = Paths.get(metadataDirName, sstBackupDirName) + "/";
createSstBackUpDir();
- // Active DB location is used in getSSTFileSummary
- this.activeDBLocationStr = activeDBLocationName + "/";
this.maxAllowedTimeInDag = configuration.getTimeDuration(
OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED,
OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT,
@@ -272,10 +237,11 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
this.scheduler = null;
}
this.inflightCompactions = new ConcurrentHashMap<>();
+ this.compactionDag = new CompactionDag();
}
private String createCompactionLogDir(String metadataDirName,
- String compactionLogDirName) {
+ String compactionLogDirName) {
final File parentDir = new File(metadataDirName);
if (!parentDir.exists()) {
@@ -338,10 +304,6 @@ public void close() {
}
}
- public static void addDebugLevel(Integer level) {
- DEBUG_LEVEL.add(level);
- }
-
public void setRocksDBForCompactionTracking(ManagedDBOptions rocksOptions) {
List<AbstractEventListener> events = new ArrayList<>();
events.add(newCompactionBeginListener());
@@ -519,7 +481,7 @@ public void onCompactionCompleted(RocksDB db,
addToCompactionLogTable(compactionLogEntry);
// Populate the DAG
- populateCompactionDAG(compactionLogEntry.getInputFileInfoList(),
+ compactionDag.populateCompactionDAG(compactionLogEntry.getInputFileInfoList(),
compactionLogEntry.getOutputFileInfoList(),
compactionLogEntry.getDbSequenceNumber());
for (String inputFile : inputFileCompactions.keySet()) {
@@ -576,47 +538,6 @@ private void createLink(Path link, Path source) {
}
}
- /**
- * Get number of keys in an SST file.
- * @param filename SST filename
- * @return number of keys
- */
- private long getSSTFileSummary(String filename)
- throws RocksDBException, FileNotFoundException {
-
- if (!filename.endsWith(SST_FILE_EXTENSION)) {
- filename += SST_FILE_EXTENSION;
- }
-
- try (ManagedOptions option = new ManagedOptions();
- ManagedSstFileReader reader = new ManagedSstFileReader(option)) {
-
- reader.open(getAbsoluteSstFilePath(filename));
-
- TableProperties properties = reader.getTableProperties();
- if (LOG.isDebugEnabled()) {
- LOG.debug("{} has {} keys", filename, properties.getNumEntries());
- }
- return properties.getNumEntries();
- }
- }
-
- private String getAbsoluteSstFilePath(String filename)
- throws FileNotFoundException {
- if (!filename.endsWith(SST_FILE_EXTENSION)) {
- filename += SST_FILE_EXTENSION;
- }
- File sstFile = new File(sstBackupDir + filename);
- File sstFileInActiveDB = new File(activeDBLocationStr + filename);
- if (sstFile.exists()) {
- return sstBackupDir + filename;
- } else if (sstFileInActiveDB.exists()) {
- return activeDBLocationStr + filename;
- } else {
- throw new FileNotFoundException("Can't find SST file: " + filename);
- }
- }
-
/**
* Helper method to trim the filename retrieved from LiveFileMetaData.
*/
@@ -753,27 +674,33 @@ public void addEntriesFromLogFilesToDagAndCompactionLogTable() {
/**
* Load existing compaction log from table to the in-memory DAG.
* This only needs to be done once during OM startup.
+ * It is only for backward compatibility.
*/
public void loadAllCompactionLogs() {
synchronized (this) {
preconditionChecksForLoadAllCompactionLogs();
addEntriesFromLogFilesToDagAndCompactionLogTable();
- try (ManagedRocksIterator managedRocksIterator = new ManagedRocksIterator(
- activeRocksDB.get().newIterator(compactionLogTableCFHandle))) {
- managedRocksIterator.get().seekToFirst();
- while (managedRocksIterator.get().isValid()) {
- byte[] value = managedRocksIterator.get().value();
- CompactionLogEntry compactionLogEntry =
- CompactionLogEntry.getFromProtobuf(
- CompactionLogEntryProto.parseFrom(value));
- populateCompactionDAG(compactionLogEntry.getInputFileInfoList(),
- compactionLogEntry.getOutputFileInfoList(),
- compactionLogEntry.getDbSequenceNumber());
- managedRocksIterator.get().next();
- }
- } catch (InvalidProtocolBufferException e) {
- throw new RuntimeException(e);
+ loadCompactionDagFromDB();
+ }
+ }
+
+ /**
+ * Read the compactionLogTable and create entries in the DAGs.
+ */
+ private void loadCompactionDagFromDB() {
+ try (ManagedRocksIterator managedRocksIterator = new ManagedRocksIterator(
+ activeRocksDB.get().newIterator(compactionLogTableCFHandle))) {
+ managedRocksIterator.get().seekToFirst();
+ while (managedRocksIterator.get().isValid()) {
+ byte[] value = managedRocksIterator.get().value();
+ CompactionLogEntry compactionLogEntry =
+ CompactionLogEntry.getFromProtobuf(CompactionLogEntryProto.parseFrom(value));
+ compactionDag.populateCompactionDAG(compactionLogEntry.getInputFileInfoList(),
+ compactionLogEntry.getOutputFileInfoList(), compactionLogEntry.getDbSequenceNumber());
+ managedRocksIterator.get().next();
}
+ } catch (InvalidProtocolBufferException e) {
+ throw new RuntimeException(e);
}
}
@@ -833,8 +760,8 @@ private String getSSTFullPath(String sstFilenameWithoutExtension,
* "/path/to/sstBackupDir/000060.sst"]
*/
public synchronized Optional<List<String>> getSSTDiffListWithFullPath(DifferSnapshotInfo src,
- DifferSnapshotInfo dest,
- String sstFilesDirForSnapDiffJob) {
+ DifferSnapshotInfo dest,
+ String sstFilesDirForSnapDiffJob) {
Optional<List<String>> sstDiffList = getSSTDiffList(src, dest);
@@ -863,7 +790,7 @@ public synchronized Optional<List<String>> getSSTDiffListWithFullPath(DifferSnap
* @return A list of SST files without extension. e.g. ["000050", "000060"]
*/
public synchronized Optional<List<String>> getSSTDiffList(DifferSnapshotInfo src,
- DifferSnapshotInfo dest) {
+ DifferSnapshotInfo dest) {
// TODO: Reject or swap if dest is taken after src, once snapshot chain
// integration is done.
@@ -905,8 +832,8 @@ public synchronized Optional<List<String>> getSSTDiffList(DifferSnapshotInfo src
}
if (src.getTablePrefixes() != null && !src.getTablePrefixes().isEmpty()) {
- RocksDiffUtils.filterRelevantSstFiles(fwdDAGDifferentFiles, src.getTablePrefixes(), compactionNodeMap,
- src.getRocksDB(), dest.getRocksDB());
+ RocksDiffUtils.filterRelevantSstFiles(fwdDAGDifferentFiles, src.getTablePrefixes(),
+ compactionDag.getCompactionMap(), src.getRocksDB(), dest.getRocksDB());
}
return Optional.of(new ArrayList<>(fwdDAGDifferentFiles));
}
@@ -939,7 +866,7 @@ synchronized void internalGetSSTDiffList(
continue;
}
- CompactionNode infileNode = compactionNodeMap.get(fileName);
+ CompactionNode infileNode = compactionDag.getCompactionNode(fileName);
if (infileNode == null) {
LOG.debug("Source '{}' SST file '{}' is never compacted",
src.getDbPath(), fileName);
@@ -983,8 +910,7 @@ synchronized void internalGetSSTDiffList(
continue;
}
- Set<CompactionNode> successors =
- forwardCompactionDAG.successors(current);
+ Set<CompactionNode> successors = compactionDag.getForwardCompactionDAG().successors(current);
if (successors.isEmpty()) {
LOG.debug("No further compaction happened to the current file. " +
"Src '{}' and dest '{}' have different file: {}",
@@ -1038,7 +964,7 @@ public Comparator<CompactionNode> reversed() {
@VisibleForTesting
void dumpCompactionNodeTable() {
- List<CompactionNode> nodeList = compactionNodeMap.values().stream()
+ List<CompactionNode> nodeList = compactionDag.getCompactionMap().values().stream()
.sorted(new NodeComparator()).collect(Collectors.toList());
for (CompactionNode n : nodeList) {
LOG.debug("File '{}' total keys: {}",
@@ -1050,74 +976,12 @@ void dumpCompactionNodeTable() {
@VisibleForTesting
public MutableGraph<CompactionNode> getForwardCompactionDAG() {
- return forwardCompactionDAG;
+ return compactionDag.getForwardCompactionDAG();
}
@VisibleForTesting
public MutableGraph<CompactionNode> getBackwardCompactionDAG() {
- return backwardCompactionDAG;
- }
-
- /**
- * Helper method to add a new file node to the DAG.
- * @return CompactionNode
- */
- private CompactionNode addNodeToDAG(String file, long seqNum, String startKey,
- String endKey, String columnFamily) {
- long numKeys = 0L;
- try {
- numKeys = getSSTFileSummary(file);
- } catch (RocksDBException e) {
- LOG.warn("Can't get num of keys in SST '{}': {}", file, e.getMessage());
- } catch (FileNotFoundException e) {
- LOG.info("Can't find SST '{}'", file);
- }
-
- CompactionNode fileNode = new CompactionNode(file, numKeys,
- seqNum, startKey, endKey, columnFamily);
-
- forwardCompactionDAG.addNode(fileNode);
- backwardCompactionDAG.addNode(fileNode);
-
- return fileNode;
- }
-
- /**
- * Populate the compaction DAG with input and output SST files lists.
- * @param inputFiles List of compaction input files.
- * @param outputFiles List of compaction output files.
- * @param seqNum DB transaction sequence number.
- */
- private void populateCompactionDAG(List<CompactionFileInfo> inputFiles,
- List<CompactionFileInfo> outputFiles,
- long seqNum) {
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("Input files: {} -> Output files: {}", inputFiles, outputFiles);
- }
-
- for (CompactionFileInfo outfile : outputFiles) {
- final CompactionNode outfileNode = compactionNodeMap.computeIfAbsent(
- outfile.getFileName(),
-
- file -> addNodeToDAG(file, seqNum, outfile.getStartKey(),
- outfile.getEndKey(), outfile.getColumnFamily()));
-
-
- for (CompactionFileInfo infile : inputFiles) {
- final CompactionNode infileNode = compactionNodeMap.computeIfAbsent(
- infile.getFileName(),
-
- file -> addNodeToDAG(file, seqNum, infile.getStartKey(),
- infile.getEndKey(), infile.getColumnFamily()));
-
- // Draw the edges
- if (!outfileNode.getFileName().equals(infileNode.getFileName())) {
- forwardCompactionDAG.putEdge(outfileNode, infileNode);
- backwardCompactionDAG.putEdge(infileNode, outfileNode);
- }
- }
- }
+ return compactionDag.getBackwardCompactionDAG();
}
private void addFileInfoToCompactionLogTable(
@@ -1245,7 +1109,7 @@ private void removeSstFiles(Set<String> sstFileNodes) {
public Set<String> pruneSstFileNodesFromDag(Set<String> sstFileNodes) {
Set<CompactionNode> startNodes = new HashSet<>();
for (String sstFileNode : sstFileNodes) {
- CompactionNode infileNode = compactionNodeMap.get(sstFileNode);
+ CompactionNode infileNode = compactionDag.getCompactionNode(sstFileNode);
if (infileNode == null) {
LOG.warn("Compaction node doesn't exist for sstFile: {}.",
sstFileNode);
continue;
@@ -1255,14 +1119,7 @@ public Set<String> pruneSstFileNodesFromDag(Set<String> sstFileNodes) {
}
synchronized (this) {
- pruneBackwardDag(backwardCompactionDAG, startNodes);
- Set<String> sstFilesPruned = pruneForwardDag(forwardCompactionDAG,
- startNodes);
-
- // Remove SST file nodes from compactionNodeMap too,
- // since those nodes won't be needed after clean up.
- sstFilesPruned.forEach(compactionNodeMap::remove);
- return sstFilesPruned;
+ return compactionDag.pruneNodesFromDag(startNodes);
}
}
@@ -1272,26 +1129,7 @@ public Set<String> pruneSstFileNodesFromDag(Set<String> sstFileNodes) {
@VisibleForTesting
Set<String> pruneBackwardDag(MutableGraph<CompactionNode> backwardDag,
Set<CompactionNode> startNodes) {
- Set<String> removedFiles = new HashSet<>();
- Set<CompactionNode> currentLevel = startNodes;
-
- synchronized (this) {
- while (!currentLevel.isEmpty()) {
- Set<CompactionNode> nextLevel = new HashSet<>();
- for (CompactionNode current : currentLevel) {
- if (!backwardDag.nodes().contains(current)) {
- continue;
- }
-
- nextLevel.addAll(backwardDag.predecessors(current));
- backwardDag.removeNode(current);
- removedFiles.add(current.getFileName());
- }
- currentLevel = nextLevel;
- }
- }
-
- return removedFiles;
+ return compactionDag.pruneBackwardDag(backwardDag, startNodes);
}
/**
@@ -1300,27 +1138,7 @@ Set<String> pruneBackwardDag(MutableGraph<CompactionNode> backwardDag,
@VisibleForTesting
Set<String> pruneForwardDag(MutableGraph<CompactionNode> forwardDag,
Set<CompactionNode> startNodes) {
- Set<String> removedFiles = new HashSet<>();
- Set<CompactionNode> currentLevel = new HashSet<>(startNodes);
-
- synchronized (this) {
- while (!currentLevel.isEmpty()) {
- Set<CompactionNode> nextLevel = new HashSet<>();
- for (CompactionNode current : currentLevel) {
- if (!forwardDag.nodes().contains(current)) {
- continue;
- }
-
- nextLevel.addAll(forwardDag.successors(current));
- forwardDag.removeNode(current);
- removedFiles.add(current.getFileName());
- }
-
- currentLevel = nextLevel;
- }
- }
-
- return removedFiles;
+ return compactionDag.pruneForwardDag(forwardDag, startNodes);
}
private long getSnapshotCreationTimeFromLogLine(String logLine) {
@@ -1364,8 +1182,8 @@ public void pruneSstFiles() {
// when nodes are added to the graph, but arcs are still in progress.
// Hence, the lock is taken.
synchronized (this) {
- nonLeafSstFiles = forwardCompactionDAG.nodes().stream()
- .filter(node -> !forwardCompactionDAG.successors(node).isEmpty())
+ nonLeafSstFiles = compactionDag.getForwardCompactionDAG().nodes().stream()
+ .filter(node -> !compactionDag.getForwardCompactionDAG().successors(node).isEmpty())
.map(node -> node.getFileName())
.collect(Collectors.toSet());
}
@@ -1386,14 +1204,9 @@ public boolean shouldRun() {
return !suspended.get();
}
- @VisibleForTesting
- public boolean debugEnabled(Integer level) {
- return DEBUG_LEVEL.contains(level);
- }
-
@VisibleForTesting
public ConcurrentHashMap<String, CompactionNode> getCompactionNodeMap() {
- return compactionNodeMap;
+ return (ConcurrentHashMap<String, CompactionNode>) compactionDag.getCompactionMap();
}
@VisibleForTesting
@@ -1446,19 +1259,6 @@ public BootstrapStateHandler.Lock getBootstrapStateLock() {
return lock;
}
- public void pngPrintMutableGraph(String filePath, GraphType graphType)
- throws IOException {
- Objects.requireNonNull(filePath, "Image file path is required.");
- Objects.requireNonNull(graphType, "Graph type is required.");
-
- PrintableGraph graph;
- synchronized (this) {
- graph = new PrintableGraph(backwardCompactionDAG, graphType);
- }
-
- graph.generateImage(filePath);
- }
-
private Map<String, CompactionFileInfo> toFileInfoList(List<String> sstFiles, RocksDB db) {
if (CollectionUtils.isEmpty(sstFiles)) {
return Collections.emptyMap();
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index 50be6bfb70..6c6ba6f1ac 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -28,8 +28,6 @@
import static org.apache.hadoop.util.Time.now;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COLUMN_FAMILIES_TO_TRACK_IN_DAG;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COMPACTION_LOG_FILE_NAME_SUFFIX;
-import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_DAG_LIVE_NODES;
-import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_READ_ALL_DB_KEYS;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.SST_FILE_EXTENSION;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -65,6 +63,7 @@
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -138,7 +137,6 @@ public class TestRocksDBCheckpointDiffer {
.map(
sstFile -> new CompactionNode(sstFile,
1000L,
- Long.parseLong(sstFile.substring(0, 6)),
null, null, null
))
.collect(Collectors.toList()))
@@ -310,6 +308,18 @@ public class TestRocksDBCheckpointDiffer {
private ColumnFamilyHandle fileTableCFHandle;
private ColumnFamilyHandle compactionLogTableCFHandle;
+ public static final Integer DEBUG_DAG_BUILD_UP = 2;
+ public static final Integer DEBUG_DAG_TRAVERSAL = 3;
+ public static final Integer DEBUG_DAG_LIVE_NODES = 4;
+ public static final Integer DEBUG_READ_ALL_DB_KEYS = 5;
+ private static final HashSet<Integer> DEBUG_LEVEL = new HashSet<>();
+
+ static {
+ DEBUG_LEVEL.add(DEBUG_DAG_BUILD_UP);
+ DEBUG_LEVEL.add(DEBUG_DAG_TRAVERSAL);
+ DEBUG_LEVEL.add(DEBUG_DAG_LIVE_NODES);
+ }
+
@BeforeEach
public void init() throws RocksDBException {
// Checkpoint differ log level. Set to DEBUG for verbose output
@@ -1035,7 +1045,7 @@ private void readRocksDBInstance(String dbPathArg,
LOG.debug("\tLevel: {}", m.level());
LOG.debug("\tTable: {}", bytes2String(m.columnFamilyName()));
LOG.debug("\tKey Range: {}", bytes2String(m.smallestKey()) + " <-> " +
bytes2String(m.largestKey()));
- if (differ.debugEnabled(DEBUG_DAG_LIVE_NODES)) {
+ if (debugEnabled(DEBUG_DAG_LIVE_NODES)) {
printMutableGraphFromAGivenNode(
differ.getCompactionNodeMap(),
m.fileName(), m.level(),
@@ -1043,7 +1053,7 @@ private void readRocksDBInstance(String dbPathArg,
}
}
- if (differ.debugEnabled(DEBUG_READ_ALL_DB_KEYS)) {
+ if (debugEnabled(DEBUG_READ_ALL_DB_KEYS)) {
try (ManagedRocksIterator iter = new ManagedRocksIterator(rocksDB.get().newIterator())) {
for (iter.get().seekToFirst(); iter.get().isValid(); iter.get().next()) {
LOG.debug(
@@ -1065,6 +1075,10 @@ private void readRocksDBInstance(String dbPathArg,
}
}
+ public boolean debugEnabled(Integer level) {
+ return DEBUG_LEVEL.contains(level);
+ }
+
/**
* Helper that traverses the graphs for testing.
* @param compactionNodeMap
@@ -1072,7 +1086,7 @@ private void readRocksDBInstance(String dbPathArg,
* @param fwdMutableGraph
*/
private void traverseGraph(
- ConcurrentHashMap<String, CompactionNode> compactionNodeMap,
+ ConcurrentMap<String, CompactionNode> compactionNodeMap,
MutableGraph<CompactionNode> reverseMutableGraph,
MutableGraph<CompactionNode> fwdMutableGraph) {
@@ -1968,14 +1982,10 @@ public void testShouldSkipNode(Map<String, String> columnFamilyToPrefixMap,
}
private static Stream<Arguments> shouldSkipNodeEdgeCases() {
- CompactionNode node = new CompactionNode("fileName",
- 100, 100, "startKey", "endKey", "columnFamily");
- CompactionNode nullColumnFamilyNode = new CompactionNode("fileName",
- 100, 100, "startKey", "endKey", null);
- CompactionNode nullStartKeyNode = new CompactionNode("fileName",
- 100, 100, null, "endKey", "columnFamily");
- CompactionNode nullEndKeyNode = new CompactionNode("fileName",
- 100, 100, "startKey", null, "columnFamily");
+ CompactionNode node = new CompactionNode("fileName", 100, "startKey", "endKey", "columnFamily");
+ CompactionNode nullColumnFamilyNode = new CompactionNode("fileName", 100, "startKey", "endKey", null);
+ CompactionNode nullStartKeyNode = new CompactionNode("fileName", 100, null, "endKey", "columnFamily");
+ CompactionNode nullEndKeyNode = new CompactionNode("fileName", 100, "startKey", null, "columnFamily");
return Stream.of(
Arguments.of(node, Collections.emptyMap(), false),
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java
index d68004a775..324c29015e 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java
@@ -105,9 +105,9 @@ public void testFilterRelevantSstFilesWithPreExistingCompactionInfo(String valid
validSSTFileStartRange.charAt(0)) / 2));
Set<String> sstFile = Sets.newTreeSet(validSstFile, invalidSstFile,
untrackedSstFile);
RocksDiffUtils.filterRelevantSstFiles(sstFile, ImmutableMap.of(validSSTColumnFamilyName, expectedPrefix),
- ImmutableMap.of("validSSTFile", new CompactionNode(validSstFile, 0,
0, validSSTFileStartRange,
+ ImmutableMap.of("validSSTFile", new CompactionNode(validSstFile, 0,
validSSTFileStartRange,
validSSTFileEndRange, validSSTColumnFamilyName),
"invalidSSTFile",
- new CompactionNode(invalidSstFile, 0, 0, invalidSSTFileStartRange,
+ new CompactionNode(invalidSstFile, 0, invalidSSTFileStartRange,
invalidSSTFileEndRange, invalidColumnFamilyName)));
Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), sstFile);
}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index edbe74205b..90da0d6769 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -1207,6 +1207,7 @@ OzoneSnapshot getSnapshotInfo(String volumeName,
* @return message which tells the image name, parent dir and OM leader
* node information.
*/
+ @Deprecated
String printCompactionLogDag(String fileNamePrefix, String graphType)
throws IOException;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index bcb08f0c3d..1befbb3c9e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1028,6 +1028,7 @@ public OzoneSnapshot getSnapshotInfo(String volumeName,
* @return message which tells the image name, parent dir and OM leader
* node information.
*/
+ @Deprecated
@Override
public String printCompactionLogDag(String fileNamePrefix,
String graphType) throws IOException {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index 9ca59b9c38..8158838176 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -274,6 +274,8 @@ public static boolean isReadOnly(
case TransferLeadership:
case SetSafeMode:
case PrintCompactionLogDag:
+ // printCompactionLogDag is deprecated by HDDS-12053;
+ // kept here for backward compatibility
case GetSnapshotInfo:
case GetObjectTagging:
case GetQuotaRepairStatus:
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index d164587ffa..3bcf190662 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -760,10 +760,10 @@ default SnapshotInfo getSnapshotInfo(String volumeName,
* @return message which tells the image name, parent dir and OM leader
* node information.
*/
+ @Deprecated
default String printCompactionLogDag(String fileNamePrefix, String graphType)
throws IOException {
- throw new UnsupportedOperationException("OzoneManager does not require " +
- "this to be implemented");
+ throw new UnsupportedOperationException("This API has been deprecated.");
}
/**
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 0e23fc1937..671a93a486 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -187,7 +187,6 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrintCompactionLogDagRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncResponse;
@@ -1327,30 +1326,6 @@ public SnapshotInfo getSnapshotInfo(String volumeName, String bucketName,
.getSnapshotInfo());
}
- /**
- * {@inheritDoc}
- */
- @Override
- public String printCompactionLogDag(String fileNamePrefix, String graphType)
- throws IOException {
- final PrintCompactionLogDagRequest.Builder request =
- PrintCompactionLogDagRequest.newBuilder();
-
- if (fileNamePrefix != null) {
- request.setFileNamePrefix(fileNamePrefix);
- }
- if (graphType != null) {
- request.setGraphType(graphType);
- }
-
- final OMRequest omRequest = createOMRequest(Type.PrintCompactionLogDag)
- .setPrintCompactionLogDagRequest(request.build())
- .build();
- final OMResponse omResponse = submitRequest(omRequest);
- handleError(omResponse);
- return omResponse.getPrintCompactionLogDagResponse().getMessage();
- }
-
/**
* {@inheritDoc}
*/
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 92edafa942..c7ff385016 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -141,7 +141,7 @@ enum Type {
ListSnapshotDiffJobs = 122;
CancelSnapshotDiff = 123;
SetSafeMode = 124;
- PrintCompactionLogDag = 125;
+ PrintCompactionLogDag = 125; // [deprecated = true] by HDDS-12053
ListKeysLight = 126;
AbortExpiredMultiPartUploads = 127;
SetSnapshotProperty = 128;
@@ -287,7 +287,7 @@ message OMRequest {
optional ListSnapshotDiffJobRequest ListSnapshotDiffJobRequest = 122;
optional CancelSnapshotDiffRequest CancelSnapshotDiffRequest = 123;
optional SetSafeModeRequest SetSafeModeRequest = 124;
- optional PrintCompactionLogDagRequest PrintCompactionLogDagRequest = 125;
+ optional PrintCompactionLogDagRequest PrintCompactionLogDagRequest = 125 [deprecated = true];
optional MultipartUploadsExpiredAbortRequest multipartUploadsExpiredAbortRequest = 126;
optional SetSnapshotPropertyRequest SetSnapshotPropertyRequest = 127;
@@ -420,7 +420,7 @@ message OMResponse {
optional ListSnapshotDiffJobResponse ListSnapshotDiffJobResponse = 122;
optional CancelSnapshotDiffResponse cancelSnapshotDiffResponse = 123;
optional SetSafeModeResponse SetSafeModeResponse = 124;
- optional PrintCompactionLogDagResponse PrintCompactionLogDagResponse = 125;
+ optional PrintCompactionLogDagResponse PrintCompactionLogDagResponse = 125 [deprecated = true];
optional ListKeysLightResponse listKeysLightResponse = 126;
optional MultipartUploadsExpiredAbortResponse multipartUploadsExpiredAbortResponse = 127;
optional SetSnapshotPropertyResponse SetSnapshotPropertyResponse = 128;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 51db6a5f3a..23a7e7021c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -102,7 +102,6 @@
import static org.apache.hadoop.security.UserGroupInformation.getCurrentUser;
import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.Time.monotonicNow;
-import static org.apache.ozone.graph.PrintableGraph.GraphType.FILE_NAME;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
@@ -330,7 +329,6 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.KMSUtil;
import org.apache.hadoop.util.Time;
-import org.apache.ozone.graph.PrintableGraph;
import org.apache.ratis.grpc.GrpcTlsConfig;
import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
import org.apache.ratis.protocol.RaftGroupId;
@@ -5029,36 +5027,6 @@ public ListSnapshotDiffJobResponse listSnapshotDiffJobs(
jobStatus, listAllStatus, prevSnapshotDiffJob, maxListResult);
}
- @Override
- public String printCompactionLogDag(String fileNamePrefix,
- String graphType)
- throws IOException {
- checkAdminUserPrivilege("print compaction DAG.");
-
- if (StringUtils.isBlank(fileNamePrefix)) {
- fileNamePrefix = "dag-";
- } else {
- fileNamePrefix = fileNamePrefix + "-";
- }
- File tempFile = File.createTempFile(fileNamePrefix, ".png");
-
- PrintableGraph.GraphType type;
-
- try {
- type = PrintableGraph.GraphType.valueOf(graphType);
- } catch (IllegalArgumentException e) {
- type = FILE_NAME;
- }
-
- getMetadataManager()
- .getStore()
- .getRocksDBCheckpointDiffer()
- .pngPrintMutableGraph(tempFile.getAbsolutePath(), type);
-
- return String.format("Graph was generated at '\\tmp\\%s' on OM " +
- "node '%s'.", tempFile.getName(), getOMNodeId());
- }
-
private String reconfOzoneAdmins(String newVal) {
getConfiguration().set(OZONE_ADMINISTRATORS, newVal);
Collection<String> admins =
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
index b11322a351..17f62e77ce 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
@@ -724,6 +724,7 @@ public OzoneSnapshot getSnapshotInfo(String volumeName, String bucketName,
}
@Override
+ @Deprecated
public String printCompactionLogDag(String fileNamePrefix,
String graphType) throws IOException {
return null;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java
deleted file mode 100644
index 504ec23fda..0000000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.debug;
-
-import java.io.IOException;
-import org.apache.hadoop.hdds.cli.DebugSubcommand;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.shell.Handler;
-import org.apache.hadoop.ozone.shell.OzoneAddress;
-import org.kohsuke.MetaInfServices;
-import picocli.CommandLine;
-
-/**
- * Handler to generate image for current compaction DAG in the OM leader node.
- * ozone sh snapshot print-log-dag.
- */
[email protected](
- name = "print-log-dag",
- aliases = "pld",
- description = "Create an image of the current compaction log DAG in OM.")
-@MetaInfServices(DebugSubcommand.class)
-public class CompactionLogDagPrinter extends Handler
- implements DebugSubcommand {
-
- @CommandLine.Option(names = {"-f", "--file-name-prefix"},
- description = "Prefix to be use in image file name. (optional)")
- private String fileNamePrefix;
-
- // TODO: Change graphType to enum.
- @CommandLine.Option(names = {"-t", "--graph-type"},
- description = "Type of node name to use in the graph image. " +
- "(optional)\n Accepted values are: \n" +
- " file_name (default) : to use file name as node name in DAG,\n" +
- " key_size: to show the no. of keys in the file along with file " +
- "name in the DAG node name,\n" +
- " cumulative_size: to show the cumulative size along with file " +
- "name in the DAG node name.",
- defaultValue = "file_name")
- private String graphType;
-
- @Override
- protected void execute(OzoneClient client, OzoneAddress address)
- throws IOException {
- String message = client.getObjectStore()
- .printCompactionLogDag(fileNamePrefix, graphType);
- System.out.println(message);
- }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/CompactionLogDagPrinter.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/CompactionLogDagPrinter.java
new file mode 100644
index 0000000000..1e460b3a53
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/CompactionLogDagPrinter.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug.om;
+
+import static org.apache.hadoop.ozone.OzoneConsts.COMPACTION_LOG_TABLE;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.Callable;
+import org.apache.hadoop.hdds.cli.AbstractSubcommand;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
+import org.apache.hadoop.ozone.debug.RocksDBUtils;
+import org.apache.ozone.compaction.log.CompactionLogEntry;
+import org.apache.ozone.graph.PrintableGraph;
+import org.apache.ozone.rocksdiff.CompactionDag;
+import org.rocksdb.ColumnFamilyDescriptor;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+import picocli.CommandLine;
+
+/**
+ * Handler to generate image for current compaction DAG.
+ * ozone debug om generate-compaction-dag.
+ */
[email protected](
+ name = "generate-compaction-dag",
+ aliases = "gcd",
+ description = "Create an image of the current compaction log DAG. " +
+ "This command is an offline command. i.e., it can run on any instance
of om.db " +
+ "and does not require OM to be up.")
+public class CompactionLogDagPrinter extends AbstractSubcommand implements Callable<Void> {
+
+ @CommandLine.ParentCommand
+ private OMDebug parent;
+
+ @CommandLine.Option(names = {"-o", "--output-file"},
+ required = true,
+ description = "Path to location at which image will be downloaded. " +
+ "Should include the image file name with \".png\" extension.")
+ private String imageLocation;
+
+ @Override
+ public Void call() throws Exception {
+ try {
+ final List<ColumnFamilyHandle> cfHandleList = new ArrayList<>();
+ List<ColumnFamilyDescriptor> cfDescList = RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath());
+ ManagedRocksDB activeRocksDB = ManagedRocksDB.openReadOnly(parent.getDbPath(), cfDescList, cfHandleList);
+ ColumnFamilyHandle compactionLogTableCFHandle =
+ RocksDBUtils.getColumnFamilyHandle(COMPACTION_LOG_TABLE, cfHandleList);
+
+ CompactionDag compactionDag = new CompactionDag();
+ loadCompactionDagFromDB(activeRocksDB, compactionLogTableCFHandle, compactionDag);
+
+ pngPrintMutableGraph(compactionDag, imageLocation);
+ out().println("Graph was generated at '" + imageLocation + "'.");
+ } catch (RocksDBException ex) {
+ err().println("Failed to open RocksDB: " + ex);
+ throw ex;
+ }
+ return null;
+ }
+
+ /**
+ * Read the compactionLogTable and create entries in the DAGs.
+ */
+ private void loadCompactionDagFromDB(ManagedRocksDB activeRocksDB,
+ ColumnFamilyHandle compactionLogTableCFHandle, CompactionDag compactionDag) {
+ try (ManagedRocksIterator managedRocksIterator = new ManagedRocksIterator(
+ activeRocksDB.get().newIterator(compactionLogTableCFHandle))) {
+ managedRocksIterator.get().seekToFirst();
+ while (managedRocksIterator.get().isValid()) {
+ byte[] value = managedRocksIterator.get().value();
+ CompactionLogEntry compactionLogEntry =
+ CompactionLogEntry.getFromProtobuf(HddsProtos.CompactionLogEntryProto.parseFrom(value));
+ compactionDag.populateCompactionDAG(compactionLogEntry.getInputFileInfoList(),
+ compactionLogEntry.getOutputFileInfoList(), compactionLogEntry.getDbSequenceNumber());
+ managedRocksIterator.get().next();
+ }
+ } catch (InvalidProtocolBufferException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public void pngPrintMutableGraph(CompactionDag helper, String filePath)
+ throws IOException {
+ Objects.requireNonNull(filePath, "Image file path is required.");
+ PrintableGraph graph;
+ graph = new PrintableGraph(helper.getBackwardCompactionDAG(), PrintableGraph.GraphType.FILE_NAME);
+ graph.generateImage(filePath);
+ }
+}
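Taken together with the inherited --db option that OMDebug declares (next diff below), the subcommand above can be run against a local copy of om.db with no OM running. A hedged example invocation, with illustrative paths:

ozone debug om --db /path/to/om.db generate-compaction-dag --output-file /tmp/compaction-dag.png

Because --db is declared with ScopeType.INHERIT, it may also be given after the subcommand name.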
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/OMDebug.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/OMDebug.java
index 06b30b335e..dc8c5cb59e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/OMDebug.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/OMDebug.java
@@ -28,9 +28,20 @@
name = "om",
description = "Debug commands related to OM.",
subcommands = {
+ CompactionLogDagPrinter.class,
PrefixParser.class
}
)
@MetaInfServices(DebugSubcommand.class)
public class OMDebug implements DebugSubcommand {
+
+ @CommandLine.Option(names = {"--db"},
+ required = true,
+ scope = CommandLine.ScopeType.INHERIT,
+ description = "Path to OM RocksDB")
+ private String dbPath;
+
+ public String getDbPath() {
+ return dbPath;
+ }
}
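The ScopeType.INHERIT used on --db above is what lets subcommands such as PrefixParser and CompactionLogDagPrinter pick the value up through @ParentCommand. A minimal, self-contained picocli sketch of that mechanism (the InheritDemo and Child names are illustrative, not Ozone code):

import java.util.concurrent.Callable;
import picocli.CommandLine;

@CommandLine.Command(name = "parent", subcommands = InheritDemo.Child.class)
public class InheritDemo {

  // Declared once on the parent; INHERIT makes subcommands accept it too,
  // and the matched value is stored back into this field.
  @CommandLine.Option(names = "--db", scope = CommandLine.ScopeType.INHERIT,
      description = "Path to the database")
  private String dbPath;

  @CommandLine.Command(name = "child")
  static class Child implements Callable<Integer> {
    @CommandLine.ParentCommand
    private InheritDemo parent;

    @Override
    public Integer call() {
      System.out.println("db = " + parent.dbPath); // reads the inherited value
      return 0;
    }
  }

  public static void main(String[] args) {
    // Both orderings work because the option is inherited:
    new CommandLine(new InheritDemo()).execute("--db", "/tmp/om.db", "child");
    new CommandLine(new InheritDemo()).execute("child", "--db", "/tmp/om.db");
  }
}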
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java
index 31b14754ee..35e21f5811 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java
@@ -48,12 +48,10 @@
description = "Parse prefix contents")
public class PrefixParser implements Callable<Void> {
- private final int[] parserStats = new int[Types.values().length];
+ @CommandLine.ParentCommand
+ private OMDebug parent;
- @CommandLine.Option(names = {"--db"},
- required = true,
- description = "Database File Path")
- private String dbPath;
+ private final int[] parserStats = new int[Types.values().length];
@CommandLine.Option(names = {"--path"},
required = true,
@@ -71,16 +69,12 @@ public class PrefixParser implements Callable<Void> {
private String volume;
public String getDbPath() {
- return dbPath;
- }
-
- public void setDbPath(String dbPath) {
- this.dbPath = dbPath;
+ return parent.getDbPath();
}
@Override
public Void call() throws Exception {
- parse(volume, bucket, dbPath, filePath);
+ parse(volume, bucket, getDbPath(), filePath);
return null;
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/graph/Edge.java b/hadoop-ozone/tools/src/main/java/org/apache/ozone/graph/Edge.java
similarity index 100%
rename from hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/graph/Edge.java
rename to hadoop-ozone/tools/src/main/java/org/apache/ozone/graph/Edge.java
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/graph/PrintableGraph.java b/hadoop-ozone/tools/src/main/java/org/apache/ozone/graph/PrintableGraph.java
similarity index 100%
rename from hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/graph/PrintableGraph.java
rename to hadoop-ozone/tools/src/main/java/org/apache/ozone/graph/PrintableGraph.java
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/graph/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/ozone/graph/package-info.java
similarity index 100%
rename from hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/graph/package-info.java
rename to hadoop-ozone/tools/src/main/java/org/apache/ozone/graph/package-info.java
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java b/hadoop-ozone/tools/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java
similarity index 87%
rename from hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java
rename to hadoop-ozone/tools/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java
index c742a83eb9..fb7dbe3199 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java
@@ -63,14 +63,10 @@ void testPrintNoGraphMessage(PrintableGraph.GraphType graphType) {
@EnumSource(PrintableGraph.GraphType.class)
void testPrintActualGraph(PrintableGraph.GraphType graphType) throws IOException {
Set<CompactionNode> nodes = Stream.of(
- new CompactionNode("fileName1",
- 100, 100, "startKey1", "endKey1", "columnFamily1"),
- new CompactionNode("fileName2",
- 200, 200, "startKey2", "endKey2", null),
- new CompactionNode("fileName3",
- 300, 300, null, "endKey3", "columnFamily3"),
- new CompactionNode("fileName4",
- 400, 400, "startKey4", null, "columnFamily4")
+ new CompactionNode("fileName1", 100, "startKey1", "endKey1",
"columnFamily1"),
+ new CompactionNode("fileName2", 200, "startKey2", "endKey2", null),
+ new CompactionNode("fileName3", 300, null, "endKey3", "columnFamily3"),
+ new CompactionNode("fileName4", 400, "startKey4", null,
"columnFamily4")
).collect(Collectors.toSet());
when(mutableGraph.nodes()).thenReturn(nodes);