This is an automated email from the ASF dual-hosted git repository.

ivandika pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new f468943fd0 HDDS-13422. Ozone tool should preserve previous RocksDB options (#8818)
f468943fd0 is described below

commit f468943fd0e621dc9be32359335803d47015049a
Author: Ivan Andika <[email protected]>
AuthorDate: Mon Jul 28 22:03:07 2025 +0800

    HDDS-13422. Ozone tool should preserve previous RocksDB options (#8818)
---
 .../utils/db/managed/ManagedConfigOptions.java     |  40 +++++
 .../hdds/utils/db/managed/ManagedRocksDB.java      |  22 ++-
 .../hadoop/ozone/repair/TransactionInfoRepair.java |  12 +-
 .../ozone/repair/ldb/RocksDBManualCompaction.java  |  12 +-
 .../ozone/repair/om/SnapshotChainRepair.java       |  11 +-
 .../ozone/repair/TestTransactionInfoRepair.java    |   9 +-
 .../hadoop/ozone/repair/ldb/TestLdbRepair.java     | 180 ++++++++++++++++++++-
 .../ozone/repair/om/TestSnapshotChainRepair.java   |  19 ++-
 8 files changed, 279 insertions(+), 26 deletions(-)
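
For context, the gist of the change is to read the OPTIONS file that RocksDB persisted on the last open (via OptionsUtil.loadLatestOptions) and reopen the DB with those settings, rather than opening with library defaults. Below is an illustrative, standalone sketch of that idea against the raw RocksDB Java API; the class name, argument handling and cleanup order are assumptions for the example and are not part of the patch:

    import java.util.ArrayList;
    import java.util.List;
    import org.rocksdb.ColumnFamilyDescriptor;
    import org.rocksdb.ColumnFamilyHandle;
    import org.rocksdb.ConfigOptions;
    import org.rocksdb.DBOptions;
    import org.rocksdb.OptionsUtil;
    import org.rocksdb.RocksDB;

    public final class PreserveRocksDbOptionsSketch {
      public static void main(String[] args) throws Exception {
        RocksDB.loadLibrary();            // make sure the native library is loaded
        String dbPath = args[0];          // path to an existing RocksDB directory
        try (ConfigOptions configOptions = new ConfigOptions();
             DBOptions dbOptions = new DBOptions()) {
          List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
          List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
          // Populate dbOptions and cfDescriptors from the OPTIONS file written by
          // the previous DB instance, instead of falling back to library defaults.
          OptionsUtil.loadLatestOptions(configOptions, dbPath, dbOptions, cfDescriptors);
          RocksDB db = RocksDB.open(dbOptions, dbPath, cfDescriptors, cfHandles);
          try {
            // ... repair / compaction work goes here ...
          } finally {
            cfHandles.forEach(ColumnFamilyHandle::close);  // close handles before the DB
            db.close();
          }
        }
      }
    }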

diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedConfigOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedConfigOptions.java
new file mode 100644
index 0000000000..aef0cddc31
--- /dev/null
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedConfigOptions.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db.managed;
+
+import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;
+
+import org.apache.ratis.util.UncheckedAutoCloseable;
+import org.rocksdb.ConfigOptions;
+
+/**
+ * Managed ConfigOptions.
+ */
+public class ManagedConfigOptions extends ConfigOptions {
+
+  private final UncheckedAutoCloseable leakTracker = track(this);
+
+  @Override
+  public void close() {
+    try {
+      super.close();
+    } finally {
+      leakTracker.close();
+    }
+  }
+}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java
index 0fff6098bf..3401469f68 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java
@@ -28,6 +28,7 @@
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.DBOptions;
 import org.rocksdb.LiveFileMetaData;
+import org.rocksdb.OptionsUtil;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
 import org.slf4j.Logger;
@@ -85,13 +86,28 @@ public static ManagedRocksDB open(
     );
   }
 
-  public static ManagedRocksDB open(
-      final String path,
+  /**
+   * Open a RocksDB instance with the latest persisted options. Other than {@link ManagedConfigOptions} and
+   * the path, the other options and handles should be empty, since they will be populated by
+   * loadLatestOptions. Nevertheless, the caller is still responsible for cleaning up / closing the resources.
+   * @param configOptions Config options.
+   * @param options DBOptions. This would be modified based on the latest options.
+   * @param path DB path.
+   * @param columnFamilyDescriptors Column family descriptors. These would be modified based on the latest options.
+   * @param columnFamilyHandles Column family handles of the underlying column families.
+   * @return A RocksDB instance.
+   * @throws RocksDBException thrown if an error happens in the underlying native library.
+   */
+  public static ManagedRocksDB openWithLatestOptions(
+      final ManagedConfigOptions configOptions,
+      final DBOptions options, final String path,
       final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
       final List<ColumnFamilyHandle> columnFamilyHandles)
       throws RocksDBException {
+    // Preserve all the previous DB options
+    OptionsUtil.loadLatestOptions(configOptions, path, options, columnFamilyDescriptors);
     return new ManagedRocksDB(
-        RocksDB.open(path, columnFamilyDescriptors, columnFamilyHandles)
+        RocksDB.open(options, path, columnFamilyDescriptors, columnFamilyHandles)
     );
   }
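
An illustrative caller-side sketch of the new helper follows (hypothetical example class; it mirrors the pattern the repair tools below switch to, including the cleanup in a finally block):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hdds.utils.IOUtils;
    import org.apache.hadoop.hdds.utils.db.managed.ManagedConfigOptions;
    import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
    import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
    import org.rocksdb.ColumnFamilyDescriptor;
    import org.rocksdb.ColumnFamilyHandle;

    public final class OpenWithLatestOptionsExample {
      public static void main(String[] args) throws Exception {
        String dbPath = args[0];  // e.g. the RocksDB directory of a stopped service
        ManagedConfigOptions configOptions = new ManagedConfigOptions();
        ManagedDBOptions dbOptions = new ManagedDBOptions();
        // Left empty on purpose: openWithLatestOptions fills the descriptors
        // from the DB's persisted OPTIONS file.
        List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
        List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
        try (ManagedRocksDB db = ManagedRocksDB.openWithLatestOptions(
            configOptions, dbOptions, dbPath, cfDescriptors, cfHandles)) {
          // ... inspect or repair column families via cfHandles ...
        } finally {
          IOUtils.closeQuietly(configOptions);
          IOUtils.closeQuietly(dbOptions);
          IOUtils.closeQuietly(cfHandles);
        }
      }
    }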
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java
index 2c547959c1..2bb107f4a5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java
@@ -29,6 +29,8 @@
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedConfigOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
 import org.apache.hadoop.ozone.debug.RocksDBUtils;
 import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
@@ -65,12 +67,14 @@ public class TransactionInfoRepair extends RepairTool {
 
   @Override
   public void execute() throws Exception {
+    ManagedConfigOptions configOptions = new ManagedConfigOptions();
+    ManagedDBOptions dbOptions = new ManagedDBOptions();
     List<ColumnFamilyHandle> cfHandleList = new ArrayList<>();
-    List<ColumnFamilyDescriptor> cfDescList = RocksDBUtils.getColumnFamilyDescriptors(
-        dbPath);
+    List<ColumnFamilyDescriptor> cfDescList = new ArrayList<>();
     String columnFamilyName = getColumnFamily(serviceToBeOffline()).getName();
 
-    try (ManagedRocksDB db = ManagedRocksDB.open(dbPath, cfDescList, cfHandleList)) {
+    try (ManagedRocksDB db = ManagedRocksDB.openWithLatestOptions(
+        configOptions, dbOptions, dbPath, cfDescList, cfHandleList)) {
       ColumnFamilyHandle transactionInfoCfh = RocksDBUtils.getColumnFamilyHandle(columnFamilyName, cfHandleList);
       if (transactionInfoCfh == null) {
         throw new IllegalArgumentException(columnFamilyName +
@@ -99,6 +103,8 @@ public void execute() throws Exception {
       error("Failed to update the RocksDB for the given path: %s", dbPath);
       throw new IOException("Failed to update RocksDB.", exception);
     } finally {
+      IOUtils.closeQuietly(configOptions);
+      IOUtils.closeQuietly(dbOptions);
       IOUtils.closeQuietly(cfHandleList);
     }
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RocksDBManualCompaction.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RocksDBManualCompaction.java
index 382bf8d06e..c021157267 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RocksDBManualCompaction.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RocksDBManualCompaction.java
@@ -23,6 +23,8 @@
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedCompactRangeOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedConfigOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
 import org.apache.hadoop.ozone.debug.RocksDBUtils;
 import org.apache.hadoop.ozone.repair.RepairTool;
@@ -57,11 +59,13 @@ public class RocksDBManualCompaction extends RepairTool {
 
   @Override
   public void execute() throws Exception {
+    ManagedConfigOptions configOptions = new ManagedConfigOptions();
+    ManagedDBOptions dbOptions = new ManagedDBOptions();
     List<ColumnFamilyHandle> cfHandleList = new ArrayList<>();
-    List<ColumnFamilyDescriptor> cfDescList = RocksDBUtils.getColumnFamilyDescriptors(
-        dbPath);
+    List<ColumnFamilyDescriptor> cfDescList = new ArrayList<>();
 
-    try (ManagedRocksDB db = ManagedRocksDB.open(dbPath, cfDescList, cfHandleList)) {
+    try (ManagedRocksDB db = ManagedRocksDB.openWithLatestOptions(
+        configOptions, dbOptions, dbPath, cfDescList, cfHandleList)) {
       ColumnFamilyHandle cfh = RocksDBUtils.getColumnFamilyHandle(columnFamilyName, cfHandleList);
       if (cfh == null) {
         throw new IllegalArgumentException(columnFamilyName +
@@ -84,6 +88,8 @@ public void execute() throws Exception {
           ", column family: " + columnFamilyName;
       throw new IOException(errorMsg, exception);
     } finally {
+      IOUtils.closeQuietly(configOptions);
+      IOUtils.closeQuietly(dbOptions);
       IOUtils.closeQuietly(cfHandleList);
     }
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java
index 2e0d42bb86..adb900ada1 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java
@@ -30,6 +30,8 @@
 import java.util.UUID;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedConfigOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
 import org.apache.hadoop.ozone.debug.RocksDBUtils;
@@ -83,10 +85,13 @@ protected Component serviceToBeOffline() {
 
   @Override
   public void execute() throws Exception {
+    ManagedConfigOptions configOptions = new ManagedConfigOptions();
+    ManagedDBOptions dbOptions = new ManagedDBOptions();
     List<ColumnFamilyHandle> cfHandleList = new ArrayList<>();
-    List<ColumnFamilyDescriptor> cfDescList = RocksDBUtils.getColumnFamilyDescriptors(dbPath);
+    List<ColumnFamilyDescriptor> cfDescList = new ArrayList<>();
 
-    try (ManagedRocksDB db = ManagedRocksDB.open(dbPath, cfDescList, cfHandleList)) {
+    try (ManagedRocksDB db = ManagedRocksDB.openWithLatestOptions(
+        configOptions, dbOptions, dbPath, cfDescList, cfHandleList)) {
       ColumnFamilyHandle snapshotInfoCfh = RocksDBUtils.getColumnFamilyHandle(SNAPSHOT_INFO_TABLE, cfHandleList);
       if (snapshotInfoCfh == null) {
         error("%s is not in a column family in DB for the given path.", SNAPSHOT_INFO_TABLE);
@@ -153,6 +158,8 @@ public void execute() throws Exception {
           "Make sure that Ozone entity (OM, SCM or DN) is not running for the give dbPath and current host.");
       LOG.error(exception.toString());
     } finally {
+      IOUtils.closeQuietly(configOptions);
+      IOUtils.closeQuietly(dbOptions);
       IOUtils.closeQuietly(cfHandleList);
     }
   }
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java
index da197e2a70..a9299aa3bd 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedConfigOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
 import org.apache.hadoop.ozone.debug.RocksDBUtils;
 import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
@@ -43,6 +44,8 @@
 import org.junit.jupiter.params.provider.ValueSource;
 import org.mockito.MockedStatic;
 import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.DBOptions;
+import org.rocksdb.OptionsUtil;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
 import picocli.CommandLine;
@@ -110,8 +113,10 @@ public void testCommandWhenFailToUpdateRocksDBForGivenPath(String component) thr
   private void testCommand(String component, ManagedRocksDB mdb, ColumnFamilyHandle columnFamilyHandle) {
     final String expectedColumnFamilyName = getColumnFamilyName(component);
     try (MockedStatic<ManagedRocksDB> mocked = mockStatic(ManagedRocksDB.class);
-         MockedStatic<RocksDBUtils> mockUtil = mockStatic(RocksDBUtils.class)) {
-      mocked.when(() -> ManagedRocksDB.open(anyString(), anyList(), anyList())).thenReturn(mdb);
+         MockedStatic<RocksDBUtils> mockUtil = mockStatic(RocksDBUtils.class);
+         MockedStatic<OptionsUtil> mockOptionsUtil = mockStatic(OptionsUtil.class)) {
+      mocked.when(() -> ManagedRocksDB.openWithLatestOptions(any(ManagedConfigOptions.class), any(DBOptions.class),
+          anyString(), anyList(), anyList())).thenReturn(mdb);
       mockUtil.when(() -> RocksDBUtils.getColumnFamilyHandle(eq(expectedColumnFamilyName), anyList()))
           .thenReturn(columnFamilyHandle);
 
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestLdbRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestLdbRepair.java
index 0af8d3b4d5..ea2a1ef1e9 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestLdbRepair.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestLdbRepair.java
@@ -19,6 +19,7 @@
 
 import static org.apache.ozone.test.IntLambda.withTextFromSystemIn;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
@@ -28,13 +29,19 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Optional;
 import java.util.stream.Stream;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.CodecBuffer;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.hdds.utils.db.RDBStore;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableConfig;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedBlockBasedTableConfig;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedConfigOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
 import org.apache.hadoop.ozone.debug.RocksDBUtils;
 import org.apache.ozone.rocksdb.util.RdbUtil;
@@ -44,6 +51,7 @@
 import org.junit.jupiter.api.io.TempDir;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.ColumnFamilyOptions;
 import org.rocksdb.LiveFileMetaData;
 import picocli.CommandLine;
 
@@ -54,7 +62,7 @@ public class TestLdbRepair {
 
   private static final String TEST_CF_NAME = "testColumnFamily";
   private static final int NUM_KEYS = 100;
-  
+
   @TempDir
   private Path tempDir;
   private Path dbPath;
@@ -70,7 +78,7 @@ public void setUp() throws Exception {
         .addTable(TEST_CF_NAME)
         .build();
   }
-  
+
   @AfterEach
   public void tearDown() throws Exception {
     if (rdbStore != null && !rdbStore.isClosed()) {
@@ -87,6 +95,7 @@ public void tearDown() throws Exception {
    *    Compaction command executes successfully.
    *    Tombstones are removed (i.e., numDeletions in SST files is 0).
    *    Size of the db reduces after compaction.
+   *    The RocksDB options remain unchanged after the compaction.
    */
   @Test
   public void testRocksDBManualCompaction() throws Exception {
@@ -110,6 +119,7 @@ public void testRocksDBManualCompaction() throws Exception {
     long sizeAfterKeysDelete = calculateSstFileSize(dbPath);
     rdbStore.close();
 
+    DatabaseOptions optionsBeforeCompaction = readDatabaseOptions();
     // Trigger compaction of the table
     RocksDBManualCompaction compactionTool = new RocksDBManualCompaction();
     CommandLine cmd = new CommandLine(compactionTool);
@@ -136,7 +146,7 @@ public void testRocksDBManualCompaction() throws Exception {
     // check all tombstones were removed
     List<ColumnFamilyHandle> cfHandleList = new ArrayList<>();
     List<ColumnFamilyDescriptor> cfDescList = RocksDBUtils.getColumnFamilyDescriptors(dbPath.toString());
-    try (ManagedRocksDB db = ManagedRocksDB.open(dbPath.toString(), cfDescList, cfHandleList)) {
+    try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath.toString(), cfDescList, cfHandleList)) {
       List<LiveFileMetaData> liveFileMetaDataList = RdbUtil
           .getLiveSSTFilesForCFs(db, Collections.singletonList(TEST_CF_NAME));
       for (LiveFileMetaData liveMetadata : liveFileMetaDataList) {
@@ -144,6 +154,9 @@ public void testRocksDBManualCompaction() throws Exception {
             "Tombstones found in file: " + liveMetadata.fileName());
       }
     }
+
+    DatabaseOptions optionsAfterCompaction = readDatabaseOptions();
+    optionsBeforeCompaction.assertEqualOptions(optionsAfterCompaction);
   }
 
   private long calculateSstFileSize(Path db) throws IOException {
@@ -165,4 +178,165 @@ private long calculateSstFileSize(Path db) throws IOException {
           .sum();
     }
   }
+
+  /**
+   * Helper method to read the latest RocksDB options from a database and test column family.
+   */
+  private DatabaseOptions readDatabaseOptions() throws Exception {
+    assertNotNull(dbPath);
+    ManagedConfigOptions configOptions = new ManagedConfigOptions();
+    ManagedDBOptions dbOptions = new ManagedDBOptions();
+    List<ColumnFamilyHandle> cfHandleList = new ArrayList<>();
+    List<ColumnFamilyDescriptor> cfDescList = new ArrayList<>();
+
+    try {
+      try (ManagedRocksDB db = ManagedRocksDB.openWithLatestOptions(
+          configOptions, dbOptions, dbPath.toString(), cfDescList, cfHandleList)) {
+        DatabaseOptions.Builder builder = new DatabaseOptions.Builder();
+        builder.setMaxBackgroundCompactions(dbOptions.maxBackgroundCompactions())
+            .setMaxBackgroundFlushes(dbOptions.maxBackgroundFlushes())
+            .setBytesPerSync(dbOptions.bytesPerSync())
+            .setMaxLogFileSize(dbOptions.maxLogFileSize())
+            .setKeepLogFileNum(dbOptions.keepLogFileNum())
+            .setWalTTL(dbOptions.walTtlSeconds())
+            .setWalSizeLimit(dbOptions.walSizeLimitMB());
+
+        Optional<ColumnFamilyDescriptor> testCfDesc = cfDescList.stream().filter(cfDesc ->
+            TableConfig.toName(cfDesc.getName()).equals(TEST_CF_NAME)).findFirst();
+        if (testCfDesc.isPresent()) {
+          ColumnFamilyOptions cfOptions = testCfDesc.get().getOptions();
+          builder.setWriteBufferSize(cfOptions.writeBufferSize());
+          if (cfOptions.tableFormatConfig() instanceof ManagedBlockBasedTableConfig) {
+            ManagedBlockBasedTableConfig tableConfig = (ManagedBlockBasedTableConfig) cfOptions.tableFormatConfig();
+            builder.setBlockSize(tableConfig.blockSize());
+            builder.setPinL0FilterAndIndexBlocksInCache(tableConfig.pinL0FilterAndIndexBlocksInCache());
+          }
+        }
+        return builder.build();
+      }
+    } finally {
+      configOptions.close();
+      dbOptions.close();
+      IOUtils.closeQuietly(cfHandleList);
+    }
+  }
+
+  /**
+   * Simple data class to hold database options for comparison.
+   * This consists of sampled options from the options set in {@link org.apache.hadoop.hdds.utils.db.DBProfile}.
+   */
+  private static final class DatabaseOptions {
+    // DBOptions
+    private final int maxBackgroundCompactions;
+    private final int maxBackgroundFlushes;
+    private final long bytesPerSync;
+    private final long maxLogFileSize;
+    private final long keepLogFileNum;
+    private final long walTTL;
+    private final long walSizeLimit;
+
+    // ColumnFamilyOptions
+    private final long writeBufferSize;
+    // BlockBasedTableConfig
+    private final long blockSize;
+    private final boolean pinL0FilterAndIndexBlocksInCache;
+
+    private DatabaseOptions(Builder b) {
+      this.maxBackgroundCompactions = b.maxBackgroundCompactions;
+      this.maxBackgroundFlushes = b.maxBackgroundFlushes;
+      this.bytesPerSync = b.bytesPerSync;
+      this.maxLogFileSize = b.maxLogFileSize;
+      this.keepLogFileNum = b.keepLogFileNum;
+      this.walTTL = b.walTTL;
+      this.walSizeLimit = b.walSizeLimit;
+      this.writeBufferSize = b.writeBufferSize;
+      this.blockSize = b.blockSize;
+      this.pinL0FilterAndIndexBlocksInCache = b.pinL0FilterAndIndexBlocksInCache;
+    }
+
+    public void assertEqualOptions(DatabaseOptions other) {
+      assertEquals(this.maxBackgroundCompactions, other.maxBackgroundCompactions);
+      assertEquals(this.maxBackgroundFlushes, other.maxBackgroundFlushes);
+      assertEquals(this.maxLogFileSize, other.maxLogFileSize);
+      assertEquals(this.keepLogFileNum, other.keepLogFileNum);
+      assertEquals(this.walTTL, other.walTTL);
+      assertEquals(this.walSizeLimit, other.walSizeLimit);
+
+      assertEquals(this.bytesPerSync, other.bytesPerSync);
+      assertEquals(this.writeBufferSize, other.writeBufferSize);
+      assertEquals(this.blockSize, other.blockSize);
+      assertEquals(this.pinL0FilterAndIndexBlocksInCache, other.pinL0FilterAndIndexBlocksInCache);
+    }
+
+    private static class Builder {
+      // DBOptions
+      private int maxBackgroundCompactions;
+      private int maxBackgroundFlushes;
+      private long bytesPerSync;
+      private long maxLogFileSize;
+      private long keepLogFileNum;
+      private long walTTL;
+      private long walSizeLimit;
+
+      // ColumnFamilyOptions
+      private long writeBufferSize;
+      // BlockBasedTableConfig
+      private long blockSize;
+      private boolean pinL0FilterAndIndexBlocksInCache;
+
+      public Builder setMaxBackgroundCompactions(int maxBackgroundCompactions) {
+        this.maxBackgroundCompactions = maxBackgroundCompactions;
+        return this;
+      }
+
+      public Builder setMaxBackgroundFlushes(int maxBackgroundFlushes) {
+        this.maxBackgroundFlushes = maxBackgroundFlushes;
+        return this;
+      }
+
+      public Builder setBytesPerSync(long bytesPerSync) {
+        this.bytesPerSync = bytesPerSync;
+        return this;
+      }
+
+      public Builder setMaxLogFileSize(long maxLogFileSize) {
+        this.maxLogFileSize = maxLogFileSize;
+        return this;
+      }
+
+      public Builder setKeepLogFileNum(long keepLogFileNum) {
+        this.keepLogFileNum = keepLogFileNum;
+        return this;
+      }
+
+      public Builder setWalTTL(long walTTL) {
+        this.walTTL = walTTL;
+        return this;
+      }
+
+      public Builder setWalSizeLimit(long walSizeLimit) {
+        this.walSizeLimit = walSizeLimit;
+        return this;
+      }
+
+      public Builder setWriteBufferSize(long writeBufferSize) {
+        this.writeBufferSize = writeBufferSize;
+        return this;
+      }
+
+      public Builder setBlockSize(long blockSize) {
+        this.blockSize = blockSize;
+        return this;
+      }
+
+      public Builder setPinL0FilterAndIndexBlocksInCache(boolean pinL0FilterAndIndexBlocksInCache) {
+        this.pinL0FilterAndIndexBlocksInCache = pinL0FilterAndIndexBlocksInCache;
+        return this;
+      }
+
+      public DatabaseOptions build() {
+        return new DatabaseOptions(this);
+      }
+    }
+  }
 }
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestSnapshotChainRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestSnapshotChainRepair.java
index fe29154504..cef2644632 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestSnapshotChainRepair.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestSnapshotChainRepair.java
@@ -20,6 +20,7 @@
 import static org.apache.hadoop.ozone.OzoneConsts.SNAPSHOT_INFO_TABLE;
 import static org.apache.ozone.test.IntLambda.withTextFromSystemIn;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.ArgumentMatchers.eq;
@@ -37,6 +38,7 @@
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedConfigOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
 import org.apache.hadoop.ozone.debug.RocksDBUtils;
 import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
@@ -48,8 +50,9 @@
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
 import org.mockito.MockedStatic;
-import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.DBOptions;
+import org.rocksdb.OptionsUtil;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
 import org.rocksdb.RocksIterator;
@@ -70,6 +73,7 @@ public class TestSnapshotChainRepair {
 
   private MockedStatic<ManagedRocksDB> mockedDB;
   private MockedStatic<RocksDBUtils> mockedUtils;
+  private MockedStatic<OptionsUtil> mockedOptionsUtil;
 
   private GenericTestUtils.PrintStreamCapturer out;
   private GenericTestUtils.PrintStreamCapturer err;
@@ -82,11 +86,12 @@ public void setup() throws Exception {
     // Initialize static mocks
     mockedDB = mockStatic(ManagedRocksDB.class);
     mockedUtils = mockStatic(RocksDBUtils.class);
+    mockedOptionsUtil = mockStatic(OptionsUtil.class);
   }
 
   @AfterEach
   public void tearDown() {
-    IOUtils.closeQuietly(out, err, mockedDB, mockedUtils);
+    IOUtils.closeQuietly(out, err, mockedDB, mockedUtils, mockedOptionsUtil);
   }
 
   private void setupMockDB(SnapshotInfo snapshotInfo,
@@ -98,15 +103,9 @@ private void setupMockDB(SnapshotInfo snapshotInfo,
 
     when(managedRocksDB.get()).thenReturn(rocksDB);
 
-    // Mock column family descriptors
-    List<ColumnFamilyDescriptor> cfDescList = new ArrayList<>();
-    cfDescList.add(new ColumnFamilyDescriptor(new byte[] {1}));
-
-    mockedUtils.when(() -> RocksDBUtils.getColumnFamilyDescriptors(eq(DB_PATH)))
-        .thenReturn(cfDescList);
-
     // Mock DB open
-    mockedDB.when(() -> ManagedRocksDB.open(eq(DB_PATH), eq(cfDescList), eq(new ArrayList<>())))
+    mockedDB.when(() -> ManagedRocksDB.openWithLatestOptions(any(ManagedConfigOptions.class),
+            any(DBOptions.class), eq(DB_PATH), eq(new ArrayList<>()), eq(new ArrayList<>())))
         .thenReturn(managedRocksDB);
 
     // Mock column family handle


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
