This is an automated email from the ASF dual-hosted git repository.
swamirishi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 7508da8c99e HDDS-14024. Clean up handles when dropping column family (#9388)
7508da8c99e is described below
commit 7508da8c99e572a932c4986589f7235574b4a157
Author: Swaminathan Balachandran <[email protected]>
AuthorDate: Thu Dec 4 11:02:26 2025 -0500
HDDS-14024. Clean up handles when dropping column family (#9388)
---
.../org/apache/hadoop/hdds/utils/db/RDBStore.java | 9 +----
.../apache/hadoop/hdds/utils/db/RocksDatabase.java | 39 ++++++++++++++++++----
2 files changed, 33 insertions(+), 15 deletions(-)
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index fec46db51d7..cbd1c32c438 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -363,14 +363,7 @@ public Map<Integer, String> getTableNames() {
*/
@Override
public void dropTable(String tableName) throws RocksDatabaseException {
- ColumnFamily columnFamily = db.getColumnFamily(tableName);
- if (columnFamily != null) {
- try {
-        db.getManagedRocksDb().get().dropColumnFamily(columnFamily.getHandle());
- } catch (RocksDBException e) {
- throw new RocksDatabaseException("Failed to drop " + tableName, e);
- }
- }
+ db.dropColumnFamily(tableName);
}
public Collection<ColumnFamily> getColumnFamilies() {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index 64bbb371101..bdc5124ac3b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -35,6 +35,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
@@ -89,7 +90,7 @@ public final class RocksDatabase implements Closeable {
private final ManagedRocksDB db;
private final ManagedDBOptions dbOptions;
private final ManagedWriteOptions writeOptions;
- private final List<ColumnFamilyDescriptor> descriptors;
+ private final Map<String, ColumnFamilyDescriptor> descriptors;
/** column family names -> {@link ColumnFamily}. */
private final Map<String, ColumnFamily> columnFamilies;
/** {@link ColumnFamilyHandle#getID()} -> column family names. */
@@ -201,7 +202,7 @@ public String toString() {
}
private static void close(Map<String, ColumnFamily> columnFamilies,
- ManagedRocksDB db, List<ColumnFamilyDescriptor> descriptors,
+ ManagedRocksDB db, Collection<ColumnFamilyDescriptor> descriptors,
ManagedWriteOptions writeOptions, ManagedDBOptions dbOptions) {
if (columnFamilies != null) {
for (ColumnFamily f : columnFamilies.values()) {
@@ -370,18 +371,21 @@ private RocksDatabase(File dbFile, ManagedRocksDB db,
this.db = db;
this.dbOptions = dbOptions;
this.writeOptions = writeOptions;
- this.descriptors = descriptors;
+    this.descriptors = descriptors.stream().collect(Collectors.toMap(d -> bytes2String(d.getName()), d -> d,
+        (d1, d2) -> {
+          throw new IllegalStateException("Duplicate key " + bytes2String(d1.getName()));
+        }, ConcurrentHashMap::new));
this.columnFamilies = toColumnFamilyMap(handles);
this.columnFamilyNames = MemoizedSupplier.valueOf(() ->
toColumnFamilyNameMap(columnFamilies.values()));
}
private Map<String, ColumnFamily> toColumnFamilyMap(List<ColumnFamilyHandle> handles) throws RocksDBException {
- final Map<String, ColumnFamily> map = new HashMap<>();
-    final Map<String, ColumnFamily> map = new HashMap<>();
+    final Map<String, ColumnFamily> map = new ConcurrentHashMap<>(handles.size());
for (ColumnFamilyHandle h : handles) {
final ColumnFamily f = new ColumnFamily(h);
map.put(f.getName(), f);
}
- return Collections.unmodifiableMap(map);
+ return map;
}
private static Map<Integer, String> toColumnFamilyNameMap(Collection<ColumnFamily> families) {
@@ -421,14 +425,14 @@ private void waitAndClose() {
try {
Thread.currentThread().sleep(1);
} catch (InterruptedException e) {
- close(columnFamilies, db, descriptors, writeOptions, dbOptions);
+      close(columnFamilies, db, descriptors.values(), writeOptions, dbOptions);
Thread.currentThread().interrupt();
return;
}
}
// close when counter is 0, no more operation
- close(columnFamilies, db, descriptors, writeOptions, dbOptions);
+ close(columnFamilies, db, descriptors.values(), writeOptions, dbOptions);
}
private void closeOnError(RocksDBException e) {
@@ -665,6 +669,27 @@ public Collection<ColumnFamily> getExtraColumnFamilies() {
return Collections.unmodifiableCollection(columnFamilies.values());
}
+  public void dropColumnFamily(String tableName) throws RocksDatabaseException {
+ try (UncheckedAutoCloseable ignored = acquire()) {
+ ColumnFamily columnFamily = columnFamilies.get(tableName);
+ if (columnFamily != null) {
+ try {
+ getManagedRocksDb().get().dropColumnFamily(columnFamily.getHandle());
+ ColumnFamilyDescriptor descriptor = descriptors.get(tableName);
+ columnFamily.getHandle().close();
+ if (descriptor != null) {
+ RocksDatabase.close(descriptor);
+ }
+ columnFamilies.remove(tableName);
+ descriptors.remove(tableName);
+ } catch (RocksDBException e) {
+ closeOnError(e);
+        throw toRocksDatabaseException(this, "DropColumnFamily " + tableName, e);
+ }
+ }
+ }
+ }
+
byte[] get(ColumnFamily family, byte[] key) throws RocksDatabaseException {
try (UncheckedAutoCloseable ignored = acquire()) {
return db.get().get(family.getHandle(), key);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]