This is an automated email from the ASF dual-hosted git repository.
swamirishi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new d5664258c4a HDDS-14002. Refactor DBStore interface to return Table instead of TypedTable (#9360)
d5664258c4a is described below
commit d5664258c4a01d904def1d0305ca418f64f2b6ae
Author: Swaminathan Balachandran <[email protected]>
AuthorDate: Tue Nov 25 13:11:53 2025 -0500
HDDS-14002. Refactor DBStore interface to return Table instead of TypedTable (#9360)
---
.../TestDatanodeUpgradeToContainerIdsTable.java | 5 +--
.../hdds/utils/db/DBColumnFamilyDefinition.java | 4 +-
.../org/apache/hadoop/hdds/utils/db/DBStore.java | 4 +-
.../org/apache/hadoop/hdds/utils/db/Table.java | 2 +-
.../hadoop/hdds/utils/db/TestRDBTableStore.java | 4 +-
.../hadoop/ozone/om/OmMetadataManagerImpl.java | 51 +++++++++++-----------
.../hadoop/ozone/repair/om/FSORepairTool.java | 5 +--
7 files changed, 36 insertions(+), 39 deletions(-)
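In short, DBStore#getTable and DBColumnFamilyDefinition#getTable now return the Table interface, so callers no longer depend on the concrete TypedTable class. A minimal caller-side sketch of the refactored signature, assuming a String-keyed table; the class name, table name, and key below are illustrative placeholders, not part of this patch:

    import org.apache.hadoop.hdds.utils.db.DBStore;
    import org.apache.hadoop.hdds.utils.db.StringCodec;
    import org.apache.hadoop.hdds.utils.db.Table;

    public final class TableLookupSketch {
      // Hypothetical helper: reads one value through the refactored API.
      // "exampleTable" and "someKey" are placeholder names.
      static String readOne(DBStore store) throws Exception {
        Table<String, String> table =
            store.getTable("exampleTable", StringCodec.get(), StringCodec.get());
        return table.get("someKey"); // null when the key is absent
      }
    }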
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java
index d0e6d9bb070..10bf849e044 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java
@@ -33,7 +33,6 @@
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TypedTable;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.ozone.container.common.ScmTestMock;
@@ -102,7 +101,7 @@ public void testContainerTableAccessBeforeAndAfterUpgrade() throws Exception {
// check if the containerIds table is in old format
WitnessedContainerMetadataStore metadataStore =
dsm.getContainer().getWitnessedContainerMetadataStore();
- TypedTable<ContainerID, String> tableWithStringCodec = metadataStore.getStore().getTable(
+ Table<ContainerID, String> tableWithStringCodec = metadataStore.getStore().getTable(
metadataStore.getContainerCreateInfoTable().getName(),
ContainerID.getCodec(), StringCodec.get());
assertEquals("containerIds",
metadataStore.getContainerCreateInfoTable().getName());
assertEquals(OPEN.name(),
tableWithStringCodec.get(ContainerID.valueOf(containerID)));
@@ -138,7 +137,7 @@ public void testContainerTableFinalizeRetry() throws Exception {
// check if the containerIds table is in old format
WitnessedContainerMetadataStore metadataStore =
dsm.getContainer().getWitnessedContainerMetadataStore();
- TypedTable<ContainerID, String> tableWithStringCodec = metadataStore.getStore().getTable(
+ Table<ContainerID, String> tableWithStringCodec = metadataStore.getStore().getTable(
metadataStore.getContainerCreateInfoTable().getName(),
ContainerID.getCodec(), StringCodec.get());
assertEquals("containerIds",
metadataStore.getContainerCreateInfoTable().getName());
assertEquals(OPEN.name(),
tableWithStringCodec.get(ContainerID.valueOf(containerID)));
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java
index d5c61827258..72c1f47acd1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java
@@ -71,11 +71,11 @@ public DBColumnFamilyDefinition(String tableName, Codec<KEY> keyCodec, Codec<VAL
DBColumnFamilyDefinition::getName);
}
- public TypedTable<KEY, VALUE> getTable(DBStore db) throws RocksDatabaseException, CodecException {
+ public Table<KEY, VALUE> getTable(DBStore db) throws RocksDatabaseException, CodecException {
return db.getTable(tableName, keyCodec, valueCodec);
}
- public TypedTable<KEY, VALUE> getTable(DBStore db, CacheType cacheType)
+ public Table<KEY, VALUE> getTable(DBStore db, CacheType cacheType)
throws RocksDatabaseException, CodecException {
return db.getTable(tableName, keyCodec, valueCodec, cacheType);
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
index f71ffe42197..a269ebc56b9 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
@@ -46,7 +46,7 @@ public interface DBStore extends UncheckedAutoCloseable, BatchOperationHandler {
Table<byte[], byte[]> getTable(String name) throws RocksDatabaseException;
/** The same as getTable(name, keyCodec, valueCodec, CacheType.PARTIAL_CACHE). */
- default <KEY, VALUE> TypedTable<KEY, VALUE> getTable(String name, Codec<KEY> keyCodec, Codec<VALUE> valueCodec)
+ default <KEY, VALUE> Table<KEY, VALUE> getTable(String name, Codec<KEY> keyCodec, Codec<VALUE> valueCodec)
throws RocksDatabaseException, CodecException {
return getTable(name, keyCodec, valueCodec, CacheType.PARTIAL_CACHE);
}
@@ -60,7 +60,7 @@ default <KEY, VALUE> TypedTable<KEY, VALUE> getTable(String name, Codec<KEY> key
* @param cacheType - cache type
* @return - Table Store
*/
- <KEY, VALUE> TypedTable<KEY, VALUE> getTable(
+ <KEY, VALUE> Table<KEY, VALUE> getTable(
String name, Codec<KEY> keyCodec, Codec<VALUE> valueCodec,
TableCache.CacheType cacheType)
throws RocksDatabaseException, CodecException;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index 7f5d74ad4ee..3b7f5de3b37 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -268,7 +268,7 @@ default void cleanupCache(List<Long> epochs) {
/**
* Create the metrics datasource that emits table cache metrics.
*/
- default TableCacheMetrics createCacheMetrics() throws RocksDatabaseException {
+ default TableCacheMetrics createCacheMetrics() {
throw new NotImplementedException("getCacheValue is not implemented");
}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index bd8a00becf0..cd155f27a96 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -570,7 +570,7 @@ public void testStringPrefixedIterator() throws Exception {
final List<String> prefixes = generatePrefixes(prefixCount);
final List<Map<String, String>> data = generateKVs(prefixes, keyCount);
- TypedTable<String, String> table = rdbStore.getTable("PrefixFirst", StringCodec.get(), StringCodec.get());
+ Table<String, String> table = rdbStore.getTable("PrefixFirst", StringCodec.get(), StringCodec.get());
populateTable(table, data);
for (String prefix : prefixes) {
assertIterator(keyCount, prefix, table);
@@ -582,7 +582,7 @@ public void testStringPrefixedIterator() throws Exception {
}
static void assertIterator(int expectedCount, String prefix,
- TypedTable<String, String> table) throws Exception {
+ Table<String, String> table) throws Exception {
try (Table.KeyValueIterator<String, String> i = table.iterator(prefix)) {
int keyCount = 0;
for (; i.hasNext(); keyCount++) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 042da02a4cb..f9c31f05295 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -86,7 +86,6 @@
import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.TablePrefixInfo;
-import org.apache.hadoop.hdds.utils.db.TypedTable;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType;
@@ -154,34 +153,34 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
private final IOzoneManagerLock lock;
private final HierarchicalResourceLockManager hierarchicalLockManager;
- private TypedTable<String, PersistedUserVolumeInfo> userTable;
- private TypedTable<String, OmVolumeArgs> volumeTable;
- private TypedTable<String, OmBucketInfo> bucketTable;
- private TypedTable<String, OmKeyInfo> keyTable;
+ private Table<String, PersistedUserVolumeInfo> userTable;
+ private Table<String, OmVolumeArgs> volumeTable;
+ private Table<String, OmBucketInfo> bucketTable;
+ private Table<String, OmKeyInfo> keyTable;
- private TypedTable<String, OmKeyInfo> openKeyTable;
- private TypedTable<String, OmMultipartKeyInfo> multipartInfoTable;
- private TypedTable<String, RepeatedOmKeyInfo> deletedTable;
+ private Table<String, OmKeyInfo> openKeyTable;
+ private Table<String, OmMultipartKeyInfo> multipartInfoTable;
+ private Table<String, RepeatedOmKeyInfo> deletedTable;
- private TypedTable<String, OmDirectoryInfo> dirTable;
- private TypedTable<String, OmKeyInfo> fileTable;
- private TypedTable<String, OmKeyInfo> openFileTable;
- private TypedTable<String, OmKeyInfo> deletedDirTable;
+ private Table<String, OmDirectoryInfo> dirTable;
+ private Table<String, OmKeyInfo> fileTable;
+ private Table<String, OmKeyInfo> openFileTable;
+ private Table<String, OmKeyInfo> deletedDirTable;
- private TypedTable<String, S3SecretValue> s3SecretTable;
- private TypedTable<OzoneTokenIdentifier, Long> dTokenTable;
- private TypedTable<String, OmPrefixInfo> prefixTable;
- private TypedTable<String, TransactionInfo> transactionInfoTable;
- private TypedTable<String, String> metaTable;
+ private Table<String, S3SecretValue> s3SecretTable;
+ private Table<OzoneTokenIdentifier, Long> dTokenTable;
+ private Table<String, OmPrefixInfo> prefixTable;
+ private Table<String, TransactionInfo> transactionInfoTable;
+ private Table<String, String> metaTable;
// Tables required for multi-tenancy
- private TypedTable<String, OmDBAccessIdInfo> tenantAccessIdTable;
- private TypedTable<String, OmDBUserPrincipalInfo> principalToAccessIdsTable;
- private TypedTable<String, OmDBTenantState> tenantStateTable;
+ private Table<String, OmDBAccessIdInfo> tenantAccessIdTable;
+ private Table<String, OmDBUserPrincipalInfo> principalToAccessIdsTable;
+ private Table<String, OmDBTenantState> tenantStateTable;
- private TypedTable<String, SnapshotInfo> snapshotInfoTable;
- private TypedTable<String, String> snapshotRenamedTable;
- private TypedTable<String, CompactionLogEntry> compactionLogTable;
+ private Table<String, SnapshotInfo> snapshotInfoTable;
+ private Table<String, String> snapshotRenamedTable;
+ private Table<String, CompactionLogEntry> compactionLogTable;
private OzoneManager ozoneManager;
@@ -1968,17 +1967,17 @@ private class TableInitializer {
this.addCacheMetrics = addCacheMetrics;
}
- <KEY, VALUE> TypedTable<KEY, VALUE> get(DBColumnFamilyDefinition<KEY, VALUE> definition)
+ <KEY, VALUE> Table<KEY, VALUE> get(DBColumnFamilyDefinition<KEY, VALUE> definition)
throws IOException {
return get(definition.getTable(store));
}
- <KEY, VALUE> TypedTable<KEY, VALUE> get(DBColumnFamilyDefinition<KEY, VALUE> definition, CacheType cacheType)
+ <KEY, VALUE> Table<KEY, VALUE> get(DBColumnFamilyDefinition<KEY, VALUE> definition, CacheType cacheType)
throws IOException {
return get(definition.getTable(store, cacheType));
}
- private <KEY, VALUE> TypedTable<KEY, VALUE> get(TypedTable<KEY, VALUE> table) {
+ private <KEY, VALUE> Table<KEY, VALUE> get(Table<KEY, VALUE> table) {
Objects.requireNonNull(table, "table == null");
final String name = table.getName();
final Table<?, ?> previousTable = tableMap.put(name, table);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
index 8aa5a563de2..40ee4f2fcb9 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
@@ -36,7 +36,6 @@
import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.apache.hadoop.hdds.utils.db.TypedTable;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
@@ -136,8 +135,8 @@ private class Impl {
private final Table<String, RepeatedOmKeyInfo> deletedTable;
private final Table<String, SnapshotInfo> snapshotInfoTable;
private DBStore tempDB;
- private TypedTable<String, byte[]> reachableTable;
- private TypedTable<String, byte[]> unreachableTable;
+ private Table<String, byte[]> reachableTable;
+ private Table<String, byte[]> unreachableTable;
private final ReportStatistics reachableStats;
private final ReportStatistics unreachableStats;
private final ReportStatistics unreferencedStats;
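The DBColumnFamilyDefinition path changes the same way: the definition still pairs the table name with its key and value codecs, but getTable(DBStore) now hands back the Table interface. A hedged sketch under the signatures shown in the hunks above; the definition name and the String/String codecs are illustrative only, not from this patch:

    import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
    import org.apache.hadoop.hdds.utils.db.DBStore;
    import org.apache.hadoop.hdds.utils.db.StringCodec;
    import org.apache.hadoop.hdds.utils.db.Table;

    public final class DefinitionSketch {
      // Hypothetical column family; real definitions use the codecs of
      // whatever key/value types the table actually stores.
      static final DBColumnFamilyDefinition<String, String> EXAMPLE =
          new DBColumnFamilyDefinition<>(
              "exampleTable", StringCodec.get(), StringCodec.get());

      // Post-refactor return type is the Table interface, not TypedTable.
      static Table<String, String> open(DBStore store) throws Exception {
        return EXAMPLE.getTable(store);
      }
    }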
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]