This is an automated email from the ASF dual-hosted git repository.
szetszwo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new eed5924251 HDDS-12770. Use ContainerID instead of Long in CONTAINER_IDS_TABLE. (#8247)
eed5924251 is described below
commit eed592425198e8f1df87804887c71cabc20b0f01
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Fri Apr 11 11:39:04 2025 -0700
HDDS-12770. Use ContainerID instead of Long in CONTAINER_IDS_TABLE. (#8247)
---
.../ozone/container/common/impl/ContainerSet.java | 21 +--
.../container/common/impl/HddsDispatcher.java | 3 +-
.../metadata/WitnessedContainerDBDefinition.java | 8 +-
.../metadata/WitnessedContainerMetadataStore.java | 3 +-
.../WitnessedContainerMetadataStoreImpl.java | 7 +-
.../ozone/container/ozoneimpl/OzoneContainer.java | 7 +-
.../apache/hadoop/hdds/utils/db/TestRDBStore.java | 26 ++--
.../hadoop/hdds/utils/db/TestTypedTable.java | 147 +++++++++++++++++++++
8 files changed, 191 insertions(+), 31 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 22454e60a9..68d73f3c41 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -37,9 +37,11 @@
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.ToLongFunction;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -60,7 +62,7 @@ public static ContainerSet newReadOnlyContainerSet(long recoveringTimeout) {
return new ContainerSet(null, recoveringTimeout);
}
- public static ContainerSet newRwContainerSet(Table<Long, String> containerIdsTable, long recoveringTimeout) {
+ public static ContainerSet newRwContainerSet(Table<ContainerID, String> containerIdsTable, long recoveringTimeout) {
Objects.requireNonNull(containerIdsTable, "containerIdsTable == null");
return new ContainerSet(containerIdsTable, recoveringTimeout);
}
@@ -73,13 +75,13 @@ public static ContainerSet newRwContainerSet(Table<Long, String> containerIdsTab
new ConcurrentSkipListMap<>();
private final Clock clock;
private long recoveringTimeout;
- private final Table<Long, String> containerIdsTable;
+ private final Table<ContainerID, String> containerIdsTable;
- private ContainerSet(Table<Long, String> continerIdsTable, long recoveringTimeout) {
+ private ContainerSet(Table<ContainerID, String> continerIdsTable, long recoveringTimeout) {
this(continerIdsTable, recoveringTimeout, null);
}
- ContainerSet(Table<Long, String> continerIdsTable, long recoveringTimeout, Clock clock) {
+ ContainerSet(Table<ContainerID, String> continerIdsTable, long recoveringTimeout, Clock clock) {
this.clock = clock != null ? clock : Clock.systemUTC();
this.containerIdsTable = continerIdsTable;
this.recoveringTimeout = recoveringTimeout;
@@ -146,7 +148,7 @@ private boolean addContainer(Container<?> container, boolean overwrite) throws
}
try {
if (containerIdsTable != null) {
- containerIdsTable.put(containerId, containerState.toString());
+ containerIdsTable.put(ContainerID.valueOf(containerId), containerState.toString());
}
} catch (IOException e) {
throw new StorageContainerException(e,
ContainerProtos.Result.IO_EXCEPTION);
@@ -230,7 +232,7 @@ private boolean removeContainer(long containerId, boolean markMissing, boolean r
if (removeFromDB) {
try {
if (containerIdsTable != null) {
- containerIdsTable.delete(containerId);
+ containerIdsTable.delete(ContainerID.valueOf(containerId));
}
} catch (IOException e) {
throw new StorageContainerException(e,
ContainerProtos.Result.IO_EXCEPTION);
@@ -461,7 +463,7 @@ public Set<Long> getMissingContainerSet() {
return missingContainerSet;
}
- public Table<Long, String> getContainerIdsTable() {
+ public Table<ContainerID, String> getContainerIdsTable() {
return containerIdsTable;
}
@@ -475,10 +477,9 @@ public Table<Long, String> getContainerIdsTable() {
* @param container2BCSIDMap Map of containerId to BCSID persisted in the
* Ratis snapshot
*/
- public void buildMissingContainerSetAndValidate(
- Map<Long, Long> container2BCSIDMap) {
+ public <T> void buildMissingContainerSetAndValidate(Map<T, Long> container2BCSIDMap, ToLongFunction<T> getId) {
container2BCSIDMap.entrySet().parallelStream().forEach((mapEntry) -> {
- long id = mapEntry.getKey();
+ final long id = getId.applyAsLong(mapEntry.getKey());
if (!containerMap.containsKey(id)) {
LOG.warn("Adding container {} to missing container set.", id);
missingContainerSet.add(id);
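
A minimal sketch (not part of this patch) of the two call shapes the generified
buildMissingContainerSetAndValidate() supports; it assumes only an existing
ContainerSet plus java.util.HashMap/Map and the ContainerID class imported above:

    // Legacy Long-keyed map: the identity function extracts the id.
    final Map<Long, Long> longKeyed = new HashMap<>();
    containerSet.buildMissingContainerSetAndValidate(longKeyed, n -> n);

    // ContainerID-keyed map: ContainerID::getId extracts the numeric id.
    final Map<ContainerID, Long> idKeyed = new HashMap<>();
    containerSet.buildMissingContainerSetAndValidate(idKeyed, ContainerID::getId);
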
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 26c6bcf4ee..0ab5094ac8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -178,8 +178,7 @@ private boolean canIgnoreException(Result result) {
@Override
public void buildMissingContainerSetAndValidate(
Map<Long, Long> container2BCSIDMap) {
- containerSet
- .buildMissingContainerSetAndValidate(container2BCSIDMap);
+ containerSet.buildMissingContainerSetAndValidate(container2BCSIDMap, n -> n);
}
@Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java
index 3e4bb9c558..a1e76b19f4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java
@@ -19,9 +19,9 @@
import java.util.Map;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
import org.apache.hadoop.hdds.utils.db.DBDefinition;
-import org.apache.hadoop.hdds.utils.db.LongCodec;
import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -32,10 +32,10 @@ public final class WitnessedContainerDBDefinition extends DBDefinition.WithMap {
private static final String CONTAINER_IDS_TABLE_NAME = "containerIds";
- public static final DBColumnFamilyDefinition<Long, String>
+ public static final DBColumnFamilyDefinition<ContainerID, String>
CONTAINER_IDS_TABLE = new DBColumnFamilyDefinition<>(
CONTAINER_IDS_TABLE_NAME,
- LongCodec.get(),
+ ContainerID.getCodec(),
StringCodec.get());
private static final Map<String, DBColumnFamilyDefinition<?, ?>>
@@ -62,7 +62,7 @@ public String getLocationConfigKey() {
return ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR;
}
- public DBColumnFamilyDefinition<Long, String> getContainerIdsTable() {
+ DBColumnFamilyDefinition<ContainerID, String> getContainerIdsTable() {
return CONTAINER_IDS_TABLE;
}
}
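
Note that the column family name ("containerIds") is unchanged; only the key
codec is swapped. For pre-existing tables to remain readable,
ContainerID.getCodec() must serialize a key to the same bytes that
LongCodec.get() produced, which the new TestTypedTable at the end of this patch
verifies by writing through one key codec and reading through the other. A
sketch of that invariant, assuming Codec#toPersistedFormat is the byte-level
entry point (it throws IOException):

    // Both encodings must be byte-identical, otherwise pre-upgrade entries
    // in CONTAINER_IDS_TABLE would become unreadable after this change.
    final long n = 42L;
    final byte[] viaLong = LongCodec.get().toPersistedFormat(n);
    final byte[] viaId = ContainerID.getCodec().toPersistedFormat(ContainerID.valueOf(n));
    assert java.util.Arrays.equals(viaLong, viaId);
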
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java
index 7e063d05c3..815879a9ad 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.container.metadata;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.utils.db.Table;
/**
@@ -28,5 +29,5 @@ public interface WitnessedContainerMetadataStore extends DBStoreManager {
*
* @return Table
*/
- Table<Long, String> getContainerIdsTable();
+ Table<ContainerID, String> getContainerIdsTable();
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java
index 23c3eae9eb..072185a766 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java
@@ -22,6 +22,7 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -33,7 +34,7 @@
public final class WitnessedContainerMetadataStoreImpl extends
AbstractRDBStore<WitnessedContainerDBDefinition>
implements WitnessedContainerMetadataStore {
- private Table<Long, String> containerIdsTable;
+ private Table<ContainerID, String> containerIdsTable;
private static final ConcurrentMap<String, WitnessedContainerMetadataStore> INSTANCES =
new ConcurrentHashMap<>();
@@ -63,13 +64,13 @@ private WitnessedContainerMetadataStoreImpl(ConfigurationSource config, boolean
@Override
protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder,
ManagedDBOptions options, ConfigurationSource config)
throws IOException {
- DBStore dbStore = dbStoreBuilder.build();
+ final DBStore dbStore = dbStoreBuilder.build();
this.containerIdsTable =
this.getDbDef().getContainerIdsTable().getTable(dbStore);
return dbStore;
}
@Override
- public Table<Long, String> getContainerIdsTable() {
+ public Table<ContainerID, String> getContainerIdsTable() {
return containerIdsTable;
}
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index cde382ede8..fffc17db35 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -55,6 +55,7 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.security.SecurityConfig;
import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient;
@@ -343,13 +344,13 @@ public void buildContainerSet() throws IOException {
for (Thread volumeThread : volumeThreads) {
volumeThread.join();
}
- try (TableIterator<Long, ? extends Table.KeyValue<Long, String>> itr =
+ try (TableIterator<ContainerID, ? extends Table.KeyValue<ContainerID, String>> itr =
containerSet.getContainerIdsTable().iterator()) {
- Map<Long, Long> containerIds = new HashMap<>();
+ final Map<ContainerID, Long> containerIds = new HashMap<>();
while (itr.hasNext()) {
containerIds.put(itr.next().getKey(), 0L);
}
- containerSet.buildMissingContainerSetAndValidate(containerIds);
+ containerSet.buildMissingContainerSetAndValidate(containerIds, ContainerID::getId);
}
} catch (InterruptedException ex) {
LOG.error("Volume Threads Interrupted exception", ex);
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index a2f91d5b2e..f8c14143e2 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -56,6 +56,22 @@
* RDBStore Tests.
*/
public class TestRDBStore {
+ static ManagedDBOptions newManagedDBOptions() {
+ final ManagedDBOptions options = new ManagedDBOptions();
+ options.setCreateIfMissing(true);
+ options.setCreateMissingColumnFamilies(true);
+
+ Statistics statistics = new Statistics();
+ statistics.setStatsLevel(StatsLevel.ALL);
+ options.setStatistics(statistics);
+ return options;
+ }
+
+ static RDBStore newRDBStore(File dbFile, ManagedDBOptions options, Set<TableConfig> families)
+ throws IOException {
+ return newRDBStore(dbFile, options, families, MAX_DB_UPDATES_SIZE_THRESHOLD);
+ }
+
public static RDBStore newRDBStore(File dbFile, ManagedDBOptions options,
Set<TableConfig> families,
long maxDbUpdatesSizeThreshold)
@@ -72,20 +88,14 @@ public static RDBStore newRDBStore(File dbFile, ManagedDBOptions options,
"Fourth", "Fifth",
"Sixth");
private RDBStore rdbStore = null;
- private ManagedDBOptions options = null;
+ private ManagedDBOptions options;
private Set<TableConfig> configSet;
@BeforeEach
public void setUp(@TempDir File tempDir) throws Exception {
CodecBuffer.enableLeakDetection();
- options = new ManagedDBOptions();
- options.setCreateIfMissing(true);
- options.setCreateMissingColumnFamilies(true);
-
- Statistics statistics = new Statistics();
- statistics.setStatsLevel(StatsLevel.ALL);
- options.setStatistics(statistics);
+ options = newManagedDBOptions();
configSet = new HashSet<>();
for (String name : families) {
TableConfig newConfig = new TableConfig(name,
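
The TestRDBStore change is a pure extraction: the options setup moves out of
setUp() into newManagedDBOptions(), and a two-argument newRDBStore() overload
defaults the update-size threshold. The new TestTypedTable reuses both; a
sketch of the composition (tempDir being a JUnit @TempDir file and configSet a
prepared Set<TableConfig>):

    final ManagedDBOptions options = TestRDBStore.newManagedDBOptions();
    final RDBStore store = TestRDBStore.newRDBStore(tempDir, options, configSet);
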
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedTable.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedTable.java
new file mode 100644
index 0000000000..a75de4386e
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedTable.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.LongFunction;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.StringUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.utils.db.cache.TableCache;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.ratis.util.UncheckedAutoCloseable;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import org.rocksdb.RocksDB;
+
+/**
+ * Tests for RocksDBTable Store.
+ */
+public class TestTypedTable {
+ private final List<String> families = Arrays.asList(StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
+ "First", "Second");
+
+ private RDBStore rdb;
+ private final List<UncheckedAutoCloseable> closeables = new ArrayList<>();
+
+ static TableConfig newTableConfig(String name, List<UncheckedAutoCloseable> closeables) {
+ final ManagedColumnFamilyOptions option = new ManagedColumnFamilyOptions();
+ closeables.add(option::close);
+ return new TableConfig(name, option);
+ }
+
+ @BeforeEach
+ public void setUp(@TempDir File tempDir) throws Exception {
+ CodecBuffer.enableLeakDetection();
+
+ final Set<TableConfig> configSet = families.stream()
+ .map(name -> newTableConfig(name, closeables))
+ .collect(Collectors.toSet());
+ final ManagedDBOptions options = TestRDBStore.newManagedDBOptions();
+ closeables.add(options::close);
+ rdb = TestRDBStore.newRDBStore(tempDir, options, configSet);
+ }
+
+ @AfterEach
+ public void tearDown() throws Exception {
+ rdb.close();
+ closeables.forEach(UncheckedAutoCloseable::close);
+ closeables.clear();
+ CodecBuffer.assertNoLeaks();
+ }
+
+ <K, V> TypedTable<K, V> newTypedTable(int index, Codec<K> keyCodec, Codec<V> valueCodec) throws IOException {
+ final RDBTable rawTable = rdb.getTable(families.get(index));
+ return new TypedTable<>(rawTable, keyCodec, valueCodec, TableCache.CacheType.PARTIAL_CACHE);
+ }
+
+ static <V> V put(Map<Long, V> map, long key, LongFunction<V> constructor) {
+ return map.put(key, constructor.apply(key));
+ }
+
+ static <V> Map<Long, V> newMap(LongFunction<V> constructor) {
+ final Map<Long, V> map = new HashMap<>();
+ for (long n = 1; n > 0; n <<= 1) {
+ put(map, n, constructor);
+ put(map, n - 1, constructor);
+ put(map, n + 1, constructor);
+ }
+ put(map, Long.MAX_VALUE, constructor);
+ for (int i = 0; i < 1000; i++) {
+ final long key = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE) + 1;
+ put(map, key, constructor);
+ }
+ return map;
+ }
+
+ @Test
+ public void testContainerIDvsLong() throws Exception {
+ final Map<Long, ContainerID> keys = newMap(ContainerID::valueOf);
+
+ // Table 1: ContainerID -> String
+ // Table 2: Long -> String
+ try (TypedTable<ContainerID, String> idTable = newTypedTable(
+ 1, ContainerID.getCodec(), StringCodec.get());
+ TypedTable<Long, String> longTable = newTypedTable(
+ 2, LongCodec.get(), StringCodec.get())) {
+
+ for (Map.Entry<Long, ContainerID> e : keys.entrySet()) {
+ final long n = e.getKey();
+ final ContainerID id = e.getValue();
+ final String value = id.toString();
+ // put the same value to both tables
+ idTable.put(id, value);
+ longTable.put(n, value);
+ }
+ }
+
+ // Reopen tables with different key types
+
+ // Table 1: Long -> String
+ // Table 2: ContainerID -> String
+ try (TypedTable<ContainerID, String> idTable = newTypedTable(
+ 2, ContainerID.getCodec(), StringCodec.get());
+ TypedTable<Long, String> longTable = newTypedTable(
+ 1, LongCodec.get(), StringCodec.get())) {
+
+ for (Map.Entry<Long, ContainerID> e : keys.entrySet()) {
+ final long n = e.getKey();
+ final ContainerID id = e.getValue();
+ final String expected = id.toString();
+ // Read the value using a different key type
+ final String idValue = idTable.get(id);
+ assertEquals(expected, idValue);
+ final String longValue = longTable.get(n);
+ assertEquals(expected, longValue);
+ }
+ }
+ }
+}
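
The test writes every key under both key types, then reopens the tables with
the codecs swapped (table 1 read back as Long, table 2 as ContainerID), so it
passes only if both codecs emit identical key bytes. The keys produced by
newMap(), namely n - 1, n, and n + 1 around every power of two plus
Long.MAX_VALUE and 1000 random positive values, appear chosen to exercise
encoding boundary cases.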