This is an automated email from the ASF dual-hosted git repository.

sumitagrawal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 96f1db81afc HDDS-13176. containerIds table value format change to proto from string (#8589)
96f1db81afc is described below

commit 96f1db81afc23412ec67c6ce694a17c91084f8e3
Author: Sumit Agrawal <[email protected]>
AuthorDate: Wed Jul 23 12:07:03 2025 +0530

    HDDS-13176. containerIds table value format change to proto from string (#8589)
---
 .../hadoop/hdds/upgrade/HDDSLayoutFeature.java     |   3 +-
 .../ozone/container/common/impl/ContainerSet.java  |  63 ++++---
 .../container/metadata/ContainerCreateInfo.java    |  76 ++++++++
 .../metadata/WitnessedContainerDBDefinition.java   |  17 +-
 .../metadata/WitnessedContainerMetadataStore.java  |   2 +-
 .../WitnessedContainerMetadataStoreImpl.java       |  55 +++++-
 .../ozone/container/ozoneimpl/OzoneContainer.java  |   9 +-
 .../ContainerTableSchemaFinalizeAction.java        |  97 +++++++++++
 .../common/impl/ContainerImplTestUtils.java        |  12 +-
 .../TestDatanodeUpgradeToContainerIdsTable.java    | 193 +++++++++++++++++++++
 .../src/main/proto/DatanodeClientProtocol.proto    |   4 +
 .../ozone/freon/ClosedContainerReplicator.java     |   2 +-
 12 files changed, 485 insertions(+), 48 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
index 02e68515f38..f6ac0a4872c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
@@ -41,7 +41,8 @@ public enum HDDSLayoutFeature implements LayoutFeature {
   HADOOP_PRC_PORTS_IN_DATANODEDETAILS(7, "Adding Hadoop RPC ports " +
                                      "to DatanodeDetails."),
   HBASE_SUPPORT(8, "Datanode RocksDB Schema Version 3 has an extra table " +
-          "for the last chunk of blocks to support HBase.)");
+          "for the last chunk of blocks to support HBase.)"),
+  WITNESSED_CONTAINER_DB_PROTO_VALUE(9, "ContainerID table schema to use value type as proto");
 
   //////////////////////////////  //////////////////////////////
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 8e998c1aef2..420076506d9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -23,6 +23,7 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.protobuf.Message;
+import jakarta.annotation.Nullable;
 import java.io.IOException;
 import java.time.Clock;
 import java.util.ArrayList;
@@ -43,11 +44,12 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.utils.ContainerLogger;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.metadata.ContainerCreateInfo;
+import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore;
 import org.apache.hadoop.ozone.container.ozoneimpl.OnDemandContainerScanner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -67,7 +69,8 @@ public class ContainerSet implements Iterable<Container<?>> {
       new ConcurrentSkipListMap<>();
   private final Clock clock;
   private long recoveringTimeout;
-  private final Table<ContainerID, String> containerIdsTable;
+  @Nullable
+  private final WitnessedContainerMetadataStore containerMetadataStore;
   // Handler that will be invoked when a scan of a container in this set is requested.
   private OnDemandContainerScanner containerScanner;
 
@@ -75,18 +78,19 @@ public static ContainerSet newReadOnlyContainerSet(long recoveringTimeout) {
     return new ContainerSet(null, recoveringTimeout);
   }
 
-  public static ContainerSet newRwContainerSet(Table<ContainerID, String> containerIdsTable, long recoveringTimeout) {
-    Objects.requireNonNull(containerIdsTable, "containerIdsTable == null");
-    return new ContainerSet(containerIdsTable, recoveringTimeout);
+  public static ContainerSet newRwContainerSet(
+      WitnessedContainerMetadataStore metadataStore, long recoveringTimeout) {
+    Objects.requireNonNull(metadataStore, "WitnessedContainerMetadataStore == null");
+    return new ContainerSet(metadataStore, recoveringTimeout);
   }
 
-  private ContainerSet(Table<ContainerID, String> continerIdsTable, long recoveringTimeout) {
-    this(continerIdsTable, recoveringTimeout, null);
+  private ContainerSet(WitnessedContainerMetadataStore containerMetadataStore, long recoveringTimeout) {
+    this(containerMetadataStore, recoveringTimeout, null);
   }
 
-  ContainerSet(Table<ContainerID, String> continerIdsTable, long recoveringTimeout, Clock clock) {
+  ContainerSet(WitnessedContainerMetadataStore containerMetadataStore, long recoveringTimeout, Clock clock) {
     this.clock = clock != null ? clock : Clock.systemUTC();
-    this.containerIdsTable = continerIdsTable;
+    this.containerMetadataStore = containerMetadataStore;
     this.recoveringTimeout = recoveringTimeout;
   }
 
@@ -188,13 +192,7 @@ private boolean addContainer(Container<?> container, boolean overwrite) throws
         LOG.debug("Container with container Id {} is added to containerMap",
             containerId);
       }
-      try {
-        if (containerIdsTable != null) {
-          containerIdsTable.put(ContainerID.valueOf(containerId), containerState.toString());
-        }
-      } catch (IOException e) {
-        throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION);
-      }
+      updateContainerIdTable(containerId, containerState);
       missingContainerSet.remove(containerId);
       if (container.getContainerData().getState() == RECOVERING) {
         recoveringContainerMap.put(
@@ -209,6 +207,17 @@ private boolean addContainer(Container<?> container, boolean overwrite) throws
     }
   }
 
+  private void updateContainerIdTable(long containerId, State containerState) throws StorageContainerException {
+    if (null != containerMetadataStore) {
+      try {
+        containerMetadataStore.getContainerCreateInfoTable().put(ContainerID.valueOf(containerId),
+            ContainerCreateInfo.valueOf(containerState));
+      } catch (IOException e) {
+        throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION);
+      }
+    }
+  }
+
   /**
    * Returns the Container with specified containerId.
    * @param containerId ID of the container to get
@@ -270,13 +279,7 @@ private boolean removeContainer(long containerId, boolean markMissing, boolean r
     }
     Container<?> removed = containerMap.remove(containerId);
     if (removeFromDB) {
-      try {
-        if (containerIdsTable != null) {
-          containerIdsTable.delete(ContainerID.valueOf(containerId));
-        }
-      } catch (IOException e) {
-        throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION);
-      }
+      deleteFromContainerTable(containerId);
     }
     if (removed == null) {
       LOG.debug("Container with containerId {} is not present in " +
@@ -289,6 +292,16 @@ private boolean removeContainer(long containerId, boolean markMissing, boolean r
     }
   }
 
+  private void deleteFromContainerTable(long containerId) throws StorageContainerException {
+    if (null != containerMetadataStore) {
+      try {
+        containerMetadataStore.getContainerCreateInfoTable().delete(ContainerID.valueOf(containerId));
+      } catch (IOException e) {
+        throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION);
+      }
+    }
+  }
+
   /**
    * Removes the Recovering Container matching with specified containerId.
    * @param containerId ID of the container to remove.
@@ -503,10 +516,6 @@ public Set<Long> getMissingContainerSet() {
     return missingContainerSet;
   }
 
-  public Table<ContainerID, String> getContainerIdsTable() {
-    return containerIdsTable;
-  }
-
   /**
    * Builds the missing container set by taking a diff between total no
    * containers actually found and number of containers which actually
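For illustration only (not part of this patch): the two factory methods above differ in whether a WitnessedContainerMetadataStore is supplied, and that null check is what gates the RocksDB writes in updateContainerIdTable/deleteFromContainerTable. A minimal Java sketch; the class name is hypothetical:

    import org.apache.hadoop.ozone.container.common.impl.ContainerSet;

    public class ContainerSetFactorySketch {
      public static void main(String[] args) {
        // Read-only set: the store reference stays null, so DB updates are
        // skipped and only the in-memory maps are maintained.
        ContainerSet readOnly = ContainerSet.newReadOnlyContainerSet(60_000L);
        System.out.println("created: " + readOnly);

        // A read-write set requires a non-null store (see newRwContainerSet
        // above); container add/remove is then mirrored into the
        // ContainerCreateInfo table.
      }
    }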
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/ContainerCreateInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/ContainerCreateInfo.java
new file mode 100644
index 00000000000..ab7700c6ff3
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/ContainerCreateInfo.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.metadata;
+
+import java.util.function.Supplier;
+import net.jcip.annotations.Immutable;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto3Codec;
+import org.apache.ratis.util.MemoizedSupplier;
+
+/**
+ * ContainerCreateInfo holds the container state and other information recorded at creation time.
+ * This class is immutable.
+ */
+@Immutable
+public final class ContainerCreateInfo {
+  private static final Codec<ContainerCreateInfo> CODEC = new DelegatedCodec<>(
+      Proto3Codec.get(ContainerProtos.ContainerCreateInfo.getDefaultInstance()),
+      ContainerCreateInfo::getFromProtobuf, ContainerCreateInfo::getProtobuf,
+      ContainerCreateInfo.class);
+
+  private final ContainerProtos.ContainerDataProto.State state;
+  private final Supplier<ContainerProtos.ContainerCreateInfo> proto;
+
+  public static Codec<ContainerCreateInfo> getCodec() {
+    return CODEC;
+  }
+
+  public static Codec<ContainerCreateInfo> getNewCodec() {
+    return CODEC;
+  }
+
+  private ContainerCreateInfo(ContainerProtos.ContainerDataProto.State state) {
+    this.state = state;
+    this.proto = MemoizedSupplier.valueOf(
+        () -> ContainerProtos.ContainerCreateInfo.newBuilder().setState(state).build());
+  }
+
+  /**
+   * Factory method for creation of ContainerCreateInfo.
+   * @param state  State
+   * @return ContainerCreateInfo.
+   */
+  public static ContainerCreateInfo valueOf(final ContainerProtos.ContainerDataProto.State state) {
+    return new ContainerCreateInfo(state);
+  }
+
+  public ContainerProtos.ContainerCreateInfo getProtobuf() {
+    return proto.get();
+  }
+
+  public static ContainerCreateInfo getFromProtobuf(ContainerProtos.ContainerCreateInfo proto) {
+    return ContainerCreateInfo.valueOf(proto.getState());
+  }
+
+  public ContainerProtos.ContainerDataProto.State getState() {
+    return state;
+  }
+}
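For illustration only (not part of this patch): a round-trip sketch of what the Proto3Codec-backed CODEC above persists, using only classes from this change plus the generated protobuf API; the class name is hypothetical:

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.metadata.ContainerCreateInfo;

    public class ContainerCreateInfoRoundTripSketch {
      public static void main(String[] args) throws Exception {
        ContainerCreateInfo info =
            ContainerCreateInfo.valueOf(ContainerProtos.ContainerDataProto.State.OPEN);
        byte[] persisted = info.getProtobuf().toByteArray();  // bytes as stored in RocksDB
        ContainerCreateInfo decoded = ContainerCreateInfo.getFromProtobuf(
            ContainerProtos.ContainerCreateInfo.parseFrom(persisted));
        // run with -ea to enable the assertion
        assert decoded.getState() == ContainerProtos.ContainerDataProto.State.OPEN;
      }
    }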
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java
index a1e76b19f43..f5869737acd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java
@@ -22,7 +22,6 @@
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
-import org.apache.hadoop.hdds.utils.db.StringCodec;
 import org.apache.hadoop.ozone.OzoneConsts;
 
 /**
@@ -30,17 +29,17 @@
  */
 public final class WitnessedContainerDBDefinition extends DBDefinition.WithMap {
 
-  private static final String CONTAINER_IDS_TABLE_NAME = "containerIds";
+  private static final String CONTAINER_CREATE_INFO_TABLE_NAME = "ContainerCreateInfoTable";
 
-  public static final DBColumnFamilyDefinition<ContainerID, String>
-      CONTAINER_IDS_TABLE = new DBColumnFamilyDefinition<>(
-      CONTAINER_IDS_TABLE_NAME,
+  public static final DBColumnFamilyDefinition<ContainerID, ContainerCreateInfo>
+      CONTAINER_CREATE_INFO_TABLE_DEF = new DBColumnFamilyDefinition<>(
+      CONTAINER_CREATE_INFO_TABLE_NAME,
       ContainerID.getCodec(),
-      StringCodec.get());
+      ContainerCreateInfo.getCodec());
 
   private static final Map<String, DBColumnFamilyDefinition<?, ?>>
       COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap(
-      CONTAINER_IDS_TABLE);
+      CONTAINER_CREATE_INFO_TABLE_DEF);
 
   private static final WitnessedContainerDBDefinition INSTANCE = new WitnessedContainerDBDefinition();
 
@@ -62,7 +61,7 @@ public String getLocationConfigKey() {
     return ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR;
   }
 
-  DBColumnFamilyDefinition<ContainerID, String> getContainerIdsTable() {
-    return CONTAINER_IDS_TABLE;
+  DBColumnFamilyDefinition<ContainerID, ContainerCreateInfo> getContainerCreateInfoTableDef() {
+    return CONTAINER_CREATE_INFO_TABLE_DEF;
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java
index 815879a9ada..64d1b1abbd4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java
@@ -29,5 +29,5 @@ public interface WitnessedContainerMetadataStore extends DBStoreManager {
    *
    * @return Table
    */
-  Table<ContainerID, String> getContainerIdsTable();
+  Table<ContainerID, ContainerCreateInfo> getContainerCreateInfoTable();
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java
index 2e1e0386aab..a389d0497ff 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java
@@ -22,13 +22,18 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.db.CodecException;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
 import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 
 /**
  * Class for interacting with database in the master volume of a datanode.
@@ -36,7 +41,9 @@
 public final class WitnessedContainerMetadataStoreImpl extends AbstractRDBStore<WitnessedContainerDBDefinition>
     implements WitnessedContainerMetadataStore {
 
-  private Table<ContainerID, String> containerIdsTable;
+  private Table<ContainerID, ContainerCreateInfo> containerCreateInfoTable;
+  private PreviousVersionTables previousVersionTables;
+
   private static final ConcurrentMap<String, WitnessedContainerMetadataStore> INSTANCES =
       new ConcurrentHashMap<>();
 
@@ -67,13 +74,53 @@ private WitnessedContainerMetadataStoreImpl(ConfigurationSource config, boolean
   @Override
   protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, ConfigurationSource config)
       throws RocksDatabaseException, CodecException {
+    previousVersionTables = new PreviousVersionTables();
+    previousVersionTables.addTables(dbStoreBuilder);
     final DBStore dbStore = dbStoreBuilder.build();
-    this.containerIdsTable = this.getDbDef().getContainerIdsTable().getTable(dbStore);
+    previousVersionTables.init(dbStore);
+    this.containerCreateInfoTable = this.getDbDef().getContainerCreateInfoTableDef().getTable(dbStore);
     return dbStore;
   }
 
   @Override
-  public Table<ContainerID, String> getContainerIdsTable() {
-    return containerIdsTable;
+  public Table<ContainerID, ContainerCreateInfo> getContainerCreateInfoTable() {
+    if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.WITNESSED_CONTAINER_DB_PROTO_VALUE)) {
+      return previousVersionTables.getContainerIdsTable();
+    }
+    return containerCreateInfoTable;
+  }
+
+  public PreviousVersionTables getPreviousVersionTables() {
+    return previousVersionTables;
+  }
+
+  /**
+   * Holds previous-version tables required during upgrade; they are initialized only when the layout feature is not yet finalized.
+   */
+  public static class PreviousVersionTables {
+    private static final String CONTAINER_IDS_STR_VAL_TABLE = "containerIds";
+    private Table<ContainerID, ContainerCreateInfo> containerIdsTable;
+
+    public PreviousVersionTables() {
+    }
+
+    public void addTables(DBStoreBuilder dbStoreBuilder) {
+      if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.WITNESSED_CONTAINER_DB_PROTO_VALUE)) {
+        dbStoreBuilder.addTable(CONTAINER_IDS_STR_VAL_TABLE);
+      }
+    }
+
+    public void init(DBStore dbStore) throws RocksDatabaseException, CodecException {
+      if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.WITNESSED_CONTAINER_DB_PROTO_VALUE)) {
+        this.containerIdsTable = dbStore.getTable(CONTAINER_IDS_STR_VAL_TABLE, ContainerID.getCodec(),
+            new DelegatedCodec<>(StringCodec.get(),
+                (strVal) -> ContainerCreateInfo.valueOf(ContainerProtos.ContainerDataProto.State.valueOf(strVal)),
+                (obj) -> obj.getState().name(), ContainerCreateInfo.class));
+      }
+    }
+
+    public Table<ContainerID, ContainerCreateInfo> getContainerIdsTable() {
+      return containerIdsTable;
+    }
   }
 }
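For illustration only (not part of this patch): before finalization, reads fall back to the legacy containerIds table, whose string values are adapted on the fly by the DelegatedCodec in PreviousVersionTables.init above. The adaptation is just a name/enum round trip; the class name below is hypothetical:

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.metadata.ContainerCreateInfo;

    public class LegacyValueAdapterSketch {
      public static void main(String[] args) {
        String legacyValue = "OPEN";  // value format before this change
        ContainerCreateInfo decoded = ContainerCreateInfo.valueOf(
            ContainerProtos.ContainerDataProto.State.valueOf(legacyValue));
        String reEncoded = decoded.getState().name();  // what would be written back
        System.out.println(legacyValue.equals(reEncoded));  // true
      }
    }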
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 985909a294e..91816e97a77 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -197,8 +197,7 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService,
         OZONE_RECOVERING_CONTAINER_TIMEOUT,
         OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
     this.witnessedContainerMetadataStore = WitnessedContainerMetadataStoreImpl.get(conf);
-    containerSet = ContainerSet.newRwContainerSet(witnessedContainerMetadataStore.getContainerIdsTable(),
-        recoveringContainerTimeout);
+    containerSet = ContainerSet.newRwContainerSet(witnessedContainerMetadataStore, recoveringContainerTimeout);
     volumeSet.setGatherContainerUsages(this::gatherContainerUsages);
     metadataScanner = null;
 
@@ -351,7 +350,8 @@ public void buildContainerSet() throws IOException {
       for (Thread volumeThread : volumeThreads) {
         volumeThread.join();
       }
-      try (TableIterator<ContainerID, ContainerID> itr = containerSet.getContainerIdsTable().keyIterator()) {
+      try (TableIterator<ContainerID, ContainerID> itr
+               = getWitnessedContainerMetadataStore().getContainerCreateInfoTable().keyIterator()) {
         final Map<ContainerID, Long> containerIds = new HashMap<>();
         while (itr.hasNext()) {
           containerIds.put(itr.next(), 0L);
@@ -686,4 +686,7 @@ public void compactDb() {
     }
   }
 
+  public WitnessedContainerMetadataStore getWitnessedContainerMetadataStore() {
+    return witnessedContainerMetadataStore;
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/ContainerTableSchemaFinalizeAction.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/ContainerTableSchemaFinalizeAction.java
new file mode 100644
index 00000000000..ed037070d80
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/ContainerTableSchemaFinalizeAction.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.upgrade;
+
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.WITNESSED_CONTAINER_DB_PROTO_VALUE;
+import static org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeActionType.ON_FINALIZE;
+import static org.apache.hadoop.ozone.upgrade.UpgradeActionHdds.Component.DATANODE;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.CodecException;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.metadata.ContainerCreateInfo;
+import org.apache.hadoop.ozone.container.metadata.WitnessedContainerDBDefinition;
+import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore;
+import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStoreImpl;
+import org.apache.hadoop.ozone.upgrade.UpgradeActionHdds;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Upgrade action for the DataNode that migrates the containerIds table data to the new schema.
+ */
+@UpgradeActionHdds(feature = WITNESSED_CONTAINER_DB_PROTO_VALUE, component = DATANODE, type = ON_FINALIZE)
+public class ContainerTableSchemaFinalizeAction
+    implements HDDSUpgradeAction<DatanodeStateMachine> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ContainerTableSchemaFinalizeAction.class);
+
+  @Override
+  public void execute(DatanodeStateMachine arg) throws Exception {
+    WitnessedContainerMetadataStore metadataStore = arg.getContainer().getWitnessedContainerMetadataStore();
+    Table<ContainerID, ContainerCreateInfo> previousTable
+        = ((WitnessedContainerMetadataStoreImpl) metadataStore).getPreviousVersionTables().getContainerIdsTable();
+    Table<ContainerID, ContainerCreateInfo> currTable =
+        WitnessedContainerDBDefinition.CONTAINER_CREATE_INFO_TABLE_DEF.getTable(metadataStore.getStore());
+
+    // Data is moved from the old table to the new table; pre-existing entries need no cleanup since they are simply overwritten.
+    try (TableIterator<ContainerID, ContainerID> curTblItr = currTable.keyIterator()) {
+      truncateCurrentTable(curTblItr, currTable);
+    }
+
+    try (BatchOperation batch = metadataStore.getStore().initBatchOperation();
+         TableIterator<ContainerID, ? extends Table.KeyValue<ContainerID, ContainerCreateInfo>> iterator =
+             previousTable.iterator()) {
+      while (iterator.hasNext()) {
+        Table.KeyValue<ContainerID, ContainerCreateInfo> next = iterator.next();
+        currTable.putWithBatch(batch, next.getKey(), next.getValue());
+      }
+      metadataStore.getStore().commitBatchOperation(batch);
+      LOG.info("Finished copy to containerIdsTable from previous table");
+    }
+  }
+
+  private static void truncateCurrentTable(
+      TableIterator<ContainerID, ContainerID> curTblItr,
+      Table<ContainerID, ContainerCreateInfo> currTable) throws RocksDatabaseException, CodecException {
+    // Delete any entries already present in the current table; these can exist if a previous upgrade
+    // attempt crashed before finalizing. deleteRange is used to avoid creating a tombstone per entry.
+    ContainerID startContainerID = null;
+    ContainerID endContainerID = null;
+    if (curTblItr.hasNext()) {
+      startContainerID = curTblItr.next();
+      endContainerID = startContainerID;
+    }
+    while (curTblItr.hasNext()) {
+      endContainerID = curTblItr.next();
+    }
+    if (startContainerID != null) {
+      if (startContainerID != endContainerID) {
+        currTable.deleteRange(startContainerID, endContainerID);
+      }
+      // delete the last entry separately since deleteRange treats the end key as exclusive
+      currTable.delete(endContainerID);
+    }
+  }
+}
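For illustration only (not part of this patch): deleteRange treats its end key as exclusive, which is why truncateCurrentTable deletes the last key separately. A TreeMap model of the same logic; all names are hypothetical:

    import java.util.TreeMap;

    public class TruncateRangeSketch {
      public static void main(String[] args) {
        TreeMap<Long, String> table = new TreeMap<>();
        table.put(1L, "OPEN");
        table.put(2L, "OPEN");
        table.put(3L, "OPEN");

        long start = table.firstKey();
        long end = table.lastKey();
        table.subMap(start, end).clear();  // like deleteRange(start, end): end is exclusive
        table.remove(end);                 // like the explicit delete(endContainerID)

        System.out.println(table.isEmpty());  // true
      }
    }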
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/ContainerImplTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/ContainerImplTestUtils.java
index 1e27e748d69..e005c426333 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/ContainerImplTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/ContainerImplTestUtils.java
@@ -17,8 +17,12 @@
 
 package org.apache.hadoop.ozone.container.common.impl;
 
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.time.Clock;
 import org.apache.hadoop.hdds.utils.db.InMemoryTestTable;
+import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore;
 
 /**
  * Helper utility to test container impl.
@@ -33,10 +37,14 @@ public static ContainerSet newContainerSet() {
   }
 
   public static ContainerSet newContainerSet(long recoveringTimeout) {
-    return ContainerSet.newRwContainerSet(new InMemoryTestTable<>(), recoveringTimeout);
+    WitnessedContainerMetadataStore mockMetadataStore = mock(WitnessedContainerMetadataStore.class);
+    when(mockMetadataStore.getContainerCreateInfoTable()).thenReturn(new InMemoryTestTable<>());
+    return ContainerSet.newRwContainerSet(mockMetadataStore, recoveringTimeout);
   }
 
   public static ContainerSet newContainerSet(long recoveringTimeout, Clock clock) {
-    return new ContainerSet(new InMemoryTestTable<>(), recoveringTimeout, clock);
+    WitnessedContainerMetadataStore mockMetadataStore = mock(WitnessedContainerMetadataStore.class);
+    when(mockMetadataStore.getContainerCreateInfoTable()).thenReturn(new InMemoryTestTable<>());
+    return new ContainerSet(mockMetadataStore, recoveringTimeout, clock);
   }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java
new file mode 100644
index 00000000000..e6ddf977340
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.upgrade;
+
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.OPEN;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.net.InetSocketAddress;
+import java.nio.file.Path;
+import java.util.Collections;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TypedTable;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.apache.hadoop.ozone.container.common.ScmTestMock;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.metadata.ContainerCreateInfo;
+import org.apache.hadoop.ozone.container.metadata.WitnessedContainerDBDefinition;
+import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+/**
+ * Tests upgrading a single datanode from HBASE_SUPPORT to WITNESSED_CONTAINER_DB_PROTO_VALUE.
+ */
+public class TestDatanodeUpgradeToContainerIdsTable {
+  @TempDir
+  private Path tempFolder;
+
+  private DatanodeStateMachine dsm;
+  private ContainerDispatcher dispatcher;
+  private OzoneConfiguration conf;
+  private static final String CLUSTER_ID = "clusterID";
+
+  private RPC.Server scmRpcServer;
+  private InetSocketAddress address;
+
+  private void initTests() throws Exception {
+    conf = new OzoneConfiguration();
+    setup();
+  }
+
+  private void setup() throws Exception {
+    address = SCMTestUtils.getReuseableAddress();
+    conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address);
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
+        tempFolder.toString());
+  }
+
+  @AfterEach
+  public void teardown() throws Exception {
+    if (scmRpcServer != null) {
+      scmRpcServer.stop();
+    }
+
+    if (dsm != null) {
+      dsm.close();
+    }
+  }
+
+  @Test
+  public void testContainerTableAccessBeforeAndAfterUpgrade() throws Exception {
+    initTests();
+    // start DN and SCM
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf, new ScmTestMock(CLUSTER_ID), address, 10);
+    UpgradeTestHelper.addHddsVolume(conf, tempFolder);
+    dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address,
+        HDDSLayoutFeature.HBASE_SUPPORT.layoutVersion());
+    dispatcher = dsm.getContainer().getDispatcher();
+    final Pipeline pipeline = MockPipeline.createPipeline(Collections.singletonList(dsm.getDatanodeDetails()));
+
+    // add a container
+    final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline);
+    Container<?> container = dsm.getContainer().getContainerSet().getContainer(containerID);
+    assertEquals(OPEN, container.getContainerData().getState());
+
+    // check if the containerIds table is in old format
+    WitnessedContainerMetadataStore metadataStore = dsm.getContainer().getWitnessedContainerMetadataStore();
+    TypedTable<ContainerID, String> tableWithStringCodec = metadataStore.getStore().getTable(
+        metadataStore.getContainerCreateInfoTable().getName(), ContainerID.getCodec(), StringCodec.get());
+    assertEquals("containerIds", 
metadataStore.getContainerCreateInfoTable().getName());
+    assertEquals(OPEN.name(), tableWithStringCodec.get(ContainerID.valueOf(containerID)));
+
+    // close container to allow upgrade.
+    UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline);
+
+    dsm.finalizeUpgrade();
+    assertTrue(dsm.getLayoutVersionManager().isAllowed(HDDSLayoutFeature.WITNESSED_CONTAINER_DB_PROTO_VALUE));
+    assertEquals(WitnessedContainerDBDefinition.CONTAINER_CREATE_INFO_TABLE_DEF.getName(),
+        metadataStore.getContainerCreateInfoTable().getName());
+    ContainerCreateInfo containerCreateInfo = metadataStore.getContainerCreateInfoTable().get(
+        ContainerID.valueOf(containerID));
+    // the state is always OPEN since it is recorded only at container creation
+    assertEquals(OPEN, containerCreateInfo.getState());
+  }
+
+  @Test
+  public void testContainerTableFinalizeRetry() throws Exception {
+    initTests();
+    // start DN and SCM
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf, new ScmTestMock(CLUSTER_ID), address, 10);
+    UpgradeTestHelper.addHddsVolume(conf, tempFolder);
+    dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address,
+        HDDSLayoutFeature.HBASE_SUPPORT.layoutVersion());
+    dispatcher = dsm.getContainer().getDispatcher();
+    final Pipeline pipeline = MockPipeline.createPipeline(Collections.singletonList(dsm.getDatanodeDetails()));
+
+    // add a container
+    final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline);
+    Container<?> container = dsm.getContainer().getContainerSet().getContainer(containerID);
+    assertEquals(OPEN, container.getContainerData().getState());
+
+    // check if the containerIds table is in old format
+    WitnessedContainerMetadataStore metadataStore = dsm.getContainer().getWitnessedContainerMetadataStore();
+    TypedTable<ContainerID, String> tableWithStringCodec = metadataStore.getStore().getTable(
+        metadataStore.getContainerCreateInfoTable().getName(), ContainerID.getCodec(), StringCodec.get());
+    assertEquals("containerIds", 
metadataStore.getContainerCreateInfoTable().getName());
+    assertEquals(OPEN.name(), tableWithStringCodec.get(ContainerID.valueOf(containerID)));
+
+    // add a few more dummy container entries to the containerIds table
+    for (int i = 0; i < 10; i++) {
+      long containerIDWithDummy = containerID + i + 1;
+      tableWithStringCodec.put(ContainerID.valueOf(containerIDWithDummy), OPEN.name());
+    }
+
+    // close container to allow upgrade.
+    UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline);
+
+    // run the upgrade action directly; this partial upgrade is not persisted to the metastore
+    ContainerTableSchemaFinalizeAction upgradeAction = new ContainerTableSchemaFinalizeAction();
+    upgradeAction.execute(dsm);
+
+    Table<ContainerID, ContainerCreateInfo> currTable =
+        WitnessedContainerDBDefinition.CONTAINER_CREATE_INFO_TABLE_DEF.getTable(metadataStore.getStore());
+    assertEquals(11, getTableEntryCount(currTable)); // 1 original + 10 dummy entries
+
+    // cleanup entry and again upgrade
+    for (int i = 0; i < 10; i++) {
+      long containerIDWithDummy = containerID + i + 1;
+      tableWithStringCodec.delete(ContainerID.valueOf(containerIDWithDummy));
+    }
+
+    // trigger finalization again; this time the upgrade metadata is persisted
+    dsm.finalizeUpgrade();
+    assertTrue(dsm.getLayoutVersionManager().isAllowed(HDDSLayoutFeature.WITNESSED_CONTAINER_DB_PROTO_VALUE));
+    assertEquals(WitnessedContainerDBDefinition.CONTAINER_CREATE_INFO_TABLE_DEF.getName(),
+        metadataStore.getContainerCreateInfoTable().getName());
+    ContainerCreateInfo containerCreateInfo
+        = metadataStore.getContainerCreateInfoTable().get(ContainerID.valueOf(containerID));
+    // the state is always OPEN since it is recorded only at container creation
+    assertEquals(OPEN, containerCreateInfo.getState());
+
+    assertEquals(1, getTableEntryCount(currTable)); // only the original entry remains after cleanup
+  }
+
+  private static int getTableEntryCount(Table<ContainerID, ContainerCreateInfo> currTable) throws Exception {
+    int count = 0;
+    try (Table.KeyValueIterator<ContainerID, ContainerCreateInfo> curTblItr = currTable.iterator()) {
+      while (curTblItr.hasNext()) {
+        curTblItr.next();
+        count++;
+      }
+    }
+    return count;
+  }
+}
diff --git a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
index 3be68e25662..389e0dccd5d 100644
--- a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
@@ -591,3 +591,7 @@ service IntraDatanodeProtocolService {
   rpc download (CopyContainerRequestProto) returns (stream CopyContainerResponseProto);
   rpc upload (stream SendContainerRequest) returns (SendContainerResponse);
 }
+
+message ContainerCreateInfo {
+    required ContainerDataProto.State state = 1;
+}
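For illustration only (not part of this patch): since state is a required proto2 field, the generated builder enforces it at build() time. A sketch of producing and parsing the new message; the class name is hypothetical:

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

    public class ContainerCreateInfoProtoSketch {
      public static void main(String[] args) throws Exception {
        // Calling build() before setState(...) would throw UninitializedMessageException.
        ContainerProtos.ContainerCreateInfo msg =
            ContainerProtos.ContainerCreateInfo.newBuilder()
                .setState(ContainerProtos.ContainerDataProto.State.OPEN)
                .build();
        ContainerProtos.ContainerCreateInfo parsed =
            ContainerProtos.ContainerCreateInfo.parseFrom(msg.toByteArray());
        System.out.println(parsed.getState());  // OPEN
      }
    }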
diff --git a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
index ade3eac1f6f..3ad35ecc5a2 100644
--- a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
+++ b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
@@ -197,7 +197,7 @@ private void initializeReplicationSupervisor(
     WitnessedContainerMetadataStore referenceCountedDS =
         WitnessedContainerMetadataStoreImpl.get(conf);
     this.witnessedContainerMetadataStore = referenceCountedDS;
-    ContainerSet containerSet = ContainerSet.newRwContainerSet(referenceCountedDS.getContainerIdsTable(), 1000);
+    ContainerSet containerSet = ContainerSet.newRwContainerSet(referenceCountedDS, 1000);
 
     ContainerMetrics metrics = ContainerMetrics.create(conf);
 



