This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 62df3069f8 HDDS-12493. Move container upgrade under repair (#8205)
62df3069f8 is described below

commit 62df3069f8aa36b0cf9ca4c943d7868242ab83fb
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Tue Apr 15 17:53:37 2025 +0200

    HDDS-12493. Move container upgrade under repair (#8205)
---
 .../apache/hadoop/hdds/cli/AbstractSubcommand.java |   5 +
 .../org/apache/hadoop/hdds/cli/GenericCli.java     |   3 +-
 .../hadoop/hdds/cli/GenericParentCommand.java      |   2 +
 hadoop-hdds/tools/pom.xml                          |  12 -
 .../hdds/scm/cli/container/ContainerCommands.java  |   2 +-
 .../hdds/scm/cli/container/UpgradeSubcommand.java  | 162 +------
 .../scm/cli/container/upgrade/UpgradeChecker.java  | 109 -----
 .../scm/cli/container/upgrade/UpgradeManager.java  | 173 -------
 .../scm/cli/container/upgrade/UpgradeTask.java     | 475 -------------------
 .../cli/container/upgrade/TestUpgradeManager.java  | 307 -------------
 .../src/main/smoketest/admincli/container.robot    |   1 -
 .../shell/TestOzoneContainerUpgradeShell.java      |  76 +---
 hadoop-ozone/tools/pom.xml                         |  18 +
 .../org/apache/hadoop/ozone/repair/RepairTool.java |  15 +
 .../ozone/repair/datanode/DatanodeRepair.java      |  21 +-
 .../ozone/repair/datanode}/package-info.java       |   4 +-
 .../schemaupgrade/ContainerUpgradeResult.java      | 124 +++++
 .../schemaupgrade/UpgradeContainerSchema.java      | 506 +++++++++++++++++++++
 .../datanode/schemaupgrade}/UpgradeUtils.java      |  67 ++-
 .../schemaupgrade/VolumeUpgradeResult.java         | 119 +++++
 .../datanode/schemaupgrade}/package-info.java      |   2 +-
 .../schemaupgrade/TestUpgradeContainerSchema.java  | 378 +++++++++++++++
 22 files changed, 1268 insertions(+), 1313 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java
index 9aeab3c7e8..b7f7170c2a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java
@@ -76,6 +76,11 @@ public boolean isVerbose() {
     public OzoneConfiguration getOzoneConf() {
       return conf;
     }
+
+    @Override
+    public void printError(Throwable t) {
+      t.printStackTrace();
+    }
   }
 
   protected PrintWriter out() {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
index 705909fc90..23e2c3cd10 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -89,7 +89,8 @@ public int execute(String[] argv) {
     return cmd.execute(argv);
   }
 
-  protected void printError(Throwable error) {
+  @Override
+  public void printError(Throwable error) {
     //message could be null in case of NPE. This is unexpected so we can
     //print out the stack trace.
     if (verbose || Strings.isNullOrEmpty(error.getMessage())) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
index ef0c94c003..68cf45e178 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
@@ -28,4 +28,6 @@ public interface GenericParentCommand {
 
   /** Returns a cached configuration, i.e. it is created only once, subsequent calls return the same instance. */
   OzoneConfiguration getOzoneConf();
+
+  void printError(Throwable t);
 }
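
The new printError(Throwable) member on GenericParentCommand lets subcommands delegate error reporting to their configured parent (GenericCli prints the full stack trace only in verbose mode, or when the message is null, per the diff above). A minimal hypothetical sketch of such delegation; only the GenericParentCommand method itself comes from this patch, the ExampleSubcommand class and its wiring are illustrative assumptions:

    // Illustrative only: a subcommand that routes failures through the
    // parent's printError instead of writing to System.err itself.
    public final class ExampleSubcommand implements Runnable {
      private final GenericParentCommand parent;

      public ExampleSubcommand(GenericParentCommand parent) {
        this.parent = parent;
      }

      @Override
      public void run() {
        try {
          // ... subcommand work ...
        } catch (RuntimeException e) {
          parent.printError(e); // parent decides: message only, or stack trace
        }
      }
    }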
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
index 9ebfb3fa90..d7c244bc57 100644
--- a/hadoop-hdds/tools/pom.xml
+++ b/hadoop-hdds/tools/pom.xml
@@ -134,18 +134,6 @@
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.ozone</groupId>
-      <artifactId>hdds-container-service</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.ozone</groupId>
-      <artifactId>hdds-server-framework</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>
       <artifactId>hdds-test-utils</artifactId>
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java
index 2a2eec90ff..72f4e64aff 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java
@@ -36,7 +36,7 @@
         CreateSubcommand.class,
         CloseSubcommand.class,
         ReportSubcommand.class,
-        UpgradeSubcommand.class
+        UpgradeSubcommand.class,
     })
 @MetaInfServices(AdminSubcommand.class)
 public class ContainerCommands implements AdminSubcommand {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java
index b350bc729a..cc28150146 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java
@@ -17,177 +17,31 @@
 
 package org.apache.hadoop.hdds.scm.cli.container;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
-import java.io.File;
-import java.io.InputStreamReader;
-import java.nio.charset.StandardCharsets;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Scanner;
 import java.util.concurrent.Callable;
-import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.cli.AbstractSubcommand;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.cli.container.upgrade.UpgradeChecker;
-import org.apache.hadoop.hdds.scm.cli.container.upgrade.UpgradeManager;
-import org.apache.hadoop.hdds.scm.cli.container.upgrade.UpgradeUtils;
-import org.apache.hadoop.hdds.server.OzoneAdmins;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import picocli.CommandLine;
-import picocli.CommandLine.Command;
 
 /**
- * This is the handler that process container upgrade command.
+ * @deprecated by {@code ozone repair datanode upgrade-container-schema}
  */
-@Command(
+@CommandLine.Command(
     name = "upgrade",
-    description = "Offline upgrade all schema V2 containers to schema V3 " +
-        "for this datanode.",
+    description = "Please see `ozone repair datanode 
upgrade-container-schema`.",
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class)
+@Deprecated
 public class UpgradeSubcommand extends AbstractSubcommand implements Callable<Void> {
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(UpgradeSubcommand.class);
-
-  @CommandLine.Option(names = {"--volume"},
-      required = false,
-      description = "volume path")
+  @CommandLine.Option(names = {"--volume"}, description = "ignored")
   private String volume;
 
-  @CommandLine.Option(names = {"-y", "--yes"},
-      description = "Continue without interactive user confirmation")
+  @CommandLine.Option(names = {"-y", "--yes"}, description = "ignored")
   private boolean yes;
 
-  private static OzoneConfiguration ozoneConfiguration;
-
-
   @Override
   public Void call() throws Exception {
-    OzoneConfiguration configuration = getConfiguration();
-    // Verify admin privilege
-    OzoneAdmins admins = OzoneAdmins.getOzoneAdmins("", configuration);
-    if (!admins.isAdmin(UserGroupInformation.getCurrentUser())) {
-      out().println("It requires ozone administrator privilege. Current user" +
-          " is " + UserGroupInformation.getCurrentUser() + ".");
-      return null;
-    }
-
-    final UpgradeChecker upgradeChecker = new UpgradeChecker();
-    Pair<Boolean, String> pair = upgradeChecker.checkDatanodeRunning();
-    final boolean isRunning = pair.getKey();
-    if (isRunning) {
-      out().println(pair.getValue());
-      return null;
-    }
-
-    DatanodeDetails dnDetail =
-        UpgradeUtils.getDatanodeDetails(configuration);
-
-    Pair<HDDSLayoutFeature, HDDSLayoutFeature> layoutFeature =
-        upgradeChecker.getLayoutFeature(dnDetail, configuration);
-    final HDDSLayoutFeature softwareLayoutFeature = layoutFeature.getLeft();
-    final HDDSLayoutFeature metadataLayoutFeature = layoutFeature.getRight();
-    final int needLayoutVersion =
-        HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion();
-
-    if (metadataLayoutFeature.layoutVersion() < needLayoutVersion ||
-        softwareLayoutFeature.layoutVersion() < needLayoutVersion) {
-      out().println(String.format(
-          "Please upgrade your software version, no less than %s," +
-              " current metadata layout version is %s," +
-              " software layout version is %s",
-          HDDSLayoutFeature.DATANODE_SCHEMA_V3.name(),
-          metadataLayoutFeature.name(), softwareLayoutFeature.name()));
-      return null;
-    }
-
-    if (!Strings.isNullOrEmpty(volume)) {
-      File volumeDir = new File(volume);
-      if (!volumeDir.exists() || !volumeDir.isDirectory()) {
-        out().println(
-            String.format("Volume path %s is not a directory or doesn't exist",
-                volume));
-        return null;
-      }
-      File hddsRootDir = new File(volume + "/" + HddsVolume.HDDS_VOLUME_DIR);
-      File versionFile = new File(volume + "/" + HddsVolume.HDDS_VOLUME_DIR +
-          "/" + Storage.STORAGE_FILE_VERSION);
-      if (!hddsRootDir.exists() || !hddsRootDir.isDirectory() ||
-          !versionFile.exists() || !versionFile.isFile()) {
-        out().println(
-            String.format("Volume path %s is not a valid data volume", 
volume));
-        return null;
-      }
-      configuration.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volume);
-    }
-
-    final HddsProtos.NodeOperationalState opState =
-        dnDetail.getPersistedOpState();
-
-    if (!opState.equals(HddsProtos.NodeOperationalState.IN_MAINTENANCE)) {
-      out().println("This command requires the datanode's " +
-          "NodeOperationalState to be IN_MAINTENANCE, currently is " +
-          opState);
-      return null;
-    }
-
-    List<HddsVolume> allVolume =
-        upgradeChecker.getAllVolume(dnDetail, configuration);
-
-    Iterator<HddsVolume> volumeIterator = allVolume.iterator();
-    while (volumeIterator.hasNext()) {
-      HddsVolume hddsVolume = volumeIterator.next();
-      if (UpgradeChecker.isAlreadyUpgraded(hddsVolume)) {
-        out().println("Volume " + hddsVolume.getVolumeRootDir() +
-            " is already upgraded, skip it.");
-        volumeIterator.remove();
-      }
-    }
-
-    if (allVolume.isEmpty()) {
-      out().println("There is no more volume to upgrade. Exit.");
-      return null;
-    }
-
-    if (!yes) {
-      Scanner scanner = new Scanner(new InputStreamReader(
-          System.in, StandardCharsets.UTF_8));
-      System.out.println(
-          "All volume db stores will be automatically backup," +
-              " should we continue the upgrade ? [yes|no] : ");
-      boolean confirm = scanner.next().trim().equals("yes");
-      scanner.close();
-      if (!confirm) {
-        return null;
-      }
-    }
-
-    // do upgrade
-    final UpgradeManager upgradeManager = new UpgradeManager();
-    upgradeManager.run(configuration, allVolume);
-    return null;
-  }
-
-  @VisibleForTesting
-  public static void setOzoneConfiguration(OzoneConfiguration config) {
-    ozoneConfiguration = config;
-  }
-
-  private OzoneConfiguration getConfiguration() {
-    if (ozoneConfiguration == null) {
-      ozoneConfiguration = new OzoneConfiguration();
-    }
-    return ozoneConfiguration;
+    throw new IllegalStateException(
+        "This command was moved, please use it via `ozone repair datanode 
upgrade-container-schema` instead.");
   }
 }
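
For reference: after this change the old admin subcommand only throws the IllegalStateException above, and the actual schema V2-to-V3 upgrade logic lives under the repair tool as UpgradeContainerSchema. A rough before/after invocation, assuming (not verified from this diff alone) that the new command accepts the same --volume and -y/--yes options as the old one:

    # deprecated; now fails with the IllegalStateException shown above
    ozone admin container upgrade --volume /data/hdds -y

    # replacement under the repair tool
    ozone repair datanode upgrade-container-schema --volume /data/hdds -y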
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeChecker.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeChecker.java
deleted file mode 100644
index 6cdd230c98..0000000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeChecker.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli.container.upgrade;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
-import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
-import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
-import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
-
-/**
- * This is the handler that process container upgrade checker.
- */
-public class UpgradeChecker {
-
-  /*
-   * Verify that the datanode is in the shutdown state or running.
-   */
-  public Pair<Boolean, String> checkDatanodeRunning() {
-    String command =
-        "ps aux | grep org.apache.hadoop.ozone.HddsDatanodeService " +
-            "| grep -v grep";
-    try {
-      Process exec = Runtime.getRuntime().exec(new String[]{"/bin/bash", "-c",
-          command});
-      boolean notTimeout = exec.waitFor(10, TimeUnit.SECONDS);
-      if (!notTimeout) {
-        return Pair.of(true,
-            String.format("Execution of the command '%s' timeout", command));
-      }
-      if (exec.exitValue() == 0) {
-        return Pair.of(true, "HddsDatanodeService is running." +
-            " This upgrade command requires datanode to be off and in" +
-            " the IN_MAINTENANCE mode. Please put the datanode in" +
-            " the desired state first, then try this command later again.");
-      } else if (exec.exitValue() == 1) {
-        return Pair.of(false, "HddsDatanodeService is not running.");
-      } else {
-        return Pair.of(true,
-            String.format("Return code of the command '%s' is %d", command,
-                exec.exitValue()));
-      }
-    } catch (IOException | InterruptedException e) {
-      return Pair.of(true,
-          String.format("Run command '%s' has error '%s'",
-              command, e.getMessage()));
-    }
-  }
-
-  public Pair<HDDSLayoutFeature, HDDSLayoutFeature> getLayoutFeature(
-      DatanodeDetails dnDetail, OzoneConfiguration conf) throws IOException {
-    DatanodeLayoutStorage layoutStorage =
-        new DatanodeLayoutStorage(conf, dnDetail.getUuidString());
-    HDDSLayoutVersionManager layoutVersionManager =
-        new HDDSLayoutVersionManager(layoutStorage.getLayoutVersion());
-
-    final int metadataLayoutVersion =
-        layoutVersionManager.getMetadataLayoutVersion();
-    final HDDSLayoutFeature metadataLayoutFeature =
-        (HDDSLayoutFeature) layoutVersionManager.getFeature(
-            metadataLayoutVersion);
-
-    final int softwareLayoutVersion =
-        layoutVersionManager.getSoftwareLayoutVersion();
-    final HDDSLayoutFeature softwareLayoutFeature =
-        (HDDSLayoutFeature) layoutVersionManager.getFeature(
-            softwareLayoutVersion);
-
-    return Pair.of(softwareLayoutFeature, metadataLayoutFeature);
-  }
-
-  public List<HddsVolume> getAllVolume(DatanodeDetails detail,
-      OzoneConfiguration configuration) throws IOException {
-    final MutableVolumeSet dataVolumeSet = UpgradeUtils
-        .getHddsVolumes(configuration, StorageVolume.VolumeType.DATA_VOLUME,
-            detail.getUuidString());
-    return StorageVolumeUtil.getHddsVolumesList(dataVolumeSet.getVolumesList());
-  }
-
-  public static boolean isAlreadyUpgraded(HddsVolume hddsVolume) {
-    final File migrateFile =
-        UpgradeUtils.getVolumeUpgradeCompleteFile(hddsVolume);
-    return migrateFile.exists();
-  }
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
deleted file mode 100644
index 6b2f7818f8..0000000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli.container.upgrade;
-
-import com.google.common.annotations.VisibleForTesting;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class manages v2 to v3 container upgrade.
- */
-public class UpgradeManager {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(UpgradeManager.class);
-
-  private final Map<String, DatanodeStoreSchemaThreeImpl>
-      volumeStoreMap = new ConcurrentHashMap<>();
-
-  public List<Result> run(OzoneConfiguration configuration,
-      List<HddsVolume> volumes) throws IOException {
-    List<Result> results = new ArrayList<>();
-    Map<HddsVolume, CompletableFuture<Result>> volumeFutures = new HashMap<>();
-    long startTime = Time.monotonicNow();
-
-    LOG.info("Start to upgrade {} volume(s)", volumes.size());
-    for (StorageVolume volume : volumes) {
-      final HddsVolume hddsVolume = (HddsVolume) volume;
-      final UpgradeTask task =
-          new UpgradeTask(configuration, hddsVolume, volumeStoreMap);
-      final CompletableFuture<Result> future = task.getUpgradeFuture();
-      volumeFutures.put(hddsVolume, future);
-    }
-
-    for (Map.Entry<HddsVolume, CompletableFuture<Result>> entry :
-        volumeFutures.entrySet()) {
-      final HddsVolume hddsVolume = entry.getKey();
-      final CompletableFuture<Result> volumeFuture = entry.getValue();
-
-      try {
-        final Result result = volumeFuture.get();
-        results.add(result);
-        LOG.info("Finish upgrading containers on volume {}, {}",
-            hddsVolume.getVolumeRootDir(), result.toString());
-      } catch (Exception e) {
-        LOG.error("Failed to upgrade containers on volume {}",
-            hddsVolume.getVolumeRootDir(), e);
-      }
-    }
-
-    LOG.info("It took {}ms to finish all volume upgrade.",
-        (Time.monotonicNow() - startTime));
-    return results;
-  }
-
-  @VisibleForTesting
-  public DatanodeStoreSchemaThreeImpl getDBStore(HddsVolume volume) {
-    return volumeStoreMap.get(volume.getStorageDir().getAbsolutePath());
-  }
-
-  /**
-   * This class contains v2 to v3 container upgrade result.
-   */
-  public static class Result {
-    private Map<Long, UpgradeTask.UpgradeContainerResult> resultMap;
-    private final HddsVolume hddsVolume;
-    private final long startTimeMs = Time.monotonicNow();
-    private long endTimeMs = 0L;
-    private Exception e = null;
-    private Status status = Status.FAIL;
-
-    public Result(HddsVolume hddsVolume) {
-      this.hddsVolume = hddsVolume;
-    }
-
-    public HddsVolume getHddsVolume() {
-      return hddsVolume;
-    }
-
-    public long getCost() {
-      return endTimeMs - startTimeMs;
-    }
-
-    public void setResultList(
-        List<UpgradeTask.UpgradeContainerResult> resultList) {
-      resultMap = new HashMap<>();
-      resultList.forEach(res -> resultMap
-          .put(res.getOriginContainerData().getContainerID(), res));
-    }
-
-    public Map<Long, UpgradeTask.UpgradeContainerResult> getResultMap() {
-      return resultMap;
-    }
-
-    public boolean isSuccess() {
-      return this.status == Status.SUCCESS;
-    }
-
-    public void success() {
-      this.endTimeMs = Time.monotonicNow();
-      this.status = Status.SUCCESS;
-    }
-
-    public void fail(Exception exception) {
-      this.endTimeMs = Time.monotonicNow();
-      this.status = Status.FAIL;
-      this.e = exception;
-    }
-
-    @Override
-    public String toString() {
-      final StringBuilder stringBuilder = new StringBuilder();
-      stringBuilder.append("Result:{");
-      stringBuilder.append("hddsRootDir=");
-      stringBuilder.append(getHddsVolume().getHddsRootDir());
-      stringBuilder.append(", resultList=");
-      AtomicLong total = new AtomicLong(0L);
-      if (resultMap != null) {
-        resultMap.forEach((k, r) -> {
-          stringBuilder.append(r.toString());
-          stringBuilder.append("\n");
-          total.addAndGet(r.getTotalRow());
-        });
-      }
-      stringBuilder.append(", totalRow=");
-      stringBuilder.append(total.get());
-      stringBuilder.append(", costMs=");
-      stringBuilder.append(getCost());
-      stringBuilder.append(", status=");
-      stringBuilder.append(status);
-      if (e != null) {
-        stringBuilder.append(", Exception=");
-        stringBuilder.append(e);
-      }
-      stringBuilder.append('}');
-      return stringBuilder.toString();
-    }
-
-    enum Status {
-      SUCCESS,
-      FAIL
-    }
-  }
-
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java
deleted file mode 100644
index 262abc57fd..0000000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli.container.upgrade;
-
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME;
-
-import com.google.common.base.Preconditions;
-import java.io.File;
-import java.io.IOException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.StringUtils;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
-import org.apache.hadoop.ozone.container.common.utils.RawDB;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
-import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the v2 to v3 container upgrade process.
- */
-public class UpgradeTask {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(UpgradeTask.class);
-
-  private final ConfigurationSource config;
-  private final HddsVolume hddsVolume;
-  private DatanodeStoreSchemaThreeImpl dataStore;
-  private final Map volumeStoreMap;
-
-  private static final String BACKUP_CONTAINER_DATA_FILE_SUFFIX = ".backup";
-  public static final String UPGRADE_COMPLETE_FILE_NAME = "upgrade.complete";
-  public static final String UPGRADE_LOCK_FILE_NAME = "upgrade.lock";
-
-  private static final Set<String> COLUMN_FAMILIES_NAME =
-      (new DatanodeSchemaTwoDBDefinition("", new OzoneConfiguration()))
-          .getMap().keySet();
-
-  public UpgradeTask(ConfigurationSource config, HddsVolume hddsVolume,
-      Map storeMap) {
-    this.config = config;
-    this.hddsVolume = hddsVolume;
-    this.volumeStoreMap = storeMap;
-  }
-
-  public CompletableFuture<UpgradeManager.Result> getUpgradeFuture() {
-    final File lockFile = UpgradeUtils.getVolumeUpgradeLockFile(hddsVolume);
-
-    return CompletableFuture.supplyAsync(() -> {
-
-      final UpgradeManager.Result result =
-          new UpgradeManager.Result(hddsVolume);
-
-      List<UpgradeContainerResult> resultList = new ArrayList<>();
-      final File hddsVolumeRootDir = hddsVolume.getHddsRootDir();
-
-      Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" +
-          "cannot be null");
-
-      // check CID directory and current file
-      File clusterIDDir = new File(hddsVolume.getStorageDir(),
-          hddsVolume.getClusterID());
-      if (!clusterIDDir.exists() || !clusterIDDir.isDirectory()) {
-        result.fail(new Exception("Volume " + hddsVolumeRootDir +
-            " is in an inconsistent state. Expected " +
-            "clusterID directory " + clusterIDDir +
-            " is not found or not a directory."));
-        return result;
-      }
-      File currentDir = new File(clusterIDDir, Storage.STORAGE_DIR_CURRENT);
-      if (!currentDir.exists() || !currentDir.isDirectory()) {
-        result.fail(new Exception(
-            "Current dir " + currentDir + " is not found or not a directory,"
-                + " skip upgrade."));
-        return result;
-      }
-
-      try {
-        // create lock file
-        if (!lockFile.createNewFile()) {
-          result.fail(new Exception("Upgrade lock file already exists " +
-              lockFile.getAbsolutePath() + ", skip upgrade."));
-          return result;
-        }
-      } catch (IOException e) {
-        result.fail(new Exception("Failed to create upgrade lock file " +
-            lockFile.getAbsolutePath() + ", skip upgrade."));
-        return result;
-      }
-
-      // check complete file again
-      final File completeFile =
-          UpgradeUtils.getVolumeUpgradeCompleteFile(hddsVolume);
-      if (completeFile.exists()) {
-        result.fail(new Exception("Upgrade complete file already exists " +
-            completeFile.getAbsolutePath() + ", skip upgrade."));
-        if (!lockFile.delete()) {
-          LOG.warn("Failed to delete upgrade lock file {}.", lockFile);
-        }
-        return result;
-      }
-
-      // backup DB directory
-      final File volumeDBPath;
-      try {
-        volumeDBPath = getVolumeDBPath(hddsVolume);
-        dbBackup(volumeDBPath);
-      } catch (IOException e) {
-        result.fail(new Exception(e.getMessage() + ", skip upgrade."));
-        return result;
-      }
-
-      // load DB store
-      try {
-        hddsVolume.loadDbStore(false);
-        RawDB db = DatanodeStoreCache.getInstance().getDB(
-            volumeDBPath.getAbsolutePath(), config);
-        dataStore = (DatanodeStoreSchemaThreeImpl) db.getStore();
-        volumeStoreMap.put(
-            hddsVolume.getStorageDir().getAbsolutePath(), dataStore);
-      } catch (IOException e) {
-        result.fail(new Exception(
-            "Failed to load db for volume " + hddsVolume.getVolumeRootDir() +
-                " for " + e.getMessage() + ", skip upgrade."));
-        return result;
-      }
-
-      LOG.info("Start to upgrade containers on volume {}",
-          hddsVolume.getVolumeRootDir());
-      File[] containerTopDirs = currentDir.listFiles();
-      if (containerTopDirs != null) {
-        for (File containerTopDir : containerTopDirs) {
-          try {
-            final List<UpgradeContainerResult> results =
-                upgradeSubContainerDir(containerTopDir);
-            resultList.addAll(results);
-          } catch (IOException e) {
-            result.fail(e);
-            return result;
-          }
-        }
-      }
-
-      result.setResultList(resultList);
-      result.success();
-      return result;
-    }).whenComplete((r, e) -> {
-      final File hddsRootDir = r.getHddsVolume().getHddsRootDir();
-      final File file =
-          UpgradeUtils.getVolumeUpgradeCompleteFile(r.getHddsVolume());
-      // create a flag file
-      if (e == null && r.isSuccess()) {
-        try {
-          UpgradeUtils.createFile(file);
-        } catch (IOException ioe) {
-          LOG.warn("Failed to create upgrade complete file {}.", file, ioe);
-        }
-      }
-      if (lockFile.exists()) {
-        boolean deleted = lockFile.delete();
-        if (!deleted) {
-          LOG.warn("Failed to delete upgrade lock file {}.", file);
-        }
-      }
-    });
-  }
-
-  private List<UpgradeContainerResult> upgradeSubContainerDir(
-      File containerTopDir) throws IOException {
-    List<UpgradeContainerResult> resultList = new ArrayList<>();
-    if (containerTopDir.isDirectory()) {
-      File[] containerDirs = containerTopDir.listFiles();
-      if (containerDirs != null) {
-        for (File containerDir : containerDirs) {
-          final ContainerData containerData = parseContainerData(containerDir);
-          if (containerData != null &&
-              ((KeyValueContainerData) containerData)
-                  .hasSchema(OzoneConsts.SCHEMA_V2)) {
-            final UpgradeContainerResult result =
-                new UpgradeContainerResult(containerData);
-            upgradeContainer(containerData, result);
-            resultList.add(result);
-          }
-        }
-      }
-    }
-    return resultList;
-  }
-
-  private ContainerData parseContainerData(File containerDir) {
-    try {
-      File containerFile = ContainerUtils.getContainerFile(containerDir);
-      long containerID = ContainerUtils.getContainerID(containerDir);
-      if (!containerFile.exists()) {
-        LOG.error("Missing .container file: {}.", containerDir);
-        return null;
-      }
-      try {
-        ContainerData containerData =
-            ContainerDataYaml.readContainerFile(containerFile);
-        if (containerID != containerData.getContainerID()) {
-          LOG.error("ContainerID in file {} mismatch with expected {}.",
-              containerFile, containerID);
-          return null;
-        }
-        if (containerData.getContainerType().equals(
-            ContainerProtos.ContainerType.KeyValueContainer) &&
-            containerData instanceof KeyValueContainerData) {
-          KeyValueContainerData kvContainerData =
-              (KeyValueContainerData) containerData;
-          containerData.setVolume(hddsVolume);
-          KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
-          return kvContainerData;
-        } else {
-          LOG.error("Container is not KeyValueContainer type: {}.",
-              containerDir);
-          return null;
-        }
-      } catch (IOException ex) {
-        LOG.error("Failed to parse ContainerFile: {}.", containerFile, ex);
-        return null;
-      }
-    } catch (Throwable e) {
-      LOG.error("Failed to load container: {}.", containerDir, e);
-      return null;
-    }
-  }
-
-  private void upgradeContainer(ContainerData containerData,
-      UpgradeContainerResult result) throws IOException {
-    final DBStore targetDBStore = dataStore.getStore();
-
-    // open container schema v2 rocksdb
-    final DatanodeStore dbStore = BlockUtils
-        .getUncachedDatanodeStore((KeyValueContainerData) containerData, config,
-            true);
-    final DBStore sourceDBStore = dbStore.getStore();
-
-    long total = 0L;
-    for (String tableName : COLUMN_FAMILIES_NAME) {
-      total += transferTableData(targetDBStore, sourceDBStore, tableName,
-          containerData);
-    }
-
-    rewriteAndBackupContainerDataFile(containerData, result);
-    result.success(total);
-  }
-
-  private long transferTableData(DBStore targetDBStore,
-      DBStore sourceDBStore, String tableName, ContainerData containerData)
-      throws IOException {
-    final Table<byte[], byte[]> deleteTransactionTable =
-        sourceDBStore.getTable(tableName);
-    final Table<byte[], byte[]> targetDeleteTransactionTable =
-        targetDBStore.getTable(tableName);
-    return transferTableData(targetDeleteTransactionTable,
-        deleteTransactionTable, containerData);
-  }
-
-  private long transferTableData(Table<byte[], byte[]> targetTable,
-      Table<byte[], byte[]> sourceTable, ContainerData containerData)
-      throws IOException {
-    long count = 0;
-    try (TableIterator<byte[], ? extends Table.KeyValue<byte[], byte[]>>
-             iter = sourceTable.iterator()) {
-      while (iter.hasNext()) {
-        count++;
-        Table.KeyValue<byte[], byte[]> next = iter.next();
-        String key = DatanodeSchemaThreeDBDefinition
-            .getContainerKeyPrefix(containerData.getContainerID())
-            + StringUtils.bytes2String(next.getKey());
-        targetTable
-            .put(FixedLengthStringCodec.string2Bytes(key), next.getValue());
-      }
-    }
-    return count;
-  }
-
-  private void rewriteAndBackupContainerDataFile(ContainerData containerData,
-      UpgradeContainerResult result) throws IOException {
-    if (containerData instanceof KeyValueContainerData) {
-      final KeyValueContainerData keyValueContainerData =
-          (KeyValueContainerData) containerData;
-
-      final KeyValueContainerData copyContainerData =
-          new KeyValueContainerData(keyValueContainerData);
-
-      copyContainerData.setSchemaVersion(OzoneConsts.SCHEMA_V3);
-      copyContainerData.setState(keyValueContainerData.getState());
-      copyContainerData.setVolume(keyValueContainerData.getVolume());
-
-      final File originContainerFile = KeyValueContainer
-          .getContainerFile(keyValueContainerData.getMetadataPath(),
-              keyValueContainerData.getContainerID());
-
-      final File bakFile = new File(keyValueContainerData.getMetadataPath(),
-          keyValueContainerData.getContainerID() +
-              BACKUP_CONTAINER_DATA_FILE_SUFFIX);
-
-      // backup v2 container data file
-      NativeIO.renameTo(originContainerFile, bakFile);
-      result.setBackupContainerFilePath(bakFile.getAbsolutePath());
-
-      // gen new v3 container data file
-      ContainerDataYaml.createContainerFile(copyContainerData, originContainerFile);
-
-      result.setNewContainerData(copyContainerData);
-      result.setNewContainerFilePath(originContainerFile.getAbsolutePath());
-    }
-  }
-
-  public File getVolumeDBPath(HddsVolume volume) throws IOException {
-    File clusterIdDir = new File(volume.getStorageDir(), volume.getClusterID());
-    File storageIdDir = new File(clusterIdDir, volume.getStorageID());
-    File containerDBPath = new File(storageIdDir, CONTAINER_DB_NAME);
-    if (containerDBPath.exists() && containerDBPath.isDirectory()) {
-      return containerDBPath;
-    } else {
-      throw new IOException("DB " + containerDBPath +
-          " doesn't exist or is not a directory");
-    }
-  }
-
-  public void dbBackup(File dbPath) throws IOException {
-    final File backup = new File(dbPath.getParentFile(),
-        new SimpleDateFormat("yyyy-MM-dd'T'HH-mm-ss").format(new Date()) +
-            "-" + dbPath.getName() + ".backup");
-    if (backup.exists()) {
-      throw new IOException("Backup dir " + backup + "already exists");
-    } else {
-      FileUtils.copyDirectory(dbPath, backup, true);
-      System.out.println("DB " + dbPath + " is backup to " + backup);
-    }
-  }
-
-  /**
-   * This class represents upgrade v2 to v3 container result.
-   */
-  public static class UpgradeContainerResult {
-    private final ContainerData originContainerData;
-    private ContainerData newContainerData;
-    private long totalRow = 0L;
-    private final long startTimeMs = Time.monotonicNow();
-    private long endTimeMs = 0L;
-    private Status status;
-
-    private String backupContainerFilePath;
-    private String newContainerFilePath;
-
-    public UpgradeContainerResult(
-        ContainerData originContainerData) {
-      this.originContainerData = originContainerData;
-      this.status = Status.FAIL;
-    }
-
-    public long getTotalRow() {
-      return totalRow;
-    }
-
-    public Status getStatus() {
-      return status;
-    }
-
-    public void setNewContainerData(
-        ContainerData newContainerData) {
-      this.newContainerData = newContainerData;
-    }
-
-    public long getCostMs() {
-      return endTimeMs - startTimeMs;
-    }
-
-    public ContainerData getOriginContainerData() {
-      return originContainerData;
-    }
-
-    public ContainerData getNewContainerData() {
-      return newContainerData;
-    }
-
-    public void setBackupContainerFilePath(String backupContainerFilePath) {
-      this.backupContainerFilePath = backupContainerFilePath;
-    }
-
-    public void setNewContainerFilePath(String newContainerFilePath) {
-      this.newContainerFilePath = newContainerFilePath;
-    }
-
-    public void success(long rowCount) {
-      this.totalRow = rowCount;
-      this.endTimeMs = Time.monotonicNow();
-      this.status = Status.SUCCESS;
-    }
-
-    @Override
-    public String toString() {
-      final StringBuilder stringBuilder = new StringBuilder();
-      stringBuilder.append("Result:{");
-      stringBuilder.append("containerID=");
-      stringBuilder.append(originContainerData.getContainerID());
-      stringBuilder.append(", originContainerSchemaVersion=");
-      stringBuilder.append(
-          ((KeyValueContainerData) originContainerData).getSchemaVersion());
-
-      if (newContainerData != null) {
-        stringBuilder.append(", schemaV2ContainerFileBackupPath=");
-        stringBuilder.append(backupContainerFilePath);
-
-        stringBuilder.append(", newContainerSchemaVersion=");
-        stringBuilder.append(
-            ((KeyValueContainerData) newContainerData).getSchemaVersion());
-
-        stringBuilder.append(", schemaV3ContainerFilePath=");
-        stringBuilder.append(newContainerFilePath);
-      }
-      stringBuilder.append(", totalRow=");
-      stringBuilder.append(totalRow);
-      stringBuilder.append(", costMs=");
-      stringBuilder.append(getCostMs());
-      stringBuilder.append(", status=");
-      stringBuilder.append(status);
-      stringBuilder.append("}");
-      return stringBuilder.toString();
-    }
-
-    enum Status {
-      SUCCESS,
-      FAIL
-    }
-  }
-}
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
deleted file mode 100644
index 817e7781e1..0000000000
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli.container.upgrade;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE;
-import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.Mockito.anyList;
-import static org.mockito.Mockito.anyLong;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import com.google.common.collect.Lists;
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.utils.db.CodecBuffer;
-import org.apache.hadoop.hdds.utils.db.CodecTestUtil;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChunkBuffer;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
-import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
-import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
-import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.io.TempDir;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Tests for UpgradeManager class.
- */
-public class TestUpgradeManager {
-  private static final String SCM_ID = UUID.randomUUID().toString();
-  private static final OzoneConfiguration CONF = new OzoneConfiguration();
-  private static final Logger LOG = LoggerFactory.getLogger(TestUpgradeManager.class);
-
-  @TempDir
-  private File testRoot;
-  private MutableVolumeSet volumeSet;
-  private UUID datanodeId;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-
-  private BlockManager blockManager;
-  private FilePerBlockStrategy chunkManager;
-  private ContainerSet containerSet;
-
-  @BeforeEach
-  public void setup() throws Exception {
-    DatanodeConfiguration dc = CONF.getObject(DatanodeConfiguration.class);
-    dc.setContainerSchemaV3Enabled(true);
-    CONF.setFromObject(dc);
-
-    final File volume1Path = new File(testRoot, "volume1");
-    final File volume2Path = new File(testRoot, "volume2");
-
-    assertTrue(volume1Path.mkdirs());
-    assertTrue(volume2Path.mkdirs());
-
-    final File metadataPath = new File(testRoot, "metadata");
-    assertTrue(metadataPath.mkdirs());
-
-    CONF.set(HDDS_DATANODE_DIR_KEY,
-        volume1Path.getAbsolutePath() + "," + volume2Path.getAbsolutePath());
-    CONF.set(OZONE_METADATA_DIRS, metadataPath.getAbsolutePath());
-    datanodeId = UUID.randomUUID();
-    volumeSet = new MutableVolumeSet(datanodeId.toString(), SCM_ID, CONF,
-        null, StorageVolume.VolumeType.DATA_VOLUME, null);
-
-    // create rocksdb instance in volume dir
-    final List<HddsVolume> volumes = new ArrayList<>();
-    for (StorageVolume storageVolume : volumeSet.getVolumesList()) {
-      HddsVolume hddsVolume = (HddsVolume) storageVolume;
-      StorageVolumeUtil.checkVolume(hddsVolume, SCM_ID, SCM_ID, CONF, null,
-          null);
-      volumes.add(hddsVolume);
-    }
-
-    DatanodeDetails datanodeDetails = mock(DatanodeDetails.class);
-    when(datanodeDetails.getUuidString()).thenReturn(datanodeId.toString());
-    when(datanodeDetails.getUuid()).thenReturn(datanodeId);
-
-    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
-    final AtomicInteger loopCount = new AtomicInteger(0);
-    when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenAnswer(invocation -> {
-          final int ii = loopCount.getAndIncrement() % volumes.size();
-          return volumes.get(ii);
-        });
-
-    containerSet = newContainerSet();
-
-    blockManager = new BlockManagerImpl(CONF);
-    chunkManager = new FilePerBlockStrategy(true, blockManager);
-  }
-
-  @BeforeAll
-  public static void beforeClass() {
-    CodecBuffer.enableLeakDetection();
-  }
-
-  @AfterEach
-  public void after() throws Exception {
-    CodecTestUtil.gc();
-  }
-
-  @Test
-  public void testUpgrade() throws IOException {
-    int num = 2;
-
-    final Map<KeyValueContainerData, Map<String, BlockData>>
-        keyValueContainerBlockDataMap = genSchemaV2Containers(num);
-    assertEquals(num, keyValueContainerBlockDataMap.size());
-
-    shutdownAllVolume();
-
-    final UpgradeManager upgradeManager = new UpgradeManager();
-    final List<UpgradeManager.Result> results =
-        upgradeManager.run(CONF,
-            StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()));
-
-    checkV3MetaData(keyValueContainerBlockDataMap, results, upgradeManager);
-  }
-
-  private Map<String, BlockData> putAnyBlockData(KeyValueContainerData data,
-                                                 KeyValueContainer container,
-                                                 int numBlocks) {
-    // v3 key ==> block data
-    final Map<String, BlockData> containerBlockDataMap = new HashMap<>();
-
-    int txnID = 0;
-    for (int i = 0; i < numBlocks; i++) {
-      txnID = txnID + 1;
-      BlockID blockID =
-          ContainerTestHelper.getTestBlockID(data.getContainerID());
-      BlockData kd = new BlockData(blockID);
-      List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList();
-      putChunksInBlock(1, i, chunks, container, blockID);
-      kd.setChunks(chunks);
-
-      try {
-        final String localIDKey = Long.toString(blockID.getLocalID());
-        final String blockKey = DatanodeSchemaThreeDBDefinition
-            .getContainerKeyPrefix(data.getContainerID()) + localIDKey;
-        blockManager.putBlock(container, kd);
-        containerBlockDataMap.put(blockKey, kd);
-      } catch (IOException exception) {
-        LOG.warn("Failed to put block: " + blockID.getLocalID()
-            + " in BlockDataTable.", exception);
-      }
-    }
-
-    return containerBlockDataMap;
-  }
-
-  private void putChunksInBlock(int numOfChunksPerBlock, int i,
-                                List<ContainerProtos.ChunkInfo> chunks,
-                                KeyValueContainer container, BlockID blockID) {
-    final long chunkLength = 100;
-    try {
-      for (int k = 0; k < numOfChunksPerBlock; k++) {
-        final String chunkName = String.format("%d_chunk_%d_block_%d",
-            blockID.getContainerBlockID().getLocalID(), k, i);
-        final long offset = k * chunkLength;
-        ContainerProtos.ChunkInfo info =
-            ContainerProtos.ChunkInfo.newBuilder().setChunkName(chunkName)
-                .setLen(chunkLength).setOffset(offset)
-                .setChecksumData(Checksum.getNoChecksumDataProto()).build();
-        chunks.add(info);
-        ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset, chunkLength);
-        try (ChunkBuffer chunkData = ChunkBuffer.allocate((int) chunkLength)) {
-          chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE);
-          chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE);
-        }
-      }
-    } catch (IOException ex) {
-      LOG.warn("Putting chunks in blocks was not successful for BlockID: "
-          + blockID);
-    }
-  }
-
-  private Map<KeyValueContainerData, Map<String, BlockData>>
-      genSchemaV2Containers(int numContainers) throws IOException {
-    CONF.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, false);
-
-    // container id ==> blocks
-    final Map<KeyValueContainerData, Map<String, BlockData>> checkBlockDataMap 
=
-        new HashMap<>();
-
-    // create container
-    for (int i = 0; i < numContainers; i++) {
-      long containerId = ContainerTestHelper.getTestContainerID();
-
-      KeyValueContainerData data = new KeyValueContainerData(containerId,
-          ContainerLayoutVersion.FILE_PER_BLOCK,
-          ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(),
-          datanodeId.toString());
-      data.setSchemaVersion(OzoneConsts.SCHEMA_V2);
-
-      KeyValueContainer container = new KeyValueContainer(data, CONF);
-      container.create(volumeSet, volumeChoosingPolicy, SCM_ID);
-
-      containerSet.addContainer(container);
-      data = (KeyValueContainerData) containerSet.getContainer(containerId)
-          .getContainerData();
-      data.setSchemaVersion(OzoneConsts.SCHEMA_V2);
-
-      final Map<String, BlockData> blockDataMap =
-          putAnyBlockData(data, container, 10);
-
-      data.closeContainer();
-      container.close();
-
-      checkBlockDataMap.put(data, blockDataMap);
-    }
-    return checkBlockDataMap;
-  }
-
-  public void shutdownAllVolume() {
-    for (StorageVolume storageVolume : volumeSet.getVolumesList()) {
-      storageVolume.shutdown();
-    }
-  }
-
-  private void checkV3MetaData(Map<KeyValueContainerData,
-      Map<String, BlockData>> blockDataMap, List<UpgradeManager.Result> 
results,
-      UpgradeManager upgradeManager) throws IOException {
-    Map<Long, UpgradeTask.UpgradeContainerResult> resultMap = new HashMap<>();
-
-    for (UpgradeManager.Result result : results) {
-      resultMap.putAll(result.getResultMap());
-    }
-
-    for (Map.Entry<KeyValueContainerData, Map<String, BlockData>> entry :
-        blockDataMap.entrySet()) {
-      final KeyValueContainerData containerData = entry.getKey();
-      final Map<String, BlockData> blockKeyValue = entry.getValue();
-
-      final UpgradeTask.UpgradeContainerResult result =
-          resultMap.get(containerData.getContainerID());
-      final KeyValueContainerData v3ContainerData =
-          (KeyValueContainerData) result.getNewContainerData();
-
-      final DatanodeStoreSchemaThreeImpl datanodeStoreSchemaThree =
-          upgradeManager.getDBStore(v3ContainerData.getVolume());
-      final Table<String, BlockData> blockDataTable =
-          datanodeStoreSchemaThree.getBlockDataTable();
-
-      for (Map.Entry<String, BlockData> blockDataEntry : blockKeyValue
-          .entrySet()) {
-        final String v3key = blockDataEntry.getKey();
-        final BlockData blockData = blockDataTable.get(v3key);
-        final BlockData originBlockData = blockDataEntry.getValue();
-
-        assertEquals(originBlockData.getSize(), blockData.getSize());
-        assertEquals(originBlockData.getLocalID(), blockData.getLocalID());
-      }
-    }
-  }
-}
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
index 1f3279c6bd..8533c1938e 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
@@ -158,7 +158,6 @@ Incomplete command
                         Should contain   ${output}   create
                         Should contain   ${output}   close
                         Should contain   ${output}   report
-                        Should contain   ${output}   upgrade
 
 #List containers on unknown host
#    ${output} =         Execute And Ignore Error     ozone admin --verbose container list --scm unknown-host
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java
index 75ddd145d9..0be07248a7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java
@@ -22,18 +22,16 @@
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
 import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.apache.ozone.test.IntLambda.withTextFromSystemIn;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
 import java.nio.charset.StandardCharsets;
 import java.time.Duration;
 import java.util.ArrayList;
@@ -41,12 +39,9 @@
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
-import java.util.stream.Collectors;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.cli.container.ContainerCommands;
-import org.apache.hadoop.hdds.scm.cli.container.UpgradeSubcommand;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
@@ -59,13 +54,14 @@
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneTestUtils;
 import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.repair.OzoneRepair;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
@@ -80,21 +76,12 @@ public class TestOzoneContainerUpgradeShell {
       LoggerFactory.getLogger(TestOzoneContainerUpgradeShell.class);
   private static MiniOzoneCluster cluster = null;
   private static OzoneClient client;
-  private static OzoneConfiguration conf = null;
   private static final String VOLUME_NAME = UUID.randomUUID().toString();
   private static final String BUCKET_NAME = UUID.randomUUID().toString();
 
-  protected static void startCluster() throws Exception {
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .build();
-    cluster.waitForClusterToBeReady();
-    client = cluster.newClient();
-  }
-
   @BeforeAll
   public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OZONE_ADMINISTRATORS, "*");
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
         TimeUnit.MILLISECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
@@ -110,7 +97,10 @@ public static void init() throws Exception {
     // gen schema v2 container
     conf.setBoolean(CONTAINER_SCHEMA_V3_ENABLED, false);
 
-    startCluster();
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .build();
+    cluster.waitForClusterToBeReady();
+    client = cluster.newClient();
   }
 
   public List<OzoneConfiguration> getDatanodeConfigs() {
@@ -141,32 +131,16 @@ public void testNormalContainerUpgrade() throws Exception {
     shutdownCluster();
 
     // datanode1 test check all pass & upgrade success
-    UpgradeSubcommand.setOzoneConfiguration(datanodeConf);
-    StringWriter stdout = new StringWriter();
-    PrintWriter pstdout = new PrintWriter(stdout);
-    CommandLine commandLine = upgradeCommand(pstdout);
-
-    String[] args = new String[]{"upgrade", "--yes"};
-    int exitCode = commandLine.execute(args);
+    int exitCode = runUpgrade(datanodeConf);
     assertEquals(0, exitCode);
-
-    // datanode2 NodeOperationalState is IN_SERVICE upgrade fail.
-    OzoneConfiguration datanode2Conf = datanodeConfigs.get(1);
-    UpgradeSubcommand.setOzoneConfiguration(datanode2Conf);
-    StringWriter stdout2 = new StringWriter();
-    PrintWriter pstdout2 = new PrintWriter(stdout2);
-    CommandLine commandLine2 = upgradeCommand(pstdout2);
-
-    String[] args2 = new String[]{"upgrade", "--yes"};
-    int exit2Code = commandLine2.execute(args2);
-
-    assertEquals(0, exit2Code);
-    String cmdOut = stdout2.toString();
-    assertThat(cmdOut).contains("IN_MAINTENANCE");
   }
 
-  private CommandLine upgradeCommand(PrintWriter pstdout) {
-    return new CommandLine(new ContainerCommands()).setOut(pstdout);
+  private static int runUpgrade(OzoneConfiguration conf) {
+    CommandLine cmd = new OzoneRepair().getCmd();
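+    // The repair tool prompts for confirmation before touching datanode state;
+    // the test feeds "y" on stdin to answer the prompt non-interactively.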
+    return withTextFromSystemIn("y")
+        .execute(() -> cmd.execute(
+            "-D", OZONE_METADATA_DIRS + "=" + conf.get(OZONE_METADATA_DIRS),
+            "datanode", "upgrade-container-schema"));
   }
 
   private static ContainerInfo writeKeyAndCloseContainer() throws Exception {
@@ -176,12 +150,8 @@ private static ContainerInfo writeKeyAndCloseContainer() throws Exception {
   }
 
   private static void writeKey(String keyName) throws IOException {
-    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
-      TestDataUtil.createVolumeAndBucket(client, VOLUME_NAME, BUCKET_NAME);
-      TestDataUtil.createKey(
-          client.getObjectStore().getVolume(VOLUME_NAME).getBucket(BUCKET_NAME),
-          keyName, "test".getBytes(StandardCharsets.UTF_8));
-    }
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, VOLUME_NAME, BUCKET_NAME);
+    TestDataUtil.createKey(bucket, keyName, "test".getBytes(StandardCharsets.UTF_8));
   }
 
   private static ContainerInfo closeContainerForKey(String keyName)
@@ -207,18 +177,10 @@ public static void shutdownCluster() throws InterruptedException {
     try {
       IOUtils.closeQuietly(client);
       if (cluster != null) {
-        List<OzoneConfiguration> dnConfigs = cluster.getHddsDatanodes().stream()
-            .map(HddsDatanodeService::getConf).collect(Collectors.toList());
-
         DatanodeStoreCache.setMiniClusterMode(false);
 
         cluster.stop();
-        ContainerCache.getInstance(conf).shutdownCache();
-
-
-        for (OzoneConfiguration dnConfig : dnConfigs) {
-          ContainerCache.getInstance(dnConfig).shutdownCache();
-        }
+        ContainerCache.getInstance(cluster.getConf()).shutdownCache();
         DefaultMetricsSystem.shutdown();
         ManagedRocksObjectMetrics.INSTANCE.assertNoLeaks();
         CodecTestUtil.gc();
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index c082c03b29..46ef925f9b 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -254,6 +254,24 @@
     </dependency>
 
     <!-- Test dependencies -->
+    <dependency>
+      <groupId>org.apache.ozone</groupId>
+      <artifactId>hdds-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.ozone</groupId>
+      <artifactId>hdds-container-service</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.ozone</groupId>
+      <artifactId>hdds-server-framework</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>
       <artifactId>hdds-test-utils</artifactId>
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java
index c6a718ee30..04f24bc0d5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java
@@ -101,14 +101,29 @@ protected boolean isDryRun() {
     return dryRun;
   }
 
+  /** Print to stdout the message formatted from {@code msg} and {@code args}. */
   protected void info(String msg, Object... args) {
     out().println(formatMessage(msg, args));
   }
 
+  /** Print to stderr the message formatted from {@code msg} and {@code args}. */
   protected void error(String msg, Object... args) {
     err().println(formatMessage(msg, args));
   }
 
+  /** Print to stderr the message formatted from {@code msg} and {@code args},
+   * and also print the exception {@code t}. */
+  protected void error(Throwable t, String msg, Object... args) {
+    error(msg, args);
+    rootCommand().printError(t);
+  }
+
+  /** Fail with {@link IllegalStateException} using the message formatted from {@code msg} and {@code args}. */
+  protected void fatal(String msg, Object... args) {
+    String formatted = formatMessage(msg, args);
+    throw new IllegalStateException(formatted);
+  }
+
   private String formatMessage(String msg, Object[] args) {
     if (args != null && args.length > 0) {
       msg = String.format(msg, args);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java
similarity index 60%
copy from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
copy to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java
index ef0c94c003..27250015fa 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java
@@ -15,17 +15,22 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdds.cli;
+package org.apache.hadoop.ozone.repair.datanode;
 
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.cli.RepairSubcommand;
+import org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeContainerSchema;
+import org.kohsuke.MetaInfServices;
+import picocli.CommandLine;
 
 /**
- * Interface to access the higher level parameters.
+ * Ozone Repair CLI for Datanode.
  */
-public interface GenericParentCommand {
+@CommandLine.Command(name = "datanode",
+    subcommands = {
+        UpgradeContainerSchema.class,
+    },
+    description = "Tools to repair Datanode")
+@MetaInfServices(RepairSubcommand.class)
+public class DatanodeRepair implements RepairSubcommand {
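+  // No logic of its own: this class only groups the datanode repair
+  // subcommands and is discovered via the RepairSubcommand service loader.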
 
-  boolean isVerbose();
-
-  /** Returns a cached configuration, i.e. it is created only once, subsequent calls return the same instance. */
-  OzoneConfiguration getOzoneConf();
 }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/package-info.java
similarity index 87%
copy from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/package-info.java
copy to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/package-info.java
index c11a284cb2..8ccbb882dd 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/package-info.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/package-info.java
@@ -16,6 +16,6 @@
  */
 
 /**
- * Contains all of the container related scm commands.
+ * Repair tools for Datanode.
  */
-package org.apache.hadoop.hdds.scm.cli.container.upgrade;
+package org.apache.hadoop.ozone.repair.datanode;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java
new file mode 100644
index 0000000000..aa7b3dbb26
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.repair.datanode.schemaupgrade;
+
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.util.Time;
+
+/**
+ * This class represents the result of upgrading a single container from schema v2 to v3.
+ */
+class ContainerUpgradeResult {
+  private final ContainerData originContainerData;
+  private ContainerData newContainerData;
+  private long totalRow = 0L;
+  private final long startTimeMs = Time.monotonicNow();
+  private long endTimeMs = 0L;
+  private Status status = Status.FAIL;
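+  // Status starts out as FAIL and only becomes SUCCESS via success(long),
+  // so an upgrade aborted mid-way is reported as failed by default.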
+
+  private String backupContainerFilePath;
+  private String newContainerFilePath;
+
+  ContainerUpgradeResult(ContainerData originContainerData) {
+    this.originContainerData = originContainerData;
+  }
+
+  public long getTotalRow() {
+    return totalRow;
+  }
+
+  public Status getStatus() {
+    return status;
+  }
+
+  public void setNewContainerData(
+      ContainerData newContainerData) {
+    this.newContainerData = newContainerData;
+  }
+
+  ContainerData getNewContainerData() {
+    return newContainerData;
+  }
+
+  public long getCostMs() {
+    return endTimeMs - startTimeMs;
+  }
+
+  public ContainerData getOriginContainerData() {
+    return originContainerData;
+  }
+
+  public void setBackupContainerFilePath(String backupContainerFilePath) {
+    this.backupContainerFilePath = backupContainerFilePath;
+  }
+
+  String getBackupContainerFilePath() {
+    return backupContainerFilePath;
+  }
+
+  public void setNewContainerFilePath(String newContainerFilePath) {
+    this.newContainerFilePath = newContainerFilePath;
+  }
+
+  String getNewContainerFilePath() {
+    return newContainerFilePath;
+  }
+
+  public void success(long rowCount) {
+    this.totalRow = rowCount;
+    this.endTimeMs = Time.monotonicNow();
+    this.status = Status.SUCCESS;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder stringBuilder = new StringBuilder();
+    stringBuilder.append("Result:{");
+    stringBuilder.append("containerID=");
+    stringBuilder.append(originContainerData.getContainerID());
+    stringBuilder.append(", originContainerSchemaVersion=");
+    stringBuilder.append(
+        ((KeyValueContainerData) originContainerData).getSchemaVersion());
+
+    if (newContainerData != null) {
+      stringBuilder.append(", schemaV2ContainerFileBackupPath=");
+      stringBuilder.append(backupContainerFilePath);
+
+      stringBuilder.append(", newContainerSchemaVersion=");
+      stringBuilder.append(
+          ((KeyValueContainerData) newContainerData).getSchemaVersion());
+
+      stringBuilder.append(", schemaV3ContainerFilePath=");
+      stringBuilder.append(newContainerFilePath);
+    }
+    stringBuilder.append(", totalRow=");
+    stringBuilder.append(totalRow);
+    stringBuilder.append(", costMs=");
+    stringBuilder.append(getCostMs());
+    stringBuilder.append(", status=");
+    stringBuilder.append(status);
+    stringBuilder.append("}");
+    return stringBuilder.toString();
+  }
+
+  enum Status {
+    SUCCESS,
+    FAIL
+  }
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java
new file mode 100644
index 0000000000..7a8fc81504
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java
@@ -0,0 +1,506 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.repair.datanode.schemaupgrade;
+
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME;
+import static org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeUtils.BACKUP_CONTAINER_DATA_FILE_SUFFIX;
+import static org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeUtils.COLUMN_FAMILY_NAMES;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import java.io.File;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.StringUtils;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
+import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
+import org.apache.hadoop.ozone.container.common.utils.RawDB;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
+import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.ozone.repair.RepairTool;
+import org.apache.hadoop.util.Time;
+import picocli.CommandLine;
+import picocli.CommandLine.Command;
+
+/**
+ * This is the handler that processes the container schema upgrade command.
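+ *
+ * <p>Presumably invoked as {@code ozone repair datanode upgrade-container-schema}
+ * (this is the invocation exercised by the tests in this change); the datanode
+ * must be offline while it runs.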
+ */
+@Command(
+    name = "upgrade-container-schema",
+    description = "Offline upgrade all schema V2 containers to schema V3 " +
+        "for this datanode.",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class)
+public class UpgradeContainerSchema extends RepairTool {
+
+  @CommandLine.Option(names = {"--volume"},
+      description = "volume path")
+  private String volume;
+
+  private List<VolumeUpgradeResult> lastResults;
+
+  List<VolumeUpgradeResult> run(OzoneConfiguration configuration, List<HddsVolume> volumes) {
+    List<VolumeUpgradeResult> results = new ArrayList<>();
+    Map<HddsVolume, CompletableFuture<VolumeUpgradeResult>> volumeFutures = new HashMap<>();
+    long startTime = Time.monotonicNow();
+
+    info("Start to upgrade %s volume(s)", volumes.size());
+    for (HddsVolume hddsVolume : volumes) {
+      final UpgradeTask task =
+          new UpgradeTask(configuration, hddsVolume);
+      final CompletableFuture<VolumeUpgradeResult> future = task.getUpgradeFuture();
+      volumeFutures.put(hddsVolume, future);
+    }
+
+    for (Map.Entry<HddsVolume, CompletableFuture<VolumeUpgradeResult>> entry :
+        volumeFutures.entrySet()) {
+      final HddsVolume hddsVolume = entry.getKey();
+      final CompletableFuture<VolumeUpgradeResult> volumeFuture = entry.getValue();
+
+      try {
+        final VolumeUpgradeResult result = volumeFuture.get();
+        results.add(result);
+        info("Finish upgrading containers on volume %s, %s",
+            hddsVolume.getVolumeRootDir(), result);
+      } catch (Exception e) {
+        error(e, "Failed to upgrade containers on volume %s",
+            hddsVolume.getVolumeRootDir());
+      }
+    }
+
+    info("It took %sms to finish all volume upgrade.",
+        (Time.monotonicNow() - startTime));
+    return results;
+  }
+
+  @Override
+  protected Component serviceToBeOffline() {
+    return Component.DATANODE;
+  }
+
+  @Override
+  public void execute() throws Exception {
+    OzoneConfiguration configuration = getOzoneConf();
+
+    DatanodeDetails dnDetail =
+        UpgradeUtils.getDatanodeDetails(configuration);
+
+    Pair<HDDSLayoutFeature, HDDSLayoutFeature> layoutFeature =
+        UpgradeUtils.getLayoutFeature(dnDetail, configuration);
+    final HDDSLayoutFeature softwareLayoutFeature = layoutFeature.getLeft();
+    final HDDSLayoutFeature metadataLayoutFeature = layoutFeature.getRight();
+    final int needLayoutVersion =
+        HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion();
+
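+    // Both the metadata layout version (on-disk) and the software layout
+    // version (in-code) must already include DATANODE_SCHEMA_V3, i.e. the
+    // cluster upgrade must be finalized before schemas can be migrated.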
+    if (metadataLayoutFeature.layoutVersion() < needLayoutVersion ||
+        softwareLayoutFeature.layoutVersion() < needLayoutVersion) {
+      fatal(
+          "Please upgrade your software version, no less than %s," +
+              " current metadata layout version is %s," +
+              " software layout version is %s",
+          HDDSLayoutFeature.DATANODE_SCHEMA_V3.name(),
+          metadataLayoutFeature.name(), softwareLayoutFeature.name());
+      return;
+    }
+
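+    // If --volume was given, validate it and narrow the upgrade to that single
+    // volume by overriding the datanode dir configuration.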
+    if (!Strings.isNullOrEmpty(volume)) {
+      File volumeDir = new File(volume);
+      if (!volumeDir.exists() || !volumeDir.isDirectory()) {
+        fatal("Volume path %s is not a directory or doesn't exist", volume);
+        return;
+      }
+      File hddsRootDir = new File(volumeDir, HddsVolume.HDDS_VOLUME_DIR);
+      if (!hddsRootDir.exists() || !hddsRootDir.isDirectory()) {
+        fatal("Volume path %s is not a valid data volume", volume);
+        return;
+      }
+      File versionFile = new File(hddsRootDir, Storage.STORAGE_FILE_VERSION);
+      if (!versionFile.exists() || !versionFile.isFile()) {
+        fatal("Version file %s does not exist", versionFile);
+        return;
+      }
+      configuration.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volume);
+    }
+
+    List<HddsVolume> allVolume =
+        UpgradeUtils.getAllVolume(dnDetail, configuration);
+
+    Iterator<HddsVolume> volumeIterator = allVolume.iterator();
+    while (volumeIterator.hasNext()) {
+      HddsVolume hddsVolume = volumeIterator.next();
+      if (UpgradeUtils.isAlreadyUpgraded(hddsVolume)) {
+        info("Volume " + hddsVolume.getVolumeRootDir() +
+            " is already upgraded, skip it.");
+        volumeIterator.remove();
+      }
+    }
+
+    if (allVolume.isEmpty()) {
+      info("There is no more volume to upgrade. Exit.");
+      return;
+    }
+
+    // do upgrade
+    lastResults = run(configuration, allVolume);
+  }
+
+  List<VolumeUpgradeResult> getLastResults() {
+    return lastResults;
+  }
+
+  /**
+   * This class implements the v2 to v3 container upgrade process.
+   */
+  private class UpgradeTask {
+
+    private final ConfigurationSource config;
+    private final HddsVolume hddsVolume;
+    private DatanodeStoreSchemaThreeImpl dataStore;
+
+    UpgradeTask(ConfigurationSource config, HddsVolume hddsVolume) {
+      this.config = config;
+      this.hddsVolume = hddsVolume;
+    }
+
+    public CompletableFuture<VolumeUpgradeResult> getUpgradeFuture() {
+      final File lockFile = UpgradeUtils.getVolumeUpgradeLockFile(hddsVolume);
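+      // Two per-volume marker files guard the process: "upgrade.lock" prevents
+      // concurrent runs, and "upgrade.complete" marks a finished volume so that
+      // re-runs skip it.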
+
+      return CompletableFuture.supplyAsync(() -> {
+
+        final VolumeUpgradeResult result =
+            new VolumeUpgradeResult(hddsVolume);
+
+        List<ContainerUpgradeResult> resultList = new ArrayList<>();
+        final File hddsVolumeRootDir = hddsVolume.getHddsRootDir();
+
+        Preconditions.checkNotNull(hddsVolumeRootDir,
+            "hddsVolumeRootDir cannot be null");
+
+        // check CID directory and current file
+        File clusterIDDir = new File(hddsVolume.getStorageDir(),
+            hddsVolume.getClusterID());
+        if (!clusterIDDir.exists() || !clusterIDDir.isDirectory()) {
+          result.fail(new Exception("Volume " + hddsVolumeRootDir +
+              " is in an inconsistent state. Expected " +
+              "clusterID directory " + clusterIDDir +
+              " is not found or not a directory."));
+          return result;
+        }
+        File currentDir = new File(clusterIDDir, Storage.STORAGE_DIR_CURRENT);
+        if (!currentDir.exists() || !currentDir.isDirectory()) {
+          result.fail(new Exception(
+              "Current dir " + currentDir + " is not found or not a directory,"
+                  + " skip upgrade."));
+          return result;
+        }
+
+        try {
+          // create lock file
+          if (!lockFile.createNewFile()) {
+            result.fail(new Exception("Upgrade lock file already exists " +
+                lockFile.getAbsolutePath() + ", skip upgrade."));
+            return result;
+          }
+        } catch (IOException e) {
+          result.fail(new Exception("Failed to create upgrade lock file " +
+              lockFile.getAbsolutePath() + ", skip upgrade."));
+          return result;
+        }
+
+        // check complete file again
+        final File completeFile =
+            UpgradeUtils.getVolumeUpgradeCompleteFile(hddsVolume);
+        if (completeFile.exists()) {
+          result.fail(new Exception("Upgrade complete file already exists " +
+              completeFile.getAbsolutePath() + ", skip upgrade."));
+          if (!lockFile.delete()) {
+            error("Failed to delete upgrade lock file %s.", lockFile);
+          }
+          return result;
+        }
+
+        // backup DB directory
+        final File volumeDBPath;
+        try {
+          volumeDBPath = getVolumeDBPath();
+          dbBackup(volumeDBPath);
+        } catch (IOException e) {
+          result.fail(new Exception(e.getMessage() + ", skip upgrade."));
+          return result;
+        }
+
+        // load DB store
+        try {
+          hddsVolume.loadDbStore(isDryRun());
+          RawDB db = DatanodeStoreCache.getInstance().getDB(
+              volumeDBPath.getAbsolutePath(), config);
+          dataStore = (DatanodeStoreSchemaThreeImpl) db.getStore();
+          result.setStore(dataStore);
+        } catch (IOException e) {
+          result.fail(new Exception(
+              "Failed to load db for volume " + hddsVolume.getVolumeRootDir() +
+                  " for " + e.getMessage() + ", skip upgrade."));
+          return result;
+        }
+
+        info("Start to upgrade containers on volume %s",
+            hddsVolume.getVolumeRootDir());
+        File[] containerTopDirs = currentDir.listFiles();
+        if (containerTopDirs != null) {
+          for (File containerTopDir : containerTopDirs) {
+            try {
+              final List<ContainerUpgradeResult> results =
+                  upgradeSubContainerDir(containerTopDir);
+              resultList.addAll(results);
+            } catch (IOException e) {
+              result.fail(e);
+              return result;
+            }
+          }
+        }
+
+        result.setResultList(resultList);
+        result.success();
+        return result;
+      }).whenComplete((r, e) -> {
+        final File file =
+            UpgradeUtils.getVolumeUpgradeCompleteFile(r.getHddsVolume());
+        // create a flag file
+        if (e == null && r.isSuccess()) {
+          try {
+            UpgradeUtils.createFile(file);
+          } catch (IOException ioe) {
+            error(ioe, "Failed to create upgrade complete file %s.", file);
+          }
+        }
+        if (lockFile.exists()) {
+          boolean deleted = lockFile.delete();
+          if (!deleted) {
+            error("Failed to delete upgrade lock file %s.", file);
+          }
+        }
+      });
+    }
+
+    private List<ContainerUpgradeResult> upgradeSubContainerDir(
+        File containerTopDir) throws IOException {
+      List<ContainerUpgradeResult> resultList = new ArrayList<>();
+      if (containerTopDir.isDirectory()) {
+        File[] containerDirs = containerTopDir.listFiles();
+        if (containerDirs != null) {
+          for (File containerDir : containerDirs) {
+            final ContainerData containerData = parseContainerData(containerDir);
+            if (containerData != null &&
+                ((KeyValueContainerData) containerData)
+                    .hasSchema(OzoneConsts.SCHEMA_V2)) {
+              final ContainerUpgradeResult result =
+                  new ContainerUpgradeResult(containerData);
+              upgradeContainer(containerData, result);
+              resultList.add(result);
+            }
+          }
+        }
+      }
+      return resultList;
+    }
+
+    private ContainerData parseContainerData(File containerDir) {
+      try {
+        File containerFile = ContainerUtils.getContainerFile(containerDir);
+        long containerID = ContainerUtils.getContainerID(containerDir);
+        if (!containerFile.exists()) {
+          error("Missing .container file: %s.", containerDir);
+          return null;
+        }
+        try {
+          ContainerData containerData =
+              ContainerDataYaml.readContainerFile(containerFile);
+          if (containerID != containerData.getContainerID()) {
+            error("ContainerID in file %s mismatch with expected %s.",
+                containerFile, containerID);
+            return null;
+          }
+          if (containerData.getContainerType().equals(
+              ContainerProtos.ContainerType.KeyValueContainer) &&
+              containerData instanceof KeyValueContainerData) {
+            KeyValueContainerData kvContainerData =
+                (KeyValueContainerData) containerData;
+            containerData.setVolume(hddsVolume);
+            KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
+            return kvContainerData;
+          } else {
+            error("Container is not KeyValueContainer type: %s.",
+                containerDir);
+            return null;
+          }
+        } catch (IOException ex) {
+          error(ex, "Failed to parse ContainerFile: %s.", containerFile);
+          return null;
+        }
+      } catch (Throwable e) {
+        error(e, "Failed to load container: %s.", containerDir);
+        return null;
+      }
+    }
+
+    private void upgradeContainer(ContainerData containerData,
+        ContainerUpgradeResult result) throws IOException {
+      final DBStore targetDBStore = dataStore.getStore();
+
+      // open container schema v2 rocksdb
+      final DatanodeStore dbStore = BlockUtils
+          .getUncachedDatanodeStore((KeyValueContainerData) containerData, config,
+              true);
+      final DBStore sourceDBStore = dbStore.getStore();
+
+      long total = 0L;
+      for (String tableName : COLUMN_FAMILY_NAMES) {
+        total += transferTableData(targetDBStore, sourceDBStore, tableName,
+            containerData);
+      }
+
+      rewriteAndBackupContainerDataFile(containerData, result);
+      result.success(total);
+    }
+
+    private long transferTableData(DBStore targetDBStore,
+        DBStore sourceDBStore, String tableName, ContainerData containerData)
+        throws IOException {
+      final Table<byte[], byte[]> deleteTransactionTable =
+          sourceDBStore.getTable(tableName);
+      final Table<byte[], byte[]> targetDeleteTransactionTable =
+          targetDBStore.getTable(tableName);
+      return transferTableData(targetDeleteTransactionTable,
+          deleteTransactionTable, containerData);
+    }
+
+    private long transferTableData(Table<byte[], byte[]> targetTable,
+        Table<byte[], byte[]> sourceTable, ContainerData containerData)
+        throws IOException {
+      long count = 0;
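+      // Schema V3 keeps all containers in a single per-volume RocksDB, so each
+      // V2 key is prefixed with its container ID before being copied over.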
+      try (TableIterator<byte[], ? extends Table.KeyValue<byte[], byte[]>>
+               iter = sourceTable.iterator()) {
+        while (iter.hasNext()) {
+          count++;
+          Table.KeyValue<byte[], byte[]> next = iter.next();
+          String key = DatanodeSchemaThreeDBDefinition
+              .getContainerKeyPrefix(containerData.getContainerID())
+              + StringUtils.bytes2String(next.getKey());
+          if (!isDryRun()) {
+            targetTable
+                .put(FixedLengthStringCodec.string2Bytes(key), next.getValue());
+          }
+        }
+      }
+      return count;
+    }
+
+    private void rewriteAndBackupContainerDataFile(ContainerData containerData,
+        ContainerUpgradeResult result) throws IOException {
+      if (containerData instanceof KeyValueContainerData) {
+        final KeyValueContainerData keyValueContainerData =
+            (KeyValueContainerData) containerData;
+
+        final KeyValueContainerData copyContainerData =
+            new KeyValueContainerData(keyValueContainerData);
+
+        copyContainerData.setSchemaVersion(OzoneConsts.SCHEMA_V3);
+        copyContainerData.setState(keyValueContainerData.getState());
+        copyContainerData.setVolume(keyValueContainerData.getVolume());
+
+        final File originContainerFile = KeyValueContainer
+            .getContainerFile(keyValueContainerData.getMetadataPath(),
+                keyValueContainerData.getContainerID());
+
+        final File bakFile = new File(keyValueContainerData.getMetadataPath(),
+            keyValueContainerData.getContainerID() +
+                BACKUP_CONTAINER_DATA_FILE_SUFFIX);
+
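+        // Dry run leaves the original .container file in place and only writes
+        // the backup copy; a real run renames the original to the backup and
+        // generates a new V3 .container file.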
+        if (isDryRun()) {
+          FileUtils.copyFile(originContainerFile, bakFile);
+        } else {
+          // backup v2 container data file
+          NativeIO.renameTo(originContainerFile, bakFile);
+
+          // gen new v3 container data file
+          ContainerDataYaml.createContainerFile(copyContainerData, originContainerFile);
+        }
+
+        result.setBackupContainerFilePath(bakFile.getAbsolutePath());
+        result.setNewContainerData(copyContainerData);
+        result.setNewContainerFilePath(originContainerFile.getAbsolutePath());
+      }
+    }
+
+    private File getVolumeDBPath() throws IOException {
+      File clusterIdDir = new File(hddsVolume.getStorageDir(), hddsVolume.getClusterID());
+      File storageIdDir = new File(clusterIdDir, hddsVolume.getStorageID());
+      File containerDBPath = new File(storageIdDir, CONTAINER_DB_NAME);
+      if (containerDBPath.exists() && containerDBPath.isDirectory()) {
+        return containerDBPath;
+      } else {
+        throw new IOException("DB " + containerDBPath +
+            " doesn't exist or is not a directory");
+      }
+    }
+
+    private void dbBackup(File dbPath) throws IOException {
+      final File backup = new File(dbPath.getParentFile(),
+          new SimpleDateFormat("yyyy-MM-dd'T'HH-mm-ss").format(new Date()) +
+              "-" + dbPath.getName() + ".backup");
+      if (backup.exists()) {
+        throw new IOException("Backup dir " + backup + "already exists");
+      } else {
+        FileUtils.copyDirectory(dbPath, backup, true);
+        System.out.println("DB " + dbPath + " is backup to " + backup);
+      }
+    }
+
+  }
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java
similarity index 51%
rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeUtils.java
rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java
index 567fd6df48..4cfdb3ed84 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeUtils.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java
@@ -15,9 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdds.scm.cli.container.upgrade;
-
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME;
+package org.apache.hadoop.ozone.repair.datanode.schemaupgrade;
 
 import com.google.common.base.Preconditions;
 import java.io.File;
@@ -26,19 +24,36 @@
 import java.io.Writer;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
+import java.util.Collections;
 import java.util.Date;
+import java.util.List;
+import java.util.Set;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition;
 
 /**
  * Utility functions to help upgrade containers from schema v2 to v3.
  */
-public final class UpgradeUtils {
+final class UpgradeUtils {
+
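+  // Column families of the schema V2 per-container DB; each table listed here
+  // is copied into the per-volume schema V3 DB during the upgrade.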
+  public static final Set<String> COLUMN_FAMILY_NAMES = Collections.unmodifiableSet(
+      new DatanodeSchemaTwoDBDefinition("", new OzoneConfiguration())
+          .getMap().keySet());
+
+  public static final String BACKUP_CONTAINER_DATA_FILE_SUFFIX = ".backup";
+  public static final String UPGRADE_COMPLETE_FILE_NAME = "upgrade.complete";
+  public static final String UPGRADE_LOCK_FILE_NAME = "upgrade.lock";
 
   /** Never constructed. **/
   private UpgradeUtils() {
@@ -60,18 +75,12 @@ public static DatanodeDetails getDatanodeDetails(OzoneConfiguration conf)
     return ContainerUtils.readDatanodeDetailsFrom(idFile);
   }
 
-  public static File getContainerDBPath(HddsVolume volume) {
-    return new File(volume.getDbParentDir(), CONTAINER_DB_NAME);
-  }
-
   public static File getVolumeUpgradeCompleteFile(HddsVolume volume) {
-    return new File(volume.getHddsRootDir(),
-        UpgradeTask.UPGRADE_COMPLETE_FILE_NAME);
+    return new File(volume.getHddsRootDir(), UPGRADE_COMPLETE_FILE_NAME);
   }
 
   public static File getVolumeUpgradeLockFile(HddsVolume volume) {
-    return new File(volume.getHddsRootDir(),
-        UpgradeTask.UPGRADE_LOCK_FILE_NAME);
+    return new File(volume.getHddsRootDir(), UPGRADE_LOCK_FILE_NAME);
   }
 
   public static boolean createFile(File file) throws IOException {
@@ -83,4 +92,38 @@ public static boolean createFile(File file) throws IOException {
     return file.exists();
   }
 
+  public static Pair<HDDSLayoutFeature, HDDSLayoutFeature> getLayoutFeature(
+      DatanodeDetails dnDetail, OzoneConfiguration conf) throws IOException {
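+    // Read the datanode's layout VERSION file to derive both the on-disk
+    // (metadata) and the in-code (software) HDDS layout features.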
+    DatanodeLayoutStorage layoutStorage =
+        new DatanodeLayoutStorage(conf, dnDetail.getUuidString());
+    HDDSLayoutVersionManager layoutVersionManager =
+        new HDDSLayoutVersionManager(layoutStorage.getLayoutVersion());
+
+    final int metadataLayoutVersion =
+        layoutVersionManager.getMetadataLayoutVersion();
+    final HDDSLayoutFeature metadataLayoutFeature =
+        (HDDSLayoutFeature) layoutVersionManager.getFeature(
+            metadataLayoutVersion);
+
+    final int softwareLayoutVersion =
+        layoutVersionManager.getSoftwareLayoutVersion();
+    final HDDSLayoutFeature softwareLayoutFeature =
+        (HDDSLayoutFeature) layoutVersionManager.getFeature(
+            softwareLayoutVersion);
+
+    return Pair.of(softwareLayoutFeature, metadataLayoutFeature);
+  }
+
+  public static List<HddsVolume> getAllVolume(DatanodeDetails detail,
+      OzoneConfiguration configuration) throws IOException {
+    final MutableVolumeSet dataVolumeSet = getHddsVolumes(configuration, StorageVolume.VolumeType.DATA_VOLUME,
+            detail.getUuidString());
+    return StorageVolumeUtil.getHddsVolumesList(dataVolumeSet.getVolumesList());
+  }
+
+  public static boolean isAlreadyUpgraded(HddsVolume hddsVolume) {
+    final File migrateFile =
+        getVolumeUpgradeCompleteFile(hddsVolume);
+    return migrateFile.exists();
+  }
 }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java
new file mode 100644
index 0000000000..d7dce31e87
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.repair.datanode.schemaupgrade;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.util.Time;
+
+/**
+ * This class contains the result of a volume's v2 to v3 container upgrade.
+ */
+class VolumeUpgradeResult {
+  private Map<Long, ContainerUpgradeResult> resultMap;
+  private final HddsVolume hddsVolume;
+  private final long startTimeMs = Time.monotonicNow();
+  private long endTimeMs = 0L;
+  private Exception e = null;
+  private Status status = Status.FAIL;
+  private DatanodeStoreSchemaThreeImpl store;
+
+  VolumeUpgradeResult(HddsVolume hddsVolume) {
+    this.hddsVolume = hddsVolume;
+  }
+
+  public HddsVolume getHddsVolume() {
+    return hddsVolume;
+  }
+
+  public long getCost() {
+    return endTimeMs - startTimeMs;
+  }
+
+  DatanodeStoreSchemaThreeImpl getStore() {
+    return store;
+  }
+
+  void setStore(DatanodeStoreSchemaThreeImpl store) {
+    this.store = store;
+  }
+
+  public void setResultList(
+      List<ContainerUpgradeResult> resultList) {
+    resultMap = new HashMap<>();
+    resultList.forEach(res -> resultMap
+        .put(res.getOriginContainerData().getContainerID(), res));
+  }
+
+  public Map<Long, ContainerUpgradeResult> getResultMap() {
+    return resultMap;
+  }
+
+  public boolean isSuccess() {
+    return this.status == Status.SUCCESS;
+  }
+
+  public void success() {
+    this.endTimeMs = Time.monotonicNow();
+    this.status = Status.SUCCESS;
+  }
+
+  public void fail(Exception exception) {
+    this.endTimeMs = Time.monotonicNow();
+    this.status = Status.FAIL;
+    this.e = exception;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder stringBuilder = new StringBuilder();
+    stringBuilder.append("Result:{");
+    stringBuilder.append("hddsRootDir=");
+    stringBuilder.append(getHddsVolume().getHddsRootDir());
+    stringBuilder.append(", resultList=");
+    AtomicLong total = new AtomicLong(0L);
+    if (resultMap != null) {
+      resultMap.forEach((k, r) -> {
+        stringBuilder.append(r.toString());
+        stringBuilder.append("\n");
+        total.addAndGet(r.getTotalRow());
+      });
+    }
+    stringBuilder.append(", totalRow=");
+    stringBuilder.append(total.get());
+    stringBuilder.append(", costMs=");
+    stringBuilder.append(getCost());
+    stringBuilder.append(", status=");
+    stringBuilder.append(status);
+    if (e != null) {
+      stringBuilder.append(", Exception=");
+      stringBuilder.append(e);
+    }
+    stringBuilder.append('}');
+    return stringBuilder.toString();
+  }
+
+  enum Status {
+    SUCCESS,
+    FAIL
+  }
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/package-info.java
similarity index 93%
rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/package-info.java
rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/package-info.java
index c11a284cb2..9c0d31902f 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/package-info.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/package-info.java
@@ -18,4 +18,4 @@
 /**
  * Contains all of the container related scm commands.
  */
-package org.apache.hadoop.hdds.scm.cli.container.upgrade;
+package org.apache.hadoop.ozone.repair.datanode.schemaupgrade;
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java
new file mode 100644
index 0000000000..f8983e23a9
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java
@@ -0,0 +1,378 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.repair.datanode.schemaupgrade;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2;
+import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
+import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet;
+import static org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix;
+import static org.apache.ozone.test.IntLambda.withTextFromSystemIn;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.hadoop.hdds.cli.GenericCli;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.hdds.utils.db.CodecBuffer;
+import org.apache.hadoop.hdds.utils.db.CodecTestUtil;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChunkBuffer;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
+import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.ozone.repair.OzoneRepair;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.io.TempDir;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+import picocli.CommandLine;
+
+/**
+ * Tests for {@link UpgradeContainerSchema} class.
+ */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestUpgradeContainerSchema {
+  private static final String SCM_ID = UUID.randomUUID().toString();
+  private OzoneConfiguration conf;
+
+  private MutableVolumeSet volumeSet;
+  private DatanodeDetails datanodeDetails;
+  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
+
+  private BlockManager blockManager;
+  private FilePerBlockStrategy chunkManager;
+  private ContainerSet containerSet;
+  private List<HddsVolume> volumes;
+
+  @BeforeAll
+  void init() {
+    CodecBuffer.enableLeakDetection();
+  }
+
+  @BeforeEach
+  void setup(@TempDir Path testRoot) throws Exception {
+    conf = new OzoneConfiguration();
+
+    DatanodeConfiguration dc = conf.getObject(DatanodeConfiguration.class);
+    dc.setContainerSchemaV3Enabled(true);
+    conf.setFromObject(dc);
+
+    final Path volume1Path = Files.createDirectories(testRoot.resolve("volume1").toAbsolutePath());
+    final Path volume2Path = Files.createDirectories(testRoot.resolve("volume2").toAbsolutePath());
+    final Path metadataPath = Files.createDirectories(testRoot.resolve("metadata").toAbsolutePath());
+
+    conf.set(HDDS_DATANODE_DIR_KEY, volume1Path + "," + volume2Path);
+    conf.set(OZONE_METADATA_DIRS, metadataPath.toString());
+
+    datanodeDetails = MockDatanodeDetails.randomDatanodeDetails();
+  }
+
+  private void initDatanode(HDDSLayoutFeature layoutFeature) throws IOException {
+    DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf,
+        datanodeDetails.getUuidString(),
+        layoutFeature.layoutVersion());
+    layoutStorage.initialize();
+
+    String idFilePath = Objects.requireNonNull(HddsServerUtil.getDatanodeIdFilePath(conf), "datanode.id path");
+    ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, new File(idFilePath), conf);
+
+    volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), SCM_ID, conf,
+        null, StorageVolume.VolumeType.DATA_VOLUME, null);
+
+    // Format each volume; with schema V3 enabled this also creates the
+    // per-volume RocksDB instance in the volume dir.
+    volumes = new ArrayList<>();
+    for (StorageVolume storageVolume : volumeSet.getVolumesList()) {
+      HddsVolume hddsVolume = (HddsVolume) storageVolume;
+      StorageVolumeUtil.checkVolume(hddsVolume, SCM_ID, SCM_ID, conf, null,
+          null);
+      volumes.add(hddsVolume);
+    }
+
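+    // Stub the choosing policy to hand out the two volumes round-robin,
+    // so containers are spread evenly across both.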
+    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
+    final AtomicInteger loopCount = new AtomicInteger(0);
+    when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
+        .thenAnswer(invocation -> {
+          final int ii = loopCount.getAndIncrement() % volumes.size();
+          return volumes.get(ii);
+        });
+
+    containerSet = newContainerSet();
+
+    blockManager = new BlockManagerImpl(conf);
+    chunkManager = new FilePerBlockStrategy(true, blockManager);
+  }
+
+  @AfterEach
+  void after() throws Exception {
+    CodecTestUtil.gc();
+  }
+
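+  /**
+   * The command must bail out while the datanode layout version still
+   * predates DATANODE_SCHEMA_V3, i.e. before the Ozone upgrade is finalized.
+   */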
+  @Test
+  void failsBeforeOzoneUpgrade() throws IOException {
+    initDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V2);
+    genSchemaV2Containers(1);
+    shutdownAllVolume();
+    List<VolumeUpgradeResult> results = runCommand(false, GenericCli.EXECUTION_ERROR_EXIT_CODE);
+    assertNull(results);
+  }
+
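+  /**
+   * Upgrades schema V2 containers on a finalized datanode. With --dry-run the
+   * on-disk container files must stay at schema V2; without it they move to V3,
+   * and a schema V2 backup of each container file must remain either way.
+   */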
+  @ParameterizedTest
+  @ValueSource(booleans = {false, true})
+  void testUpgrade(boolean dryRun) throws IOException {
+    initDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3);
+
+    final Map<KeyValueContainerData, Map<String, BlockData>>
+        keyValueContainerBlockDataMap = genSchemaV2Containers(2);
+
+    shutdownAllVolume();
+
+    List<VolumeUpgradeResult> results = runCommand(dryRun, 0);
+    assertNotNull(results);
+    assertEquals(2, results.size());
+    for (VolumeUpgradeResult result : results) {
+      assertTrue(result.isSuccess());
+      for (ContainerUpgradeResult cr : result.getResultMap().values()) {
+        assertSame(ContainerUpgradeResult.Status.SUCCESS, cr.getStatus());
+        KeyValueContainerData pre = assertInstanceOf(KeyValueContainerData.class, cr.getOriginContainerData());
+        KeyValueContainerData post = assertInstanceOf(KeyValueContainerData.class, cr.getNewContainerData());
+        assertEquals(SCHEMA_V2, pre.getSchemaVersion());
+        assertEquals(SCHEMA_V3, post.getSchemaVersion());
+        assertEquals(pre.getState(), post.getState());
+        String schemaVersionKey = "schemaVersion\\s*:\\W*";
+        assertThat(new File(cr.getBackupContainerFilePath()))
+            .exists()
+            .content(UTF_8)
+            .containsPattern(schemaVersionKey + SCHEMA_V2);
+        assertThat(new File(cr.getNewContainerFilePath()))
+            .exists()
+            .content(UTF_8)
+            .containsPattern(schemaVersionKey + (dryRun ? SCHEMA_V2 : SCHEMA_V3));
+      }
+    }
+
+    if (!dryRun) {
+      checkV3MetaData(keyValueContainerBlockDataMap, results);
+    }
+  }
+
+  private List<VolumeUpgradeResult> runCommand(boolean dryRun, int expectedExitCode) {
+    CommandLine cmd = new OzoneRepair().getCmd();
+
+    List<String> argList = Stream.of(HDDS_DATANODE_DIR_KEY, OZONE_METADATA_DIRS)
+        .flatMap(key -> Stream.of("-D", key + "=" + conf.get(key)))
+        .collect(Collectors.toList());
+    argList.addAll(Arrays.asList("datanode", "upgrade-container-schema"));
+    if (dryRun) {
+      argList.add("--dry-run");
+    }
+
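+    // The repair tool prompts for confirmation before touching data; feed "y".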
+    int exitCode = withTextFromSystemIn("y")
+        .execute(() -> cmd.execute(argList.toArray(new String[0])));
+    assertEquals(expectedExitCode, exitCode);
+
+    UpgradeContainerSchema subject = cmd
+        .getSubcommands().get("datanode")
+        .getSubcommands().get("upgrade-container-schema")
+        .getCommand();
+
+    return subject.getLastResults();
+  }
+
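+  /**
+   * Writes {@code numBlocks} single-chunk blocks into the container and
+   * returns them keyed by their schema V3 (container-prefixed) block key.
+   */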
+  private Map<String, BlockData> putAnyBlockData(
+      KeyValueContainerData data,
+      KeyValueContainer container,
+      int numBlocks
+  ) throws IOException {
+    // v3 key ==> block data
+    final Map<String, BlockData> containerBlockDataMap = new HashMap<>();
+
+    int txnID = 0;
+    for (int i = 0; i < numBlocks; i++) {
+      txnID = txnID + 1;
+      BlockID blockID =
+          ContainerTestHelper.getTestBlockID(data.getContainerID());
+      BlockData kd = new BlockData(blockID);
+      List<ContainerProtos.ChunkInfo> chunks = new ArrayList<>();
+      putChunksInBlock(1, i, chunks, container, blockID);
+      kd.setChunks(chunks);
+
+      final String localIDKey = Long.toString(blockID.getLocalID());
+      final String blockKey = getContainerKeyPrefix(data.getContainerID()) + localIDKey;
+      blockManager.putBlock(container, kd);
+      containerBlockDataMap.put(blockKey, kd);
+    }
+
+    return containerBlockDataMap;
+  }
+
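+  /**
+   * Writes and commits {@code numOfChunksPerBlock} 100-byte chunks for the
+   * given block, recording the chunk metadata in {@code chunks}.
+   */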
+  private void putChunksInBlock(
+      int numOfChunksPerBlock,
+      int i,
+      List<ContainerProtos.ChunkInfo> chunks,
+      KeyValueContainer container,
+      BlockID blockID
+  ) throws IOException {
+    final long chunkLength = 100;
+    for (int k = 0; k < numOfChunksPerBlock; k++) {
+      final String chunkName = String.format("%d_chunk_%d_block_%d",
+          blockID.getContainerBlockID().getLocalID(), k, i);
+      final long offset = k * chunkLength;
+      ContainerProtos.ChunkInfo info =
+          ContainerProtos.ChunkInfo.newBuilder().setChunkName(chunkName)
+              .setLen(chunkLength).setOffset(offset)
+              .setChecksumData(Checksum.getNoChecksumDataProto()).build();
+      chunks.add(info);
+      ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset, chunkLength);
+      try (ChunkBuffer chunkData = ChunkBuffer.allocate((int) chunkLength)) {
+        chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE);
+        chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE);
+      }
+    }
+  }
+
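+  /**
+   * Creates the requested number of closed FILE_PER_BLOCK containers tagged
+   * as schema V2, each holding 10 blocks.
+   */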
+  private Map<KeyValueContainerData, Map<String, BlockData>>
+      genSchemaV2Containers(int numContainers) throws IOException {
+    conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, false);
+
+    // container id ==> blocks
+    final Map<KeyValueContainerData, Map<String, BlockData>> checkBlockDataMap =
+        new HashMap<>();
+
+    // create and close schema V2 containers
+    for (int i = 0; i < numContainers; i++) {
+      long containerId = ContainerTestHelper.getTestContainerID();
+
+      KeyValueContainerData data = new KeyValueContainerData(containerId,
+          ContainerLayoutVersion.FILE_PER_BLOCK,
+          ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(),
+          datanodeDetails.getUuidString());
+      data.setSchemaVersion(SCHEMA_V2);
+
+      KeyValueContainer container = new KeyValueContainer(data, conf);
+      container.create(volumeSet, volumeChoosingPolicy, SCM_ID);
+
+      containerSet.addContainer(container);
+      data = (KeyValueContainerData) containerSet.getContainer(containerId)
+          .getContainerData();
+      data.setSchemaVersion(SCHEMA_V2);
+
+      final Map<String, BlockData> blockDataMap =
+          putAnyBlockData(data, container, 10);
+
+      data.closeContainer();
+      container.close();
+
+      checkBlockDataMap.put(data, blockDataMap);
+    }
+
+    assertEquals(numContainers, checkBlockDataMap.size());
+
+    return checkBlockDataMap;
+  }
+
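+  /** Closes all volumes so the offline upgrade command can take them over. */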
+  public void shutdownAllVolume() {
+    for (StorageVolume storageVolume : volumeSet.getVolumesList()) {
+      storageVolume.shutdown();
+    }
+  }
+
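+  /**
+   * Verifies every original block can be read back from the per-volume
+   * schema V3 RocksDB store after the upgrade.
+   */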
+  private void checkV3MetaData(Map<KeyValueContainerData,
+      Map<String, BlockData>> blockDataMap, List<VolumeUpgradeResult> results) throws IOException {
+    Map<Long, VolumeUpgradeResult> volumeResults = new HashMap<>();
+
+    for (VolumeUpgradeResult result : results) {
+      result.getResultMap().forEach((k, v) -> volumeResults.put(k, result));
+    }
+
+    for (Map.Entry<KeyValueContainerData, Map<String, BlockData>> entry :
+        blockDataMap.entrySet()) {
+      final KeyValueContainerData containerData = entry.getKey();
+      final Map<String, BlockData> blockKeyValue = entry.getValue();
+      Long containerID = containerData.getContainerID();
+
+      final DatanodeStoreSchemaThreeImpl datanodeStoreSchemaThree =
+          volumeResults.get(containerID).getStore();
+      final Table<String, BlockData> blockDataTable =
+          datanodeStoreSchemaThree.getBlockDataTable();
+
+      for (Map.Entry<String, BlockData> blockDataEntry : blockKeyValue
+          .entrySet()) {
+        final String v3key = blockDataEntry.getKey();
+        final BlockData blockData = blockDataTable.get(v3key);
+        final BlockData originBlockData = blockDataEntry.getValue();
+
+        assertEquals(originBlockData.getSize(), blockData.getSize());
+        assertEquals(originBlockData.getLocalID(), blockData.getLocalID());
+      }
+    }
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]