This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 400e94d543 HDDS-11959. Remove tests for non-Ratis SCM (#7612)
400e94d543 is described below

commit 400e94d543e13373bff1c3abd787fcb0f94b0ab9
Author: Chung En Lee <[email protected]>
AuthorDate: Thu Jan 9 08:17:59 2025 +0800

    HDDS-11959. Remove tests for non-Ratis SCM (#7612)
---
 .../upgrade/TestDatanodeUpgradeToScmHA.java        | 604 ---------------------
 .../hadoop/hdds/scm/block/TestDeletedBlockLog.java |   2 -
 ...onfiguration.java => TestSCMConfiguration.java} |  73 +--
 .../ha/TestStatefulServiceStateManagerImpl.java    |   2 -
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |   1 -
 .../TestSCMHAUnfinalizedStateValidationAction.java |  54 +-
 .../hadoop/hdds/scm/TestSCMInstallSnapshot.java    |   3 +-
 .../apache/hadoop/hdds/scm/TestSCMSnapshot.java    |   1 -
 .../hdds/scm/TestStorageContainerManager.java      | 100 ++--
 .../metrics/TestSCMContainerManagerMetrics.java    |   1 +
 .../hdds/scm/storage/TestContainerCommandsEC.java  |   1 -
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |  28 +-
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java       |   1 -
 .../hadoop/ozone/TestSecureOzoneCluster.java       |  43 +-
 .../client/rpc/TestContainerStateMachine.java      |   2 +
 .../client/rpc/TestDeleteWithInAdequateDN.java     |   2 -
 .../commandhandler/TestBlockDeletion.java          |   2 -
 .../commandhandler/TestDeleteContainerHandler.java |   2 -
 .../hadoop/ozone/recon/TestReconScmHASnapshot.java |  65 ---
 .../ozone/recon/TestReconScmNonHASnapshot.java     |  64 ---
 .../apache/hadoop/ozone/recon/TestReconTasks.java  |   1 +
 .../ozone/shell/TestDeletedBlocksTxnShell.java     |   2 -
 22 files changed, 87 insertions(+), 967 deletions(-)

diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
deleted file mode 100644
index d4a27e74cd..0000000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
+++ /dev/null
@@ -1,604 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.upgrade;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.ozone.container.common.ScmTestMock;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.replication.ContainerImporter;
-import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource;
-import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource;
-import org.apache.ozone.test.LambdaTestUtils;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.io.TempDir;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.ValueSource;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.net.InetSocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-/**
- * Tests upgrading a single datanode from pre-SCM HA volume format that used
- * SCM ID to the post-SCM HA volume format using cluster ID. If SCM HA was
- * already being used before the upgrade, there should be no changes.
- */
-public class TestDatanodeUpgradeToScmHA {
-  @TempDir
-  private Path tempFolder;
-
-  private DatanodeStateMachine dsm;
-  private ContainerDispatcher dispatcher;
-  private OzoneConfiguration conf;
-  private static final String CLUSTER_ID = "clusterID";
-  private boolean scmHAAlreadyEnabled;
-
-  private RPC.Server scmRpcServer;
-  private InetSocketAddress address;
-  private ScmTestMock scmServerImpl;
-
-  private void setScmHAEnabled(boolean enableSCMHA)
-      throws Exception {
-    this.scmHAAlreadyEnabled = enableSCMHA;
-    conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, scmHAAlreadyEnabled);
-    setup();
-  }
-
-  private void setup() throws Exception {
-    address = SCMTestUtils.getReuseableAddress();
-    conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address);
-  }
-
-  @AfterEach
-  public void teardown() throws Exception {
-    if (scmRpcServer != null) {
-      scmRpcServer.stop();
-    }
-
-    if (dsm != null) {
-      dsm.close();
-    }
-  }
-
-  @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}")
-  @ValueSource(booleans = {true, false})
-  public void testReadsDuringFinalization(boolean enableSCMHA)
-      throws Exception {
-    setScmHAEnabled(enableSCMHA);
-    // start DN and SCM
-    startScmServer();
-    UpgradeTestHelper.addHddsVolume(conf, tempFolder);
-    dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address,
-        HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
-    dispatcher = dsm.getContainer().getDispatcher();
-    final Pipeline pipeline = MockPipeline.createPipeline(
-        Collections.singletonList(dsm.getDatanodeDetails()));
-
-    // Add data to read.
-    final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline);
-    ContainerProtos.WriteChunkRequestProto writeChunk =
-        UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline);
-    UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline);
-
-    // Create thread to keep reading during finalization.
-    ExecutorService executor = Executors.newFixedThreadPool(1);
-    Future<Void> readFuture = executor.submit(() -> {
-      // Layout version check should be thread safe.
-      while (!dsm.getLayoutVersionManager()
-          .isAllowed(HDDSLayoutFeature.SCM_HA)) {
-        UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline);
-      }
-      // Make sure we can read after finalizing too.
-      UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline);
-      return null;
-    });
-
-    dsm.finalizeUpgrade();
-    // If there was a failure reading during the upgrade, the exception will
-    // be thrown here.
-    readFuture.get();
-  }
-
-  @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}")
-  @ValueSource(booleans = {true, false})
-  public void testImportContainer(boolean enableSCMHA) throws Exception {
-    setScmHAEnabled(enableSCMHA);
-    // start DN and SCM
-    startScmServer();
-    UpgradeTestHelper.addHddsVolume(conf, tempFolder);
-    dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address,
-        HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
-    dispatcher = dsm.getContainer().getDispatcher();
-    final Pipeline pipeline = MockPipeline.createPipeline(
-        Collections.singletonList(dsm.getDatanodeDetails()));
-
-    // Pre-export a container to continuously import and delete.
-    final long exportContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline);
-    ContainerProtos.WriteChunkRequestProto exportWriteChunk =
-        UpgradeTestHelper.putBlock(dispatcher, exportContainerID, pipeline);
-    UpgradeTestHelper.closeContainer(dispatcher, exportContainerID, pipeline);
-    File exportedContainerFile = exportContainer(exportContainerID);
-    UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID, pipeline);
-
-    // Export another container to import while pre-finalized and read
-    // finalized.
-    final long exportContainerID2 = UpgradeTestHelper.addContainer(dispatcher, pipeline);
-    ContainerProtos.WriteChunkRequestProto exportWriteChunk2 =
-        UpgradeTestHelper.putBlock(dispatcher, exportContainerID2, pipeline);
-    UpgradeTestHelper.closeContainer(dispatcher, exportContainerID2, pipeline);
-    File exportedContainerFile2 = exportContainer(exportContainerID2);
-    UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID2, pipeline);
-
-    // Make sure we can import and read a container pre-finalized.
-    importContainer(exportContainerID2, exportedContainerFile2);
-    UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline);
-
-    // Now SCM and enough other DNs finalize to enable SCM HA. This DN is
-    // restarted with SCM HA config and gets a different SCM ID.
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
-    changeScmID();
-
-    dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address,
-        HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true);
-    dispatcher = dsm.getContainer().getDispatcher();
-
-    // Make sure the existing container can be read.
-    UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline);
-
-    // Create thread to keep importing containers during the upgrade.
-    // Since the datanode's MLV is behind SCM's, container creation is not
-    // allowed. We will keep importing and deleting the same container since
-    // we cannot create new ones to import here.
-    ExecutorService executor = Executors.newFixedThreadPool(1);
-    Future<Void> importFuture = executor.submit(() -> {
-      // Layout version check should be thread safe.
-      while (!dsm.getLayoutVersionManager()
-          .isAllowed(HDDSLayoutFeature.SCM_HA)) {
-        importContainer(exportContainerID, exportedContainerFile);
-        UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk, pipeline);
-        UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID, pipeline);
-      }
-      // Make sure we can import after finalizing too.
-      importContainer(exportContainerID, exportedContainerFile);
-      UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk, pipeline);
-      return null;
-    });
-
-    dsm.finalizeUpgrade();
-    // If there was a failure importing during the upgrade, the exception will
-    // be thrown here.
-    importFuture.get();
-
-    // Make sure we can read the container that was imported while
-    // pre-finalized after finalizing.
-    UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline);
-  }
-
-  @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}")
-  @ValueSource(booleans = {true, false})
-  public void testFailedVolumeDuringFinalization(boolean enableSCMHA)
-      throws Exception {
-    setScmHAEnabled(enableSCMHA);
-    /// SETUP ///
-
-    startScmServer();
-    String originalScmID = scmServerImpl.getScmId();
-    File volume = UpgradeTestHelper.addHddsVolume(conf, tempFolder);
-    dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address,
-        HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
-    dispatcher = dsm.getContainer().getDispatcher();
-    final Pipeline pipeline = MockPipeline.createPipeline(
-        Collections.singletonList(dsm.getDatanodeDetails()));
-
-    /// PRE-FINALIZED: Write and Read from formatted volume ///
-
-    assertEquals(1,
-        dsm.getContainer().getVolumeSet().getVolumesList().size());
-    assertEquals(0,
-        dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
-
-    // Add container with data, make sure it can be read and written.
-    final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline);
-    ContainerProtos.WriteChunkRequestProto writeChunk =
-        UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline);
-    UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline);
-
-    checkPreFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID);
-    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
-
-    // FINALIZE: With failed volume ///
-
-    failVolume(volume);
-    // Since volume is failed, container should be marked unhealthy.
-    // Finalization should proceed anyways.
-    UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline,
-        ContainerProtos.Result.CONTAINER_FILES_CREATE_ERROR);
-    State containerState = dsm.getContainer().getContainerSet()
-        .getContainer(containerID).getContainerState();
-    assertEquals(State.UNHEALTHY, containerState);
-    dsm.finalizeUpgrade();
-    LambdaTestUtils.await(2000, 500,
-        () -> dsm.getLayoutVersionManager()
-            .isAllowed(HDDSLayoutFeature.SCM_HA));
-
-    /// FINALIZED: Volume marked failed but gets restored on disk ///
-
-    // Check that volume is marked failed during finalization.
-    assertEquals(0,
-        dsm.getContainer().getVolumeSet().getVolumesList().size());
-    assertEquals(1,
-        dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
-
-    // Since the volume was out during the upgrade, it should maintain its
-    // original format.
-    checkPreFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID);
-    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
-
-    // Now that we are done finalizing, restore the volume.
-    restoreVolume(volume);
-    // After restoring the failed volume, its containers are readable again.
-    // However, since it is marked as failed no containers can be created or
-    // imported to it.
-    // This should log a warning about reading from an unhealthy container
-    // but otherwise proceed successfully.
-    UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline);
-
-    /// FINALIZED: Restart datanode to upgrade the failed volume ///
-
-    dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address,
-        HDDSLayoutFeature.SCM_HA.layoutVersion(), false);
-    dispatcher = dsm.getContainer().getDispatcher();
-
-    assertEquals(1,
-        dsm.getContainer().getVolumeSet().getVolumesList().size());
-    assertEquals(0,
-        dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
-
-    checkFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID);
-    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
-
-    // Read container from before upgrade. The upgrade required it to be closed.
-    UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline);
-    // Write and read container after upgrade.
-    long newContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline);
-    ContainerProtos.WriteChunkRequestProto newWriteChunk =
-        UpgradeTestHelper.putBlock(dispatcher, newContainerID, pipeline);
-    UpgradeTestHelper.readChunk(dispatcher, newWriteChunk, pipeline);
-    // The new container should use cluster ID in its path.
-    // The volume it is placed on is up to the implementation.
-    checkContainerPathID(newContainerID, CLUSTER_ID);
-  }
-
-  @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}")
-  @ValueSource(booleans = {true, false})
-  public void testFormattingNewVolumes(boolean enableSCMHA) throws Exception {
-    setScmHAEnabled(enableSCMHA);
-    /// SETUP ///
-
-    startScmServer();
-    String originalScmID = scmServerImpl.getScmId();
-    File preFinVolume1 = UpgradeTestHelper.addHddsVolume(conf, tempFolder);
-    dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address,
-        HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
-    dispatcher = dsm.getContainer().getDispatcher();
-    final Pipeline pipeline = MockPipeline.createPipeline(
-        Collections.singletonList(dsm.getDatanodeDetails()));
-
-    /// PRE-FINALIZED: Write and Read from formatted volume ///
-
-    assertEquals(1,
-        dsm.getContainer().getVolumeSet().getVolumesList().size());
-    assertEquals(0,
-        dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
-
-    // Add container with data, make sure it can be read and written.
-    final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline);
-    ContainerProtos.WriteChunkRequestProto writeChunk =
-        UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline);
-    UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline);
-
-    checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID);
-    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
-
-    /// PRE-FINALIZED: Restart with SCM HA enabled and new SCM ID ///
-
-    // Now SCM and enough other DNs finalize to enable SCM HA. This DN is
-    // restarted with SCM HA config and gets a different SCM ID.
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
-    changeScmID();
-    // A new volume is added that must be formatted.
-    File preFinVolume2 = UpgradeTestHelper.addHddsVolume(conf, tempFolder);
-
-    dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address,
-        HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true);
-    dispatcher = dsm.getContainer().getDispatcher();
-
-    assertEquals(2,
-        dsm.getContainer().getVolumeSet().getVolumesList().size());
-    assertEquals(0,
-        dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
-
-    // Because DN mlv would be behind SCM mlv, only reads are allowed.
-    UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline);
-
-    // On restart, there should have been no changes to the paths already used.
-    checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID);
-    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
-    // No new containers can be created on this volume since SCM MLV is ahead
-    // of DN MLV at this point.
-    // cluster ID should always be used for the new volume since SCM HA is now
-    // enabled.
-    checkVolumePathID(preFinVolume2, CLUSTER_ID);
-
-    /// FINALIZE ///
-
-    UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline);
-    dsm.finalizeUpgrade();
-    LambdaTestUtils.await(2000, 500,
-        () -> dsm.getLayoutVersionManager()
-            .isAllowed(HDDSLayoutFeature.SCM_HA));
-
-    /// FINALIZED: Add a new volume and check its formatting ///
-
-    // Add a new volume that should be formatted with cluster ID only, since
-    // DN has finalized.
-    File finVolume = UpgradeTestHelper.addHddsVolume(conf, tempFolder);
-    // Yet another SCM ID is received this time, but it should not matter.
-    changeScmID();
-
-    dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address,
-        HDDSLayoutFeature.SCM_HA.layoutVersion(), false);
-    dispatcher = dsm.getContainer().getDispatcher();
-
-    assertEquals(3,
-        dsm.getContainer().getVolumeSet().getVolumesList().size());
-    assertEquals(0,
-        dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
-
-    checkFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID);
-    checkVolumePathID(preFinVolume2, CLUSTER_ID);
-    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
-    // New volume should have been formatted with cluster ID only, since the
-    // datanode is finalized.
-    checkVolumePathID(finVolume, CLUSTER_ID);
-
-    /// FINALIZED: Read old data and write + read new data ///
-
-    // Read container from before upgrade. The upgrade required it to be closed.
-    UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline);
-    // Write and read container after upgrade.
-    long newContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline);
-    ContainerProtos.WriteChunkRequestProto newWriteChunk =
-        UpgradeTestHelper.putBlock(dispatcher, newContainerID, pipeline);
-    UpgradeTestHelper.readChunk(dispatcher, newWriteChunk, pipeline);
-    // The new container should use cluster ID in its path.
-    // The volume it is placed on is up to the implementation.
-    checkContainerPathID(newContainerID, CLUSTER_ID);
-  }
-
-  /// CHECKS FOR TESTING ///
-
-  public void checkContainerPathID(long containerID, String scmID,
-      String clusterID) {
-    if (scmHAAlreadyEnabled) {
-      checkContainerPathID(containerID, clusterID);
-    } else {
-      checkContainerPathID(containerID, scmID);
-    }
-  }
-
-  public void checkContainerPathID(long containerID, String expectedID) {
-    KeyValueContainerData data =
-        (KeyValueContainerData) dsm.getContainer().getContainerSet()
-            .getContainer(containerID).getContainerData();
-    assertThat(data.getChunksPath()).contains(expectedID);
-    assertThat(data.getMetadataPath()).contains(expectedID);
-  }
-
-  public void checkFinalizedVolumePathID(File volume, String scmID,
-      String clusterID) throws Exception {
-
-    if (scmHAAlreadyEnabled)  {
-      checkVolumePathID(volume, clusterID);
-    } else {
-      List<File> subdirs = getHddsSubdirs(volume);
-      File hddsRoot = getHddsRoot(volume);
-
-      // Volume should have SCM ID and cluster ID directory, where cluster ID
-      // is a symlink to SCM ID.
-      assertEquals(2, subdirs.size());
-
-      File scmIDDir = new File(hddsRoot, scmID);
-      assertThat(subdirs).contains(scmIDDir);
-
-      File clusterIDDir = new File(hddsRoot, CLUSTER_ID);
-      assertThat(subdirs).contains(clusterIDDir);
-      assertTrue(Files.isSymbolicLink(clusterIDDir.toPath()));
-      Path symlinkTarget = Files.readSymbolicLink(clusterIDDir.toPath());
-      assertEquals(scmID, symlinkTarget.toString());
-    }
-  }
-
-  public void checkPreFinalizedVolumePathID(File volume, String scmID,
-      String clusterID) {
-
-    if (scmHAAlreadyEnabled) {
-      checkVolumePathID(volume, clusterID);
-    } else {
-      checkVolumePathID(volume, scmID);
-    }
-
-  }
-
-  public void checkVolumePathID(File volume, String expectedID) {
-    List<File> subdirs;
-    File hddsRoot;
-    if (dnThinksVolumeFailed(volume)) {
-      // If the volume is failed, read from the failed location it was
-      // moved to.
-      subdirs = getHddsSubdirs(getFailedVolume(volume));
-      hddsRoot = getHddsRoot(getFailedVolume(volume));
-    } else {
-      subdirs = getHddsSubdirs(volume);
-      hddsRoot = getHddsRoot(volume);
-    }
-
-    // Volume should only have the specified ID directory.
-    assertEquals(1, subdirs.size());
-    File idDir = new File(hddsRoot, expectedID);
-    assertThat(subdirs).contains(idDir);
-  }
-
-  public List<File> getHddsSubdirs(File volume) {
-    File[] subdirsArray = getHddsRoot(volume).listFiles(File::isDirectory);
-    assertNotNull(subdirsArray);
-    return Arrays.asList(subdirsArray);
-  }
-
-  public File getHddsRoot(File volume) {
-    return new File(HddsVolumeUtil.getHddsRoot(volume.getAbsolutePath()));
-  }
-
-  /// CLUSTER OPERATIONS ///
-
-  private void startScmServer() throws Exception {
-    scmServerImpl = new ScmTestMock(CLUSTER_ID);
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        scmServerImpl, address, 10);
-  }
-
-  /**
-   * Updates the SCM ID on the SCM server. Datanode will not be aware of this
-   * until {@link UpgradeTestHelper#callVersionEndpointTask} is called.
-   * @return the new scm ID.
-   */
-  private String changeScmID() {
-    String scmID = UUID.randomUUID().toString();
-    scmServerImpl.setScmId(scmID);
-    return scmID;
-  }
-
-  /// CONTAINER OPERATIONS ///
-
-  /**
-   * Exports the specified container to a temporary file and returns the file.
-   */
-  private File exportContainer(long containerId) throws Exception {
-    final ContainerReplicationSource replicationSource =
-        new OnDemandContainerReplicationSource(
-            dsm.getContainer().getController());
-
-    replicationSource.prepare(containerId);
-
-    File destination =
-        Files.createFile(tempFolder.resolve("destFile" + containerId)).toFile();
-    try (FileOutputStream fos = new FileOutputStream(destination)) {
-      replicationSource.copyData(containerId, fos, NO_COMPRESSION);
-    }
-    return destination;
-  }
-
-  /**
-   * Imports the container found in {@code source} to the datanode with the ID
-   * {@code containerID}.
-   */
-  private void importContainer(long containerID, File source) throws Exception {
-    ContainerImporter replicator =
-        new ContainerImporter(dsm.getConf(),
-            dsm.getContainer().getContainerSet(),
-            dsm.getContainer().getController(),
-            dsm.getContainer().getVolumeSet());
-
-    File tempFile = Files.createFile(
-            tempFolder.resolve(ContainerUtils.getContainerTarName(containerID)))
-        .toFile();
-    Files.copy(source.toPath(), tempFile.toPath(),
-        StandardCopyOption.REPLACE_EXISTING);
-    replicator.importContainer(containerID, tempFile.toPath(), null,
-        NO_COMPRESSION);
-  }
-
-  /// VOLUME OPERATIONS ///
-
-  /**
-   * Renames the specified volume directory so it will appear as failed to
-   * the datanode.
-   */
-  public void failVolume(File volume) {
-    File failedVolume = getFailedVolume(volume);
-    assertTrue(volume.renameTo(failedVolume));
-  }
-
-  /**
-   * Convert the specified volume from its failed name back to its original
-   * name. The File passed should be the original volume path, not the one it
-   * was renamed to to fail it.
-   */
-  public void restoreVolume(File volume) {
-    File failedVolume = getFailedVolume(volume);
-    assertTrue(failedVolume.renameTo(volume));
-  }
-
-  /**
-   * @return The file name that will be used to rename a volume to fail it.
-   */
-  public File getFailedVolume(File volume) {
-    return new File(volume.getParent(), volume.getName() + "-failed");
-  }
-
-  /**
-   * Checks whether the datanode thinks the volume has failed.
-   * This could be outdated information if the volume was restored already
-   * and the datanode has not been restarted since then.
-   */
-  public boolean dnThinksVolumeFailed(File volume) {
-    return dsm.getContainer().getVolumeSet().getFailedVolumesList().stream()
-        .anyMatch(v ->
-            getHddsRoot(v.getStorageDir()).equals(getHddsRoot(volume)));
-  }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index 2a012cbe18..4fb323d745 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -112,7 +111,6 @@ public class TestDeletedBlockLog {
   @BeforeEach
   public void setup() throws Exception {
     conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     replicationManager = mock(ReplicationManager.class);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java
similarity index 80%
rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java
index 75a943ee8d..2d9a18c5a8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java
@@ -18,13 +18,11 @@
 package org.apache.hadoop.hdds.scm.ha;
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.ConfigurationException;
 import org.apache.hadoop.hdds.conf.DefaultConfigManager;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmRatisServerConfig;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.common.Storage;
@@ -35,13 +33,10 @@ import org.apache.ratis.util.TimeDuration;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.ValueSource;
 
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.UUID;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
@@ -63,8 +58,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVIC
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -72,7 +65,7 @@ import static org.mockito.Mockito.when;
 /**
  * Test for SCM HA-related configuration.
  */
-class TestSCMHAConfiguration {
+class TestSCMConfiguration {
   private OzoneConfiguration conf;
   @TempDir
   private File tempDir;
@@ -85,7 +78,7 @@ class TestSCMHAConfiguration {
   }
 
   @Test
-  public void testSCMHAConfig() throws Exception {
+  public void testSCMConfig() throws Exception {
     String scmServiceId = "scmserviceId";
     conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId);
 
@@ -225,7 +218,7 @@ class TestSCMHAConfiguration {
 
 
   @Test
-  public void testHAWithSamePortConfig() throws Exception {
+  public void testSamePortConfig() throws Exception {
     String scmServiceId = "scmserviceId";
     conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId);
 
@@ -301,25 +294,7 @@ class TestSCMHAConfiguration {
   }
 
   @Test
-  public void testRatisEnabledDefaultConfigWithoutInitializedSCM()
-      throws IOException {
-    SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class);
-    when(scmStorageConfig.getState()).thenReturn(Storage.StorageState.NOT_INITIALIZED);
-    SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig);
-    assertEquals(SCMHAUtils.isSCMHAEnabled(conf),
-        ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT);
-    DefaultConfigManager.clearDefaultConfigs();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false);
-    SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig);
-    assertFalse(SCMHAUtils.isSCMHAEnabled(conf));
-    DefaultConfigManager.clearDefaultConfigs();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
-    SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig);
-    assertTrue(SCMHAUtils.isSCMHAEnabled(conf));
-  }
-
-  @Test
-  public void testRatisEnabledDefaultConfigWithInitializedSCM()
+  public void testDefaultConfigWithInitializedSCM()
       throws IOException {
     SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class);
     when(scmStorageConfig.getState())
@@ -333,44 +308,4 @@ class TestSCMHAConfiguration {
     DefaultConfigManager.clearDefaultConfigs();
     assertTrue(SCMHAUtils.isSCMHAEnabled(conf));
   }
-
-  @Test
-  public void testRatisEnabledDefaultConflictConfigWithInitializedSCM() {
-    SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class);
-    when(scmStorageConfig.getState())
-        .thenReturn(Storage.StorageState.INITIALIZED);
-    when(scmStorageConfig.isSCMHAEnabled()).thenReturn(true);
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false);
-    assertThrows(ConfigurationException.class,
-            () -> SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig));
-  }
-
-  @ParameterizedTest
-  @ValueSource(booleans = {true, false})
-  void testHAConfig(boolean ratisEnabled) throws IOException {
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, ratisEnabled);
-    SCMStorageConfig scmStorageConfig = newStorageConfig(ratisEnabled);
-    StorageContainerManager.scmInit(conf, scmStorageConfig.getClusterID());
-    assertEquals(ratisEnabled, DefaultConfigManager.getValue(
-        ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, !ratisEnabled));
-  }
-
-  @Test
-  void testInvalidHAConfig() throws IOException {
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false);
-    SCMStorageConfig scmStorageConfig = newStorageConfig(true);
-    String clusterID = scmStorageConfig.getClusterID();
-    assertThrows(ConfigurationException.class,
-        () -> StorageContainerManager.scmInit(conf, clusterID));
-  }
-
-  private SCMStorageConfig newStorageConfig(
-      boolean ratisEnabled) throws IOException {
-    final SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf);
-    scmStorageConfig.setClusterId(UUID.randomUUID().toString());
-    scmStorageConfig.setSCMHAFlag(ratisEnabled);
-    scmStorageConfig.initialize();
-    return scmStorageConfig;
-  }
-
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java
index 4e69f46b6e..33da298423 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdds.scm.ha;
 
 import com.google.protobuf.ByteString;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
@@ -48,7 +47,6 @@ public class TestStatefulServiceStateManagerImpl {
   @BeforeEach
   void setup(@TempDir File testDir) throws IOException {
     conf = SCMTestUtils.getConf(testDir);
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get());
     statefulServiceConfig =
         SCMDBDefinition.STATEFUL_SERVICE_CONFIG.getTable(dbStore);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 568c11c541..e4e4a57232 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -184,7 +184,6 @@ public class TestSCMNodeManager {
         TimeUnit.MILLISECONDS);
     conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
     conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     return conf;
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java
index 8b4bc906e0..91dfaa1daf 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdds.scm.upgrade;
 
-import org.apache.hadoop.hdds.conf.ConfigurationException;
 import org.apache.hadoop.hdds.conf.DefaultConfigManager;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
@@ -26,19 +25,16 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.upgrade.UpgradeException;
 import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer;
 import org.apache.ratis.util.ExitUtils;
 import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.CsvSource;
 
 import java.nio.file.Path;
 import java.util.UUID;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
@@ -62,20 +58,12 @@ public class TestSCMHAUnfinalizedStateValidationAction {
     ExitUtils.disableSystemExit();
   }
 
-  @ParameterizedTest
-  @CsvSource({
-      "true, true",
-      "true, false",
-      "false, true",
-      "false, false",
-  })
-  public void testUpgrade(boolean haEnabledBefore,
-      boolean haEnabledPreFinalized, @TempDir Path dataPath) throws Exception {
+  @Test
+  public void testUpgrade(@TempDir Path dataPath) throws Exception {
     // Write version file for original version.
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION,
         HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, haEnabledBefore);
     conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dataPath.toString());
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, dataPath.toString());
     // This init should always succeed, since SCM is not pre-finalized yet.
@@ -83,43 +71,17 @@ public class TestSCMHAUnfinalizedStateValidationAction {
     boolean initResult1 = StorageContainerManager.scmInit(conf, CLUSTER_ID);
     assertTrue(initResult1);
 
-    // Set up new pre-finalized SCM.
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY,
-        haEnabledPreFinalized);
-    /* Clusters from Ratis SCM -> Non Ratis SCM
-       Ratis SCM -> Non Ratis SCM not supported
-     */
-    if (haEnabledPreFinalized != haEnabledBefore) {
-      if (haEnabledBefore) {
-        assertThrows(ConfigurationException.class,
-            () -> StorageContainerManager.scmInit(conf, CLUSTER_ID));
-      } else {
-        assertThrows(UpgradeException.class,
-            () -> StorageContainerManager.scmInit(conf, CLUSTER_ID));
-      }
-      return;
-    }
     StorageContainerManager scm = HddsTestUtils.getScm(conf);
 
     assertEquals(UpgradeFinalizer.Status.FINALIZATION_REQUIRED,
         scm.getFinalizationManager().getUpgradeFinalizer().getStatus());
 
-    final boolean shouldFail = !haEnabledBefore && haEnabledPreFinalized;
+
     DefaultConfigManager.clearDefaultConfigs();
-    if (shouldFail) {
-      // Start on its own should fail.
-      assertThrows(UpgradeException.class, scm::start);
+    boolean initResult2 = StorageContainerManager.scmInit(conf, CLUSTER_ID);
+    assertTrue(initResult2);
+    scm.start();
+    scm.stop();
 
-      // Init followed by start should both fail.
-      // Init is not necessary here, but is allowed to be run.
-      assertThrows(UpgradeException.class,
-          () -> StorageContainerManager.scmInit(conf, CLUSTER_ID));
-      assertThrows(UpgradeException.class, scm::start);
-    } else {
-      boolean initResult2 = StorageContainerManager.scmInit(conf, CLUSTER_ID);
-      assertTrue(initResult2);
-      scm.start();
-      scm.stop();
-    }
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
index e90c576e8d..ffdc49fd09 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
@@ -66,7 +66,6 @@ public class TestSCMInstallSnapshot {
   @BeforeAll
   static void setup(@TempDir Path tempDir) throws Exception {
     conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s");
     conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, 1L);
     conf.set(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_DIR, tempDir.toString());
@@ -105,7 +104,7 @@ public class TestSCMInstallSnapshot {
     pipelineManager.openPipeline(ratisPipeline2.getId());
     SCMNodeDetails scmNodeDetails = new SCMNodeDetails.Builder()
         .setRpcAddress(new InetSocketAddress("0.0.0.0", 0))
-        .setGrpcPort(ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT)
+        .setGrpcPort(conf.getInt(ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY, ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT))
         .setSCMNodeId("scm1")
         .build();
     Map<String, SCMNodeDetails> peerMap = new HashMap<>();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
index 0375d83baa..d0ad8222f6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
@@ -44,7 +44,6 @@ public class TestSCMSnapshot {
   @BeforeAll
   public static void setup() throws Exception {
     conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s");
     conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, 1L);
     cluster = MiniOzoneCluster
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
index 14df7670f6..b00c7f8040 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
@@ -188,13 +188,11 @@ public class TestStorageContainerManager {
   public void testRpcPermission() throws Exception {
     // Test with default configuration
     OzoneConfiguration defaultConf = new OzoneConfiguration();
-    defaultConf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     testRpcPermissionWithConf(defaultConf, any -> false, "unknownUser");
 
     // Test with ozone.administrators defined in configuration
     String admins = "adminUser1, adminUser2";
     OzoneConfiguration ozoneConf = new OzoneConfiguration();
-    ozoneConf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     ozoneConf.setStrings(OzoneConfigKeys.OZONE_ADMINISTRATORS, admins);
     // Non-admin user will get permission denied.
     // Admin user will pass the permission check.
@@ -266,7 +264,6 @@ public class TestStorageContainerManager {
   public void testBlockDeletionTransactions() throws Exception {
     int numKeys = 5;
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
         TimeUnit.MILLISECONDS);
     DatanodeConfiguration datanodeConfiguration = conf.getObject(
@@ -358,7 +355,6 @@ public class TestStorageContainerManager {
   @Test
   public void testOldDNRegistersToReInitialisedSCM() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS);
     conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS);
 
@@ -405,38 +401,41 @@ public class TestStorageContainerManager {
       assertThat(scmDnHBDispatcherLog.getOutput()).isEmpty();
       assertThat(versionEndPointTaskLog.getOutput()).isEmpty();
       // start the new SCM
-      scm.start();
-      // Initially DatanodeStateMachine will be in Running state
-      assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
-          dsm.getContext().getState());
-      // DN heartbeats to new SCM, SCM doesn't recognize the node, sends the
-      // command to DN to re-register. Wait for SCM to send re-register command
-      String expectedLog = String.format(
-          "SCM received heartbeat from an unregistered datanode %s. "
-              + "Asking datanode to re-register.",
-          datanode.getDatanodeDetails());
-      GenericTestUtils.waitFor(
-          () -> scmDnHBDispatcherLog.getOutput().contains(expectedLog), 100,
-          30000);
-      ExitUtil.disableSystemExit();
-      // As part of processing response for re-register, DN EndpointStateMachine
-      // goes to GET-VERSION state which checks if there is already existing
-      // version file on the DN & if the clusterID matches with that of the SCM
-      // In this case, it won't match and gets InconsistentStorageStateException
-      // and DN shuts down.
-      String expectedLog2 = "Received SCM notification to register."
-          + " Interrupt HEARTBEAT and transit to GETVERSION state.";
-      GenericTestUtils.waitFor(
-          () -> heartbeatEndpointTaskLog.getOutput().contains(expectedLog2),
-          100, 5000);
-      GenericTestUtils.waitFor(() -> dsm.getContext().getShutdownOnError(), 100,
-          5000);
-      assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
-          dsm.getContext().getState());
-      assertThat(versionEndPointTaskLog.getOutput()).contains(
-          "org.apache.hadoop.ozone.common" +
-              ".InconsistentStorageStateException: Mismatched ClusterIDs");
-      scm.stop();
+      try {
+        scm.start();
+        // Initially DatanodeStateMachine will be in Running state
+        assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
+            dsm.getContext().getState());
+        // DN heartbeats to new SCM, SCM doesn't recognize the node, sends the
+        // command to DN to re-register. Wait for SCM to send re-register command
+        String expectedLog = String.format(
+            "SCM received heartbeat from an unregistered datanode %s. "
+                + "Asking datanode to re-register.",
+            datanode.getDatanodeDetails());
+        GenericTestUtils.waitFor(
+            () -> scmDnHBDispatcherLog.getOutput().contains(expectedLog), 100,
+            30000);
+        ExitUtil.disableSystemExit();
+        // As part of processing response for re-register, DN EndpointStateMachine
+        // goes to GET-VERSION state which checks if there is already existing
+        // version file on the DN & if the clusterID matches with that of the SCM
+        // In this case, it won't match and gets InconsistentStorageStateException
+        // and DN shuts down.
+        String expectedLog2 = "Received SCM notification to register."
+            + " Interrupt HEARTBEAT and transit to GETVERSION state.";
+        GenericTestUtils.waitFor(
+            () -> heartbeatEndpointTaskLog.getOutput().contains(expectedLog2),
+            100, 5000);
+        GenericTestUtils.waitFor(() -> dsm.getContext().getShutdownOnError(), 100,
+            5000);
+        assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
+            dsm.getContext().getState());
+        assertThat(versionEndPointTaskLog.getOutput()).contains(
+            "org.apache.hadoop.ozone.common" +
+                ".InconsistentStorageStateException: Mismatched ClusterIDs");
+      } finally {
+        scm.stop();
+      }
     }
   }
 
@@ -444,7 +443,6 @@ public class TestStorageContainerManager {
   public void testBlockDeletingThrottling() throws Exception {
     int numKeys = 15;
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
     conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
@@ -555,7 +553,6 @@ public class TestStorageContainerManager {
   @Test
   public void testSCMInitialization(@TempDir Path tempDir) throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     Path scmPath = tempDir.resolve("scm-meta");
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
 
@@ -577,7 +574,6 @@ public class TestStorageContainerManager {
   @Test
   public void testSCMReinitialization(@TempDir Path tempDir) throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     Path scmPath = tempDir.resolve("scm-meta");
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
     //This will set the cluster id in the version file
@@ -639,7 +635,6 @@ public class TestStorageContainerManager {
   @Test
   void testSCMInitializationFailure(@TempDir Path tempDir) {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     Path scmPath = tempDir.resolve("scm-meta");
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
 
@@ -652,7 +647,6 @@ public class TestStorageContainerManager {
     OzoneConfiguration conf = new OzoneConfiguration();
     Path scmPath = tempDir.resolve("scm-meta");
 
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
     SCMStorageConfig scmStore = new SCMStorageConfig(conf);
     String clusterId = UUID.randomUUID().toString();
@@ -666,15 +660,19 @@ public class TestStorageContainerManager {
         SCMHANodeDetails.loadSCMHAConfig(conf, scmStore)
             .getLocalNodeDetails(), conf);
     StorageContainerManager scm = HddsTestUtils.getScmSimple(conf);
-    scm.start();
-    //Reads the SCM Info from SCM instance
-    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
-    assertEquals(clusterId, scmInfo.getClusterId());
-    assertEquals(scmId, scmInfo.getScmId());
-
-    String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion();
-    String actualVersion = scm.getSoftwareVersion();
-    assertEquals(expectedVersion, actualVersion);
+    try {
+      scm.start();
+      //Reads the SCM Info from SCM instance
+      ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
+      assertEquals(clusterId, scmInfo.getClusterId());
+      assertEquals(scmId, scmInfo.getScmId());
+
+      String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion();
+      String actualVersion = scm.getSoftwareVersion();
+      assertEquals(expectedVersion, actualVersion);
+    } finally {
+      scm.stop();
+    }
   }
 
   /**
@@ -684,7 +682,6 @@ public class TestStorageContainerManager {
   public void testScmProcessDatanodeHeartbeat() throws Exception {
     String rackName = "/rack1";
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
         StaticMapping.class, DNSToSwitchMapping.class);
     StaticMapping.addNodeToRack(NetUtils.normalizeHostName(HddsUtils.getHostName(conf)),
@@ -727,7 +724,6 @@ public class TestStorageContainerManager {
   public void testCloseContainerCommandOnRestart() throws Exception {
     int numKeys = 15;
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
     conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
index 14875781b9..84b1f1610a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
@@ -68,6 +68,7 @@ public class TestSCMContainerManagerMetrics {
     conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
     cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
     client = cluster.newClient();
     scm = cluster.getStorageContainerManager();
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
index bf40a600e2..ca4e1a896b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
@@ -171,7 +171,6 @@ public class TestContainerCommandsEC {
   @BeforeAll
   public static void init() throws Exception {
     config = new OzoneConfiguration();
-    config.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     config.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
     config.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
     config.setBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED, true);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 30e41764d3..b3d9f78088 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -40,7 +40,6 @@ import com.amazonaws.client.builder.AwsClientBuilder;
 import com.amazonaws.regions.Regions;
 import com.amazonaws.services.s3.AmazonS3;
 import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -50,7 +49,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails;
-import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
 import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -191,10 +189,8 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
 
   public void waitForSCMToBeReady() throws TimeoutException,
       InterruptedException {
-    if (SCMHAUtils.isSCMHAEnabled(conf)) {
-      GenericTestUtils.waitFor(scm::checkLeader,
-          1000, waitForClusterToBeReadyTimeout);
-    }
+    GenericTestUtils.waitFor(scm::checkLeader,
+        1000, waitForClusterToBeReadyTimeout);
   }
 
   public StorageContainerManager getActiveSCM() {
@@ -752,18 +748,12 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
       scmStore.setClusterId(clusterId);
       scmStore.setScmId(scmId);
       scmStore.initialize();
-      //TODO: HDDS-6897
-      //Disabling Ratis for only of MiniOzoneClusterImpl.
-      //MiniOzoneClusterImpl doesn't work with Ratis enabled SCM
-      if (StringUtils.isNotEmpty(
-          conf.get(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY))
-              && SCMHAUtils.isSCMHAEnabled(conf)) {
-        scmStore.setSCMHAFlag(true);
-        scmStore.persistCurrentState();
-        SCMRatisServerImpl.initialize(clusterId, scmId,
-                SCMHANodeDetails.loadSCMHAConfig(conf, scmStore)
-                        .getLocalNodeDetails(), conf);
-      }
+      scmStore.setSCMHAFlag(true);
+      scmStore.persistCurrentState();
+      SCMRatisServerImpl.initialize(clusterId, scmId,
+          SCMHANodeDetails.loadSCMHAConfig(conf, scmStore)
+              .getLocalNodeDetails(), conf);
+
     }
 
     void initializeOmStorage(OMStorage omStorage) throws IOException {
@@ -876,6 +866,8 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
           localhostWithFreePort());
       conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
           "3s");
+      conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY, getFreePort());
+      conf.setInt(ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY, getFreePort());
     }
 
     private void configureOM() {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
index 9df70f1b7c..1526933028 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
@@ -562,7 +562,6 @@ public class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
             OzoneConfiguration scmConfig = new OzoneConfiguration(conf);
             scmConfig.set(OZONE_METADATA_DIRS, metaDirPath);
             scmConfig.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, nodeId);
-            scmConfig.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
 
             configureSCM();
             if (i == 1) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 637e8bd9e4..d71a4854c9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.ScmConfig;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.client.ScmTopologyClient;
@@ -65,7 +64,6 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey;
-import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultApprover;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.profile.DefaultProfile;
@@ -117,7 +115,6 @@ import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.lang3.StringUtils;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_ENABLED;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_CA_ROTATION_ACK_TIMEOUT;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_CA_ROTATION_CHECK_INTERNAL;
@@ -169,7 +166,6 @@ import org.slf4j.LoggerFactory;
 import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
@@ -232,7 +228,6 @@ final class TestSecureOzoneCluster {
       conf.setInt(OZONE_SCM_GRPC_PORT_KEY, getFreePort());
       conf.set(OZONE_OM_ADDRESS_KEY,
           InetAddress.getLocalHost().getCanonicalHostName() + ":" + getFreePort());
-      conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false);
 
       DefaultMetricsSystem.setMiniClusterMode(true);
       ExitUtils.disableSystemExit();
@@ -353,10 +348,17 @@ final class TestSecureOzoneCluster {
     initSCM();
     scm = HddsTestUtils.getScmSimple(conf);
     //Reads the SCM Info from SCM instance
-    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
-    assertEquals(clusterId, scmInfo.getClusterId());
-    assertEquals(scmId, scmInfo.getScmId());
-    assertEquals(2, scm.getScmCertificateClient().getTrustChain().size());
+    try {
+      scm.start();
+      ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
+      assertEquals(clusterId, scmInfo.getClusterId());
+      assertEquals(scmId, scmInfo.getScmId());
+      assertEquals(2, scm.getScmCertificateClient().getTrustChain().size());
+    } finally {
+      if (scm != null) {
+        scm.stop();
+      }
+    }
   }
 
   @Test
@@ -444,28 +446,6 @@ final class TestSecureOzoneCluster {
     }
   }
 
-  @Test
-  void testSecretManagerInitializedNonHASCM() throws Exception {
-    conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
-    initSCM();
-    scm = HddsTestUtils.getScmSimple(conf);
-    //Reads the SCM Info from SCM instance
-    try {
-      scm.start();
-
-      SecretKeyManager secretKeyManager = scm.getSecretKeyManager();
-      boolean inSafeMode = scm.getScmSafeModeManager().getInSafeMode();
-      assertFalse(SCMHAUtils.isSCMHAEnabled(conf));
-      assertTrue(inSafeMode);
-      assertNotNull(secretKeyManager);
-      assertTrue(secretKeyManager.isInitialized());
-    } finally {
-      if (scm != null) {
-        scm.stop();
-      }
-    }
-  }
-
   private void initSCM() throws IOException {
     Path scmPath = new File(tempDir, "scm-meta").toPath();
     Files.createDirectories(scmPath);
@@ -474,6 +454,7 @@ final class TestSecureOzoneCluster {
     SCMStorageConfig scmStore = new SCMStorageConfig(conf);
     scmStore.setClusterId(clusterId);
     scmStore.setScmId(scmId);
+    scmStore.setSCMHAFlag(true);
     HASecurityUtils.initializeSecurity(scmStore, conf,
         InetAddress.getLocalHost().getHostName(), true);
     scmStore.setPrimaryScmNodeId(scmId);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
index dc00b0acc5..7c1c6874c1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl;
@@ -114,6 +115,7 @@ public class TestContainerStateMachine {
             .build();
     cluster.setWaitForClusterToBeReadyTimeout(300000);
     cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
     cluster.getOzoneManager().startSecretManager();
     //the easiest way to create an open container is creating a key
     client = OzoneClientFactory.getRpcClient(conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
index 2b199306b7..bc7bb36a24 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
@@ -66,7 +66,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_I
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
@@ -106,7 +105,6 @@ public class TestDeleteWithInAdequateDN {
 
     conf = new OzoneConfiguration();
 
-    conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true);
     conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100,
         TimeUnit.MILLISECONDS);
     conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index e38312e02e..df5f3ec0d2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -94,7 +94,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_I
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
@@ -133,7 +132,6 @@ public class TestBlockDeletion {
     GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG);
     GenericTestUtils.setLogLevel(ReplicationManager.LOG, Level.DEBUG);
 
-    conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true);
     conf.set("ozone.replication.allowed-configs",
         "^(RATIS/THREE)|(EC/2-1-256k)$");
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 705ef1e0d8..0006feb858 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -75,7 +75,6 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -98,7 +97,6 @@ public class TestDeleteContainerHandler {
   @BeforeAll
   public static void setup() throws Exception {
     conf = new OzoneConfiguration();
-    conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true);
     conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
     conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
         0, StorageUnit.MB);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java
deleted file mode 100644
index 6006ce6758..0000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Timeout;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
-
-/**
- * Test Recon SCM HA Snapshot Download implementation.
- */
-@Timeout(300)
-public class TestReconScmHASnapshot {
-  private OzoneConfiguration conf;
-  private MiniOzoneCluster ozoneCluster = null;
-
-  @BeforeEach
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true);
-    conf.setBoolean(
-        ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, true);
-    conf.setInt(ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, 0);
-    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 5);
-    ozoneCluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(4)
-        .includeRecon(true)
-        .build();
-    ozoneCluster.waitForClusterToBeReady();
-  }
-
-  @Test
-  public void testScmHASnapshot() throws Exception {
-    TestReconScmSnapshot.testSnapshot(ozoneCluster);
-  }
-
-  @AfterEach
-  public void shutdown() throws Exception {
-    if (ozoneCluster != null) {
-      ozoneCluster.shutdown();
-    }
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java
deleted file mode 100644
index ae342e63e8..0000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.recon;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
-
-/**
- * Test Recon SCM HA Snapshot Download implementation.
- */
-@Timeout(300)
-public class TestReconScmNonHASnapshot {
-  private OzoneConfiguration conf;
-  private MiniOzoneCluster ozoneCluster = null;
-
-  @BeforeEach
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, false);
-    conf.setBoolean(
-        ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, true);
-    conf.setInt(ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, 0);
-    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 5);
-    ozoneCluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(4)
-        .includeRecon(true)
-        .build();
-    ozoneCluster.waitForClusterToBeReady();
-  }
-
-  @Test
-  public void testScmNonHASnapshot() throws Exception {
-    TestReconScmSnapshot.testSnapshot(ozoneCluster);
-  }
-
-  @AfterEach
-  public void shutdown() throws Exception {
-    if (ozoneCluster != null) {
-      ozoneCluster.shutdown();
-    }
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
index 4476cbc3e3..e4b81da020 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
@@ -79,6 +79,7 @@ public class TestReconTasks {
     cluster =  MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1)
         .includeRecon(true).build();
     cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(ONE, 30000);
     GenericTestUtils.setLogLevel(SCMDatanodeHeartbeatDispatcher.LOG,
         Level.DEBUG);
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
index 730a2479a5..fd27652791 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
 import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
@@ -93,7 +92,6 @@ public class TestDeletedBlocksTxnShell {
     conf = new OzoneConfiguration();
     scmServiceId = "scm-service-test1";
 
-    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
 
     cluster = MiniOzoneCluster.newHABuilder(conf)


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
