This is an automated email from the ASF dual-hosted git repository.

cshannon pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 121758a40e Convert several ITs to use SharedMiniClusterBase (#5352)
121758a40e is described below

commit 121758a40e35df5589e53d9cfecdd95f7705cc56
Author: Christopher L. Shannon <cshan...@apache.org>
AuthorDate: Mon Feb 24 18:37:10 2025 -0500

    Convert several ITs to use SharedMiniClusterBase (#5352)
    
    This change migrates several ITs to use SharedMiniClusterBase so that
    a single cluster is stood up and reused for all the tests in the test
    class. This speeds up the tests because the entire cluster does not
    need to be torn down and recreated for each test.
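    
    As an illustration, the converted tests follow the pattern sketched
    below (ExampleIT is a hypothetical class; the hooks and helpers mirror
    the ones added in the diff that follows):
    
        import org.apache.accumulo.core.client.Accumulo;
        import org.apache.accumulo.core.client.AccumuloClient;
        import org.apache.accumulo.harness.SharedMiniClusterBase;
        import org.junit.jupiter.api.AfterAll;
        import org.junit.jupiter.api.BeforeAll;
        import org.junit.jupiter.api.Test;
    
        public class ExampleIT extends SharedMiniClusterBase {
    
          @BeforeAll
          public static void setup() throws Exception {
            // Start one MiniAccumuloCluster that is shared by every test
            // method in this class
            SharedMiniClusterBase.startMiniCluster();
          }
    
          @AfterAll
          public static void teardown() {
            // Stop the shared cluster once, after all tests have run
            SharedMiniClusterBase.stopMiniCluster();
          }
    
          @Test
          public void example() throws Exception {
            // getClientProps() supplies connection properties for the
            // shared cluster
            try (AccumuloClient client =
                Accumulo.newClient().from(getClientProps()).build()) {
              client.tableOperations().create(getUniqueNames(1)[0]);
            }
          }
        }
    
    Classes that need custom cluster settings instead use
    SharedMiniClusterBase.startMiniClusterWithConfig(...), as several of
    the converted tests below do.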
---
 .../org/apache/accumulo/test/AdminCheckIT.java     | 23 +++++--
 .../java/org/apache/accumulo/test/CloneIT.java     | 36 ++++++++---
 .../org/apache/accumulo/test/LargeSplitRowIT.java  | 72 +++++++++++++---------
 .../accumulo/test/MultiTableBatchWriterIT.java     | 16 ++++-
 .../org/apache/accumulo/test/OrIteratorIT.java     | 17 ++++-
 .../java/org/apache/accumulo/test/SampleIT.java    | 36 ++++++++---
 .../accumulo/test/functional/AddSplitIT.java       | 22 +++++--
 .../accumulo/test/functional/CloneTestIT.java      | 18 +++++-
 .../test/functional/CreateInitialSplitsIT.java     | 30 +++++----
 .../{MergeIT.java => MergeTabletsBaseIT.java}      | 72 +++++++++++-----------
 ...akyFateIT.java => MergeTabletsFlakyFateIT.java} | 21 +++++--
 .../{MergeFlakyFateIT.java => MergeTabletsIT.java} | 25 ++++----
 .../accumulo/test/functional/MetadataIT.java       | 24 +++++---
 .../test/functional/SessionDurabilityIT.java       | 42 ++++++++-----
 14 files changed, 299 insertions(+), 155 deletions(-)

diff --git a/test/src/main/java/org/apache/accumulo/test/AdminCheckIT.java b/test/src/main/java/org/apache/accumulo/test/AdminCheckIT.java
index 8e32f1c89a..13b0cbf410 100644
--- a/test/src/main/java/org/apache/accumulo/test/AdminCheckIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/AdminCheckIT.java
@@ -40,22 +40,35 @@ import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.cli.ServerUtilOpts;
 import org.apache.accumulo.server.util.Admin;
 import org.apache.accumulo.server.util.checkCommand.CheckRunner;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
 import org.apache.accumulo.test.functional.ReadWriteIT;
 import org.apache.accumulo.test.functional.SlowIterator;
 import org.easymock.EasyMock;
 import org.easymock.IAnswer;
+import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
 import com.beust.jcommander.JCommander;
 import com.google.common.collect.Sets;
 
-public class AdminCheckIT extends ConfigurableMacBase {
+public class AdminCheckIT extends SharedMiniClusterBase {
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniCluster();
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
+  }
+
   private static final PrintStream ORIGINAL_OUT = System.out;
 
   @AfterEach
@@ -308,7 +321,7 @@ public class AdminCheckIT extends ConfigurableMacBase {
     String table = getUniqueNames(1)[0];
     Admin.CheckCommand.Check tableLocksCheck = Admin.CheckCommand.Check.TABLE_LOCKS;
 
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       client.tableOperations().create(table);
 
       ReadWriteIT.ingest(client, 10, 10, 10, 0, table);
@@ -400,7 +413,7 @@ public class AdminCheckIT extends ConfigurableMacBase {
     // Tests the USER_FILES check in the case where it should pass
     Admin.CheckCommand.Check userFilesCheck = Admin.CheckCommand.Check.USER_FILES;
 
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       // create a table, insert some data, and flush so there's a file to check
       String table = getUniqueNames(1)[0];
       client.tableOperations().create(table);
@@ -447,7 +460,7 @@ public class AdminCheckIT extends ConfigurableMacBase {
       Admin.CheckCommand dummyCheckCommand = new DummyCheckCommand(checksPass);
       cl.addCommand("check", dummyCheckCommand);
       cl.parse(args);
-      Admin.executeCheckCommand(getServerContext(), dummyCheckCommand, opts);
+      Admin.executeCheckCommand(getCluster().getServerContext(), dummyCheckCommand, opts);
       return null;
     });
     EasyMock.replay(admin);
diff --git a/test/src/main/java/org/apache/accumulo/test/CloneIT.java b/test/src/main/java/org/apache/accumulo/test/CloneIT.java
index 5b84f3685a..d00d77bd2f 100644
--- a/test/src/main/java/org/apache/accumulo/test/CloneIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CloneIT.java
@@ -24,6 +24,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.HashSet;
 import java.util.Map.Entry;
+import java.util.UUID;
 import java.util.stream.Stream;
 
 import org.apache.accumulo.core.client.Accumulo;
@@ -43,10 +44,12 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Se
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
 import org.apache.accumulo.core.metadata.schema.TabletDeletedException;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtensionContext;
 import org.junit.jupiter.params.ParameterizedTest;
@@ -54,12 +57,22 @@ import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.ArgumentsProvider;
 import org.junit.jupiter.params.provider.ArgumentsSource;
 
-public class CloneIT extends AccumuloClusterHarness {
+public class CloneIT extends SharedMiniClusterBase {
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniCluster();
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
+  }
 
   @Test
   public void testNoFiles() throws Exception {
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-      String tableName = getUniqueNames(1)[0];
+      String tableName = generateTableName();
       client.tableOperations().create(tableName);
 
       KeyExtent ke = new KeyExtent(TableId.of("0"), null, null);
@@ -88,7 +101,7 @@ public class CloneIT extends AccumuloClusterHarness {
   public void testFilesChange(Range range1, Range range2) throws Exception {
     String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-      String tableName = getUniqueNames(1)[0];
+      String tableName = generateTableName();
       client.tableOperations().create(tableName);
 
       KeyExtent ke = new KeyExtent(TableId.of("0"), null, null);
@@ -150,7 +163,7 @@ public class CloneIT extends AccumuloClusterHarness {
     String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
 
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-      String tableName = getUniqueNames(1)[0];
+      String tableName = generateTableName();
       client.tableOperations().create(tableName);
 
       try (BatchWriter bw1 = client.createBatchWriter(tableName);
@@ -198,7 +211,7 @@ public class CloneIT extends AccumuloClusterHarness {
   public void testSplit2(Range range) throws Exception {
     String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-      String tableName = getUniqueNames(1)[0];
+      String tableName = generateTableName();
       client.tableOperations().create(tableName);
 
       try (BatchWriter bw1 = client.createBatchWriter(tableName);
@@ -280,7 +293,7 @@ public class CloneIT extends AccumuloClusterHarness {
   public void testSplit3(Range range1, Range range2, Range range3) throws Exception {
     String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-      String tableName = getUniqueNames(1)[0];
+      String tableName = generateTableName();
       client.tableOperations().create(tableName);
 
       try (BatchWriter bw1 = client.createBatchWriter(tableName);
@@ -329,7 +342,7 @@ public class CloneIT extends AccumuloClusterHarness {
   @ArgumentsSource(RangeArgumentsProvider.class)
   public void testClonedMarker(Range range1, Range range2, Range range3) throws Exception {
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-      String tableName = getUniqueNames(1)[0];
+      String tableName = generateTableName();
       client.tableOperations().create(tableName);
       String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
 
@@ -400,7 +413,7 @@ public class CloneIT extends AccumuloClusterHarness {
   public void testMerge(Range range1, Range range2) throws Exception {
     String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-      String tableName = getUniqueNames(1)[0];
+      String tableName = generateTableName();
       client.tableOperations().create(tableName);
 
       try (BatchWriter bw1 = client.createBatchWriter(tableName);
@@ -443,4 +456,9 @@ public class CloneIT extends AccumuloClusterHarness {
               new Range("row_0", false, "row_1", true), new Range()));
     }
   }
+
+  // Append random text because parameterized tests repeat the same test name
+  private String generateTableName() {
+    return getUniqueNames(1)[0] + UUID.randomUUID().toString().substring(0, 8);
+  }
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java b/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java
index a2a304a5f2..cf7479c3f3 100644
--- a/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java
@@ -49,19 +49,31 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.dataImpl.KeyExtent;
 import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.server.split.SplitUtils;
 import org.apache.accumulo.test.fate.ManagerRepoIT;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
 import org.apache.accumulo.test.util.Wait;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class LargeSplitRowIT extends ConfigurableMacBase {
+public class LargeSplitRowIT extends SharedMiniClusterBase {
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniClusterWithConfig(
+        (cfg, coreSite) -> cfg.getClusterServerConfiguration().setNumDefaultTabletServers(1));
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
+  }
+
   private static final Logger log = LoggerFactory.getLogger(LargeSplitRowIT.class);
 
   @Override
@@ -69,11 +81,6 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
     return Duration.ofMinutes(1);
   }
 
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.getClusterServerConfiguration().setNumDefaultTabletServers(1);
-  }
-
   // User added split
   @Test
   public void userAddedSplit() throws Exception {
@@ -82,7 +89,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
 
     // make a table and lower the TABLE_END_ROW_MAX_SIZE property
     final String tableName = getUniqueNames(1)[0];
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       Map<String,String> props = Map.of(Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
       client.tableOperations().create(tableName, new NewTableConfiguration().setProperties(props));
 
@@ -128,7 +135,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
 
     // make a table and lower the configuration properties
     final String tableName = getUniqueNames(1)[0];
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       // @formatter:off
       Map<String,String> props = Map.of(
         Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K",
@@ -164,14 +171,16 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
 
       // Wait for the tablet to be marked as unsplittable due to the system split running
       TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
-      Wait.waitFor(() -> getServerContext().getAmple()
-          .readTablet(new KeyExtent(tableId, null, null)).getUnSplittable() != null,
+      Wait.waitFor(
+          () -> getCluster().getServerContext().getAmple()
+              .readTablet(new KeyExtent(tableId, null, null)).getUnSplittable() != null,
           Wait.MAX_WAIT_MILLIS, 100);
 
       // Verify that the unsplittable column is read correctly
       TabletMetadata tm =
-          getServerContext().getAmple().readTablet(new KeyExtent(tableId, null, null));
-      assertEquals(tm.getUnSplittable(), SplitUtils.toUnSplittable(getServerContext(), tm));
+          getCluster().getServerContext().getAmple().readTablet(new KeyExtent(tableId, null, null));
+      assertEquals(tm.getUnSplittable(),
+          SplitUtils.toUnSplittable(getCluster().getServerContext(), tm));
 
       // Make sure all the data that was put in the table is still correct
       int count = 0;
@@ -199,7 +208,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
   @Timeout(60)
   public void automaticSplitWithGaps() throws Exception {
     log.info("Automatic Split With Gaps");
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       automaticSplit(client, 30, 2);
     }
   }
@@ -209,7 +218,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
   @Timeout(60)
   public void automaticSplitWithoutGaps() throws Exception {
     log.info("Automatic Split Without Gaps");
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       automaticSplit(client, 15, 1);
     }
   }
@@ -218,7 +227,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
   @Timeout(120)
   public void automaticSplitLater() throws Exception {
     log.info("Split later");
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       // Generate large rows which have long common prefixes and therefore no split can be found.
       // Setting max to 1 causes all rows to have long common prefixes. Setting a max of greater
       // than 1 would generate a row with a short common prefix.
@@ -262,7 +271,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
   @Timeout(60)
   public void testUnsplittableColumn() throws Exception {
     log.info("Unsplittable Column Test");
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       // make a table and lower the configuration properties
       // @formatter:off
       var maxEndRow = 100;
@@ -298,15 +307,16 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
 
       // Wait for the tablets to be marked as unsplittable due to the system split running
       TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
-      Wait.waitFor(() -> getServerContext().getAmple()
-          .readTablet(new KeyExtent(tableId, null, null)).getUnSplittable() != null,
+      Wait.waitFor(
+          () -> getCluster().getServerContext().getAmple()
+              .readTablet(new KeyExtent(tableId, null, null)).getUnSplittable() != null,
           Wait.MAX_WAIT_MILLIS, 100);
 
       // Verify that the unsplittable column is read correctly
       TabletMetadata tm =
-          getServerContext().getAmple().readTablet(new KeyExtent(tableId, null, null));
+          getCluster().getServerContext().getAmple().readTablet(new KeyExtent(tableId, null, null));
       var unsplittable = tm.getUnSplittable();
-      assertEquals(unsplittable, SplitUtils.toUnSplittable(getServerContext(), tm));
+      assertEquals(unsplittable, SplitUtils.toUnSplittable(getCluster().getServerContext(), tm));
 
       // Make sure no splits occurred in the table
       assertTrue(client.tableOperations().listSplits(tableName).isEmpty());
@@ -318,13 +328,15 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
 
       // wait for the unsplittable marker to be set to a new value due to the property change
       Wait.waitFor(() -> {
-        var updatedUnsplittable = getServerContext().getAmple()
+        var updatedUnsplittable = getCluster().getServerContext().getAmple()
             .readTablet(new KeyExtent(tableId, null, null)).getUnSplittable();
         return updatedUnsplittable != null && !updatedUnsplittable.equals(unsplittable);
       }, Wait.MAX_WAIT_MILLIS, 100);
       // recheck with the computed meta is correct after property update
-      tm = getServerContext().getAmple().readTablet(new KeyExtent(tableId, null, null));
-      assertEquals(tm.getUnSplittable(), SplitUtils.toUnSplittable(getServerContext(), tm));
+      tm = getCluster().getServerContext().getAmple()
+          .readTablet(new KeyExtent(tableId, null, null));
+      assertEquals(tm.getUnSplittable(),
+          SplitUtils.toUnSplittable(getCluster().getServerContext(), tm));
 
       // Bump max end row size and verify split occurs and unsplittable column is cleaned up
       client.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(),
@@ -338,7 +350,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
       // Verify all tablets have no unsplittable metadata column
       Wait.waitFor(() -> {
         try (var tabletsMetadata =
-            getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+            getCluster().getServerContext().getAmple().readTablets().forTable(tableId).build()) {
           return tabletsMetadata.stream()
               .allMatch(tabletMetadata -> tabletMetadata.getUnSplittable() == null);
         }
@@ -355,7 +367,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
   @Timeout(60)
   public void testUnsplittableCleanup() throws Exception {
     log.info("Unsplittable Column Cleanup");
-    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       // make a table and lower the configuration properties
       // @formatter:off
       Map<String,String> props = Map.of(
@@ -394,7 +406,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
       // as unsplittable due to the same end row for all keys after the 
default tablet is split
       Wait.waitFor(() -> {
         try (var tabletsMetadata =
-            getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+            getCluster().getServerContext().getAmple().readTablets().forTable(tableId).build()) {
           return tabletsMetadata.stream().anyMatch(tm -> tm.getUnSplittable() != null);
         }
       }, Wait.MAX_WAIT_MILLIS, 100);
@@ -409,7 +421,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
       // same number of splits as before
       Wait.waitFor(() -> {
         try (var tabletsMetadata =
-            getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+            getCluster().getServerContext().getAmple().readTablets().forTable(tableId).build()) {
           return tabletsMetadata.stream().allMatch(tm -> tm.getUnSplittable() == null);
         }
       }, Wait.MAX_WAIT_MILLIS, 100);
diff --git a/test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java b/test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
index 98c835e624..4a483eca73 100644
--- a/test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
@@ -41,14 +41,26 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
 import com.google.common.collect.Maps;
 
-public class MultiTableBatchWriterIT extends AccumuloClusterHarness {
+public class MultiTableBatchWriterIT extends SharedMiniClusterBase {
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniCluster();
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
+  }
 
   private AccumuloClient accumuloClient;
   private MultiTableBatchWriter mtbw;
diff --git a/test/src/main/java/org/apache/accumulo/test/OrIteratorIT.java b/test/src/main/java/org/apache/accumulo/test/OrIteratorIT.java
index 54399e7ce0..05c1ba8871 100644
--- a/test/src/main/java/org/apache/accumulo/test/OrIteratorIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/OrIteratorIT.java
@@ -46,11 +46,24 @@ import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.OrIterator;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
-public class OrIteratorIT extends AccumuloClusterHarness {
+public class OrIteratorIT extends SharedMiniClusterBase {
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniCluster();
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
+  }
+
   private static final String EMPTY = "";
 
   @Override
diff --git a/test/src/main/java/org/apache/accumulo/test/SampleIT.java b/test/src/main/java/org/apache/accumulo/test/SampleIT.java
index 30908254db..928ad740f9 100644
--- a/test/src/main/java/org/apache/accumulo/test/SampleIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/SampleIT.java
@@ -66,14 +66,26 @@ import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.WrappingIterator;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.test.util.FileMetadataUtil;
 import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
 import com.google.common.collect.Iterables;
 
-public class SampleIT extends AccumuloClusterHarness {
+public class SampleIT extends SharedMiniClusterBase {
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniCluster();
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
+  }
 
   private static final Map<String,String> OPTIONS_1 =
       Map.of("hasher", "murmur3_32", "modulus", "1009");
@@ -162,8 +174,9 @@ public class SampleIT extends AccumuloClusterHarness {
         // Fence off the data to a Range that is a subset of the original data
         Range fenced = new Range(new Text(String.format("r_%06d", 2999)), false,
             new Text(String.format("r_%06d", 6000)), true);
-        FileMetadataUtil.splitFilesIntoRanges(getServerContext(), tableName, Set.of(fenced));
-        assertEquals(1, countFiles(getServerContext(), tableName));
+        FileMetadataUtil.splitFilesIntoRanges(getCluster().getServerContext(), tableName,
+            Set.of(fenced));
+        assertEquals(1, countFiles(getCluster().getServerContext(), tableName));
 
         // Build the map of expected values to be seen by filtering out keys not in the fenced range
         TreeMap<Key,Value> fenceExpected =
@@ -222,8 +235,9 @@ public class SampleIT extends AccumuloClusterHarness {
 
         // Split files into ranged files if provided
         if (!fileRanges.isEmpty()) {
-          FileMetadataUtil.splitFilesIntoRanges(getServerContext(), tableName, fileRanges);
-          assertEquals(fileRanges.size(), countFiles(getServerContext(), tableName));
+          FileMetadataUtil.splitFilesIntoRanges(getCluster().getServerContext(), tableName,
+              fileRanges);
+          assertEquals(fileRanges.size(), countFiles(getCluster().getServerContext(), tableName));
         }
 
         Scanner oScanner = newOfflineScanner(client, tableName, clone, SC1);
@@ -430,8 +444,9 @@ public class SampleIT extends AccumuloClusterHarness {
 
         // Split files into ranged files if provided
         if (!fileRanges.isEmpty()) {
-          FileMetadataUtil.splitFilesIntoRanges(getServerContext(), tableName, fileRanges);
-          assertEquals(fileRanges.size(), countFiles(getServerContext(), tableName));
+          FileMetadataUtil.splitFilesIntoRanges(getCluster().getServerContext(), tableName,
+              fileRanges);
+          assertEquals(fileRanges.size(), countFiles(getCluster().getServerContext(), tableName));
         }
 
         oScanner = newOfflineScanner(client, tableName, clone, null);
@@ -524,8 +539,9 @@ public class SampleIT extends AccumuloClusterHarness {
 
         // Split files into ranged files if provided
         if (!fileRanges.isEmpty()) {
-          FileMetadataUtil.splitFilesIntoRanges(getServerContext(), tableName, fileRanges);
-          assertEquals(fileRanges.size(), countFiles(getServerContext(), tableName));
+          FileMetadataUtil.splitFilesIntoRanges(getCluster().getServerContext(), tableName,
+              fileRanges);
+          assertEquals(fileRanges.size(), countFiles(getCluster().getServerContext(), tableName));
         }
 
         Scanner oScanner = newOfflineScanner(client, tableName, clone, SC1);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/AddSplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/AddSplitIT.java
index 8533879ddd..e97c239e95 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/AddSplitIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/AddSplitIT.java
@@ -56,13 +56,25 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.metadata.schema.TabletMergeabilityMetadata;
 import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
 import com.google.common.collect.ImmutableMap;
 
-public class AddSplitIT extends AccumuloClusterHarness {
+public class AddSplitIT extends SharedMiniClusterBase {
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniCluster();
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
+  }
 
   @Override
   protected Duration defaultTimeout() {
@@ -115,7 +127,8 @@ public class AddSplitIT extends AccumuloClusterHarness {
       verifyData(c, tableName, 2L);
 
       TableId id = TableId.of(c.tableOperations().tableIdMap().get(tableName));
-      try (TabletsMetadata tm = getServerContext().getAmple().readTablets().forTable(id).build()) {
+      try (TabletsMetadata tm =
+          getCluster().getServerContext().getAmple().readTablets().forTable(id).build()) {
         // Default for user created tablets should be mergeability set to NEVER
         tm.stream().forEach(tablet -> assertEquals(TabletMergeabilityMetadata.never(),
             tablet.getTabletMergeability()));
@@ -343,7 +356,8 @@ public class AddSplitIT extends AccumuloClusterHarness {
   // Checks that TabletMergeability in metadata matches split settings in the map
   private void verifySplits(TableId id, SortedMap<Text,TabletMergeability> splits) {
     final Set<Text> addedSplits = new HashSet<>(splits.keySet());
-    try (TabletsMetadata tm = getServerContext().getAmple().readTablets().forTable(id).build()) {
+    try (TabletsMetadata tm =
+        getCluster().getServerContext().getAmple().readTablets().forTable(id).build()) {
       tm.stream().forEach(t -> {
         var split = t.getEndRow();
         // default tablet should be set to never
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
index b8fc981441..a586bd4c7e 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
@@ -62,15 +62,27 @@ import org.apache.accumulo.core.metadata.StoredTabletFile;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
-public class CloneTestIT extends AccumuloClusterHarness {
+public class CloneTestIT extends SharedMiniClusterBase {
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniCluster();
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
+  }
 
   @Override
   protected Duration defaultTimeout() {
@@ -161,7 +173,7 @@ public class CloneTestIT extends AccumuloClusterHarness {
 
         if (cf.equals(DataFileColumnFamily.NAME)) {
           Path p = StoredTabletFile.of(cq).getPath();
-          FileSystem fs = cluster.getFileSystem();
+          FileSystem fs = getCluster().getFileSystem();
           assertTrue(fs.exists(p), "File does not exist: " + p);
         } else if (cf.equals(ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily())) {
           assertEquals(ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), cq,
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java
index e3e99f2c38..bc3176258a 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java
@@ -41,18 +41,18 @@ import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.client.admin.TabletMergeability;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.minicluster.MemoryUnit;
 import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
-public class CreateInitialSplitsIT extends AccumuloClusterHarness {
+public class CreateInitialSplitsIT extends SharedMiniClusterBase {
 
   private AccumuloClient client;
   private String tableName;
@@ -62,12 +62,19 @@ public class CreateInitialSplitsIT extends AccumuloClusterHarness {
     return Duration.ofMinutes(2);
   }
 
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration conf) {
-    cfg.setMemory(ServerType.TABLET_SERVER, 512, MemoryUnit.MEGABYTE);
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniClusterWithConfig((cfg, coreSite) -> {
+      cfg.setMemory(ServerType.TABLET_SERVER, 512, MemoryUnit.MEGABYTE);
+
+      // use raw local file system
+      coreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+    });
+  }
 
-    // use raw local file system
-    conf.set("fs.file.impl", RawLocalFileSystem.class.getName());
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
   }
 
   @BeforeEach
@@ -129,8 +136,9 @@ public class CreateInitialSplitsIT extends AccumuloClusterHarness {
     Collection<Text> createdSplits = client.tableOperations().listSplits(tableName);
     assertEquals(splits.keySet(), new TreeSet<>(createdSplits));
 
-    var tableId = getServerContext().getTableId(tableName);
-    try (var tablets = getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+    var tableId = getCluster().getServerContext().getTableId(tableName);
+    try (var tablets =
+        getCluster().getServerContext().getAmple().readTablets().forTable(tableId).build()) {
       // default tablet (null end row) should have a default TabletMergeability of never for user
       // created tablets
       assertTrue(tablets.stream()
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MergeTabletsBaseIT.java
similarity index 91%
rename from test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
rename to test/src/main/java/org/apache/accumulo/test/functional/MergeTabletsBaseIT.java
index 765e044232..4decbd1183 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeTabletsBaseIT.java
@@ -73,7 +73,7 @@ import org.apache.accumulo.core.spi.compaction.CompactionKind;
 import org.apache.accumulo.core.spi.compaction.CompactorGroupId;
 import org.apache.accumulo.core.util.FastFormat;
 import org.apache.accumulo.core.util.Merge;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.test.TestIngest.IngestParams;
 import org.apache.accumulo.test.VerifyIngest;
@@ -86,9 +86,9 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 
-public class MergeIT extends AccumuloClusterHarness {
+public abstract class MergeTabletsBaseIT extends SharedMiniClusterBase {
 
-  private static final Logger log = LoggerFactory.getLogger(MergeIT.class);
+  private static final Logger log = LoggerFactory.getLogger(MergeTabletsBaseIT.class);
 
   SortedSet<Text> splits(String[] points) {
     SortedSet<Text> result = new TreeSet<>();
@@ -121,9 +121,9 @@ public class MergeIT extends AccumuloClusterHarness {
       // add two bogus files to each tablet, creating 40K file entries
       c.tableOperations().offline(tableName, true);
       try (
-          var tablets = getServerContext().getAmple().readTablets()
-              .forTable(getServerContext().getTableId(tableName)).build();
-          var mutator = getServerContext().getAmple().mutateTablets()) {
+          var tablets = getCluster().getServerContext().getAmple().readTablets()
+              .forTable(getCluster().getServerContext().getTableId(tableName)).build();
+          var mutator = getCluster().getServerContext().getAmple().mutateTablets()) {
         int fc = 0;
         for (var tabletMeta : tablets) {
           StoredTabletFile f1 = StoredTabletFile.of(new Path(
@@ -157,8 +157,8 @@ public class MergeIT extends AccumuloClusterHarness {
       }
 
       assertEquals(20, c.tableOperations().listSplits(tableName).size());
-      try (var tablets = getServerContext().getAmple().readTablets()
-          .forTable(getServerContext().getTableId(tableName)).build()) {
+      try (var tablets = getCluster().getServerContext().getAmple().readTablets()
+          .forTable(getCluster().getServerContext().getTableId(tableName)).build()) {
         assertEquals(40002,
             tablets.stream().mapToInt(tabletMetadata -> tabletMetadata.getFiles().size()).sum());
       }
@@ -186,7 +186,7 @@ public class MergeIT extends AccumuloClusterHarness {
       c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
       assertEquals(8, c.tableOperations().listSplits(tableName).size());
       // Verify that the MERGED marker was cleared
-      verifyMergedMarkerCleared(getServerContext(),
+      verifyMergedMarkerCleared(getCluster().getServerContext(),
           TableId.of(c.tableOperations().tableIdMap().get(tableName)));
       try (Scanner s = c.createScanner(AccumuloTable.METADATA.tableName())) {
         String tid = c.tableOperations().tableIdMap().get(tableName);
@@ -244,7 +244,7 @@ public class MergeIT extends AccumuloClusterHarness {
       c.tableOperations().flush(tableName, null, null, true);
 
       log.debug("Metadata after Ingest");
-      printAndVerifyFileMetadata(getServerContext(), tableId, 1);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, 1);
 
       // Add splits so we end up with 4 tablets
       final SortedSet<Text> splits = new TreeSet<>();
@@ -255,7 +255,7 @@ public class MergeIT extends AccumuloClusterHarness {
 
       log.debug("Metadata after Split");
       verify(c, 1000, 1, tableName);
-      printAndVerifyFileMetadata(getServerContext(), tableId, 4);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, 4);
 
       // Go through and delete two blocks of rows, 101 - 200
       // and also 301 - 400 so we can test that the data doesn't come
@@ -284,12 +284,12 @@ public class MergeIT extends AccumuloClusterHarness {
       c.tableOperations().compact(tableName, new CompactionConfig().setStartRow(null)
           .setEndRow(List.copyOf(splits).get(1)).setWait(true));
       log.debug("Metadata after deleting rows 101 - 200 and 301 - 400");
-      printAndVerifyFileMetadata(getServerContext(), tableId, 4);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, 4);
 
       // Merge and print results
       c.tableOperations().merge(tableName, null, null);
       log.debug("Metadata after Merge");
-      printAndVerifyFileMetadata(getServerContext(), tableId, 4);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, 4);
 
       // Verify that the deleted rows can't be read after merge
       verify(c, 100, 1, tableName);
@@ -299,7 +299,7 @@ public class MergeIT extends AccumuloClusterHarness {
       verify(c, 600, 401, tableName);
 
       // Verify that the MERGED marker was cleared
-      verifyMergedMarkerCleared(getServerContext(), tableId);
+      verifyMergedMarkerCleared(getCluster().getServerContext(), tableId);
     }
   }
 
@@ -317,7 +317,7 @@ public class MergeIT extends AccumuloClusterHarness {
       c.tableOperations().flush(tableName, null, null, true);
 
       log.debug("Metadata after Ingest");
-      printAndVerifyFileMetadata(getServerContext(), tableId, 1);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, 1);
 
       // Add splits so we end up with 10 tablets
       final SortedSet<Text> splits = new TreeSet<>();
@@ -328,7 +328,7 @@ public class MergeIT extends AccumuloClusterHarness {
 
       log.debug("Metadata after Split");
       verify(c, 1000, 1, tableName);
-      printAndVerifyFileMetadata(getServerContext(), tableId, 10);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, 10);
 
       // Go through and delete three blocks of rows
       // 151 - 250, 451 - 550, 751 - 850
@@ -357,13 +357,13 @@ public class MergeIT extends AccumuloClusterHarness {
               .setEndRow(new Text("row_" + String.format("%010d", 850))).setWait(true));
       // Should be 16 files (10 for the original splits plus 2 extra files per deletion range across
       // tablets)
-      printAndVerifyFileMetadata(getServerContext(), tableId, 12);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, 12);
 
       c.tableOperations().merge(tableName, null, null);
       log.debug("Metadata after Merge");
-      printAndVerifyFileMetadata(getServerContext(), tableId, 12);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, 12);
       // Verify that the MERGED marker was cleared
-      verifyMergedMarkerCleared(getServerContext(), tableId);
+      verifyMergedMarkerCleared(getCluster().getServerContext(), tableId);
 
       // Verify that the deleted rows can't be read after merge
       verify(c, 150, 1, tableName);
@@ -380,7 +380,7 @@ public class MergeIT extends AccumuloClusterHarness {
       log.debug("Metadata after compact");
       // Should just be 1 file with infinite range
       Map<StoredTabletFile,DataFileValue> files =
-          printAndVerifyFileMetadata(getServerContext(), tableId, 1);
+          printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, 1);
       assertEquals(new Range(), files.keySet().stream().findFirst().orElseThrow().getRange());
       assertEquals(700, files.values().stream().findFirst().orElseThrow().getNumEntries());
     }
@@ -399,7 +399,7 @@ public class MergeIT extends AccumuloClusterHarness {
       final TableId tableId = TableId.of(c.tableOperations().tableIdMap().get(tableName));
 
       log.debug("Metadata after initial test run");
-      printAndVerifyFileMetadata(getServerContext(), tableId, -1);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, -1);
 
       // Add splits so we end up with 10 tablets
       final SortedSet<Text> splits = new TreeSet<>();
@@ -417,7 +417,7 @@ public class MergeIT extends AccumuloClusterHarness {
       verify(c, 200, 551, tableName);
       verifyNoRows(c, 100, 751, tableName);
       verify(c, 150, 851, tableName);
-      printAndVerifyFileMetadata(getServerContext(), tableId, -1);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, -1);
 
       c.tableOperations().flush(tableName, null, null, true);
 
@@ -444,9 +444,9 @@ public class MergeIT extends AccumuloClusterHarness {
       // Re-merge a second time after deleting more rows
       c.tableOperations().merge(tableName, null, null);
       log.debug("Metadata after second Merge");
-      printAndVerifyFileMetadata(getServerContext(), tableId, -1);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, -1);
       // Verify that the MERGED marker was cleared
-      verifyMergedMarkerCleared(getServerContext(), tableId);
+      verifyMergedMarkerCleared(getCluster().getServerContext(), tableId);
 
       // Verify that the deleted rows can't be read after merge
       verify(c, 150, 1, tableName);
@@ -474,7 +474,7 @@ public class MergeIT extends AccumuloClusterHarness {
       final TableId tableId = TableId.of(c.tableOperations().tableIdMap().get(tableName));
 
       log.debug("Metadata after initial no chop merge test");
-      printAndVerifyFileMetadata(getServerContext(), tableId, 4);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, 4);
 
       // Add splits so we end up with 4 tablets
       final SortedSet<Text> splits = new TreeSet<>();
@@ -490,15 +490,15 @@ public class MergeIT extends AccumuloClusterHarness {
       verify(c, 100, 201, tableName);
       verifyNoRows(c, 100, 301, tableName);
       verify(c, 600, 401, tableName);
-      printAndVerifyFileMetadata(getServerContext(), tableId, -1);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, -1);
 
       // Re-Merge and print results. This tests merging with files
       // that already have a range
       c.tableOperations().merge(tableName, null, null);
       log.debug("Metadata after Merge");
-      printAndVerifyFileMetadata(getServerContext(), tableId, -1);
+      printAndVerifyFileMetadata(getCluster().getServerContext(), tableId, -1);
       // Verify that the MERGED marker was cleared
-      verifyMergedMarkerCleared(getServerContext(), tableId);
+      verifyMergedMarkerCleared(getCluster().getServerContext(), tableId);
 
       // Verify that the deleted rows can't be read after merge
       verify(c, 100, 1, tableName);
@@ -636,16 +636,16 @@ public class MergeIT extends AccumuloClusterHarness {
     log.debug("Before Merge");
     client.tableOperations().flush(table, null, null, true);
     TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(table));
-    printAndVerifyFileMetadata(getServerContext(), tableId);
+    printAndVerifyFileMetadata(getCluster().getServerContext(), tableId);
 
     client.tableOperations().merge(table, start == null ? null : new Text(start),
         end == null ? null : new Text(end));
 
     client.tableOperations().flush(table, null, null, true);
     log.debug("After Merge");
-    printAndVerifyFileMetadata(getServerContext(), tableId);
+    printAndVerifyFileMetadata(getCluster().getServerContext(), tableId);
     // Verify that the MERGED marker was cleared
-    verifyMergedMarkerCleared(getServerContext(), tableId);
+    verifyMergedMarkerCleared(getCluster().getServerContext(), tableId);
 
     try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
 
@@ -683,10 +683,10 @@ public class MergeIT extends AccumuloClusterHarness {
       var split = new Text("m");
       c.tableOperations().addSplits(tableName, new TreeSet<>(List.of(split)));
 
-      TableId tableId = getServerContext().getTableId(tableName);
+      TableId tableId = getCluster().getServerContext().getTableId(tableName);
 
       // add metadata from compactions to tablets prior to merge
-      try (var tabletsMutator = getServerContext().getAmple().mutateTablets()) {
+      try (var tabletsMutator = getCluster().getServerContext().getAmple().mutateTablets()) {
         for (var extent : List.of(new KeyExtent(tableId, split, null),
             new KeyExtent(tableId, null, split))) {
           var tablet = tabletsMutator.mutateTablet(extent);
@@ -707,7 +707,8 @@ public class MergeIT extends AccumuloClusterHarness {
       }
 
       // ensure data is in metadata table as expected
-      try (var tablets = getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+      try (var tablets =
+          getCluster().getServerContext().getAmple().readTablets().forTable(tableId).build()) {
         for (var tablet : tablets) {
           assertFalse(tablet.getExternalCompactions().isEmpty());
         }
@@ -716,7 +717,8 @@ public class MergeIT extends AccumuloClusterHarness {
       c.tableOperations().merge(tableName, null, null);
 
       // ensure merge operation remove compaction entries
-      try (var tablets = getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+      try (var tablets =
+          getCluster().getServerContext().getAmple().readTablets().forTable(tableId).build()) {
         for (var tablet : tablets) {
           assertTrue(tablet.getExternalCompactions().isEmpty());
         }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MergeFlakyFateIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MergeTabletsFlakyFateIT.java
similarity index 68%
copy from test/src/main/java/org/apache/accumulo/test/functional/MergeFlakyFateIT.java
copy to test/src/main/java/org/apache/accumulo/test/functional/MergeTabletsFlakyFateIT.java
index e6435f1266..44c1240d82 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MergeFlakyFateIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeTabletsFlakyFateIT.java
@@ -18,19 +18,28 @@
  */
 package org.apache.accumulo.test.functional;
 
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.test.fate.FlakyFateManager;
-import org.apache.hadoop.conf.Configuration;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Run all of the merge tests using a flaky Fate impl that will run merge fate steps multiple
  * times to ensure they are idempotent.
  */
-public class MergeFlakyFateIT extends MergeIT {
+public class MergeTabletsFlakyFateIT extends MergeTabletsBaseIT {
 
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setServerClass(ServerType.MANAGER, FlakyFateManager.class);
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniClusterWithConfig((cfg, coreSite) -> {
+      cfg.setServerClass(ServerType.MANAGER, FlakyFateManager.class);
+    });
+
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
   }
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MergeFlakyFateIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MergeTabletsIT.java
similarity index 60%
rename from test/src/main/java/org/apache/accumulo/test/functional/MergeFlakyFateIT.java
rename to test/src/main/java/org/apache/accumulo/test/functional/MergeTabletsIT.java
index e6435f1266..a637c33efc 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MergeFlakyFateIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeTabletsIT.java
@@ -18,19 +18,20 @@
  */
 package org.apache.accumulo.test.functional;
 
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.fate.FlakyFateManager;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
-/**
- * Run all of the merge tests using a flaky Fate impl that will run merge fate steps multiple
- * times to ensure they are idempotent.
- */
-public class MergeFlakyFateIT extends MergeIT {
+public class MergeTabletsIT extends MergeTabletsBaseIT {
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniCluster();
+  }
 
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setServerClass(ServerType.MANAGER, FlakyFateManager.class);
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
   }
+
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java
index e805143e67..730c114838 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java
@@ -59,22 +59,28 @@ import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
-public class MetadataIT extends AccumuloClusterHarness {
+public class MetadataIT extends SharedMiniClusterBase {
 
-  @Override
-  protected Duration defaultTimeout() {
-    return Duration.ofMinutes(2);
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniClusterWithConfig(
+        (cfg, coreSite) -> cfg.getClusterServerConfiguration().setNumDefaultTabletServers(1));
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
   }
 
   @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.getClusterServerConfiguration().setNumDefaultTabletServers(1);
+  protected Duration defaultTimeout() {
+    return Duration.ofMinutes(2);
   }
 
   @Test
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
index 572b1d0b53..51bbf558b7 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
@@ -38,32 +38,40 @@ import org.apache.accumulo.core.data.Condition;
 import org.apache.accumulo.core.data.ConditionalMutation;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.miniclusterImpl.ProcessReference;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
 import com.google.common.collect.Iterators;
 
-public class SessionDurabilityIT extends ConfigurableMacBase {
+public class SessionDurabilityIT extends SharedMiniClusterBase {
 
-  @Override
-  protected Duration defaultTimeout() {
-    return Duration.ofMinutes(3);
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniClusterWithConfig((cfg, coreSite) -> {
+      cfg.getClusterServerConfiguration().setNumDefaultTabletServers(1);
+      coreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+      cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
+    });
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
   }
 
   @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.getClusterServerConfiguration().setNumDefaultTabletServers(1);
-    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
+  protected Duration defaultTimeout() {
+    return Duration.ofMinutes(3);
   }
 
   @Test
   public void nondurableTableHasDurableWrites() throws Exception {
-    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
       // table default has no durability
       c.tableOperations().create(tableName, new NewTableConfiguration()
@@ -81,7 +89,7 @@ public class SessionDurabilityIT extends ConfigurableMacBase {
 
   @Test
   public void durableTableLosesNonDurableWrites() throws Exception {
-    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
       // table default is durable writes
       c.tableOperations().create(tableName, new NewTableConfiguration()
@@ -113,7 +121,7 @@ public class SessionDurabilityIT extends ConfigurableMacBase {
 
   @Test
   public void testConditionDurability() throws Exception {
-    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
       // table default is durable writes
       c.tableOperations().create(tableName, new NewTableConfiguration()
@@ -132,7 +140,7 @@ public class SessionDurabilityIT extends ConfigurableMacBase {
 
   @Test
   public void testConditionDurability2() throws Exception {
-    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
+    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
       // table default is durable writes
       c.tableOperations().create(tableName, new NewTableConfiguration()
@@ -161,10 +169,10 @@ public class SessionDurabilityIT extends ConfigurableMacBase {
   }
 
   private void restartTServer() throws Exception {
-    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
-      cluster.killProcess(ServerType.TABLET_SERVER, proc);
+    for (ProcessReference proc : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
+      getCluster().killProcess(ServerType.TABLET_SERVER, proc);
     }
-    cluster.start();
+    getCluster().start();
   }
 
 }
