This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 4de6087f27 improves ComprehensiveIT and adjust sunny ITs (#3843)
4de6087f27 is described below

commit 4de6087f2739602128bbe22ebb9ee1c020810c7c
Author: Keith Turner <ktur...@apache.org>
AuthorDate: Mon Oct 16 11:03:55 2023 -0400

    improves ComprehensiveIT and adjust sunny ITs (#3843)
    
    This change adds a new security test to ComprehensiveIT and moves some
    tests from ReadWriteIT into ComprehensiveIT.  Each test in ReadWriteIT
    spins up a mini cluster, which takes a lot of time for really simple
    tests.  Also removes the sunny tag from shell ITs as ComprehensiveIT now
    covers most of what those tests were covering, but using public API
    instead of shell.
---
 .../org/apache/accumulo/test/ComprehensiveIT.java  | 326 ++++++++++++++++++++-
 .../accumulo/test/functional/LocalityGroupIT.java  | 218 ++++++++++++++
 .../accumulo/test/functional/ReadWriteIT.java      | 227 +-------------
 .../test/shell/ShellCreateNamespaceIT.java         |   2 -
 .../accumulo/test/shell/ShellCreateTableIT.java    |   2 -
 .../org/apache/accumulo/test/shell/ShellIT.java    |   2 -
 .../apache/accumulo/test/shell/ShellServerIT.java  |   2 -
 7 files changed, 535 insertions(+), 244 deletions(-)

diff --git a/test/src/main/java/org/apache/accumulo/test/ComprehensiveIT.java 
b/test/src/main/java/org/apache/accumulo/test/ComprehensiveIT.java
index ce2e3ae181..4ea8843e00 100644
--- a/test/src/main/java/org/apache/accumulo/test/ComprehensiveIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ComprehensiveIT.java
@@ -48,6 +48,7 @@ import java.util.function.Predicate;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.ConditionalWriter;
 import org.apache.accumulo.core.client.IteratorSetting;
@@ -64,6 +65,8 @@ import org.apache.accumulo.core.client.admin.TimeType;
 import org.apache.accumulo.core.client.rfile.RFile;
 import org.apache.accumulo.core.client.sample.Sampler;
 import org.apache.accumulo.core.client.sample.SamplerConfiguration;
+import org.apache.accumulo.core.client.security.SecurityErrorCode;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.client.summary.CountingSummarizer;
 import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
 import org.apache.accumulo.core.conf.Property;
@@ -76,12 +79,16 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.data.constraints.Constraint;
 import org.apache.accumulo.core.data.constraints.DefaultKeySizeConstraint;
+import org.apache.accumulo.core.data.constraints.VisibilityConstraint;
 import org.apache.accumulo.core.iterators.Filter;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.IteratorUtil;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.security.NamespacePermission;
+import org.apache.accumulo.core.security.SystemPermission;
+import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.test.util.Wait;
 import org.apache.hadoop.fs.FileUtil;
@@ -92,6 +99,8 @@ import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.Test;
 
+import com.google.common.collect.MoreCollectors;
+
 /**
  * The purpose of this test is to exercise a large amount of Accumulo's 
features in a single test.
  * It provides a quick test that verifies a lot of functionality is working 
for basic use. This test
@@ -417,6 +426,299 @@ public class ComprehensiveIT extends 
SharedMiniClusterBase {
     }
   }
 
+  @Test
+  public void invalidInstanceName() {
+    try (var client = Accumulo.newClient().to("fake_instance_name", 
getCluster().getZooKeepers())
+        .as(getAdminPrincipal(), getToken()).build()) {
+      assertThrows(RuntimeException.class, () -> 
client.instanceOperations().getTabletServers());
+    }
+  }
+
+  @Test
+  public void testMultiTableWrite() throws Exception {
+    String[] tables = getUniqueNames(2);
+    String table1 = tables[0];
+    String table2 = tables[1];
+
+    try (var client = Accumulo.newClient().from(getClientProps()).build()) {
+      client.tableOperations().create(table1);
+      client.tableOperations().create(table2);
+
+      try (var writer = client.createMultiTableBatchWriter()) {
+        writer.getBatchWriter(table1).addMutations(generateMutations(0, 100, 
tr -> true));
+        writer.getBatchWriter(table2).addMutations(generateMutations(100, 200, 
tr -> true));
+        writer.getBatchWriter(table1).addMutations(generateMutations(200, 300, 
tr -> true));
+        writer.getBatchWriter(table2).addMutations(generateMutations(300, 400, 
tr -> true));
+      }
+
+      TreeMap<Key,Value> expected1 = new TreeMap<>();
+      expected1.putAll(generateKeys(0, 100));
+      expected1.putAll(generateKeys(200, 300));
+
+      TreeMap<Key,Value> expected2 = new TreeMap<>();
+      expected2.putAll(generateKeys(100, 200));
+      expected2.putAll(generateKeys(300, 400));
+
+      verifyData(client, table1, AUTHORIZATIONS, expected1);
+      verifyData(client, table2, AUTHORIZATIONS, expected2);
+
+      try (var writer = client.createMultiTableBatchWriter()) {
+        writer.getBatchWriter(table1)
+            .addMutations(generateMutations(0, 100, 0x12345678, tr -> true));
+        writer.getBatchWriter(table2)
+            .addMutations(generateMutations(100, 200, 0x12345678, tr -> true));
+        writer.getBatchWriter(table1)
+            .addMutations(generateMutations(200, 300, 0x12345678, tr -> true));
+        writer.getBatchWriter(table2)
+            .addMutations(generateMutations(300, 400, 0x12345678, tr -> true));
+      }
+
+      expected1.putAll(generateKeys(0, 100, 0x12345678, tr -> true));
+      expected1.putAll(generateKeys(200, 300, 0x12345678, tr -> true));
+      expected2.putAll(generateKeys(100, 200, 0x12345678, tr -> true));
+      expected2.putAll(generateKeys(300, 400, 0x12345678, tr -> true));
+
+      verifyData(client, table1, AUTHORIZATIONS, expected1);
+      verifyData(client, table2, AUTHORIZATIONS, expected2);
+    }
+  }
+
+  @Test
+  public void testSecurity() throws Exception {
+    String[] tables = getUniqueNames(2);
+    String rootsTable = tables[0];
+    String usersTable = tables[1];
+
+    try (AccumuloClient client = 
Accumulo.newClient().from(getClientProps()).build()) {
+
+      client.tableOperations().create(rootsTable);
+      write(client, rootsTable, generateMutations(0, 100, tr -> true));
+      verifyData(client, rootsTable, AUTHORIZATIONS, generateKeys(0, 100));
+
+      var password = new PasswordToken("bestpass1234");
+      client.securityOperations().createLocalUser("user1", password);
+
+      try (var userClient =
+          Accumulo.newClient().from(getClientProps()).as("user1", 
password).build()) {
+
+        // user1 should not be able to read table
+        var ise = assertThrows(IllegalStateException.class,
+            () -> verifyData(userClient, rootsTable, AUTHORIZATIONS, 
generateKeys(0, 100)));
+        assertEquals(SecurityErrorCode.PERMISSION_DENIED,
+            ((AccumuloSecurityException) 
ise.getCause()).getSecurityErrorCode());
+
+        // user1 should not be able to grant themselves read access
+        var ase = assertThrows(AccumuloSecurityException.class, () -> 
userClient
+            .securityOperations().grantTablePermission("user1", rootsTable, 
TablePermission.READ));
+        assertEquals(SecurityErrorCode.PERMISSION_DENIED, 
ase.getSecurityErrorCode());
+
+        // grant user1 read access
+        client.securityOperations().grantTablePermission("user1", rootsTable, 
TablePermission.READ);
+
+        // security changes take time to propagate, should eventually be able 
to scan table
+        Wait.waitFor(() -> {
+          try {
+            verifyData(userClient, rootsTable, Authorizations.EMPTY,
+                generateKeys(0, 100, tr -> tr.vis.isEmpty()));
+            return true;
+          } catch (IllegalStateException e) {
+            return false;
+          }
+        });
+        verifyData(userClient, rootsTable, Authorizations.EMPTY,
+            generateKeys(0, 100, tr -> tr.vis.isEmpty()));
+
+        // should not be able to scan with authorizations the user does not 
have
+        ise = assertThrows(IllegalStateException.class,
+            () -> verifyData(userClient, rootsTable, AUTHORIZATIONS, 
generateKeys(0, 100)));
+        assertEquals(SecurityErrorCode.BAD_AUTHORIZATIONS,
+            ((AccumuloSecurityException) 
ise.getCause()).getSecurityErrorCode());
+
+        // scan w/o setting auths, should only return data w/o auths
+        try (Scanner scanner = userClient.createScanner(rootsTable)) {
+          assertEquals(generateKeys(0, 100, tr -> tr.vis.isEmpty()), 
scan(scanner));
+        }
+
+        // user should not have permission to write to table
+        var mre = assertThrows(MutationsRejectedException.class,
+            () -> write(userClient, rootsTable, generateMutations(100, 200, tr 
-> true)));
+        assertEquals(SecurityErrorCode.PERMISSION_DENIED, 
mre.getSecurityErrorCodes().values()
+            
.stream().flatMap(Set::stream).collect(MoreCollectors.onlyElement()));
+        // ensure no new data was written
+        assertEquals(new Text(row(99)), 
client.tableOperations().getMaxRow(rootsTable,
+            AUTHORIZATIONS, new Text(row(98)), true, new Text(row(110)), 
true));
+
+        client.securityOperations().grantTablePermission("user1", rootsTable,
+            TablePermission.WRITE);
+        // security changes take time to propagate, should eventually be able 
to write
+        Wait.waitFor(() -> {
+          try {
+            write(userClient, rootsTable, generateMutations(100, 200, tr -> 
true));
+            return true;
+          } catch (MutationsRejectedException e) {
+            return false;
+          }
+        });
+
+        // ensure newly written data is visible
+        verifyData(client, rootsTable, AUTHORIZATIONS, generateKeys(0, 200));
+
+        // grant user1 all authorizations and verify they can read data with them
+        client.securityOperations().changeUserAuthorizations("user1", 
AUTHORIZATIONS);
+        Wait.waitFor(() -> {
+          try {
+            verifyData(userClient, rootsTable, AUTHORIZATIONS, generateKeys(0, 
200));
+            return true;
+          } catch (IllegalStateException e) {
+            return false;
+          }
+        });
+        verifyData(userClient, rootsTable, AUTHORIZATIONS, generateKeys(0, 
200));
+        // should scan with all of the user's granted auths since no auths were 
specified
+        try (Scanner scanner = userClient.createScanner(rootsTable)) {
+          assertEquals(generateKeys(0, 200), scan(scanner));
+        }
+
+        var splits = new TreeSet<>(List.of(new Text(row(50))));
+
+        // should not have permission to alter the table
+        ase = assertThrows(AccumuloSecurityException.class,
+            () -> userClient.tableOperations().addSplits(rootsTable, splits));
+        assertEquals(SecurityErrorCode.PERMISSION_DENIED, 
ase.getSecurityErrorCode());
+        // ensure no splits were added
+        assertEquals(Set.of(), 
Set.copyOf(client.tableOperations().listSplits(rootsTable)));
+
+        client.securityOperations().grantTablePermission("user1", rootsTable,
+            TablePermission.ALTER_TABLE);
+        Wait.waitFor(() -> {
+          try {
+            userClient.tableOperations().addSplits(rootsTable, splits);
+            return true;
+          } catch (AccumuloSecurityException e) {
+            return false;
+          }
+        });
+        assertEquals(splits, 
Set.copyOf(userClient.tableOperations().listSplits(rootsTable)));
+
+        // user should not have permission to bulk import
+        ase = assertThrows(AccumuloSecurityException.class, () -> 
bulkImport(userClient, rootsTable,
+            List.of(generateKeys(200, 250, tr -> true), generateKeys(250, 300, 
tr -> true))));
+        assertEquals(SecurityErrorCode.PERMISSION_DENIED, 
ase.getSecurityErrorCode());
+        verifyData(userClient, rootsTable, AUTHORIZATIONS, generateKeys(0, 
200));
+
+        // TODO open a bug about this, had to add this permission to get bulk 
import to work
+        client.securityOperations().grantSystemPermission("user1", 
SystemPermission.SYSTEM);
+        // give permission to bulk import and verify it works
+        client.securityOperations().grantTablePermission("user1", rootsTable,
+            TablePermission.BULK_IMPORT);
+        Wait.waitFor(() -> {
+          try {
+            bulkImport(userClient, rootsTable,
+                List.of(generateKeys(200, 250, tr -> true), generateKeys(250, 
300, tr -> true)));
+            return true;
+          } catch (AccumuloSecurityException e) {
+            return false;
+          }
+        });
+        verifyData(userClient, rootsTable, AUTHORIZATIONS, generateKeys(0, 
300));
+        client.securityOperations().revokeSystemPermission("user1", 
SystemPermission.SYSTEM);
+
+        // user1 should not be able to delete the table
+        ase = assertThrows(AccumuloSecurityException.class,
+            () -> userClient.tableOperations().delete(rootsTable));
+        assertEquals(SecurityErrorCode.PERMISSION_DENIED, 
ase.getSecurityErrorCode());
+        // table should still exist and be readable
+        verifyData(userClient, rootsTable, AUTHORIZATIONS, generateKeys(0, 
300));
+
+        // remove user1 table permission and verify that eventually they can 
not read
+        client.securityOperations().revokeTablePermission("user1", rootsTable,
+            TablePermission.READ);
+        Wait.waitFor(() -> {
+          try {
+            verifyData(userClient, rootsTable, AUTHORIZATIONS, generateKeys(0, 
300));
+            return false;
+          } catch (IllegalStateException e) {
+            assertEquals(SecurityErrorCode.PERMISSION_DENIED,
+                ((AccumuloSecurityException) 
e.getCause()).getSecurityErrorCode());
+            return true;
+          }
+        });
+
+        // grant user1 permissions to drop table, delete the table and verify 
it is deleted
+        client.securityOperations().grantTablePermission("user1", rootsTable,
+            TablePermission.DROP_TABLE);
+        Wait.waitFor(() -> {
+          try {
+            userClient.tableOperations().delete(rootsTable);
+            return true;
+          } catch (AccumuloSecurityException e) {
+            return false;
+          }
+        });
+        assertThrows(TableNotFoundException.class,
+            () -> verifyData(userClient, rootsTable, AUTHORIZATIONS, 
generateKeys(0, 300)));
+        assertFalse(userClient.tableOperations().list().contains(rootsTable));
+
+        // user1 should not be able to create a table
+        ase = assertThrows(AccumuloSecurityException.class,
+            () -> userClient.tableOperations().create(usersTable));
+        assertEquals(SecurityErrorCode.PERMISSION_DENIED, 
ase.getSecurityErrorCode());
+
+        // create namespace and grant user1 access to create tables in the 
namespace
+        client.namespaceOperations().create("ns1");
+        client.securityOperations().grantNamespacePermission("user1", "ns1",
+            NamespacePermission.CREATE_TABLE);
+        var tableNS = "ns1." + usersTable;
+        Wait.waitFor(() -> {
+          try {
+            var ntc = new NewTableConfiguration()
+                
.setProperties(Map.of(Property.TABLE_CONSTRAINT_PREFIX.getKey() + "2",
+                    VisibilityConstraint.class.getName()));
+            userClient.tableOperations().create(tableNS, ntc);
+            return true;
+          } catch (AccumuloSecurityException e) {
+            return false;
+          }
+        });
+
+        // user1 should still not be able to create table in the default 
namespace
+        ase = assertThrows(AccumuloSecurityException.class,
+            () -> userClient.tableOperations().create(usersTable));
+        assertEquals(SecurityErrorCode.PERMISSION_DENIED, 
ase.getSecurityErrorCode());
+
+        // verify user1 can interact with table they created
+        write(userClient, tableNS, generateMutations(0, 100, tr -> true));
+        verifyData(userClient, tableNS, AUTHORIZATIONS, generateKeys(0, 100, 
tr -> true));
+
+        // confirm user cannot write data they cannot see because visibility 
constraint was set on
+        // table
+        mre = assertThrows(MutationsRejectedException.class, () -> {
+          try (var writer = userClient.createBatchWriter(tableNS)) {
+            Mutation m = new Mutation("invisible");
+            m.put("f", "q", new ColumnVisibility("DOG&HAMSTER"), "v1");
+            writer.addMutation(m);
+          }
+        });
+        assertEquals(VisibilityConstraint.class.getName(), 
mre.getConstraintViolationSummaries()
+            .stream().map(cvs -> 
cvs.constrainClass).collect(MoreCollectors.onlyElement()));
+
+        // confirm user can delete table
+        userClient.tableOperations().delete(tableNS);
+        assertFalse(userClient.tableOperations().list().contains(tableNS));
+        assertFalse(userClient.tableOperations().list().contains(usersTable));
+      }
+
+      // attempt to perform operations with incorrect password
+      try (var userClient = Accumulo.newClient().from(getClientProps())
+          .as("user1", new PasswordToken("bestpass123")).build()) {
+        var ase = assertThrows(AccumuloSecurityException.class,
+            () -> userClient.tableOperations().create("ns1." + usersTable));
+        assertEquals(SecurityErrorCode.BAD_CREDENTIALS, 
ase.getSecurityErrorCode());
+      }
+
+    }
+  }
+
   /*
    * This test happens to cover a lot features in the Accumulo public API like 
sampling,
    * summarizations, and some table operations. Those features do not need to 
be tested elsewhere.
@@ -732,19 +1034,23 @@ public class ComprehensiveIT extends 
SharedMiniClusterBase {
 
     getCluster().getFileSystem().mkdirs(dir);
 
-    int count = 0;
-    for (var keyValues : data) {
-      try (var output = getCluster().getFileSystem().create(new Path(dir, "f" 
+ count + ".rf"));
-          var writer = RFile.newWriter().to(output).build()) {
-        writer.startDefaultLocalityGroup();
-        for (Map.Entry<Key,Value> entry : keyValues.entrySet()) {
-          writer.append(entry.getKey(), entry.getValue());
+    try {
+      int count = 0;
+      for (var keyValues : data) {
+        try (var output = getCluster().getFileSystem().create(new Path(dir, 
"f" + count + ".rf"));
+            var writer = RFile.newWriter().to(output).build()) {
+          writer.startDefaultLocalityGroup();
+          for (Map.Entry<Key,Value> entry : keyValues.entrySet()) {
+            writer.append(entry.getKey(), entry.getValue());
+          }
         }
+        count++;
       }
-      count++;
-    }
 
-    client.tableOperations().importDirectory(dir.toString()).to(table).load();
+      
client.tableOperations().importDirectory(dir.toString()).to(table).load();
+    } finally {
+      getCluster().getFileSystem().delete(dir, true);
+    }
   }
 
   static class TestRecord {
diff --git 
a/test/src/main/java/org/apache/accumulo/test/functional/LocalityGroupIT.java 
b/test/src/main/java/org/apache/accumulo/test/functional/LocalityGroupIT.java
new file mode 100644
index 0000000000..18f95a860b
--- /dev/null
+++ 
b/test/src/main/java/org/apache/accumulo/test/functional/LocalityGroupIT.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.apache.accumulo.test.functional.ReadWriteIT.ROWS;
+import static org.apache.accumulo.test.functional.ReadWriteIT.ingest;
+import static org.apache.accumulo.test.functional.ReadWriteIT.m;
+import static org.apache.accumulo.test.functional.ReadWriteIT.t;
+import static org.apache.accumulo.test.functional.ReadWriteIT.verify;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.rfile.PrintInfo;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LocalityGroupIT extends AccumuloClusterHarness {
+
+  private static final Logger log = 
LoggerFactory.getLogger(LocalityGroupIT.class);
+
+  @Test
+  public void localityGroupPerf() throws Exception {
+    // verify that locality groups can make look-ups faster
+    try (AccumuloClient accumuloClient = 
Accumulo.newClient().from(getClientProps()).build()) {
+      final String tableName = getUniqueNames(1)[0];
+      accumuloClient.tableOperations().create(tableName);
+      accumuloClient.tableOperations().setProperty(tableName, 
"table.group.g1", "colf");
+      accumuloClient.tableOperations().setProperty(tableName, 
"table.groups.enabled", "g1");
+      ingest(accumuloClient, 2000, 1, 50, 0, tableName);
+      accumuloClient.tableOperations().compact(tableName, null, null, true, 
true);
+      try (BatchWriter bw = accumuloClient.createBatchWriter(tableName)) {
+        bw.addMutation(m("zzzzzzzzzzz", "colf2", "cq", "value"));
+      }
+      long now = System.currentTimeMillis();
+      try (Scanner scanner = accumuloClient.createScanner(tableName, 
Authorizations.EMPTY)) {
+        scanner.fetchColumnFamily(new Text("colf"));
+        scanner.forEach((k, v) -> {});
+      }
+      long diff = System.currentTimeMillis() - now;
+      now = System.currentTimeMillis();
+
+      try (Scanner scanner = accumuloClient.createScanner(tableName, 
Authorizations.EMPTY)) {
+        scanner.fetchColumnFamily(new Text("colf2"));
+        scanner.forEach((k, v) -> {});
+      }
+      long diff2 = System.currentTimeMillis() - now;
+      assertTrue(diff2 < diff);
+    }
+  }
+
+  /**
+   * create a locality group, write to it and ensure it exists in the RFiles 
that result
+   */
+  @Test
+  public void sunnyLG() throws Exception {
+    try (AccumuloClient accumuloClient = 
Accumulo.newClient().from(getClientProps()).build()) {
+      final String tableName = getUniqueNames(1)[0];
+      accumuloClient.tableOperations().create(tableName);
+      Map<String,Set<Text>> groups = new TreeMap<>();
+      groups.put("g1", Collections.singleton(t("colf")));
+      accumuloClient.tableOperations().setLocalityGroups(tableName, groups);
+      verifyLocalityGroupsInRFile(accumuloClient, tableName);
+    }
+  }
+
+  /**
+   * Pretty much identical to sunnyLG, but verifies locality groups are 
created when configured in
+   * NewTableConfiguration prior to table creation.
+   */
+  @Test
+  public void sunnyLGUsingNewTableConfiguration() throws Exception {
+    // create a locality group, write to it and ensure it exists in the RFiles 
that result
+    try (AccumuloClient accumuloClient = 
Accumulo.newClient().from(getClientProps()).build()) {
+      final String tableName = getUniqueNames(1)[0];
+      NewTableConfiguration ntc = new NewTableConfiguration();
+      Map<String,Set<Text>> groups = new HashMap<>();
+      groups.put("g1", Collections.singleton(t("colf")));
+      ntc.setLocalityGroups(groups);
+      accumuloClient.tableOperations().create(tableName, ntc);
+      verifyLocalityGroupsInRFile(accumuloClient, tableName);
+    }
+  }
+
+  private void verifyLocalityGroupsInRFile(final AccumuloClient accumuloClient,
+      final String tableName) throws Exception {
+    ingest(accumuloClient, 2000, 1, 50, 0, tableName);
+    verify(accumuloClient, 2000, 1, 50, 0, tableName);
+    accumuloClient.tableOperations().flush(tableName, null, null, true);
+    try (BatchScanner bscanner =
+        accumuloClient.createBatchScanner(MetadataTable.NAME, 
Authorizations.EMPTY, 1)) {
+      String tableId = 
accumuloClient.tableOperations().tableIdMap().get(tableName);
+      bscanner.setRanges(
+          Collections.singletonList(new Range(new Text(tableId + ";"), new 
Text(tableId + "<"))));
+      
bscanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+      boolean foundFile = false;
+      for (Map.Entry<Key,Value> entry : bscanner) {
+        foundFile = true;
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        PrintStream oldOut = System.out;
+        try (PrintStream newOut = new PrintStream(baos)) {
+          System.setOut(newOut);
+          List<String> args = new ArrayList<>();
+          
args.add(StoredTabletFile.of(entry.getKey().getColumnQualifier()).getMetadataPath());
+          args.add("--props");
+          args.add(getCluster().getAccumuloPropertiesPath());
+          if (getClusterType() == ClusterType.STANDALONE && saslEnabled()) {
+            args.add("--config");
+            StandaloneAccumuloCluster sac = (StandaloneAccumuloCluster) 
cluster;
+            String hadoopConfDir = sac.getHadoopConfDir();
+            args.add(new Path(hadoopConfDir, "core-site.xml").toString());
+            args.add(new Path(hadoopConfDir, "hdfs-site.xml").toString());
+          }
+          log.info("Invoking PrintInfo with {}", args);
+          PrintInfo.main(args.toArray(new String[args.size()]));
+          newOut.flush();
+          String stdout = baos.toString();
+          assertTrue(stdout.contains("Locality group           : g1"));
+          assertTrue(stdout.contains("families        : [colf]"));
+        } finally {
+          System.setOut(oldOut);
+        }
+      }
+      assertTrue(foundFile);
+    }
+  }
+
+  @Test
+  public void localityGroupChange() throws Exception {
+    // Make changes to locality groups and ensure nothing is lost
+    try (AccumuloClient accumuloClient = 
Accumulo.newClient().from(getClientProps()).build()) {
+      String table = getUniqueNames(1)[0];
+      TableOperations to = accumuloClient.tableOperations();
+      to.create(table);
+      String[] config = {"lg1:colf", null, "lg1:colf,xyz", 
"lg1:colf,xyz;lg2:c1,c2"};
+      int i = 0;
+      for (String cfg : config) {
+        to.setLocalityGroups(table, getGroups(cfg));
+        ingest(accumuloClient, ROWS * (i + 1), 1, 50, ROWS * i, table);
+        to.flush(table, null, null, true);
+        verify(accumuloClient, 0, 1, 50, ROWS * (i + 1), table);
+        i++;
+      }
+      to.delete(table);
+      to.create(table);
+      config = new String[] {"lg1:colf", null, "lg1:colf,xyz", 
"lg1:colf;lg2:colf",};
+      i = 1;
+      for (String cfg : config) {
+        ingest(accumuloClient, ROWS * i, 1, 50, 0, table);
+        ingest(accumuloClient, ROWS * i, 1, 50, 0, "xyz", table);
+        to.setLocalityGroups(table, getGroups(cfg));
+        to.flush(table, null, null, true);
+        verify(accumuloClient, ROWS * i, 1, 50, 0, table);
+        verify(accumuloClient, ROWS * i, 1, 50, 0, "xyz", table);
+        i++;
+      }
+    }
+  }
+
+  private Map<String,Set<Text>> getGroups(String cfg) {
+    Map<String,Set<Text>> groups = new TreeMap<>();
+    if (cfg != null) {
+      for (String group : cfg.split(";")) {
+        String[] parts = group.split(":");
+        Set<Text> cols = new HashSet<>();
+        for (String col : parts[1].split(",")) {
+          cols.add(t(col));
+        }
+        groups.put(parts[0], cols);
+      }
+    }
+    return groups;
+  }
+}
diff --git 
a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java 
b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index e601638ae0..27868ad90e 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -21,31 +21,13 @@ package org.apache.accumulo.test.functional;
 import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
 import static 
org.apache.accumulo.harness.AccumuloITBase.STANDALONE_CAPABLE_CLUSTER;
 import static org.apache.accumulo.harness.AccumuloITBase.SUNNY_DAY;
-import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
 import java.net.URL;
 import java.security.cert.X509Certificate;
 import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Optional;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import javax.net.ssl.HostnameVerifier;
@@ -61,37 +43,23 @@ import 
org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.NewTableConfiguration;
-import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.clientImpl.ClientContext;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.fate.zookeeper.ZooCache;
 import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
-import org.apache.accumulo.core.file.rfile.PrintInfo;
 import org.apache.accumulo.core.lock.ServiceLock;
 import org.apache.accumulo.core.lock.ServiceLockData;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.StoredTabletFile;
-import 
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.MonitorUtil;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.minicluster.ServerType;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.test.TestIngest.IngestParams;
-import org.apache.accumulo.test.TestMultiTableIngest;
 import org.apache.accumulo.test.VerifyIngest;
 import org.apache.accumulo.test.VerifyIngest.VerifyParams;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.Test;
@@ -120,14 +88,6 @@ public class ReadWriteIT extends AccumuloClusterHarness {
   static final int COLS = 1;
   static final String COLF = "colf";
 
-  @Test
-  public void invalidInstanceName() {
-    try (var client = Accumulo.newClient().to("fake_instance_name", 
cluster.getZooKeepers())
-        .as(getAdminPrincipal(), getAdminToken()).build()) {
-      assertThrows(RuntimeException.class, () -> 
client.instanceOperations().getTabletServers());
-    }
-  }
-
   @SuppressFBWarnings(value = {"PATH_TRAVERSAL_IN", "URLCONNECTION_SSRF_FD"},
       justification = "path provided by test; url provided by test")
   @Test
@@ -218,7 +178,7 @@ public class ReadWriteIT extends AccumuloClusterHarness {
     verify(accumuloClient, rows, cols, width, offset, COLF, tableName);
   }
 
-  private static void verify(AccumuloClient accumuloClient, int rows, int 
cols, int width,
+  public static void verify(AccumuloClient accumuloClient, int rows, int cols, 
int width,
       int offset, String colf, String tableName) throws Exception {
     VerifyParams params = new VerifyParams(accumuloClient.properties(), 
tableName, rows);
     params.rows = rows;
@@ -233,38 +193,6 @@ public class ReadWriteIT extends AccumuloClusterHarness {
     return args;
   }
 
-  @Test
-  public void multiTableTest() throws Exception {
-    // Write to multiple tables
-    final ClusterControl control = cluster.getClusterControl();
-    final String prefix = getClass().getSimpleName() + "_" + testName();
-    ExecutorService svc = Executors.newFixedThreadPool(2);
-    Future<Integer> p1 = svc.submit(() -> {
-      try {
-        return control.exec(TestMultiTableIngest.class, args("--count", 
Integer.toString(ROWS),
-            "-c", cluster.getClientPropsPath(), "--tablePrefix", prefix));
-      } catch (IOException e) {
-        log.error("Error running MultiTableIngest", e);
-        return -1;
-      }
-    });
-    Future<Integer> p2 = svc.submit(() -> {
-      try {
-        return control.exec(TestMultiTableIngest.class, args("--count", 
Integer.toString(ROWS),
-            "--readonly", "-c", cluster.getClientPropsPath(), "--tablePrefix", 
prefix));
-      } catch (IOException e) {
-        log.error("Error running MultiTableIngest", e);
-        return -1;
-      }
-    });
-    svc.shutdown();
-    while (!svc.isTerminated()) {
-      svc.awaitTermination(15, TimeUnit.SECONDS);
-    }
-    assertEquals(0, p1.get().intValue());
-    assertEquals(0, p2.get().intValue());
-  }
-
   @Test
   public void largeTest() throws Exception {
     // write a few large values
@@ -317,159 +245,6 @@ public class ReadWriteIT extends AccumuloClusterHarness {
     return m;
   }
 
-  @Test
-  public void localityGroupPerf() throws Exception {
-    // verify that locality groups can make look-ups faster
-    try (AccumuloClient accumuloClient = 
Accumulo.newClient().from(getClientProps()).build()) {
-      final String tableName = getUniqueNames(1)[0];
-      accumuloClient.tableOperations().create(tableName);
-      accumuloClient.tableOperations().setProperty(tableName, 
"table.group.g1", "colf");
-      accumuloClient.tableOperations().setProperty(tableName, 
"table.groups.enabled", "g1");
-      ingest(accumuloClient, 2000, 1, 50, 0, tableName);
-      accumuloClient.tableOperations().compact(tableName, null, null, true, 
true);
-      try (BatchWriter bw = accumuloClient.createBatchWriter(tableName)) {
-        bw.addMutation(m("zzzzzzzzzzz", "colf2", "cq", "value"));
-      }
-      long now = System.currentTimeMillis();
-      try (Scanner scanner = accumuloClient.createScanner(tableName, 
Authorizations.EMPTY)) {
-        scanner.fetchColumnFamily(new Text("colf"));
-        scanner.forEach((k, v) -> {});
-      }
-      long diff = System.currentTimeMillis() - now;
-      now = System.currentTimeMillis();
-
-      try (Scanner scanner = accumuloClient.createScanner(tableName, 
Authorizations.EMPTY)) {
-        scanner.fetchColumnFamily(new Text("colf2"));
-        scanner.forEach((k, v) -> {});
-      }
-      long diff2 = System.currentTimeMillis() - now;
-      assertTrue(diff2 < diff);
-    }
-  }
-
-  /**
-   * create a locality group, write to it and ensure it exists in the RFiles 
that result
-   */
-  @Test
-  public void sunnyLG() throws Exception {
-    try (AccumuloClient accumuloClient = 
Accumulo.newClient().from(getClientProps()).build()) {
-      final String tableName = getUniqueNames(1)[0];
-      accumuloClient.tableOperations().create(tableName);
-      Map<String,Set<Text>> groups = new TreeMap<>();
-      groups.put("g1", Collections.singleton(t("colf")));
-      accumuloClient.tableOperations().setLocalityGroups(tableName, groups);
-      verifyLocalityGroupsInRFile(accumuloClient, tableName);
-    }
-  }
-
-  /**
-   * Pretty much identical to sunnyLG, but verifies locality groups are 
created when configured in
-   * NewTableConfiguration prior to table creation.
-   */
-  @Test
-  public void sunnyLGUsingNewTableConfiguration() throws Exception {
-    // create a locality group, write to it and ensure it exists in the RFiles 
that result
-    try (AccumuloClient accumuloClient = 
Accumulo.newClient().from(getClientProps()).build()) {
-      final String tableName = getUniqueNames(1)[0];
-      NewTableConfiguration ntc = new NewTableConfiguration();
-      Map<String,Set<Text>> groups = new HashMap<>();
-      groups.put("g1", Collections.singleton(t("colf")));
-      ntc.setLocalityGroups(groups);
-      accumuloClient.tableOperations().create(tableName, ntc);
-      verifyLocalityGroupsInRFile(accumuloClient, tableName);
-    }
-  }
-
-  private void verifyLocalityGroupsInRFile(final AccumuloClient accumuloClient,
-      final String tableName) throws Exception {
-    ingest(accumuloClient, 2000, 1, 50, 0, tableName);
-    verify(accumuloClient, 2000, 1, 50, 0, tableName);
-    accumuloClient.tableOperations().flush(tableName, null, null, true);
-    try (BatchScanner bscanner =
-        accumuloClient.createBatchScanner(MetadataTable.NAME, 
Authorizations.EMPTY, 1)) {
-      String tableId = 
accumuloClient.tableOperations().tableIdMap().get(tableName);
-      bscanner.setRanges(
-          Collections.singletonList(new Range(new Text(tableId + ";"), new 
Text(tableId + "<"))));
-      bscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-      boolean foundFile = false;
-      for (Entry<Key,Value> entry : bscanner) {
-        foundFile = true;
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        PrintStream oldOut = System.out;
-        try (PrintStream newOut = new PrintStream(baos)) {
-          System.setOut(newOut);
-          List<String> args = new ArrayList<>();
-          
args.add(StoredTabletFile.of(entry.getKey().getColumnQualifier()).getMetadataPath());
-          args.add("--props");
-          args.add(getCluster().getAccumuloPropertiesPath());
-          if (getClusterType() == ClusterType.STANDALONE && saslEnabled()) {
-            args.add("--config");
-            StandaloneAccumuloCluster sac = (StandaloneAccumuloCluster) 
cluster;
-            String hadoopConfDir = sac.getHadoopConfDir();
-            args.add(new Path(hadoopConfDir, "core-site.xml").toString());
-            args.add(new Path(hadoopConfDir, "hdfs-site.xml").toString());
-          }
-          log.info("Invoking PrintInfo with {}", args);
-          PrintInfo.main(args.toArray(new String[args.size()]));
-          newOut.flush();
-          String stdout = baos.toString();
-          assertTrue(stdout.contains("Locality group           : g1"));
-          assertTrue(stdout.contains("families        : [colf]"));
-        } finally {
-          System.setOut(oldOut);
-        }
-      }
-      assertTrue(foundFile);
-    }
-  }
-
-  @Test
-  public void localityGroupChange() throws Exception {
-    // Make changes to locality groups and ensure nothing is lost
-    try (AccumuloClient accumuloClient = 
Accumulo.newClient().from(getClientProps()).build()) {
-      String table = getUniqueNames(1)[0];
-      TableOperations to = accumuloClient.tableOperations();
-      to.create(table);
-      String[] config = {"lg1:colf", null, "lg1:colf,xyz", 
"lg1:colf,xyz;lg2:c1,c2"};
-      int i = 0;
-      for (String cfg : config) {
-        to.setLocalityGroups(table, getGroups(cfg));
-        ingest(accumuloClient, ROWS * (i + 1), 1, 50, ROWS * i, table);
-        to.flush(table, null, null, true);
-        verify(accumuloClient, 0, 1, 50, ROWS * (i + 1), table);
-        i++;
-      }
-      to.delete(table);
-      to.create(table);
-      config = new String[] {"lg1:colf", null, "lg1:colf,xyz", 
"lg1:colf;lg2:colf",};
-      i = 1;
-      for (String cfg : config) {
-        ingest(accumuloClient, ROWS * i, 1, 50, 0, table);
-        ingest(accumuloClient, ROWS * i, 1, 50, 0, "xyz", table);
-        to.setLocalityGroups(table, getGroups(cfg));
-        to.flush(table, null, null, true);
-        verify(accumuloClient, ROWS * i, 1, 50, 0, table);
-        verify(accumuloClient, ROWS * i, 1, 50, 0, "xyz", table);
-        i++;
-      }
-    }
-  }
-
-  private Map<String,Set<Text>> getGroups(String cfg) {
-    Map<String,Set<Text>> groups = new TreeMap<>();
-    if (cfg != null) {
-      for (String group : cfg.split(";")) {
-        String[] parts = group.split(":");
-        Set<Text> cols = new HashSet<>();
-        for (String col : parts[1].split(",")) {
-          cols.add(t(col));
-        }
-        groups.put(parts[1], cols);
-      }
-    }
-    return groups;
-  }
-
   @SuppressFBWarnings(value = "WEAK_TRUST_MANAGER",
       justification = "trust manager is okay for testing")
   private static class TestTrustManager implements X509TrustManager {
diff --git 
a/test/src/main/java/org/apache/accumulo/test/shell/ShellCreateNamespaceIT.java 
b/test/src/main/java/org/apache/accumulo/test/shell/ShellCreateNamespaceIT.java
index 1878308afa..4dbe845d74 100644
--- 
a/test/src/main/java/org/apache/accumulo/test/shell/ShellCreateNamespaceIT.java
+++ 
b/test/src/main/java/org/apache/accumulo/test/shell/ShellCreateNamespaceIT.java
@@ -19,7 +19,6 @@
 package org.apache.accumulo.test.shell;
 
 import static org.apache.accumulo.harness.AccumuloITBase.MINI_CLUSTER_ONLY;
-import static org.apache.accumulo.harness.AccumuloITBase.SUNNY_DAY;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -40,7 +39,6 @@ import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.Test;
 
 @Tag(MINI_CLUSTER_ONLY)
-@Tag(SUNNY_DAY)
 public class ShellCreateNamespaceIT extends SharedMiniClusterBase {
 
   private MockShell ts;
diff --git 
a/test/src/main/java/org/apache/accumulo/test/shell/ShellCreateTableIT.java 
b/test/src/main/java/org/apache/accumulo/test/shell/ShellCreateTableIT.java
index d241a51b51..a72efbd94c 100644
--- a/test/src/main/java/org/apache/accumulo/test/shell/ShellCreateTableIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/shell/ShellCreateTableIT.java
@@ -23,7 +23,6 @@ import static java.nio.file.Files.newBufferedReader;
 import static java.util.Objects.requireNonNull;
 import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
 import static org.apache.accumulo.harness.AccumuloITBase.MINI_CLUSTER_ONLY;
-import static org.apache.accumulo.harness.AccumuloITBase.SUNNY_DAY;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNull;
@@ -68,7 +67,6 @@ import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.Test;
 
 @Tag(MINI_CLUSTER_ONLY)
-@Tag(SUNNY_DAY)
 public class ShellCreateTableIT extends SharedMiniClusterBase {
 
   private MockShell ts;
diff --git a/test/src/main/java/org/apache/accumulo/test/shell/ShellIT.java 
b/test/src/main/java/org/apache/accumulo/test/shell/ShellIT.java
index 93ac1459f5..b090c28dd4 100644
--- a/test/src/main/java/org/apache/accumulo/test/shell/ShellIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/shell/ShellIT.java
@@ -19,7 +19,6 @@
 package org.apache.accumulo.test.shell;
 
 import static org.apache.accumulo.harness.AccumuloITBase.MINI_CLUSTER_ONLY;
-import static org.apache.accumulo.harness.AccumuloITBase.SUNNY_DAY;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
@@ -53,7 +52,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @Tag(MINI_CLUSTER_ONLY)
-@Tag(SUNNY_DAY)
 public class ShellIT extends SharedMiniClusterBase {
 
   @Override
diff --git 
a/test/src/main/java/org/apache/accumulo/test/shell/ShellServerIT.java 
b/test/src/main/java/org/apache/accumulo/test/shell/ShellServerIT.java
index 39da70fcd4..5acc588ed6 100644
--- a/test/src/main/java/org/apache/accumulo/test/shell/ShellServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/shell/ShellServerIT.java
@@ -22,7 +22,6 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
 import static org.apache.accumulo.harness.AccumuloITBase.MINI_CLUSTER_ONLY;
-import static org.apache.accumulo.harness.AccumuloITBase.SUNNY_DAY;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -102,7 +101,6 @@ import com.google.common.collect.Iterables;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 
 @Tag(MINI_CLUSTER_ONLY)
-@Tag(SUNNY_DAY)
 public class ShellServerIT extends SharedMiniClusterBase {
 
   private static final Logger log = 
LoggerFactory.getLogger(ShellServerIT.class);


Reply via email to