This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit e228dde61c18d58cb9c4f23b818f878e66b1f092
Merge: 9dc2638d66 f3f392fad6
Author: Christopher Tubbs <[email protected]>
AuthorDate: Fri Feb 27 20:09:50 2026 -0500

    Merge branch '2.1'

 .github/workflows/maven-full-its.yaml              | 25 +++++-----
 .github/workflows/maven-on-demand.yaml             | 17 +++----
 .github/workflows/maven.yaml                       | 28 +++++------
 .github/workflows/scripts.yaml                     |  2 -
 core/pom.xml                                       |  5 --
 .../accumulo/core/security/Authorizations.java     | 31 ++++--------
 .../apache/accumulo/core/zookeeper/ZooSession.java |  2 +-
 .../core/spi/balancer/GroupBalancerTest.java       |  8 +--
 minicluster/pom.xml                                |  5 --
 pom.xml                                            | 57 ++++++++++++++--------
 server/base/pom.xml                                |  5 --
 server/compactor/pom.xml                           |  5 --
 server/gc/pom.xml                                  |  5 --
 server/manager/pom.xml                             |  5 --
 .../java/org/apache/accumulo/manager/Manager.java  |  4 +-
 .../coordinator/commit/CompactionCommitData.java   |  7 +--
 .../accumulo/manager/tableOps/TableInfo.java       |  4 +-
 .../accumulo/manager/tableOps/clone/CloneInfo.java | 12 +++--
 .../manager/tableOps/create/CreateTable.java       |  5 +-
 .../tableOps/namespace/create/CreateNamespace.java |  5 +-
 .../tableOps/namespace/create/NamespaceInfo.java   |  4 +-
 .../split/AllocateDirsAndEnsureOnline.java         |  3 +-
 .../manager/tableOps/split/UpdateTablets.java      |  6 +--
 .../manager/tableOps/tableImport/ImportTable.java  | 10 ++--
 .../tableOps/tableImport/ImportedTableInfo.java    |  4 +-
 .../manager/tableOps/split/UpdateTabletsTest.java  | 14 ++++--
 server/monitor/pom.xml                             |  5 --
 server/tserver/pom.xml                             |  5 --
 shell/pom.xml                                      |  5 --
 start/pom.xml                                      |  5 --
 test/pom.xml                                       |  5 --
 31 files changed, 130 insertions(+), 173 deletions(-)

diff --cc .github/workflows/maven.yaml
index 0af62dd019,91c98caed0..604d9a6434
--- a/.github/workflows/maven.yaml
+++ b/.github/workflows/maven.yaml
@@@ -67,10 -63,10 +65,10 @@@ jobs
      strategy:
        matrix:
          profile:
-           - {name: 'unit-tests',    javaver: 17, args: 'verify -PskipQA 
-DskipTests=false'}
-           - {name: 'qa-checks',     javaver: 17, args: 'verify javadoc:jar 
-Psec-bugs -DskipTests -Dspotbugs.timeout=3600000'}
-           - {name: 'compat',        javaver: 17, args: 'package -DskipTests 
-Dversion.hadoop=3.0.3 -Dversion.zookeeper=3.6.4'}
-           - {name: 'errorprone',    javaver: 17, args: 'verify 
-Perrorprone,skipQA'}
+           - {name: 'unit-tests',    javaver: 21, args: 'verify -PskipQA 
-DskipTests=false'}
+           - {name: 'qa-checks',     javaver: 21, args: 'verify javadoc:jar 
-Psec-bugs -DskipTests -Dspotbugs.timeout=3600000'}
 -          - {name: 'compat',        javaver: 21, args: 'package -DskipTests 
-Dversion.hadoop=3.0.3 -Dversion.zookeeper=3.5.10'}
 -          - {name: 'errorprone',    javaver: 21, args: 'verify -Derrorprone 
-PskipQA'}
++          - {name: 'compat',        javaver: 21, args: 'package -DskipTests 
-Dversion.hadoop=3.0.3 -Dversion.zookeeper=3.6.4'}
++          - {name: 'errorprone',    javaver: 21, args: 'verify 
-Perrorprone,skipQA'}
        fail-fast: false
      runs-on: ubuntu-latest
      steps:
diff --cc core/src/main/java/org/apache/accumulo/core/zookeeper/ZooSession.java
index ce66cc6a4a,0000000000..a05166f849
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooSession.java
+++ b/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooSession.java
@@@ -1,389 -1,0 +1,389 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *   https://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.accumulo.core.zookeeper;
 +
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static java.util.Objects.requireNonNull;
 +import static java.util.concurrent.TimeUnit.MILLISECONDS;
 +import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
 +
 +import java.io.IOException;
 +import java.net.UnknownHostException;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Objects;
 +import java.util.Set;
 +import java.util.UUID;
 +import java.util.concurrent.atomic.AtomicReference;
 +
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.fate.zookeeper.ZooReader;
 +import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
 +import org.apache.accumulo.core.util.AddressUtil;
 +import org.apache.accumulo.core.util.Timer;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.zookeeper.AddWatchMode;
 +import org.apache.zookeeper.AsyncCallback.StringCallback;
 +import org.apache.zookeeper.AsyncCallback.VoidCallback;
 +import org.apache.zookeeper.CreateMode;
 +import org.apache.zookeeper.KeeperException;
 +import org.apache.zookeeper.WatchedEvent;
 +import org.apache.zookeeper.Watcher;
 +import org.apache.zookeeper.Watcher.Event.KeeperState;
 +import org.apache.zookeeper.Watcher.WatcherType;
 +import org.apache.zookeeper.ZooKeeper;
 +import org.apache.zookeeper.data.ACL;
 +import org.apache.zookeeper.data.Stat;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.common.base.Preconditions;
 +
 +/**
 + * A ZooKeeper client facade that maintains a ZooKeeper delegate instance. If 
the delegate instance
 + * loses its session, it is replaced with a new instance to establish a new 
session. Any Watchers
 + * registered on a session will need to monitor for the session expired event 
triggered from the old
 + * delegate, and must be reset on the new session if you intend them to 
monitor any further events.
 + * That is no different than if you created a new ZooKeeper instance directly 
after the first one
 + * expired.
 + */
 +public class ZooSession implements AutoCloseable {
 +
 +  public static class ZKUtil {
 +    public static void deleteRecursive(ZooSession zk, final String pathRoot)
 +        throws InterruptedException, KeeperException {
 +      org.apache.zookeeper.ZKUtil.deleteRecursive(zk.verifyConnected(), 
pathRoot);
 +    }
 +
 +    public static void visitSubTreeDFS(ZooSession zk, final String path, 
boolean watch,
 +        StringCallback cb) throws KeeperException, InterruptedException {
 +      org.apache.zookeeper.ZKUtil.visitSubTreeDFS(zk.verifyConnected(), path, 
watch, cb);
 +    }
 +  }
 +
 +  private static class ZooSessionWatcher implements Watcher {
 +
 +    private final String connectionName;
 +    private final AtomicReference<KeeperState> lastState = new 
AtomicReference<>(null);
 +
 +    public ZooSessionWatcher(String connectionName) {
 +      this.connectionName = connectionName;
 +    }
 +
 +    @Override
 +    public void process(WatchedEvent event) {
 +      final var newState = event.getState();
 +      var oldState = lastState.getAndUpdate(s -> newState);
 +      if (oldState == null) {
 +        log.debug("{} state changed to {}", connectionName, newState);
 +      } else if (newState != oldState) {
 +        log.debug("{} state changed from {} to {}", connectionName, oldState, 
newState);
 +      }
 +    }
 +  }
 +
 +  private static final Logger log = LoggerFactory.getLogger(ZooSession.class);
 +
 +  private static void closeZk(ZooKeeper zk) {
 +    if (zk != null) {
 +      try {
 +        zk.close();
 +      } catch (InterruptedException e) {
 +        // ZooKeeper doesn't actually throw this; it's just there for 
backwards compatibility
 +        Thread.currentThread().interrupt();
 +      }
 +    }
 +  }
 +
 +  private static void digestAuth(ZooKeeper zoo, String secret) {
 +    zoo.addAuthInfo("digest", ("accumulo:" + 
requireNonNull(secret)).getBytes(UTF_8));
 +  }
 +
 +  private static class ZookeeperAndCounter {
 +    final ZooKeeper zookeeper;
 +    final long connectionCount;
 +
 +    private ZookeeperAndCounter(ZooKeeper zookeeper, long connectionCount) {
 +      Preconditions.checkArgument(connectionCount >= 0);
 +      this.zookeeper = Objects.requireNonNull(zookeeper);
 +      this.connectionCount = connectionCount;
 +    }
 +  }
 +
 +  private boolean closed = false;
 +  private final String connectString;
 +  private final AtomicReference<ZookeeperAndCounter> delegate = new 
AtomicReference<>();
 +  private final String instanceSecret;
 +  private final String sessionName;
 +  private final int timeout;
 +  private final ZooReaderWriter zrw;
 +
 +  /**
 +   * Construct a new ZooKeeper client, retrying indefinitely if it doesn't 
work right away. The
 +   * caller is responsible for closing instances returned from this method.
 +   *
 +   * @param clientName a convenient name for logging its connection state 
changes
 +   * @param conf a convenient carrier of ZK connection information using 
Accumulo properties
 +   */
 +  public ZooSession(String clientName, AccumuloConfiguration conf) {
 +    this(clientName, conf.get(Property.INSTANCE_ZK_HOST),
 +        (int) conf.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT),
 +        conf.get(Property.INSTANCE_SECRET));
 +  }
 +
 +  /**
 +   * Construct a new ZooKeeper client, retrying indefinitely if it doesn't 
work right away. The
 +   * caller is responsible for closing instances returned from this method.
 +   *
 +   * @param clientName a convenient name for logging its connection state 
changes
 +   * @param connectString in the form of host1:port1,host2:port2/chroot/path
 +   * @param timeout in milliseconds
 +   * @param instanceSecret instance secret (may be null)
 +   */
 +  public ZooSession(String clientName, String connectString, int timeout, 
String instanceSecret) {
 +    // information needed to construct a ZooKeeper instance and add 
authentication
 +    this.connectString = connectString;
 +    this.timeout = timeout;
 +    this.instanceSecret = instanceSecret;
 +
 +    // information for logging which instance of ZooSession this is
 +    this.sessionName =
 +        String.format("%s[%s_%s]", getClass().getSimpleName(), clientName, 
UUID.randomUUID());
 +    this.zrw = new ZooReaderWriter(this);
 +  }
 +
 +  private ZooKeeper verifyConnected() {
 +    var zkac = delegate.get();
 +    if (zkac != null && zkac.zookeeper.getState().isAlive()) {
 +      return zkac.zookeeper;
 +    } else {
 +      return reconnect().zookeeper;
 +    }
 +  }
 +
 +  private synchronized ZookeeperAndCounter reconnect() {
 +    if (closed) {
 +      throw new IllegalStateException(sessionName + " was closed");
 +    }
 +
 +    ZookeeperAndCounter zkac;
 +    if ((zkac = delegate.get()) != null && 
zkac.zookeeper.getState().isAlive()) {
 +      return zkac;
 +    }
 +
 +    final long nextCounter = (zkac == null ? 0 : zkac.connectionCount) + 1;
 +    zkac = null;
 +
 +    var reconnectName = String.format("%s#%s", sessionName, nextCounter);
 +    log.debug("{} (re-)connecting to {} with timeout {}{}", reconnectName, 
connectString, timeout,
 +        instanceSecret == null ? "" : " with auth");
 +    final int TIME_BETWEEN_CONNECT_CHECKS_MS = 100;
-     int connectTimeWait = Math.min(10_000, timeout);
++    long connectTimeWait = Math.min(10_000, timeout);
 +    boolean tryAgain = true;
 +    long sleepTime = 100;
 +
 +    Timer timer = Timer.startNew();
 +
 +    ZooKeeper zk = null;
 +
 +    while (tryAgain) {
 +      try {
 +        zk = new ZooKeeper(connectString, timeout, new 
ZooSessionWatcher(reconnectName));
 +        // it may take some time to get connected to zookeeper if some of the 
servers are down
 +        for (int i = 0; i < connectTimeWait / TIME_BETWEEN_CONNECT_CHECKS_MS 
&& tryAgain; i++) {
 +          if (zk.getState().isConnected()) {
 +            if (instanceSecret != null) {
 +              digestAuth(zk, instanceSecret);
 +            }
 +            tryAgain = false;
 +          } else {
 +            UtilWaitThread.sleep(TIME_BETWEEN_CONNECT_CHECKS_MS);
 +          }
 +        }
 +
 +      } catch (IOException e) {
 +        if (e instanceof UnknownHostException) {
 +          /*
 +           * Make sure we wait at least as long as the JVM TTL for negative 
DNS responses
 +           */
 +          int ttl = 
AddressUtil.getAddressCacheNegativeTtl((UnknownHostException) e);
 +          sleepTime = Math.max(sleepTime, (ttl + 1) * 1000L);
 +        }
 +        log.warn("Connection to zooKeeper failed, will try again in "
 +            + String.format("%.2f secs", sleepTime / 1000.0), e);
 +      } finally {
 +        if (tryAgain && zk != null) {
 +          closeZk(zk);
 +          zk = null;
 +        }
 +      }
 +
 +      long duration = timer.elapsed(MILLISECONDS);
 +
 +      if (duration > 2L * timeout) {
 +        throw new IllegalStateException("Failed to connect to zookeeper (" + 
connectString
 +            + ") within 2x zookeeper timeout period " + timeout);
 +      }
 +
 +      if (tryAgain) {
 +        if (2L * timeout < duration + sleepTime + connectTimeWait) {
 +          sleepTime = 2L * timeout - duration - connectTimeWait;
 +        }
 +        if (sleepTime < 0) {
 +          connectTimeWait -= sleepTime;
 +          sleepTime = 0;
 +        }
 +        UtilWaitThread.sleep(sleepTime);
 +        if (sleepTime < 10000) {
 +          sleepTime = sleepTime + (long) (sleepTime * 
RANDOM.get().nextDouble());
 +        }
 +      }
 +    }
 +
 +    zkac = new ZookeeperAndCounter(zk, nextCounter);
 +    delegate.set(zkac);
 +    return zkac;
 +  }
 +
 +  public void addAuthInfo(String scheme, byte[] auth) {
 +    verifyConnected().addAuthInfo(scheme, auth);
 +  }
 +
 +  public String create(final String path, byte[] data, List<ACL> acl, 
CreateMode createMode)
 +      throws KeeperException, InterruptedException {
 +    return verifyConnected().create(path, data, acl, createMode);
 +  }
 +
 +  public void delete(final String path, int version) throws 
InterruptedException, KeeperException {
 +    verifyConnected().delete(path, version);
 +  }
 +
 +  public Stat exists(final String path, Watcher watcher)
 +      throws KeeperException, InterruptedException {
 +    return verifyConnected().exists(path, watcher);
 +  }
 +
 +  public List<ACL> getACL(final String path, Stat stat)
 +      throws KeeperException, InterruptedException {
 +    return verifyConnected().getACL(path, stat);
 +  }
 +
 +  public List<String> getChildren(final String path, Watcher watcher)
 +      throws KeeperException, InterruptedException {
 +    return verifyConnected().getChildren(path, watcher);
 +  }
 +
 +  public byte[] getData(final String path, Watcher watcher, Stat stat)
 +      throws KeeperException, InterruptedException {
 +    return verifyConnected().getData(path, watcher, stat);
 +  }
 +
 +  public long getSessionId() {
 +    return verifyConnected().getSessionId();
 +  }
 +
 +  public int getSessionTimeout() {
 +    return verifyConnected().getSessionTimeout();
 +  }
 +
 +  public void removeWatches(String path, Watcher watcher, WatcherType 
watcherType, boolean local)
 +      throws InterruptedException, KeeperException {
 +    verifyConnected().removeWatches(path, watcher, watcherType, local);
 +  }
 +
 +  public Stat setData(final String path, byte[] data, int version)
 +      throws KeeperException, InterruptedException {
 +    return verifyConnected().setData(path, data, version);
 +  }
 +
 +  public void sync(final String path, VoidCallback cb, Object ctx) {
 +    verifyConnected().sync(path, cb, ctx);
 +  }
 +
 +  public long addPersistentRecursiveWatchers(Set<String> paths, Watcher 
watcher)
 +      throws KeeperException, InterruptedException {
 +    ZookeeperAndCounter localZkac = reconnect();
 +
 +    Set<String> remainingPaths = new HashSet<>(paths);
 +    while (true) {
 +      try {
 +        Iterator<String> remainingPathsIter = remainingPaths.iterator();
 +        while (remainingPathsIter.hasNext()) {
 +          String path = remainingPathsIter.next();
 +          localZkac.zookeeper.addWatch(path, watcher, 
AddWatchMode.PERSISTENT_RECURSIVE);
 +          remainingPathsIter.remove();
 +        }
 +
 +        return localZkac.connectionCount;
 +      } catch (KeeperException e) {
 +        log.error("Error setting persistent watcher in ZooKeeper, 
retrying...", e);
 +        ZookeeperAndCounter currentZkac = reconnect();
 +        // If ZooKeeper object is different, then reset the localZK variable
 +        // and start over.
 +        if (localZkac != currentZkac) {
 +          localZkac = currentZkac;
 +          remainingPaths = new HashSet<>(paths);
 +        }
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public synchronized void close() {
 +    if (!closed) {
 +      var zkac = delegate.getAndSet(null);
 +      if (zkac != null) {
 +        closeZk(zkac.zookeeper);
 +      }
 +      closed = true;
 +    }
 +  }
 +
 +  public void addAccumuloDigestAuth(String auth) {
 +    digestAuth(verifyConnected(), auth);
 +  }
 +
 +  public ZooReader asReader() {
 +    return zrw;
 +  }
 +
 +  public ZooReaderWriter asReaderWriter() {
 +    return zrw;
 +  }
 +
 +  /**
 +   * Connection counter is incremented internal when ZooSession creates a new 
ZooKeeper client.
 +   * Clients of ZooSession can use this counter as a way to determine if a 
new ZooKeeper connection
 +   * has been created.
 +   *
 +   * @return connection counter
 +   */
 +  public long getConnectionCounter() {
 +    var zkac = delegate.get();
 +    if (delegate.get() == null) {
 +      // If null then this is closed or in the process of opening. If closed 
reconnect will throw an
 +      // exception. If in the process of opening, then reconnect will wait 
for that to finish.
 +      return reconnect().connectionCount;
 +    }
 +    return zkac.connectionCount;
 +  }
 +
 +}
diff --cc pom.xml
index b167c46093,462f665396..a56a9252a7
--- a/pom.xml
+++ b/pom.xml
@@@ -142,9 -143,9 +142,9 @@@ under the License
      <failsafe.forkCount>1</failsafe.forkCount>
      <failsafe.groups />
      <failsafe.reuseForks>false</failsafe.reuseForks>
 -    <javaVersion>11</javaVersion>
++    <javaVersion>17</javaVersion>
      <!-- prevent introduction of new compiler warnings -->
      <maven.compiler.failOnWarning>true</maven.compiler.failOnWarning>
-     <maven.compiler.target>17</maven.compiler.target>
      <maven.javadoc.failOnWarnings>true</maven.javadoc.failOnWarnings>
      <maven.site.deploy.skip>true</maven.site.deploy.skip>
      <maven.site.skip>true</maven.site.skip>
@@@ -739,10 -779,8 +746,7 @@@
          <plugin>
            <groupId>org.gaul</groupId>
            <artifactId>modernizer-maven-plugin</artifactId>
 -          <!-- newer versions suggest changes not worth the effort in 2.1 -->
 -          <version>2.9.0</version>
 +          <version>3.2.0</version>
-           <configuration>
-             <javaVersion>${maven.compiler.target}</javaVersion>
-           </configuration>
          </plugin>
          <plugin>
            <groupId>com.github.spotbugs</groupId>
@@@ -983,10 -1025,14 +990,10 @@@
                  
<undeclared>org.apache.hadoop:hadoop-mapreduce-client-core:jar:*</undeclared>
                  <undeclared>org.apache.hadoop:hadoop-auth:jar:*</undeclared>
                  
<undeclared>org.apache.httpcomponents:httpcore:jar:*</undeclared>
 -                <undeclared>org.powermock:powermock-core:jar:*</undeclared>
 -                <undeclared>org.powermock:powermock-reflect:jar:*</undeclared>
 -                <!-- This should be removed upon completion of migrating 
junit 4 to 5 -->
 -                <undeclared>junit:junit:jar:*</undeclared>
                </ignoredUsedUndeclaredDependencies>
-               <ignoredUnusedDeclaredDependencies>
-                 <!-- auto-service isn't used in every module -->
-                 <unused>com.google.auto.service:auto-service:jar:*</unused>
+               <ignoredUnusedDeclaredDependencies combine.children="append">
+                 <!-- auto-service annotations may or may not be used in each 
module -->
+                 
<unused>com.google.auto.service:auto-service-annotations:jar:*</unused>
                  <!-- unused/declared implementation jars or parent jars that 
bring in children -->
                  <unused>org.apache.hadoop:hadoop-client:jar:*</unused>
                  <unused>org.apache.hadoop:hadoop-client-runtime:jar:*</unused>
diff --cc server/compactor/pom.xml
index dea08a053e,25d3fde5a6..8997978c06
--- a/server/compactor/pom.xml
+++ b/server/compactor/pom.xml
@@@ -29,15 -29,11 +29,10 @@@
    </parent>
    <artifactId>accumulo-compactor</artifactId>
    <name>Apache Accumulo Compactor</name>
 +  <properties>
 +    
<spotbugs.omitVisitors>SharedVariableAtomicityDetector,ConstructorThrow</spotbugs.omitVisitors>
 +  </properties>
    <dependencies>
--    <dependency>
-       <groupId>com.google.auto.service</groupId>
-       <artifactId>auto-service</artifactId>
-       <optional>true</optional>
 -      <groupId>com.beust</groupId>
 -      <artifactId>jcommander</artifactId>
--    </dependency>
      <dependency>
        <groupId>com.google.guava</groupId>
        <artifactId>guava</artifactId>
diff --cc server/gc/pom.xml
index d8eeae5e58,b2861d706f..66ea1f3848
--- a/server/gc/pom.xml
+++ b/server/gc/pom.xml
@@@ -30,15 -30,7 +30,10 @@@
    <artifactId>accumulo-gc</artifactId>
    <name>Apache Accumulo GC Server</name>
    <description>The garbage collecting server for Apache Accumulo to clean up 
unused files.</description>
 +  <properties>
 +    
<spotbugs.omitVisitors>SharedVariableAtomicityDetector,ConstructorThrow</spotbugs.omitVisitors>
 +  </properties>
    <dependencies>
-     <dependency>
-       <groupId>com.google.auto.service</groupId>
-       <artifactId>auto-service</artifactId>
-       <optional>true</optional>
-     </dependency>
      <dependency>
        <groupId>com.google.guava</groupId>
        <artifactId>guava</artifactId>
diff --cc server/manager/pom.xml
index ff4ae267ba,b2280f88f4..b29ad745aa
--- a/server/manager/pom.xml
+++ b/server/manager/pom.xml
@@@ -30,19 -30,11 +30,14 @@@
    <artifactId>accumulo-manager</artifactId>
    <name>Apache Accumulo Manager Server</name>
    <description>The manager server for Apache Accumulo for load balancing and 
other system-wide operations.</description>
 +  <properties>
 +    
<spotbugs.omitVisitors>SharedVariableAtomicityDetector,ConstructorThrow</spotbugs.omitVisitors>
 +  </properties>
    <dependencies>
      <dependency>
 -      <groupId>com.google.code.gson</groupId>
 -      <artifactId>gson</artifactId>
 +      <groupId>com.github.ben-manes.caffeine</groupId>
 +      <artifactId>caffeine</artifactId>
      </dependency>
-     <dependency>
-       <groupId>com.google.auto.service</groupId>
-       <artifactId>auto-service</artifactId>
-       <optional>true</optional>
-     </dependency>
      <dependency>
        <groupId>com.google.guava</groupId>
        <artifactId>guava</artifactId>
diff --cc server/manager/src/main/java/org/apache/accumulo/manager/Manager.java
index 4384e7644a,6e758d6ebc..3316578756
--- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java
+++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java
@@@ -774,10 -907,10 +774,10 @@@ public class Manager extends AbstractSe
          Span span = TraceUtil.startSpan(this.getClass(), "run::updateStatus");
          try (Scope scope = span.makeCurrent()) {
            wait = updateStatus();
 -          eventListener.waitForEvents(wait);
 +          eventTracker.waitForEvents(wait);
-         } catch (Exception t) {
+         } catch (RuntimeException t) {
            TraceUtil.setException(span, t, false);
 -          log.error("Error balancing tablets, will wait for {} (seconds) and 
then retry ",
 +          log.error("Error updating status tablets, will wait for {} 
(seconds) and then retry ",
                WAIT_BETWEEN_ERRORS / ONE_SECOND, t);
            sleepUninterruptibly(WAIT_BETWEEN_ERRORS, MILLISECONDS);
          } finally {
diff --cc 
server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/commit/CompactionCommitData.java
index 0e7587d633,0000000000..6a2270f4fc
mode 100644,000000..100644
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/commit/CompactionCommitData.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/commit/CompactionCommitData.java
@@@ -1,61 -1,0 +1,62 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *   https://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.accumulo.manager.compaction.coordinator.commit;
 +
 +import java.io.Serializable;
++import java.util.HashSet;
 +import java.util.Set;
 +import java.util.stream.Collectors;
 +
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
 +import org.apache.accumulo.core.metadata.StoredTabletFile;
 +import org.apache.accumulo.core.metadata.schema.CompactionMetadata;
 +import org.apache.accumulo.core.metadata.schema.ExternalCompactionId;
 +import org.apache.accumulo.core.spi.compaction.CompactionKind;
 +import org.apache.accumulo.core.tabletserver.thrift.TCompactionStats;
 +
 +public class CompactionCommitData implements Serializable {
 +  private static final long serialVersionUID = 1L;
 +  final CompactionKind kind;
-   final Set<String> inputPaths;
++  final HashSet<String> inputPaths; // type must be serializable
 +  final String outputTmpPath;
 +  final String ecid;
 +  final TKeyExtent textent;
 +  final TCompactionStats stats;
 +
 +  public CompactionCommitData(ExternalCompactionId ecid, KeyExtent extent, 
CompactionMetadata ecm,
 +      TCompactionStats stats) {
 +    this.ecid = ecid.canonical();
 +    this.textent = extent.toThrift();
 +    this.kind = ecm.getKind();
-     this.inputPaths =
-         
ecm.getJobFiles().stream().map(StoredTabletFile::getMetadata).collect(Collectors.toSet());
++    this.inputPaths = 
ecm.getJobFiles().stream().map(StoredTabletFile::getMetadata)
++        .collect(Collectors.toCollection(HashSet::new));
 +    this.outputTmpPath = ecm.getCompactTmpName().getNormalizedPathStr();
 +    this.stats = stats;
 +  }
 +
 +  public TableId getTableId() {
 +    return KeyExtent.fromThrift(textent).tableId();
 +  }
 +
 +  public Set<StoredTabletFile> getJobFiles() {
 +    return 
inputPaths.stream().map(StoredTabletFile::of).collect(Collectors.toSet());
 +  }
 +}
diff --cc 
server/manager/src/main/java/org/apache/accumulo/manager/tableOps/TableInfo.java
index f283d48ea1,c23bd83158..006c4911a5
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/TableInfo.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/TableInfo.java
@@@ -19,11 -19,9 +19,11 @@@
  package org.apache.accumulo.manager.tableOps;
  
  import java.io.Serializable;
- import java.util.Map;
+ import java.util.HashMap;
  
  import org.apache.accumulo.core.client.admin.InitialTableState;
 +import org.apache.accumulo.core.client.admin.TabletAvailability;
 +import org.apache.accumulo.core.client.admin.TabletMergeability;
  import org.apache.accumulo.core.client.admin.TimeType;
  import org.apache.accumulo.core.data.NamespaceId;
  import org.apache.accumulo.core.data.TableId;
@@@ -48,20 -46,8 +48,20 @@@ public class TableInfo implements Seria
    private String splitFile;
    private String splitDirsFile;
  
-   public Map<String,String> props;
+   public HashMap<String,String> props; // type must be serializable
  
 +  private TabletAvailability initialTabletAvailability;
 +
 +  private TabletMergeability defaultTabletMergeability;
 +
 +  public TabletAvailability getInitialTabletAvailability() {
 +    return initialTabletAvailability;
 +  }
 +
 +  public void setInitialTabletAvailability(TabletAvailability 
initialTabletAvailability) {
 +    this.initialTabletAvailability = initialTabletAvailability;
 +  }
 +
    public String getTableName() {
      return tableName;
    }
diff --cc 
server/manager/src/main/java/org/apache/accumulo/manager/tableOps/clone/CloneInfo.java
index 5403cffc55,87d4c9ea79..9e69c95631
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/clone/CloneInfo.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/clone/CloneInfo.java
@@@ -32,10 -36,11 +36,10 @@@ class CloneInfo implements Serializabl
    private final TableId srcTableId;
    private final String tableName;
    private TableId tableId;
 -  // TODO: Make final in 3.1
 -  private NamespaceId namespaceId;
 +  private final NamespaceId namespaceId;
    private final NamespaceId srcNamespaceId;
-   private final Map<String,String> propertiesToSet;
-   private final Set<String> propertiesToExclude;
+   private final HashMap<String,String> propertiesToSet; // type must be 
serializable
+   private final HashSet<String> propertiesToExclude; // type must be 
serializable
    private final boolean keepOffline;
    private final String user;
  
diff --cc 
server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/AllocateDirsAndEnsureOnline.java
index 171106399e,0000000000..e6bba8e6e3
mode 100644,000000..100644
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/AllocateDirsAndEnsureOnline.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/AllocateDirsAndEnsureOnline.java
@@@ -1,109 -1,0 +1,108 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *   https://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.accumulo.manager.tableOps.split;
 +
 +import java.util.ArrayList;
- import java.util.List;
 +
 +import 
org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException;
 +import org.apache.accumulo.core.clientImpl.thrift.TableOperation;
 +import org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType;
 +import org.apache.accumulo.core.fate.FateId;
 +import org.apache.accumulo.core.fate.Repo;
 +import org.apache.accumulo.core.manager.state.tables.TableState;
 +import org.apache.accumulo.core.metadata.schema.Ample;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletOperationId;
 +import org.apache.accumulo.core.metadata.schema.TabletOperationType;
 +import org.apache.accumulo.manager.tableOps.AbstractFateOperation;
 +import org.apache.accumulo.manager.tableOps.FateEnv;
 +import org.apache.accumulo.server.tablets.TabletNameGenerator;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +public class AllocateDirsAndEnsureOnline extends AbstractFateOperation {
 +
 +  private static final long serialVersionUID = 1L;
 +  private static final Logger log = LoggerFactory.getLogger(PreSplit.class);
 +
 +  private final SplitInfo splitInfo;
 +
 +  public AllocateDirsAndEnsureOnline(SplitInfo splitInfo) {
 +    this.splitInfo = splitInfo;
 +  }
 +
 +  @Override
 +  public Repo<FateEnv> call(FateId fateId, FateEnv env) throws Exception {
 +    // This check of table state is done after setting the operation id to 
avoid a race condition
 +    // with the client code that waits for a table to go offline. That client 
code sets the table
 +    // state and then scans the metadata table looking for split operations 
ids. If split checks
 +    // tables state before setting the opid then there is race condition with 
the client. Setting it
 +    // after ensures that in the case when the client does not see any split 
op id in the metadata
 +    // table that it knows that any splits starting after that point in time 
will not complete. This
 +    // order is needed because the split fate operation does not acquire a 
table lock in zookeeper.
 +    if (env.getContext().getTableState(splitInfo.getOriginal().tableId()) != 
TableState.ONLINE) {
 +
 +      var opid = TabletOperationId.from(TabletOperationType.SPLITTING, 
fateId);
 +
 +      // attempt to delete the operation id
 +      try (var tabletsMutator = 
env.getContext().getAmple().conditionallyMutateTablets()) {
 +
 +        Ample.RejectionHandler rejectionHandler = new 
Ample.RejectionHandler() {
 +
 +          @Override
 +          public boolean callWhenTabletDoesNotExists() {
 +            return true;
 +          }
 +
 +          @Override
 +          public boolean test(TabletMetadata tabletMetadata) {
 +            // if the tablet no longer exists or our operation id is not set 
then consider a success
 +            return tabletMetadata == null || 
!opid.equals(tabletMetadata.getOperationId());
 +          }
 +        };
 +
 +        
tabletsMutator.mutateTablet(splitInfo.getOriginal()).requireOperation(opid)
 +            
.requireAbsentLocation().requireAbsentLogs().deleteOperation().submit(rejectionHandler);
 +
 +        var result = tabletsMutator.process().get(splitInfo.getOriginal());
 +
 +        if (result.getStatus() != Ample.ConditionalResult.Status.ACCEPTED) {
 +          throw new IllegalStateException(
 +              "Failed to delete operation id " + splitInfo.getOriginal());
 +        }
 +      }
 +
 +      throw new AcceptableThriftTableOperationException(
 +          splitInfo.getOriginal().tableId().canonical(), null, 
TableOperation.SPLIT,
 +          TableOperationExceptionType.OFFLINE,
 +          "Unable to split tablet because the table is offline");
 +    } else {
 +      // Create the dir name here for the next step. If the next step fails 
it will always have the
 +      // same dir name each time it runs again making it idempotent.
-       List<String> dirs = new ArrayList<>();
++      var dirs = new ArrayList<String>();
 +
 +      splitInfo.getSplits().keySet().forEach(split -> {
 +        String dirName = 
TabletNameGenerator.createTabletDirectoryName(env.getContext(), split);
 +        dirs.add(dirName);
 +        log.trace("{} allocated dir name {}", fateId, dirName);
 +      });
 +      return new UpdateTablets(splitInfo, dirs);
 +    }
 +  }
 +}
diff --cc 
server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java
index ec34f80700,0000000000..c12386a372
mode 100644,000000..100644
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java
@@@ -1,353 -1,0 +1,353 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *   https://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.accumulo.manager.tableOps.split;
 +
++import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.Iterator;
- import java.util.List;
 +import java.util.Map;
 +import java.util.NavigableMap;
 +import java.util.Objects;
 +import java.util.SortedMap;
 +import java.util.TreeMap;
 +
 +import org.apache.accumulo.core.client.admin.TabletMergeability;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.fate.FateId;
 +import org.apache.accumulo.core.fate.Repo;
 +import org.apache.accumulo.core.file.FileSKVIterator;
 +import org.apache.accumulo.core.metadata.StoredTabletFile;
 +import org.apache.accumulo.core.metadata.schema.Ample;
 +import 
org.apache.accumulo.core.metadata.schema.Ample.ConditionalResult.Status;
 +import org.apache.accumulo.core.metadata.schema.DataFileValue;
 +import org.apache.accumulo.core.metadata.schema.TabletMergeabilityMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletOperationId;
 +import org.apache.accumulo.core.metadata.schema.TabletOperationType;
 +import org.apache.accumulo.core.util.RowRangeUtil;
 +import org.apache.accumulo.manager.tableOps.AbstractFateOperation;
 +import org.apache.accumulo.manager.tableOps.FateEnv;
 +import org.apache.accumulo.server.ServerContext;
 +import org.apache.hadoop.io.Text;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.common.base.Function;
 +import com.google.common.base.Preconditions;
 +import com.google.common.base.Predicate;
 +
/**
 * Fate step of a tablet split that adds the new child tablets to the metadata table, partitions
 * the original tablet's files among them, and finally mutates the original tablet into the last
 * child. Runs after the SPLITTING operation id has been set on the tablet; followed by
 * {@link DeleteOperationIds}.
 */
public class UpdateTablets extends AbstractFateOperation {
  private static final Logger log = LoggerFactory.getLogger(UpdateTablets.class);
  private static final long serialVersionUID = 1L;
  private final SplitInfo splitInfo;
  // Declared as ArrayList (not List) so the field's concrete type is known serializable
  private final ArrayList<String> dirNames;

  public UpdateTablets(SplitInfo splitInfo, ArrayList<String> dirNames) {
    this.splitInfo = splitInfo;
    this.dirNames = dirNames;
  }

  @Override
  public Repo<FateEnv> call(FateId fateId, FateEnv env) throws Exception {
    TabletMetadata tabletMetadata = env.getContext().getAmple().readTablet(splitInfo.getOriginal());

    var opid = TabletOperationId.from(TabletOperationType.SPLITTING, fateId);

    if (tabletMetadata == null) {
      // check to see if this operation has already succeeded.
      TabletMetadata newTabletMetadata =
          env.getContext().getAmple().readTablet(splitInfo.getTablets().navigableKeySet().last());

      if (newTabletMetadata != null && opid.equals(newTabletMetadata.getOperationId())) {
        // have already created the new tablet and failed before we could return the next step, so
        // lets go ahead and return the next step.
        log.trace(
            "{} creating new tablet was rejected because it existed, operation probably failed before.",
            fateId);
        return new DeleteOperationIds(splitInfo);
      } else {
        throw new IllegalStateException("Tablet is in an unexpected condition "
            + splitInfo.getOriginal() + " " + (newTabletMetadata == null) + " "
            + (newTabletMetadata == null ? null : newTabletMetadata.getOperationId()));
      }
    }

    // Sanity checks: the tablet must still carry our operation id and must be quiescent (no
    // location, no walogs, no merge/clone markers) before it can be rewritten.
    Preconditions.checkState(opid.equals(tabletMetadata.getOperationId()),
        "Tablet %s does not have expected operation id %s it has %s", splitInfo.getOriginal(), opid,
        tabletMetadata.getOperationId());

    Preconditions.checkState(tabletMetadata.getLocation() == null,
        "Tablet %s unexpectedly has a location %s", splitInfo.getOriginal(),
        tabletMetadata.getLocation());

    Preconditions.checkState(tabletMetadata.getLogs().isEmpty(),
        "Tablet unexpectedly had walogs %s %s %s", fateId, tabletMetadata.getLogs(),
        tabletMetadata.getExtent());

    Preconditions.checkState(!tabletMetadata.hasMerged(),
        "Tablet unexpectedly has a merged marker %s %s", fateId, tabletMetadata.getExtent());

    Preconditions.checkState(tabletMetadata.getCloned() == null,
        "Tablet unexpectedly has a cloned marker %s %s %s", fateId, tabletMetadata.getCloned(),
        tabletMetadata.getExtent());

    var newTablets = splitInfo.getTablets();

    var newTabletsFiles = getNewTabletFiles(fateId, newTablets, tabletMetadata,
        file -> env.getFileRangeCache().getCachedFileInfo(splitInfo.getOriginal().tableId(), file));

    addNewTablets(fateId, env, tabletMetadata, opid, newTablets, newTabletsFiles);

    // Only update the original tablet after successfully creating the new tablets, this is
    // important for failure cases where this operation partially runs and then runs again.
    updateExistingTablet(fateId, env.getContext(), tabletMetadata, opid, newTablets,
        newTabletsFiles);

    return new DeleteOperationIds(splitInfo);
  }

  /**
   * Determine which files from the original tablet go to each new tablet being created by the
   * split.
   */
  static Map<KeyExtent,Map<StoredTabletFile,DataFileValue>> getNewTabletFiles(FateId fateId,
      SortedMap<KeyExtent,TabletMergeability> newTablets, TabletMetadata tabletMetadata,
      Function<StoredTabletFile,FileSKVIterator.FileRange> fileInfoProvider) {

    Map<KeyExtent,Map<StoredTabletFile,DataFileValue>> tabletsFiles = new TreeMap<>();

    newTablets.keySet().forEach(extent -> tabletsFiles.put(extent, new HashMap<>()));

    // determine which files overlap which tablets and their estimated sizes
    tabletMetadata.getFilesMap().forEach((file, dataFileValue) -> {
      FileSKVIterator.FileRange fileRange = fileInfoProvider.apply(file);

      // This predicate is used to determine if a given tablet range overlaps the data in this file.
      Predicate<Range> overlapPredicate;
      if (fileRange == null) {
        // The range is not known, so assume all tablets overlap the file
        overlapPredicate = r -> true;
      } else if (fileRange.empty) {
        // The file is empty so no tablets can overlap it
        overlapPredicate = r -> false;
      } else {
        // check if the tablet range overlaps the file range
        overlapPredicate = range -> range.clip(fileRange.rowRange, true) != null;

        if (!file.getRange().isInfiniteStartKey() || !file.getRange().isInfiniteStopKey()) {
          // It's expected that if a file has a range then the first row and last row will be
          // clipped to be within that range. For that reason this code does not check
          // file.getRange() when making decisions about whether a file should go to a tablet,
          // because it's assumed that fileRange will cover that case. Since file.getRange() is not
          // being checked directly this code validates the assumption that fileRange is within
          // file.getRange()

          Preconditions.checkState(
              file.getRange().clip(new Range(fileRange.rowRange.getStartKey().getRow()), false)
                  != null,
              "First row %s computed for file %s did not fall in its range",
              fileRange.rowRange.getStartKey().getRow(), file);

          var lastRow = RowRangeUtil.stripZeroTail(fileRange.rowRange.getEndKey().getRowData());
          Preconditions.checkState(
              file.getRange().clip(new Range(new Text(lastRow.toArray())), false) != null,
              "Last row %s computed for file %s did not fall in its range", lastRow, file);
        }
      }

      // count how many of the new tablets the file will overlap
      double numOverlapping =
          newTablets.keySet().stream().map(KeyExtent::toDataRange).filter(overlapPredicate).count();

      if (numOverlapping == 0) {
        log.debug("{} File {} with range {} that does not overlap tablet {}", fateId, file,
            fileRange, tabletMetadata.getExtent());
      } else {
        // evenly split the tablet's estimates between the number of tablets it actually overlaps
        double sizePerTablet = dataFileValue.getSize() / numOverlapping;
        double entriesPerTablet = dataFileValue.getNumEntries() / numOverlapping;

        // add the file to the tablets it overlaps
        newTablets.keySet().forEach(newTablet -> {
          if (overlapPredicate.apply(newTablet.toDataRange())) {
            DataFileValue ndfv = new DataFileValue((long) sizePerTablet, (long) entriesPerTablet,
                dataFileValue.getTime());
            tabletsFiles.get(newTablet).put(file, ndfv);
          }
        });
      }
    });

    if (log.isTraceEnabled()) {
      tabletMetadata.getFilesMap().forEach((f, v) -> {
        log.trace("{} {} original file {} {} {} {}", fateId, tabletMetadata.getExtent(),
            f.getFileName(), f.getRange(), v.getSize(), v.getNumEntries());
      });

      tabletsFiles.forEach((extent, files) -> {
        files.forEach((f, v) -> {
          log.trace("{} {} split file {} {} {} {}", fateId, extent, f.getFileName(), f.getRange(),
              v.getSize(), v.getNumEntries());
        });
      });
    }

    return tabletsFiles;
  }

  /**
   * Conditionally creates the metadata entries for every new child tablet except the last one.
   * The last child is produced later by shrinking the original tablet in
   * {@link #updateExistingTablet}. Each new tablet is created with our operation id so the
   * mutation is idempotent on rerun.
   */
  private void addNewTablets(FateId fateId, FateEnv env, TabletMetadata tabletMetadata,
      TabletOperationId opid, NavigableMap<KeyExtent,TabletMergeability> newTablets,
      Map<KeyExtent,Map<StoredTabletFile,DataFileValue>> newTabletsFiles) {
    Iterator<String> dirNameIter = dirNames.iterator();

    try (var tabletsMutator = env.getContext().getAmple().conditionallyMutateTablets()) {
      for (var entry : newTablets.entrySet()) {
        var newExtent = entry.getKey();
        var mergeability = entry.getValue();

        if (newExtent.equals(newTablets.navigableKeySet().last())) {
          // Skip the last tablet, its done after successfully adding all new tablets
          continue;
        }

        var mutator = tabletsMutator.mutateTablet(newExtent).requireAbsentTablet();

        mutator.putOperation(opid);
        mutator.putDirName(dirNameIter.next());
        mutator.putTime(tabletMetadata.getTime());
        tabletMetadata.getFlushId().ifPresent(mutator::putFlushId);
        mutator.putPrevEndRow(newExtent.prevEndRow());

        tabletMetadata.getCompacted().forEach(mutator::putCompacted);

        tabletMetadata.getCompacted().forEach(compactedFateId -> log
            .debug("{} copying compacted marker to new child tablet {}", fateId, compactedFateId));

        mutator.putTabletAvailability(tabletMetadata.getTabletAvailability());

        // Null is only expected for the last tablet which is skipped
        Preconditions.checkState(mergeability != null,
            "Null TabletMergeability for extent %s is unexpected", newExtent);
        mutator.putTabletMergeability(
            TabletMergeabilityMetadata.toMetadata(mergeability, env.getSteadyTime()));
        tabletMetadata.getLoaded().forEach((k, v) -> mutator.putBulkFile(k.getTabletFile(), v));

        newTabletsFiles.get(newExtent).forEach(mutator::putFile);

        mutator.submit(afterMeta -> opid.equals(afterMeta.getOperationId()));
      }

      var results = tabletsMutator.process();
      results.values().forEach(result -> {
        var status = result.getStatus();

        Preconditions.checkState(status == Status.ACCEPTED, "Failed to add new tablet %s %s %s",
            status, splitInfo.getOriginal(), result.getExtent());
      });
    }
  }

  /**
   * Mutates the original tablet into the last child: shrinks its prev end row, keeps only the
   * files assigned to the last child, and clears per-tablet state (compactions, selected files,
   * scans, suspension, last location, unsplittable marker, migration) that no longer applies to
   * the new, smaller range.
   */
  private void updateExistingTablet(FateId fateId, ServerContext ctx, TabletMetadata tabletMetadata,
      TabletOperationId opid, NavigableMap<KeyExtent,TabletMergeability> newTablets,
      Map<KeyExtent,Map<StoredTabletFile,DataFileValue>> newTabletsFiles) {
    try (var tabletsMutator = ctx.getAmple().conditionallyMutateTablets()) {
      var newExtent = newTablets.navigableKeySet().last();

      var mutator = tabletsMutator.mutateTablet(splitInfo.getOriginal()).requireOperation(opid)
          .requireAbsentLocation().requireAbsentLogs();

      Preconditions
          .checkArgument(Objects.equals(tabletMetadata.getExtent().endRow(), newExtent.endRow()));

      mutator.putPrevEndRow(newExtent.prevEndRow());

      newTabletsFiles.get(newExtent).forEach(mutator::putFile);

      // remove the files from the original tablet that did not end up in the tablet
      tabletMetadata.getFiles().forEach(existingFile -> {
        if (!newTabletsFiles.get(newExtent).containsKey(existingFile)) {
          mutator.deleteFile(existingFile);
        }
      });

      // remove any external compaction entries that are present
      tabletMetadata.getExternalCompactions().keySet().forEach(mutator::deleteExternalCompaction);

      tabletMetadata.getExternalCompactions().keySet().forEach(
          ecid -> log.debug("{} deleting external compaction entry for split {}", fateId, ecid));

      // remove any selected file entries that are present, the compaction operation will need to
      // reselect files
      if (tabletMetadata.getSelectedFiles() != null) {
        mutator.deleteSelectedFiles();
        log.debug("{} deleting selected files {} because of split", fateId,
            tabletMetadata.getSelectedFiles().getFateId());
      }

      // Remove any user compaction requested markers as the tablet may fall outside the compaction
      // range. The markers will be recreated if needed.
      tabletMetadata.getUserCompactionsRequested().forEach(mutator::deleteUserCompactionRequested);

      // scan entries are related to a hosted tablet, this tablet is not hosted so can safely delete
      // these
      tabletMetadata.getScans().forEach(mutator::deleteScan);

      if (tabletMetadata.getHostingRequested()) {
        // The range of the tablet is changing, so lets delete the hosting requested column in case
        // this tablet does not actually need to be hosted.
        mutator.deleteHostingRequested();
      }

      if (tabletMetadata.getSuspend() != null) {
        // This is no longer the exact tablet that was suspended. For consistency should either
        // delete the suspension marker OR add it to the new tablets. Choosing to delete it.
        mutator.deleteSuspension();
      }

      if (tabletMetadata.getLast() != null) {
        // This is no longer the same tablet so lets delete the last location.
        mutator.deleteLocation(tabletMetadata.getLast());
      }

      // Clean up any previous unsplittable marker
      if (tabletMetadata.getUnSplittable() != null) {
        mutator.deleteUnSplittable();
        log.debug("{} deleting unsplittable metadata from {} because of split", fateId, newExtent);
      }

      var migration = tabletMetadata.getMigration();
      if (migration != null) {
        // This is no longer the same tablet, so delete the migration
        mutator.deleteMigration();
        log.debug("{} deleting migration {} metadata from {} because of split", fateId, migration,
            newExtent);
      }

      // if the tablet no longer exists (because its prev end row changed), then the update was
      // successful.
      mutator.submit(Ample.RejectionHandler.acceptAbsentTablet());

      var result = tabletsMutator.process().get(splitInfo.getOriginal());

      Preconditions.checkState(result.getStatus() == Status.ACCEPTED,
          "Failed to update existing tablet in split %s %s %s", fateId, splitInfo.getOriginal(),
          result.getExtent());
    }
  }
}
diff --cc 
server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/ImportTable.java
index 9608b6fd10,4ed5649b5d..c464a0c16a
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/ImportTable.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/ImportTable.java
@@@ -156,17 -155,17 +155,17 @@@ public class ImportTable extends Abstra
    }
  
  @Override
  public void undo(FateId fateId, FateEnv env) throws Exception {
    // Release the HDFS directory reservation taken for each export directory when this import
    // operation started.
    for (ImportedTableInfo.DirectoryMapping dm : tableInfo.directories) {
      Utils.unreserveHdfsDirectory(env.getContext(), new Path(dm.exportDir).toString(), fateId);
    }

    // Release the read lock held on the destination namespace.
    Utils.unreserveNamespace(env.getContext(), tableInfo.namespaceId, fateId, LockType.READ);
  }
  
-   static List<ImportedTableInfo.DirectoryMapping> parseExportDir(Set<String> 
exportDirs) {
+   static ArrayList<ImportedTableInfo.DirectoryMapping> 
parseExportDir(Set<String> exportDirs) {
      if (exportDirs == null || exportDirs.isEmpty()) {
-       return Collections.emptyList();
+       return new ArrayList<>();
      }
  
      return exportDirs.stream().filter(not(String::isEmpty))
diff --cc 
server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
index 73bc764c2e,0000000000..6ceac3d681
mode 100644,000000..100644
--- 
a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
+++ 
b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
@@@ -1,488 -1,0 +1,492 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *   https://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.accumulo.manager.tableOps.split;
 +
 +import static org.easymock.EasyMock.mock;
 +import static org.junit.jupiter.api.Assertions.assertEquals;
 +import static org.junit.jupiter.api.Assertions.assertThrows;
 +import static org.junit.jupiter.api.Assertions.assertTrue;
 +
++import java.util.ArrayList;
 +import java.util.EnumSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.OptionalLong;
 +import java.util.Set;
 +import java.util.SortedMap;
 +import java.util.SortedSet;
 +import java.util.TreeMap;
 +import java.util.TreeSet;
 +import java.util.UUID;
 +import java.util.concurrent.TimeUnit;
 +import java.util.function.Function;
 +import java.util.stream.Collectors;
 +
 +import org.apache.accumulo.core.client.admin.TabletAvailability;
 +import org.apache.accumulo.core.client.admin.TabletMergeability;
 +import org.apache.accumulo.core.clientImpl.TabletMergeabilityUtil;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.fate.FateId;
 +import org.apache.accumulo.core.fate.FateInstanceType;
 +import org.apache.accumulo.core.file.FileSKVIterator;
 +import org.apache.accumulo.core.lock.ServiceLock;
 +import org.apache.accumulo.core.metadata.ReferencedTabletFile;
 +import org.apache.accumulo.core.metadata.StoredTabletFile;
 +import org.apache.accumulo.core.metadata.SuspendingTServer;
 +import org.apache.accumulo.core.metadata.TServerInstance;
 +import org.apache.accumulo.core.metadata.schema.Ample;
 +import org.apache.accumulo.core.metadata.schema.CompactionMetadata;
 +import org.apache.accumulo.core.metadata.schema.DataFileValue;
 +import org.apache.accumulo.core.metadata.schema.ExternalCompactionId;
 +import org.apache.accumulo.core.metadata.schema.MetadataTime;
 +import org.apache.accumulo.core.metadata.schema.SelectedFiles;
 +import org.apache.accumulo.core.metadata.schema.TabletMergeabilityMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType;
 +import org.apache.accumulo.core.metadata.schema.TabletOperationId;
 +import org.apache.accumulo.core.metadata.schema.TabletOperationType;
 +import org.apache.accumulo.core.metadata.schema.UnSplittableMetadata;
 +import org.apache.accumulo.core.tabletserver.log.LogEntry;
 +import org.apache.accumulo.core.util.time.SteadyTime;
 +import org.apache.accumulo.manager.Manager;
 +import org.apache.accumulo.manager.split.FileRangeCache;
 +import org.apache.accumulo.server.ServerContext;
 +import org.apache.accumulo.server.metadata.ConditionalTabletMutatorImpl;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.Text;
 +import org.easymock.EasyMock;
 +import org.junit.jupiter.api.Test;
 +
 +public class UpdateTabletsTest {
 +
 +  public static StoredTabletFile newSTF(int fileNum) {
 +    return new ReferencedTabletFile(new Path(
 +        "hdfs://localhost:8020/accumulo/tables/2a/default_tablet/F00000" + 
fileNum + ".rf"))
 +        .insert();
 +  }
 +
 +  FileSKVIterator.FileRange newFileInfo(String start, String end) {
 +    return new FileSKVIterator.FileRange(new Text(start), new Text(end));
 +  }
 +
 +  /**
 +   * This is a set of tablet metadata columns that the split code is known to 
handle. The purpose of
 +   * the set is to detect when a new tablet metadata column was added without 
considering the
 +   * implications for splitting tablets. For a column to be in this set it 
means an Accumulo
 +   * developer has determined that split code can handle that column OR has 
opened an issue about
 +   * handling it.
 +   */
 +  private static final Set<ColumnType> COLUMNS_HANDLED_BY_SPLIT = 
EnumSet.of(ColumnType.TIME,
 +      ColumnType.LOGS, ColumnType.FILES, ColumnType.PREV_ROW, 
ColumnType.OPID, ColumnType.LOCATION,
 +      ColumnType.ECOMP, ColumnType.SELECTED, ColumnType.LOADED,
 +      ColumnType.USER_COMPACTION_REQUESTED, ColumnType.MERGED, 
ColumnType.LAST, ColumnType.SCANS,
 +      ColumnType.DIR, ColumnType.CLONED, ColumnType.FLUSH_ID, 
ColumnType.FLUSH_NONCE,
 +      ColumnType.SUSPEND, ColumnType.AVAILABILITY, 
ColumnType.HOSTING_REQUESTED,
 +      ColumnType.COMPACTED, ColumnType.UNSPLITTABLE, ColumnType.MERGEABILITY, 
ColumnType.MIGRATION);
 +
 +  /**
 +   * The purpose of this test is to catch new tablet metadata columns that 
were added w/o
 +   * considering splitting tablets.
 +   */
 +  @Test
 +  public void checkColumns() {
 +    for (ColumnType columnType : ColumnType.values()) {
 +      assertTrue(COLUMNS_HANDLED_BY_SPLIT.contains(columnType),
 +          "The split code does not know how to handle " + columnType);
 +    }
 +  }
 +
  // When a tablet splits its files are partitioned among the new children tablets. This test
  // exercises the partitioning code.
  // NOTE(review): method name misspells "Partitioning"; renaming is left as a separate change.
  @Test
  public void testFileParitioning() {

    var file1 = newSTF(1);
    var file2 = newSTF(2);
    var file3 = newSTF(3);
    var file4 = newSTF(4);

    // file1 has no known row range, file3 spans three children, file4 spans two
    var tabletFiles =
        Map.of(file1, new DataFileValue(1000, 100, 20), file2, new DataFileValue(2000, 200, 50),
            file3, new DataFileValue(4000, 400), file4, new DataFileValue(4000, 400));

    // four children covering (-inf,m], (m,r], (r,v], (v,+inf)
    var ke1 = new KeyExtent(TableId.of("1"), new Text("m"), null);
    var ke2 = new KeyExtent(TableId.of("1"), new Text("r"), new Text("m"));
    var ke3 = new KeyExtent(TableId.of("1"), new Text("v"), new Text("r"));
    var ke4 = new KeyExtent(TableId.of("1"), null, new Text("v"));

    // file1 intentionally absent: a file w/o range info is assumed to overlap every child
    var firstAndLastKeys = Map.of(file2, newFileInfo("m", "r"), file3, newFileInfo("g", "x"), file4,
        newFileInfo("s", "v"));

    // expected: each file's size/entry estimates are divided evenly among overlapping children
    var ke1Expected = Map.of(file1, new DataFileValue(250, 25, 20), file2,
        new DataFileValue(1000, 100, 50), file3, new DataFileValue(1000, 100));
    var ke2Expected = Map.of(file1, new DataFileValue(250, 25, 20), file2,
        new DataFileValue(1000, 100, 50), file3, new DataFileValue(1000, 100));
    var ke3Expected = Map.of(file1, new DataFileValue(250, 25, 20), file3,
        new DataFileValue(1000, 100), file4, new DataFileValue(4000, 400));
    var ke4Expected =
        Map.of(file1, new DataFileValue(250, 25, 20), file3, new DataFileValue(1000, 100));

    var expected = Map.of(ke1, ke1Expected, ke2, ke2Expected, ke3, ke3Expected, ke4, ke4Expected);

    SortedMap<KeyExtent,TabletMergeability> newExtents = new TreeMap<>(Set.of(ke1, ke2, ke3, ke4)
        .stream().collect(Collectors.toMap(Function.identity(), e -> TabletMergeability.never())));

    TabletMetadata tabletMeta = EasyMock.createMock(TabletMetadata.class);
    EasyMock.expect(tabletMeta.getFilesMap()).andReturn(tabletFiles).anyTimes();
    EasyMock.replay(tabletMeta);

    var fid = FateId.from(FateInstanceType.USER, UUID.randomUUID());
    Map<KeyExtent,Map<StoredTabletFile,DataFileValue>> results =
        UpdateTablets.getNewTabletFiles(fid, newExtents, tabletMeta, firstAndLastKeys::get);

    assertEquals(expected.keySet(), results.keySet());
    expected.forEach(((extent, files) -> {
      assertEquals(files, results.get(extent));
    }));

    // Test a tablet with no files going to it

    var tabletFiles2 = Map.of(file2, tabletFiles.get(file2), file4, tabletFiles.get(file4));
    ke1Expected = Map.of(file2, new DataFileValue(1000, 100, 50));
    ke2Expected = Map.of(file2, new DataFileValue(1000, 100, 50));
    ke3Expected = Map.of(file4, new DataFileValue(4000, 400));
    ke4Expected = Map.of();
    expected = Map.of(ke1, ke1Expected, ke2, ke2Expected, ke3, ke3Expected, ke4, ke4Expected);

    tabletMeta = EasyMock.createMock(TabletMetadata.class);
    EasyMock.expect(tabletMeta.getFilesMap()).andReturn(tabletFiles2).anyTimes();
    EasyMock.replay(tabletMeta);

    Map<KeyExtent,Map<StoredTabletFile,DataFileValue>> results2 =
        UpdateTablets.getNewTabletFiles(fid, newExtents, tabletMeta, firstAndLastKeys::get);
    assertEquals(expected.keySet(), results2.keySet());
    expected.forEach(((extent, files) -> {
      assertEquals(files, results2.get(extent));
    }));

  }
 +
  /**
   * The purpose of this test is to create a tablet with as many columns in its metadata set as
   * possible and exercise the split code with that tablet.
   */
  @Test
  public void testManyColumns() throws Exception {
    TableId tableId = TableId.of("123");
    // The tablet being split covers (-inf,m]; it is split into three children with end rows
    // c, h, and m. The original extent is reused as the last child.
    KeyExtent origExtent = new KeyExtent(tableId, new Text("m"), null);

    var newExtent1 = new KeyExtent(tableId, new Text("c"), null);
    var newExtent2 = new KeyExtent(tableId, new Text("h"), new Text("c"));
    var newExtent3 = new KeyExtent(tableId, new Text("m"), new Text("h"));

    var file1 = newSTF(1);
    var file2 = newSTF(2);
    var file3 = newSTF(3);
    var file4 = newSTF(4);

    // Bulk loaded files, each tracked with the fate id that loaded it.
    var loaded1 = newSTF(5);
    var loaded2 = newSTF(6);

    var flid1 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
    var flid2 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
    var loaded = Map.of(loaded1, flid1, loaded2, flid2);

    var dfv1 = new DataFileValue(1000, 100, 20);
    var dfv2 = new DataFileValue(500, 50, 20);
    var dfv3 = new DataFileValue(4000, 400);
    var dfv4 = new DataFileValue(2000, 200);

    var tabletFiles = Map.of(file1, dfv1, file2, dfv2, file3, dfv3, file4, dfv4);

    // External compactions that the split is expected to delete from the original tablet.
    var cid1 = ExternalCompactionId.generate(UUID.randomUUID());
    var cid2 = ExternalCompactionId.generate(UUID.randomUUID());
    var cid3 = ExternalCompactionId.generate(UUID.randomUUID());

    var fateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
    var opid = TabletOperationId.from(TabletOperationType.SPLITTING, fateId);
    var tabletTime = MetadataTime.parse("L30");
    var flushID = OptionalLong.of(40);
    var availability = TabletAvailability.HOSTED;
    var lastLocation = TabletMetadata.Location.last("1.2.3.4:1234", "123456789");
    var suspendingTServer = SuspendingTServer.fromValue(new Value("1.2.3.4:5|56"));
    var migration = new TServerInstance("localhost:1234", 56L);

    String dir1 = "dir1";
    String dir2 = "dir2";

    Manager manager = EasyMock.mock(Manager.class);
    ServerContext context = EasyMock.mock(ServerContext.class);
    EasyMock.expect(manager.getContext()).andReturn(context).atLeastOnce();
    Ample ample = EasyMock.mock(Ample.class);
    EasyMock.expect(context.getAmple()).andReturn(ample).atLeastOnce();
    // Cached file ranges determine which children each file lands in: file1 (a..z) overlaps all
    // three children, file2 (a..b) only the first, file3 (d..f) only the second, and file4 (d..j)
    // the second and third.
    FileRangeCache fileRangeCache = EasyMock.mock(FileRangeCache.class);
    EasyMock.expect(fileRangeCache.getCachedFileInfo(tableId, file1))
        .andReturn(newFileInfo("a", "z"));
    EasyMock.expect(fileRangeCache.getCachedFileInfo(tableId, file2))
        .andReturn(newFileInfo("a", "b"));
    EasyMock.expect(fileRangeCache.getCachedFileInfo(tableId, file3))
        .andReturn(newFileInfo("d", "f"));
    EasyMock.expect(fileRangeCache.getCachedFileInfo(tableId, file4))
        .andReturn(newFileInfo("d", "j"));
    EasyMock.expect(manager.getFileRangeCache()).andReturn(fileRangeCache).atLeastOnce();
    EasyMock.expect(manager.getSteadyTime()).andReturn(SteadyTime.from(100_000, TimeUnit.SECONDS))
        .atLeastOnce();

    ServiceLock managerLock = EasyMock.mock(ServiceLock.class);
    EasyMock.expect(context.getServiceLock()).andReturn(managerLock).anyTimes();

    // Setup the metadata for the tablet that is going to split, set as many columns as possible on
    // it.
    TabletMetadata tabletMeta = EasyMock.mock(TabletMetadata.class);
    EasyMock.expect(tabletMeta.getExtent()).andReturn(origExtent).atLeastOnce();
    EasyMock.expect(tabletMeta.getOperationId()).andReturn(opid).atLeastOnce();
    EasyMock.expect(tabletMeta.getLocation()).andReturn(null).atLeastOnce();
    EasyMock.expect(tabletMeta.getLogs()).andReturn(List.of()).atLeastOnce();
    EasyMock.expect(tabletMeta.hasMerged()).andReturn(false).atLeastOnce();
    EasyMock.expect(tabletMeta.getCloned()).andReturn(null).atLeastOnce();
    Map<ExternalCompactionId,CompactionMetadata> compactions = EasyMock.mock(Map.class);
    EasyMock.expect(compactions.keySet()).andReturn(Set.of(cid1, cid2, cid3)).anyTimes();
    EasyMock.expect(tabletMeta.getExternalCompactions()).andReturn(compactions).atLeastOnce();
    EasyMock.expect(tabletMeta.getFilesMap()).andReturn(tabletFiles).atLeastOnce();
    EasyMock.expect(tabletMeta.getFiles()).andReturn(tabletFiles.keySet());
    SelectedFiles selectedFiles = EasyMock.mock(SelectedFiles.class);
    EasyMock.expect(selectedFiles.getFateId()).andReturn(null);
    EasyMock.expect(tabletMeta.getSelectedFiles()).andReturn(selectedFiles).atLeastOnce();
    FateId ucfid1 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
    FateId ucfid2 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
    EasyMock.expect(tabletMeta.getUserCompactionsRequested()).andReturn(Set.of(ucfid1, ucfid2))
        .atLeastOnce();
    FateId ucfid3 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
    EasyMock.expect(tabletMeta.getCompacted()).andReturn(Set.of(ucfid1, ucfid3)).atLeastOnce();
    EasyMock.expect(tabletMeta.getScans()).andReturn(List.of(file1, file2)).atLeastOnce();
    EasyMock.expect(tabletMeta.getTime()).andReturn(tabletTime).atLeastOnce();
    EasyMock.expect(tabletMeta.getFlushId()).andReturn(flushID).atLeastOnce();
    EasyMock.expect(tabletMeta.getTabletAvailability()).andReturn(availability).atLeastOnce();
    EasyMock.expect(tabletMeta.getLoaded()).andReturn(loaded).atLeastOnce();
    EasyMock.expect(tabletMeta.getHostingRequested()).andReturn(true).atLeastOnce();
    EasyMock.expect(tabletMeta.getSuspend()).andReturn(suspendingTServer).atLeastOnce();
    EasyMock.expect(tabletMeta.getLast()).andReturn(lastLocation).atLeastOnce();
    UnSplittableMetadata usm =
        UnSplittableMetadata.toUnSplittable(origExtent, 1000, 1001, 1002, tabletFiles.keySet());
    EasyMock.expect(tabletMeta.getUnSplittable()).andReturn(usm).atLeastOnce();
    EasyMock.expect(tabletMeta.getMigration()).andReturn(migration).atLeastOnce();

    EasyMock.expect(ample.readTablet(origExtent)).andReturn(tabletMeta);

    Ample.ConditionalTabletsMutator tabletsMutator =
        EasyMock.mock(Ample.ConditionalTabletsMutator.class);
    EasyMock.expect(ample.conditionallyMutateTablets()).andReturn(tabletsMutator).atLeastOnce();

    // Setup the mutator for creating the first new tablet. file1 is split three ways
    // (1000/3 == 333, 100/3 == 33) and file2 falls entirely within this child, so it
    // keeps dfv2 unchanged.
    ConditionalTabletMutatorImpl tablet1Mutator = EasyMock.mock(ConditionalTabletMutatorImpl.class);
    EasyMock.expect(tablet1Mutator.requireAbsentTablet()).andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putOperation(opid)).andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putDirName(dir1)).andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putTime(tabletTime)).andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putFlushId(flushID.getAsLong())).andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putPrevEndRow(newExtent1.prevEndRow()))
        .andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putCompacted(ucfid1)).andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putCompacted(ucfid3)).andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putTabletAvailability(availability)).andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putBulkFile(loaded1.getTabletFile(), flid1))
        .andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putBulkFile(loaded2.getTabletFile(), flid2))
        .andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putFile(file1, new DataFileValue(333, 33, 20)))
        .andReturn(tablet1Mutator);
    EasyMock.expect(tablet1Mutator.putFile(file2, dfv2)).andReturn(tablet1Mutator);
    // SplitInfo marked as system generated so should be set to ALWAYS (0 delay)
    EasyMock
        .expect(tablet1Mutator.putTabletMergeability(
            TabletMergeabilityMetadata.always(SteadyTime.from(100_000, TimeUnit.SECONDS))))
        .andReturn(tablet1Mutator);
    tablet1Mutator.submit(EasyMock.anyObject());
    EasyMock.expectLastCall().once();
    EasyMock.expect(tabletsMutator.mutateTablet(newExtent1)).andReturn(tablet1Mutator);

    // Setup the mutator for creating the second new tablet. It gets a third of file1,
    // all of file3, and half of file4 (2000/2 == 1000).
    ConditionalTabletMutatorImpl tablet2Mutator = EasyMock.mock(ConditionalTabletMutatorImpl.class);
    EasyMock.expect(tablet2Mutator.requireAbsentTablet()).andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putOperation(opid)).andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putDirName(dir2)).andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putTime(tabletTime)).andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putFlushId(flushID.getAsLong())).andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putPrevEndRow(newExtent2.prevEndRow()))
        .andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putCompacted(ucfid1)).andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putCompacted(ucfid3)).andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putTabletAvailability(availability)).andReturn(tablet2Mutator);
    // SplitInfo marked as system generated so should be set to ALWAYS (0 delay)
    EasyMock
        .expect(tablet2Mutator.putTabletMergeability(
            TabletMergeabilityMetadata.always(SteadyTime.from(100_000, TimeUnit.SECONDS))))
        .andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putBulkFile(loaded1.getTabletFile(), flid1))
        .andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putBulkFile(loaded2.getTabletFile(), flid2))
        .andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putFile(file1, new DataFileValue(333, 33, 20)))
        .andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putFile(file3, dfv3)).andReturn(tablet2Mutator);
    EasyMock.expect(tablet2Mutator.putFile(file4, new DataFileValue(1000, 100)))
        .andReturn(tablet2Mutator);
    tablet2Mutator.submit(EasyMock.anyObject());
    EasyMock.expectLastCall().once();
    EasyMock.expect(tabletsMutator.mutateTablet(newExtent2)).andReturn(tablet2Mutator);

    // Setup the mutator for updating the existing tablet, which becomes the last child
    // (prev end row moves to h). Files that no longer overlap it are deleted, and every
    // split-related marker/column set on the metadata above is expected to be cleaned up here.
    ConditionalTabletMutatorImpl tablet3Mutator = mock(ConditionalTabletMutatorImpl.class);
    EasyMock.expect(tablet3Mutator.requireOperation(opid)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.requireAbsentLocation()).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.requireAbsentLogs()).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.putPrevEndRow(newExtent3.prevEndRow()))
        .andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.putFile(file1, new DataFileValue(333, 33, 20)))
        .andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.putFile(file4, new DataFileValue(1000, 100)))
        .andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteFile(file2)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteFile(file3)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteExternalCompaction(cid1)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteExternalCompaction(cid2)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteExternalCompaction(cid3)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteSelectedFiles()).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteUserCompactionRequested(ucfid1)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteUserCompactionRequested(ucfid2)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteScan(file1)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteScan(file2)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteHostingRequested()).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteSuspension()).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteLocation(lastLocation)).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteUnSplittable()).andReturn(tablet3Mutator);
    EasyMock.expect(tablet3Mutator.deleteMigration()).andReturn(tablet3Mutator);
    tablet3Mutator.submit(EasyMock.anyObject());
    EasyMock.expectLastCall().once();
    EasyMock.expect(tabletsMutator.mutateTablet(origExtent)).andReturn(tablet3Mutator);

    // setup processing of conditional mutations; all three mutations report ACCEPTED
    Ample.ConditionalResult cr = EasyMock.createMock(Ample.ConditionalResult.class);
    EasyMock.expect(cr.getExtent()).andReturn(origExtent).atLeastOnce();
    EasyMock.expect(cr.getStatus()).andReturn(Ample.ConditionalResult.Status.ACCEPTED)
        .atLeastOnce();
    EasyMock.expect(tabletsMutator.process())
        .andReturn(Map.of(newExtent1, cr, newExtent2, cr, origExtent, cr)).atLeastOnce();
    tabletsMutator.close();
    EasyMock.expectLastCall().anyTimes();

    EasyMock.replay(manager, context, ample, tabletMeta, fileRangeCache, tabletsMutator,
        tablet1Mutator, tablet2Mutator, tablet3Mutator, cr, compactions);
    // Now we can actually test the split code that writes the new tablets, with a bunch of
    // columns set in the original tablet
    SortedSet<Text> splits = new TreeSet<>(List.of(newExtent1.endRow(), newExtent2.endRow()));
    var dirNames = new ArrayList<String>();
    dirNames.add(dir1);
    dirNames.add(dir2);
    UpdateTablets updateTablets = new UpdateTablets(
        new SplitInfo(origExtent, TabletMergeabilityUtil.systemDefaultSplits(splits)), dirNames);
    updateTablets.call(fateId, manager);

    EasyMock.verify(manager, context, ample, tabletMeta, fileRangeCache, tabletsMutator,
        tablet1Mutator, tablet2Mutator, tablet3Mutator, cr, compactions);
  }
 +
  // Verifies that the split code refuses to run when the tablet is in a state where splitting
  // would be unsafe, and that each failure message identifies the offending state.
  @Test
  public void testErrors() throws Exception {
    TableId tableId = TableId.of("123");
    KeyExtent origExtent = new KeyExtent(tableId, new Text("m"), null);

    var fateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
    var opid = TabletOperationId.from(TabletOperationType.SPLITTING, fateId);

    // Test splitting a tablet with a location
    var location = TabletMetadata.Location.future(new TServerInstance("1.2.3.4:1234", 123456789L));
    var tablet1 =
        TabletMetadata.builder(origExtent).putOperation(opid).putLocation(location).build();
    var e = assertThrows(IllegalStateException.class, () -> testError(origExtent, tablet1, fateId));
    assertTrue(e.getMessage().contains(location.toString()));

    // Test splitting a tablet without an operation id set
    var tablet2 = TabletMetadata.builder(origExtent).build(ColumnType.OPID);
    e = assertThrows(IllegalStateException.class, () -> testError(origExtent, tablet2, fateId));
    assertTrue(e.getMessage().contains("does not have expected operation id "));
    assertTrue(e.getMessage().contains("null"));

    // Test splitting a tablet with an unexpected operation id (a different fate txn owns it)
    var fateId2 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
    var opid2 = TabletOperationId.from(TabletOperationType.SPLITTING, fateId2);
    var tablet3 = TabletMetadata.builder(origExtent).putOperation(opid2).build();
    e = assertThrows(IllegalStateException.class, () -> testError(origExtent, tablet3, fateId));
    assertTrue(e.getMessage().contains("does not have expected operation id "));
    assertTrue(e.getMessage().contains(opid2.toString()));

    // Test splitting a tablet with walogs
    var walog = LogEntry.fromPath("localhost+8020/" + UUID.randomUUID());
    var tablet4 = TabletMetadata.builder(origExtent).putOperation(opid).putWal(walog)
        .build(ColumnType.LOCATION);
    e = assertThrows(IllegalStateException.class, () -> testError(origExtent, tablet4, fateId));
    assertTrue(e.getMessage().contains("unexpectedly had walogs"));
    assertTrue(e.getMessage().contains(walog.toString()));

    // Test splitting tablet with merged marker
    var tablet5 = TabletMetadata.builder(origExtent).putOperation(opid).setMerged()
        .build(ColumnType.LOCATION, ColumnType.LOGS);
    e = assertThrows(IllegalStateException.class, () -> testError(origExtent, tablet5, fateId));
    assertTrue(e.getMessage().contains("unexpectedly has a merged"));

    // Test splitting tablet with cloned marker. A mock is used here instead of the builder —
    // presumably the builder cannot set a cloned marker; confirm against TabletMetadata.Builder.
    TabletMetadata tablet6 = EasyMock.mock(TabletMetadata.class);
    EasyMock.expect(tablet6.getExtent()).andReturn(origExtent).anyTimes();
    EasyMock.expect(tablet6.getOperationId()).andReturn(opid).anyTimes();
    EasyMock.expect(tablet6.getLocation()).andReturn(null).anyTimes();
    EasyMock.expect(tablet6.getLogs()).andReturn(List.of()).anyTimes();
    EasyMock.expect(tablet6.hasMerged()).andReturn(false);
    EasyMock.expect(tablet6.getCloned()).andReturn("OK").atLeastOnce();
    EasyMock.replay(tablet6);
    e = assertThrows(IllegalStateException.class, () -> testError(origExtent, tablet6, fateId));
    assertTrue(e.getMessage().contains("unexpectedly has a cloned"));
    EasyMock.verify(tablet6);
  }
 +
  /**
   * Runs the split code (UpdateTablets) against the supplied tablet metadata so callers can
   * assert that the expected exception is thrown. Only Ample's readTablet is mocked; no mutation
   * expectations are registered, so verify() would fail if the split proceeded past validation.
   */
  private static void testError(KeyExtent origExtent, TabletMetadata tm1, FateId fateId)
      throws Exception {
    Manager manager = EasyMock.mock(Manager.class);
    ServerContext context = EasyMock.mock(ServerContext.class);
    EasyMock.expect(manager.getContext()).andReturn(context).atLeastOnce();
    Ample ample = EasyMock.mock(Ample.class);
    EasyMock.expect(context.getAmple()).andReturn(ample).atLeastOnce();

    // The code under test reads the tablet via Ample; return the caller-provided metadata.
    EasyMock.expect(ample.readTablet(origExtent)).andReturn(tm1);

    EasyMock.replay(manager, context, ample);
    // Invoke the split step with a single split point; callers assert the expected failure.
    SortedSet<Text> splits = new TreeSet<>(List.of(new Text("c")));
    var dirNames = new ArrayList<String>();
    dirNames.add("d1");
    var updateTablets = new UpdateTablets(
        new SplitInfo(origExtent, TabletMergeabilityUtil.systemDefaultSplits(splits)), dirNames);
    updateTablets.call(fateId, manager);

    EasyMock.verify(manager, context, ample);
  }
 +}
diff --cc server/monitor/pom.xml
index fad0af4ab1,56002681e3..089502e40c
--- a/server/monitor/pom.xml
+++ b/server/monitor/pom.xml
@@@ -43,22 -36,13 +43,17 @@@
        <artifactId>jackson-databind</artifactId>
      </dependency>
      <dependency>
 -      <groupId>com.github.ben-manes.caffeine</groupId>
 -      <artifactId>caffeine</artifactId>
 +      <groupId>com.fasterxml.jackson.datatype</groupId>
 +      <artifactId>jackson-datatype-jdk8</artifactId>
      </dependency>
      <dependency>
 -      <groupId>com.google.code.gson</groupId>
 -      <artifactId>gson</artifactId>
 +      <groupId>com.fasterxml.jackson.datatype</groupId>
 +      <artifactId>jackson-datatype-jsr310</artifactId>
 +    </dependency>
 +    <dependency>
 +      <groupId>com.github.ben-manes.caffeine</groupId>
 +      <artifactId>caffeine</artifactId>
      </dependency>
-     <dependency>
-       <groupId>com.google.auto.service</groupId>
-       <artifactId>auto-service</artifactId>
-       <optional>true</optional>
-     </dependency>
      <dependency>
        <groupId>com.google.guava</groupId>
        <artifactId>guava</artifactId>
diff --cc shell/pom.xml
index 23afddc6bd,787959690d..599443f38f
--- a/shell/pom.xml
+++ b/shell/pom.xml
@@@ -37,11 -34,10 +37,6 @@@
        <groupId>com.beust</groupId>
        <artifactId>jcommander</artifactId>
      </dependency>
--    <dependency>
-       <groupId>com.google.auto.service</groupId>
-       <artifactId>auto-service</artifactId>
-       <optional>true</optional>
 -      <groupId>com.google.code.gson</groupId>
 -      <artifactId>gson</artifactId>
--    </dependency>
      <dependency>
        <groupId>com.google.guava</groupId>
        <artifactId>guava</artifactId>
diff --cc start/pom.xml
index 76782d1515,e0407189cf..0766d612a9
--- a/start/pom.xml
+++ b/start/pom.xml
@@@ -29,15 -29,27 +29,10 @@@
    <artifactId>accumulo-start</artifactId>
    <name>Apache Accumulo Start</name>
    <description>A library for launching Apache Accumulo services.</description>
 +  <properties>
 +    
<spotbugs.omitVisitors>SharedVariableAtomicityDetector,ConstructorThrow</spotbugs.omitVisitors>
 +  </properties>
    <dependencies>
--    <dependency>
-       <groupId>com.google.auto.service</groupId>
-       <artifactId>auto-service</artifactId>
-       <optional>true</optional>
 -      <groupId>commons-io</groupId>
 -      <artifactId>commons-io</artifactId>
 -    </dependency>
 -    <dependency>
 -      <groupId>commons-logging</groupId>
 -      <artifactId>commons-logging</artifactId>
 -    </dependency>
 -    <dependency>
 -      <groupId>org.apache.commons</groupId>
 -      <artifactId>commons-configuration2</artifactId>
 -    </dependency>
 -    <dependency>
 -      <groupId>org.apache.commons</groupId>
 -      <artifactId>commons-vfs2</artifactId>
 -    </dependency>
 -    <dependency>
 -      <groupId>org.apache.hadoop</groupId>
 -      <artifactId>hadoop-client-api</artifactId>
--    </dependency>
      <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-api</artifactId>

Reply via email to