This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit 4217116ea7e705a890eac8f8dca6361a06c2d50b
Merge: aa7b4952c2 d0ee570819
Author: Christopher Tubbs <ctubb...@apache.org>
AuthorDate: Tue Jan 14 14:46:11 2025 -0500

    Merge branch '3.1'

 .../data/constraints/VisibilityConstraintTest.java | 25 +++++------
 .../manager/tableOps/merge/MergeTabletsTest.java   |  2 +-
 .../manager/tableOps/split/UpdateTabletsTest.java  |  3 +-
 .../accumulo/tserver/ActiveAssignmentRunnable.java |  9 ++--
 .../tserver/TabletServerResourceManager.java       | 10 ++---
 .../accumulo/tserver/AssignmentWatcherTest.java    | 48 +++++++++-------------
 .../shell/format/DeleterFormatterTest.java         | 14 ++++---
 7 files changed, 52 insertions(+), 59 deletions(-)

diff --cc server/manager/src/test/java/org/apache/accumulo/manager/tableOps/merge/MergeTabletsTest.java
index 8fd15f78ec,0000000000..08d951e21f
mode 100644,000000..100644
--- a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/merge/MergeTabletsTest.java
+++ b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/merge/MergeTabletsTest.java
@@@ -1,485 -1,0 +1,485 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *   https://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.accumulo.manager.tableOps.merge;
 +
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.AVAILABILITY;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.CLONED;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.COMPACTED;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.DIR;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.ECOMP;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.FILES;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.FLUSH_ID;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.FLUSH_NONCE;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.HOSTING_REQUESTED;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.LAST;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.LOADED;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.LOCATION;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.LOGS;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.MERGEABILITY;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.MERGED;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.OPID;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.SCANS;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.SELECTED;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.SUSPEND;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.TIME;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.UNSPLITTABLE;
 +import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.USER_COMPACTION_REQUESTED;
 +import static org.apache.accumulo.manager.tableOps.split.UpdateTabletsTest.newSTF;
 +import static org.junit.jupiter.api.Assertions.assertThrows;
 +import static org.junit.jupiter.api.Assertions.assertTrue;
 +
 +import java.util.ArrayList;
 +import java.util.EnumSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.OptionalLong;
 +import java.util.Set;
 +import java.util.UUID;
 +import java.util.concurrent.TimeUnit;
 +import java.util.function.Consumer;
 +
 +import org.apache.accumulo.core.client.admin.TabletAvailability;
 +import org.apache.accumulo.core.data.NamespaceId;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.fate.FateId;
 +import org.apache.accumulo.core.fate.FateInstanceType;
 +import org.apache.accumulo.core.gc.ReferenceFile;
 +import org.apache.accumulo.core.lock.ServiceLock;
 +import org.apache.accumulo.core.metadata.StoredTabletFile;
 +import org.apache.accumulo.core.metadata.SuspendingTServer;
 +import org.apache.accumulo.core.metadata.TServerInstance;
 +import org.apache.accumulo.core.metadata.schema.Ample;
 +import org.apache.accumulo.core.metadata.schema.CompactionMetadata;
 +import org.apache.accumulo.core.metadata.schema.DataFileValue;
 +import org.apache.accumulo.core.metadata.schema.ExternalCompactionId;
 +import org.apache.accumulo.core.metadata.schema.MetadataTime;
 +import org.apache.accumulo.core.metadata.schema.SelectedFiles;
 +import org.apache.accumulo.core.metadata.schema.TabletMergeabilityMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadataBuilder;
 +import org.apache.accumulo.core.metadata.schema.TabletOperationId;
 +import org.apache.accumulo.core.metadata.schema.TabletOperationType;
 +import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
 +import org.apache.accumulo.core.metadata.schema.UnSplittableMetadata;
 +import org.apache.accumulo.core.tabletserver.log.LogEntry;
 +import org.apache.accumulo.core.util.time.SteadyTime;
 +import org.apache.accumulo.manager.Manager;
 +import org.apache.accumulo.server.ServerContext;
 +import org.apache.accumulo.server.gc.AllVolumesDirectory;
 +import org.apache.accumulo.server.metadata.ConditionalTabletMutatorImpl;
 +import org.apache.accumulo.server.metadata.ConditionalTabletsMutatorImpl;
 +import org.apache.hadoop.io.Text;
 +import org.easymock.EasyMock;
 +import org.junit.jupiter.api.Test;
 +
 +public class MergeTabletsTest {
 +
 +  private static final TableId tableId = TableId.of("789");
 +  private static final FateId fateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
 +  private static final TabletOperationId opid =
 +      TabletOperationId.from(TabletOperationType.MERGING, fateId);
 +
 +  private static final KeyExtent ke1 = new KeyExtent(tableId, new Text("c"), null);
 +  private static final KeyExtent ke2 = new KeyExtent(tableId, new Text("m"), new Text("c"));
 +  private static final KeyExtent ke3 = new KeyExtent(tableId, null, new Text("m"));
 +
 +  /**
 +   * This is a set of tablet metadata columns that the merge code is known to handle. The purpose of
 +   * the set is to detect when a new tablet metadata column was added without considering the
 +   * implications for merging tablets. For a column to be in this set it means an Accumulo developer
 +   * has determined that merge code can handle that column OR has opened an issue about handling it.
 +   */
 +  private static final Set<TabletMetadata.ColumnType> COLUMNS_HANDLED_BY_MERGE =
 +      EnumSet.of(TIME, LOGS, FILES, PREV_ROW, OPID, LOCATION, ECOMP, SELECTED, LOADED,
 +          USER_COMPACTION_REQUESTED, MERGED, LAST, SCANS, DIR, CLONED, FLUSH_ID, FLUSH_NONCE,
 +          SUSPEND, AVAILABILITY, HOSTING_REQUESTED, COMPACTED, UNSPLITTABLE, MERGEABILITY);
 +
 +  /**
 +   * The purpose of this test is to catch new tablet metadata columns that were added w/o
 +   * considering merging tablets.
 +   */
 +  @Test
 +  public void checkColumns() {
 +    for (TabletMetadata.ColumnType columnType : TabletMetadata.ColumnType.values()) {
 +      assertTrue(COLUMNS_HANDLED_BY_MERGE.contains(columnType),
 +          "The merge code does not know how to handle " + columnType);
 +    }
 +  }
 +
 +  @Test
 +  public void testManyColumns() throws Exception {
 +
 +    // This test exercises the merge code with as many of the tablet metadata columns as possible.
 +    // For the columns FLUSH_ID and FLUSH_NONCE the merge code leaves them as is and that is ok.
 +    // This test should break if the code starts reading them though.
 +
 +    var file1 = newSTF(1);
 +    var file2 = newSTF(2);
 +    var file3 = newSTF(3);
 +    var file4 = newSTF(4);
 +
 +    var dfv1 = new DataFileValue(1000, 100, 20);
 +    var dfv2 = new DataFileValue(500, 50, 20);
 +    var dfv3 = new DataFileValue(4000, 400);
 +    var dfv4 = new DataFileValue(2000, 200);
 +
 +    var cid1 = ExternalCompactionId.generate(UUID.randomUUID());
 +    var cid2 = ExternalCompactionId.generate(UUID.randomUUID());
 +    var cid3 = ExternalCompactionId.generate(UUID.randomUUID());
 +
 +    var tabletTime = MetadataTime.parse("L30");
 +    var flushID = OptionalLong.of(40);
 +    var availability = TabletAvailability.HOSTED;
 +    var lastLocation = TabletMetadata.Location.last("1.2.3.4:1234", "123456789");
 +    var suspendingTServer = SuspendingTServer.fromValue(new Value("1.2.3.4:5|56"));
 +    var mergeability = TabletMergeabilityMetadata.always(SteadyTime.from(1, TimeUnit.SECONDS));
 +
 +    var tablet1 =
 +        TabletMetadata.builder(ke1).putOperation(opid).putDirName("td1").putFile(file3, dfv3)
 +            .putTime(MetadataTime.parse("L3")).putTabletAvailability(TabletAvailability.HOSTED)
 +            .putTabletMergeability(mergeability).build(LOCATION, LOGS, FILES, ECOMP, MERGED,
 +                COMPACTED, SELECTED, USER_COMPACTION_REQUESTED, LOADED, CLONED);
 +    var tablet2 =
 +        TabletMetadata.builder(ke2).putOperation(opid).putDirName("td2").putFile(file4, dfv4)
 +            .putTime(MetadataTime.parse("L2")).putTabletAvailability(TabletAvailability.HOSTED)
 +            .putTabletMergeability(mergeability).build(LOCATION, LOGS, FILES, ECOMP, MERGED,
 +                COMPACTED, SELECTED, USER_COMPACTION_REQUESTED, LOADED, CLONED);
 +
 +    var tabletFiles = Map.of(file1, dfv1, file2, dfv2);
 +
 +    var unsplittableMeta =
 +        UnSplittableMetadata.toUnSplittable(ke3, 1000, 1001, 10, tabletFiles.keySet());
 +
 +    // Setup the metadata for the last tablet in the merge range; this is the tablet that merge
 +    // will modify.
 +    TabletMetadata lastTabletMeta = EasyMock.mock(TabletMetadata.class);
 +    EasyMock.expect(lastTabletMeta.getTableId()).andReturn(ke3.tableId()).anyTimes();
 +    EasyMock.expect(lastTabletMeta.getExtent()).andReturn(ke3).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getEndRow()).andReturn(ke3.endRow()).anyTimes();
 +    EasyMock.expect(lastTabletMeta.getOperationId()).andReturn(opid).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getLocation()).andReturn(null).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getLogs()).andReturn(List.of()).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.hasMerged()).andReturn(false).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getCloned()).andReturn(null).atLeastOnce();
 +    Map<ExternalCompactionId,CompactionMetadata> compactions = EasyMock.mock(Map.class);
 +    EasyMock.expect(compactions.keySet()).andReturn(Set.of(cid1, cid2, cid3)).anyTimes();
 +    EasyMock.expect(lastTabletMeta.getExternalCompactions()).andReturn(compactions).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getFilesMap()).andReturn(tabletFiles).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getFiles()).andReturn(tabletFiles.keySet()).anyTimes();
 +    EasyMock.expect(lastTabletMeta.getSelectedFiles()).andReturn(null).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getUserCompactionsRequested()).andReturn(Set.of()).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getCompacted()).andReturn(Set.of()).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getScans()).andReturn(List.of(file1, file2)).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getTime()).andReturn(tabletTime).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getFlushId()).andReturn(flushID).anyTimes();
 +    EasyMock.expect(lastTabletMeta.getTabletAvailability()).andReturn(availability).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getLoaded()).andReturn(Map.of()).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getHostingRequested()).andReturn(true).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getSuspend()).andReturn(suspendingTServer).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getLast()).andReturn(lastLocation).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getUnSplittable()).andReturn(unsplittableMeta).atLeastOnce();
 +    EasyMock.expect(lastTabletMeta.getTabletMergeability()).andReturn(mergeability).atLeastOnce();
 +
 +    EasyMock.replay(lastTabletMeta, compactions);
 +
 +    testMerge(List.of(tablet1, tablet2, lastTabletMeta), tableId, null, null, 
tabletMutator -> {
 +      
EasyMock.expect(tabletMutator.putTime(MetadataTime.parse("L30"))).andReturn(tabletMutator)
 +          .once();
 +      
EasyMock.expect(tabletMutator.putTabletAvailability(TabletAvailability.HOSTED))
 +          .andReturn(tabletMutator).once();
 +      
EasyMock.expect(tabletMutator.putPrevEndRow(ke1.prevEndRow())).andReturn(tabletMutator)
 +          .once();
 +      
EasyMock.expect(tabletMutator.setMerged()).andReturn(tabletMutator).once();
 +
 +      // set file update expectations
 +      
EasyMock.expect(tabletMutator.deleteFile(file1)).andReturn(tabletMutator).once();
 +      
EasyMock.expect(tabletMutator.deleteFile(file2)).andReturn(tabletMutator).once();
 +      var fencedFile1 = StoredTabletFile.of(file1.getPath(), 
ke3.toDataRange());
 +      var fencedFile2 = StoredTabletFile.of(file2.getPath(), 
ke3.toDataRange());
 +      var fencedFile3 = StoredTabletFile.of(file3.getPath(), 
ke1.toDataRange());
 +      var fencedFile4 = StoredTabletFile.of(file4.getPath(), 
ke2.toDataRange());
 +      EasyMock.expect(tabletMutator.putFile(fencedFile1, 
dfv1)).andReturn(tabletMutator).once();
 +      EasyMock.expect(tabletMutator.putFile(fencedFile2, 
dfv2)).andReturn(tabletMutator).once();
 +      EasyMock.expect(tabletMutator.putFile(fencedFile3, 
dfv3)).andReturn(tabletMutator).once();
 +      EasyMock.expect(tabletMutator.putFile(fencedFile4, 
dfv4)).andReturn(tabletMutator).once();
 +
 +      
EasyMock.expect(tabletMutator.deleteExternalCompaction(cid1)).andReturn(tabletMutator).once();
 +      
EasyMock.expect(tabletMutator.deleteExternalCompaction(cid2)).andReturn(tabletMutator).once();
 +      
EasyMock.expect(tabletMutator.deleteExternalCompaction(cid3)).andReturn(tabletMutator).once();
 +
 +      
EasyMock.expect(tabletMutator.deleteScan(file1)).andReturn(tabletMutator);
 +      
EasyMock.expect(tabletMutator.deleteScan(file2)).andReturn(tabletMutator);
 +
 +      
EasyMock.expect(tabletMutator.deleteHostingRequested()).andReturn(tabletMutator);
 +      
EasyMock.expect(tabletMutator.deleteSuspension()).andReturn(tabletMutator);
 +      
EasyMock.expect(tabletMutator.deleteLocation(lastLocation)).andReturn(tabletMutator);
 +      
EasyMock.expect(tabletMutator.deleteUnSplittable()).andReturn(tabletMutator);
 +      EasyMock
 +          .expect(tabletMutator.putTabletMergeability(
 +              TabletMergeabilityMetadata.always(SteadyTime.from(1, 
TimeUnit.SECONDS))))
 +          .andReturn(tabletMutator).once();
 +
 +    });
 +
 +    EasyMock.verify(lastTabletMeta, compactions);
 +  }
 +
 +  @Test
 +  public void testTwoLastTablets() {
 +    var tablet1 = 
TabletMetadata.builder(ke1).putOperation(opid).putDirName("td1")
 +        
.putTime(MetadataTime.parse("L40")).putTabletAvailability(TabletAvailability.HOSTED)
 +        .build(LOCATION, LOGS, FILES, ECOMP, MERGED, COMPACTED, SELECTED, 
USER_COMPACTION_REQUESTED,
 +            LOADED, CLONED);
 +    var tablet2 = 
TabletMetadata.builder(ke2).putOperation(opid).putDirName("td2")
 +        
.putTime(MetadataTime.parse("L30")).putTabletAvailability(TabletAvailability.HOSTED)
 +        .build(LOCATION, LOGS, FILES, ECOMP, MERGED, COMPACTED, SELECTED, 
USER_COMPACTION_REQUESTED,
 +            LOADED, CLONED);
 +    var tablet3 = 
TabletMetadata.builder(ke3).putOperation(opid).putDirName("td3")
 +        
.putTime(MetadataTime.parse("L50")).putTabletAvailability(TabletAvailability.HOSTED)
 +        .build(LOCATION, LOGS, FILES, ECOMP, MERGED, COMPACTED, SELECTED, 
USER_COMPACTION_REQUESTED,
 +            LOADED, CLONED);
 +
 +    // repeat tablet3 in the list of tablets to simulate two last tablets
 +    var exception = assertThrows(IllegalStateException.class,
 +        () -> testMerge(List.of(tablet1, tablet2, tablet3, tablet3), tableId, 
null, null,
 +            tabletMutator -> {}));
 +    assertTrue(exception.getMessage().contains(" unexpectedly saw multiple 
last tablets"));
 +
 +  }
 +
 +  @Test
 +  public void testMetadataHole() throws Exception {
 +    var tablet1 = 
TabletMetadata.builder(ke1).putOperation(opid).putDirName("td1")
 +        
.putTime(MetadataTime.parse("L40")).putTabletAvailability(TabletAvailability.HOSTED)
 +        .build(LOCATION, LOGS, FILES, ECOMP, MERGED, COMPACTED, SELECTED, 
USER_COMPACTION_REQUESTED,
 +            LOADED, CLONED);
 +    var tablet3 = 
TabletMetadata.builder(ke3).putOperation(opid).putDirName("td1")
 +        
.putTime(MetadataTime.parse("L50")).putTabletAvailability(TabletAvailability.HOSTED)
 +        .build(LOCATION, LOGS, FILES, ECOMP, MERGED, COMPACTED, SELECTED, 
USER_COMPACTION_REQUESTED,
 +            LOADED, CLONED);
 +
 +    var exception = assertThrows(IllegalStateException.class,
 +        () -> testMerge(List.of(tablet1, tablet3), tableId, null, null, 
tabletMutator -> {}));
 +    assertTrue(exception.getMessage().contains(" unexpectedly saw a hole in 
the metadata table"));
 +  }
 +
 +  @Test
 +  public void testMisplacedMerge() throws Exception {
 +    // the merge marker should occur in the last tablet, try putting it in 
the first tablet
 +    var tablet1 = 
TabletMetadata.builder(ke1).putOperation(opid).putDirName("td1")
 +        
.putTime(MetadataTime.parse("L40")).putTabletAvailability(TabletAvailability.HOSTED)
 +        .setMerged().build(LOCATION, LOGS, FILES, ECOMP, MERGED, COMPACTED, 
SELECTED,
 +            USER_COMPACTION_REQUESTED, LOADED, CLONED);
 +    var tablet2 = 
TabletMetadata.builder(ke2).putOperation(opid).putDirName("td1")
 +        
.putTime(MetadataTime.parse("L50")).putTabletAvailability(TabletAvailability.HOSTED)
 +        .build(LOCATION, LOGS, FILES, ECOMP, MERGED, COMPACTED, SELECTED, 
USER_COMPACTION_REQUESTED,
 +            LOADED, CLONED);
 +
 +    var exception = assertThrows(IllegalStateException.class,
 +        () -> testMerge(List.of(tablet1, tablet2), tableId, null, null, 
tabletMutator -> {}));
 +    assertTrue(exception.getMessage().contains(" has unexpected merge 
marker"));
 +  }
 +
 +  @Test
 +  public void testUnexpectedColumns() {
 +    var tserver = new TServerInstance("1.2.3.4:1234", 123456789L);
 +    var futureLoc = TabletMetadata.Location.future(tserver);
 +    testUnexpectedColumn(tmb -> tmb.putLocation(futureLoc), "had location", 
futureLoc.toString());
 +
 +    var currLoc = TabletMetadata.Location.current(tserver);
 +    testUnexpectedColumn(tmb -> tmb.putLocation(currLoc), "had location", 
currLoc.toString());
 +
 +    var otherFateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
 +    var otherOpid = TabletOperationId.from(TabletOperationType.MERGING, 
otherFateId);
 +    testUnexpectedColumn(tmb -> tmb.putOperation(otherOpid), "had unexpected 
opid",
 +        otherOpid.toString());
 +
 +    var walog = LogEntry.fromPath("localhost+8020/" + UUID.randomUUID());
 +    testUnexpectedColumn(tmb -> tmb.putWal(walog), "has unexpected walogs 1");
 +
 +    FateId ucfid1 = otherFateId;
 +    testUnexpectedColumn(tmb -> tmb.putCompacted(ucfid1), "has unexpected 
compacted columns");
 +
 +    testUnexpectedColumn(tmb -> tmb.putUserCompactionRequested(ucfid1),
 +        "has unexpected use compaction requested column");
 +
 +    var loadedFile = newSTF(99);
 +    testUnexpectedColumn(tmb -> tmb.putBulkFile(loadedFile.getTabletFile(), otherFateId),
 +        "has unexpected loaded column");
 +
 +    var selectedFiles = new SelectedFiles(Set.of(newSTF(567)), false, 
otherFateId,
 +        SteadyTime.from(100, TimeUnit.NANOSECONDS));
 +    testUnexpectedColumn(tmb -> tmb.putSelectedFiles(selectedFiles),
 +        "has unexpected selected file");
 +
 +    testUnexpectedColumn(TabletMetadataBuilder::putCloned, "has unexpected 
cloned column");
 +  }
 +
 +  private void testUnexpectedColumn(Consumer<TabletMetadataBuilder> 
badColumnSetter,
 +      String... expectedMessages) {
 +    assertTrue(expectedMessages.length >= 1);
 +
 +    // run the test setting the bad column on the first, middle and last 
tablet
 +
 +    for (int i = 0; i < 3; i++) {
 +      var builder1 = 
TabletMetadata.builder(ke1).putOperation(opid).putDirName("td1")
 +          
.putTime(MetadataTime.parse("L40")).putTabletAvailability(TabletAvailability.HOSTED);
 +      if (i == 0) {
 +        badColumnSetter.accept(builder1);
 +      }
 +      var tablet1 = builder1.build(LOCATION, LOGS, FILES, ECOMP, MERGED, 
COMPACTED, SELECTED,
 +          USER_COMPACTION_REQUESTED, LOADED, CLONED);
 +      var builder2 = 
TabletMetadata.builder(ke2).putOperation(opid).putDirName("td1")
 +          
.putTime(MetadataTime.parse("L70")).putTabletAvailability(TabletAvailability.HOSTED);
 +      if (i == 1) {
 +        badColumnSetter.accept(builder2);
 +      }
 +      var tablet2 = builder2.build(LOCATION, LOGS, FILES, ECOMP, MERGED, 
COMPACTED, SELECTED,
 +          USER_COMPACTION_REQUESTED, LOADED, CLONED);
 +      var builder3 = 
TabletMetadata.builder(ke3).putOperation(opid).putDirName("td1")
 +          
.putTime(MetadataTime.parse("L50")).putTabletAvailability(TabletAvailability.HOSTED);
 +      if (i == 2) {
 +        badColumnSetter.accept(builder3);
 +      }
 +      var tablet3 = builder3.build(LOCATION, LOGS, FILES, ECOMP, MERGED, 
COMPACTED, SELECTED,
 +          USER_COMPACTION_REQUESTED, LOADED, CLONED);
 +
 +      for (var expectedMessage : expectedMessages) {
 +        var exception = assertThrows(IllegalStateException.class,
 +            () -> testMerge(List.of(tablet1, tablet2, tablet3), tableId, 
null, null,
 +                tabletMutator -> {}));
 +        assertTrue(exception.getMessage().contains(expectedMessage),
 +            () -> exception.getMessage() + " did not contain " + 
expectedMessage);
 +      }
 +    }
 +  }
 +
 +  @Test
 +  public void testTime() throws Exception {
 +
 +    // this test ensures the max time is taken from all tablets
 +    String testTimes[][] = {{"L30", "L20", "L10"}, {"L20", "L30", "L10"}, 
{"L10", "L20", "L30"}};
 +
 +    for (String[] times : testTimes) {
 +
 +      var tablet1 = 
TabletMetadata.builder(ke1).putOperation(opid).putDirName("td1")
 +          
.putTime(MetadataTime.parse(times[0])).putTabletAvailability(TabletAvailability.HOSTED)
 +          .build(LOCATION, LOGS, FILES, ECOMP, MERGED, COMPACTED, SELECTED,
 +              USER_COMPACTION_REQUESTED, LOADED, CLONED, SCANS, 
HOSTING_REQUESTED, SUSPEND, LAST,
 +              UNSPLITTABLE, MERGEABILITY);
 +      var tablet2 = 
TabletMetadata.builder(ke2).putOperation(opid).putDirName("td2")
 +          
.putTime(MetadataTime.parse(times[1])).putTabletAvailability(TabletAvailability.HOSTED)
 +          .build(LOCATION, LOGS, FILES, ECOMP, MERGED, COMPACTED, SELECTED,
 +              USER_COMPACTION_REQUESTED, LOADED, CLONED, SCANS, 
HOSTING_REQUESTED, SUSPEND, LAST,
 +              UNSPLITTABLE, MERGEABILITY);
 +      var tablet3 = 
TabletMetadata.builder(ke3).putOperation(opid).putDirName("td3")
 +          
.putTime(MetadataTime.parse(times[2])).putTabletAvailability(TabletAvailability.HOSTED)
 +          .build(LOCATION, LOGS, FILES, ECOMP, MERGED, COMPACTED, SELECTED,
 +              USER_COMPACTION_REQUESTED, LOADED, CLONED, SCANS, 
HOSTING_REQUESTED, SUSPEND, LAST,
 +              UNSPLITTABLE, MERGEABILITY);
 +
 +      testMerge(List.of(tablet1, tablet2, tablet3), tableId, null, null, 
tabletMutator -> {
 +        
EasyMock.expect(tabletMutator.putTime(MetadataTime.parse("L30"))).andReturn(tabletMutator)
 +            .once();
 +        
EasyMock.expect(tabletMutator.putTabletAvailability(TabletAvailability.HOSTED))
 +            .andReturn(tabletMutator).once();
 +        
EasyMock.expect(tabletMutator.putPrevEndRow(ke1.prevEndRow())).andReturn(tabletMutator)
 +            .once();
 +        
EasyMock.expect(tabletMutator.setMerged()).andReturn(tabletMutator).once();
 +        // Current default if not set is NEVER
 +        
EasyMock.expect(tabletMutator.putTabletMergeability(TabletMergeabilityMetadata.never()))
 +            .andReturn(tabletMutator).once();
 +      });
 +    }
 +
 +  }
 +
 +  private static void testMerge(List<TabletMetadata> inputTablets, TableId 
tableId, String start,
 +      String end, Consumer<ConditionalTabletMutatorImpl> expectationsSetter) 
throws Exception {
 +    MergeInfo mergeInfo =
 +        new MergeInfo(tableId, NamespaceId.of("1"), start == null ? null : 
start.getBytes(UTF_8),
 +            end == null ? null : end.getBytes(UTF_8), 
MergeInfo.Operation.MERGE);
 +    MergeTablets mergeTablets = new MergeTablets(mergeInfo);
 +
 +    Manager manager = EasyMock.mock(Manager.class);
 +    ServerContext context = EasyMock.mock(ServerContext.class);
 +    Ample ample = EasyMock.mock(Ample.class);
 +    TabletsMetadata.Builder tabletBuilder = 
EasyMock.mock(TabletsMetadata.Builder.class);
 +    TabletsMetadata tabletsMetadata = EasyMock.mock(TabletsMetadata.class);
 +    ConditionalTabletsMutatorImpl tabletsMutator =
 +        EasyMock.mock(ConditionalTabletsMutatorImpl.class);
 +    ConditionalTabletMutatorImpl tabletMutator = 
EasyMock.mock(ConditionalTabletMutatorImpl.class);
 +
 +    ServiceLock managerLock = EasyMock.mock(ServiceLock.class);
 +    
EasyMock.expect(context.getServiceLock()).andReturn(managerLock).anyTimes();
 +
 +    // setup reading the tablets
 +    EasyMock.expect(manager.getContext()).andReturn(context).atLeastOnce();
 +    EasyMock.expect(context.getAmple()).andReturn(ample).atLeastOnce();
 +    EasyMock.expect(ample.readTablets()).andReturn(tabletBuilder).once();
 +    
EasyMock.expect(tabletBuilder.forTable(tableId)).andReturn(tabletBuilder).once();
 +    
EasyMock.expect(tabletBuilder.overlapping(mergeInfo.getMergeExtent().prevEndRow(),
 +        mergeInfo.getMergeExtent().endRow())).andReturn(tabletBuilder).once();
 +    EasyMock.expect(tabletBuilder.build()).andReturn(tabletsMetadata).once();
 +    
EasyMock.expect(tabletsMetadata.iterator()).andReturn(inputTablets.iterator()).once();
 +    tabletsMetadata.close();
 +    EasyMock.expectLastCall().once();
 +
 +    var lastExtent = inputTablets.get(inputTablets.size() - 1).getExtent();
 +
 +    // setup writing the tablets
 +    
EasyMock.expect(ample.conditionallyMutateTablets()).andReturn(tabletsMutator).once();
 +    
EasyMock.expect(tabletsMutator.mutateTablet(lastExtent)).andReturn(tabletMutator).once();
 +    
EasyMock.expect(tabletMutator.requireOperation(opid)).andReturn(tabletMutator).once();
 +    
EasyMock.expect(tabletMutator.requireAbsentLocation()).andReturn(tabletMutator).once();
 +    
EasyMock.expect(tabletMutator.requireAbsentLogs()).andReturn(tabletMutator).once();
 +
 +    expectationsSetter.accept(tabletMutator);
 +
 +    tabletMutator.submit(EasyMock.anyObject());
 +    EasyMock.expectLastCall().once();
 +
 +    // setup processing of conditional mutations
-     Ample.ConditionalResult cr = EasyMock.niceMock(Ample.ConditionalResult.class);
++    Ample.ConditionalResult cr = EasyMock.createMock(Ample.ConditionalResult.class);
 +    
EasyMock.expect(cr.getStatus()).andReturn(Ample.ConditionalResult.Status.ACCEPTED)
 +        .atLeastOnce();
 +    EasyMock.expect(tabletsMutator.process()).andReturn(Map.of(lastExtent, 
cr)).atLeastOnce();
 +    tabletsMutator.close();
 +    EasyMock.expectLastCall().anyTimes();
 +
 +    // Setup GC candidate write
 +    List<ReferenceFile> dirs = new ArrayList<>();
 +    dirs.add(new AllVolumesDirectory(tableId, "td1"));
 +    dirs.add(new AllVolumesDirectory(tableId, "td2"));
 +    ample.putGcFileAndDirCandidates(tableId, dirs);
 +    EasyMock.expectLastCall().once();
 +
 +    EasyMock.replay(manager, context, ample, tabletBuilder, tabletsMetadata, 
tabletsMutator,
 +        tabletMutator, cr, managerLock);
 +
 +    mergeTablets.call(fateId, manager);
 +
 +    EasyMock.verify(manager, context, ample, tabletBuilder, tabletsMetadata, 
tabletsMutator,
 +        tabletMutator, cr, managerLock);
 +  }
 +}
diff --cc server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
index 82f2e5949f,0000000000..41f9824b7c
mode 100644,000000..100644
--- a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
+++ b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
@@@ -1,469 -1,0 +1,470 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *   https://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.accumulo.manager.tableOps.split;
 +
 +import static org.easymock.EasyMock.mock;
 +import static org.junit.jupiter.api.Assertions.assertEquals;
 +import static org.junit.jupiter.api.Assertions.assertThrows;
 +import static org.junit.jupiter.api.Assertions.assertTrue;
 +
 +import java.util.EnumSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.OptionalLong;
 +import java.util.Set;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +import java.util.UUID;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.admin.TabletAvailability;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.fate.FateId;
 +import org.apache.accumulo.core.fate.FateInstanceType;
 +import org.apache.accumulo.core.lock.ServiceLock;
 +import org.apache.accumulo.core.metadata.ReferencedTabletFile;
 +import org.apache.accumulo.core.metadata.StoredTabletFile;
 +import org.apache.accumulo.core.metadata.SuspendingTServer;
 +import org.apache.accumulo.core.metadata.TServerInstance;
 +import org.apache.accumulo.core.metadata.schema.Ample;
 +import org.apache.accumulo.core.metadata.schema.CompactionMetadata;
 +import org.apache.accumulo.core.metadata.schema.DataFileValue;
 +import org.apache.accumulo.core.metadata.schema.ExternalCompactionId;
 +import org.apache.accumulo.core.metadata.schema.MetadataTime;
 +import org.apache.accumulo.core.metadata.schema.SelectedFiles;
 +import org.apache.accumulo.core.metadata.schema.TabletMergeabilityMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType;
 +import org.apache.accumulo.core.metadata.schema.TabletOperationId;
 +import org.apache.accumulo.core.metadata.schema.TabletOperationType;
 +import org.apache.accumulo.core.metadata.schema.UnSplittableMetadata;
 +import org.apache.accumulo.core.tabletserver.log.LogEntry;
 +import org.apache.accumulo.core.util.time.SteadyTime;
 +import org.apache.accumulo.manager.Manager;
 +import org.apache.accumulo.manager.split.Splitter;
 +import org.apache.accumulo.server.ServerContext;
 +import org.apache.accumulo.server.metadata.ConditionalTabletMutatorImpl;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.Text;
 +import org.easymock.EasyMock;
 +import org.junit.jupiter.api.Test;
 +
 +public class UpdateTabletsTest {
 +
 +  public static StoredTabletFile newSTF(int fileNum) {
 +    return new ReferencedTabletFile(new Path(
 +        "hdfs://localhost:8020/accumulo/tables/2a/default_tablet/F00000" + fileNum + ".rf"))
 +        .insert();
 +  }
 +
 +  Splitter.FileInfo newFileInfo(String start, String end) {
 +    return new Splitter.FileInfo(new Text(start), new Text(end));
 +  }
 +
 +  /**
 +   * This is a set of tablet metadata columns that the split code is known to handle. The purpose of
 +   * the set is to detect when a new tablet metadata column was added without considering the
 +   * implications for splitting tablets. For a column to be in this set it means an Accumulo
 +   * developer has determined that split code can handle that column OR has opened an issue about
 +   * handling it.
 +   */
 +  private static final Set<ColumnType> COLUMNS_HANDLED_BY_SPLIT = EnumSet.of(ColumnType.TIME,
 +      ColumnType.LOGS, ColumnType.FILES, ColumnType.PREV_ROW, ColumnType.OPID, ColumnType.LOCATION,
 +      ColumnType.ECOMP, ColumnType.SELECTED, ColumnType.LOADED,
 +      ColumnType.USER_COMPACTION_REQUESTED, ColumnType.MERGED, ColumnType.LAST, ColumnType.SCANS,
 +      ColumnType.DIR, ColumnType.CLONED, ColumnType.FLUSH_ID, ColumnType.FLUSH_NONCE,
 +      ColumnType.SUSPEND, ColumnType.AVAILABILITY, ColumnType.HOSTING_REQUESTED,
 +      ColumnType.COMPACTED, ColumnType.UNSPLITTABLE, ColumnType.MERGEABILITY);
 +
 +  /**
 +   * The purpose of this test is to catch new tablet metadata columns that were added w/o
 +   * considering splitting tablets.
 +   */
 +  @Test
 +  public void checkColumns() {
 +    for (ColumnType columnType : ColumnType.values()) {
 +      assertTrue(COLUMNS_HANDLED_BY_SPLIT.contains(columnType),
 +          "The split code does not know how to handle " + columnType);
 +    }
 +  }
 +
 +  // When a tablet splits its files are partitioned among the new children tablets. This test
 +  // exercises the partitioning code.
 +  @Test
 +  public void testFilePartitioning() {
 +
 +    var file1 = newSTF(1);
 +    var file2 = newSTF(2);
 +    var file3 = newSTF(3);
 +    var file4 = newSTF(4);
 +
 +    var tabletFiles =
 +        Map.of(file1, new DataFileValue(1000, 100, 20), file2, new 
DataFileValue(2000, 200, 50),
 +            file3, new DataFileValue(4000, 400), file4, new 
DataFileValue(4000, 400));
 +
 +    var ke1 = new KeyExtent(TableId.of("1"), new Text("m"), null);
 +    var ke2 = new KeyExtent(TableId.of("1"), new Text("r"), new Text("m"));
 +    var ke3 = new KeyExtent(TableId.of("1"), new Text("v"), new Text("r"));
 +    var ke4 = new KeyExtent(TableId.of("1"), null, new Text("v"));
 +
 +    var firstAndLastKeys = Map.of(file2, newFileInfo("m", "r"), file3, 
newFileInfo("g", "x"), file4,
 +        newFileInfo("s", "v"));
 +
 +    var ke1Expected = Map.of(file1, new DataFileValue(250, 25, 20), file2,
 +        new DataFileValue(1000, 100, 50), file3, new DataFileValue(1000, 
100));
 +    var ke2Expected = Map.of(file1, new DataFileValue(250, 25, 20), file2,
 +        new DataFileValue(1000, 100, 50), file3, new DataFileValue(1000, 
100));
 +    var ke3Expected = Map.of(file1, new DataFileValue(250, 25, 20), file3,
 +        new DataFileValue(1000, 100), file4, new DataFileValue(4000, 400));
 +    var ke4Expected =
 +        Map.of(file1, new DataFileValue(250, 25, 20), file3, new 
DataFileValue(1000, 100));
 +
 +    var expected = Map.of(ke1, ke1Expected, ke2, ke2Expected, ke3, 
ke3Expected, ke4, ke4Expected);
 +
 +    Set<KeyExtent> newExtents = Set.of(ke1, ke2, ke3, ke4);
 +
 +    TabletMetadata tabletMeta = EasyMock.createMock(TabletMetadata.class);
 +    
EasyMock.expect(tabletMeta.getFilesMap()).andReturn(tabletFiles).anyTimes();
 +    EasyMock.replay(tabletMeta);
 +
 +    Map<KeyExtent,Map<StoredTabletFile,DataFileValue>> results =
 +        UpdateTablets.getNewTabletFiles(newExtents, tabletMeta, 
firstAndLastKeys::get);
 +
 +    assertEquals(expected.keySet(), results.keySet());
 +    expected.forEach(((extent, files) -> {
 +      assertEquals(files, results.get(extent));
 +    }));
 +
 +    // Test a tablet with no files going to it
 +
 +    var tabletFiles2 = Map.of(file2, tabletFiles.get(file2), file4, 
tabletFiles.get(file4));
 +    ke1Expected = Map.of(file2, new DataFileValue(1000, 100, 50));
 +    ke2Expected = Map.of(file2, new DataFileValue(1000, 100, 50));
 +    ke3Expected = Map.of(file4, new DataFileValue(4000, 400));
 +    ke4Expected = Map.of();
 +    expected = Map.of(ke1, ke1Expected, ke2, ke2Expected, ke3, ke3Expected, 
ke4, ke4Expected);
 +
 +    tabletMeta = EasyMock.createMock(TabletMetadata.class);
 +    
EasyMock.expect(tabletMeta.getFilesMap()).andReturn(tabletFiles2).anyTimes();
 +    EasyMock.replay(tabletMeta);
 +
 +    Map<KeyExtent,Map<StoredTabletFile,DataFileValue>> results2 =
 +        UpdateTablets.getNewTabletFiles(newExtents, tabletMeta, 
firstAndLastKeys::get);
 +    assertEquals(expected.keySet(), results2.keySet());
 +    expected.forEach(((extent, files) -> {
 +      assertEquals(files, results2.get(extent));
 +    }));
 +
 +  }
 +
 +  /**
 +   * The purpose of this test is to create a tablet with as many columns in its metadata set as
 +   * possible and exercise the split code with that tablet.
 +   */
 +  @Test
 +  public void testManyColumns() throws Exception {
 +    TableId tableId = TableId.of("123");
 +    KeyExtent origExtent = new KeyExtent(tableId, new Text("m"), null);
 +
 +    var newExtent1 = new KeyExtent(tableId, new Text("c"), null);
 +    var newExtent2 = new KeyExtent(tableId, new Text("h"), new Text("c"));
 +    var newExtent3 = new KeyExtent(tableId, new Text("m"), new Text("h"));
 +
 +    var file1 = newSTF(1);
 +    var file2 = newSTF(2);
 +    var file3 = newSTF(3);
 +    var file4 = newSTF(4);
 +
 +    var loaded1 = newSTF(5);
 +    var loaded2 = newSTF(6);
 +
 +    var flid1 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
 +    var flid2 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
 +    var loaded = Map.of(loaded1, flid1, loaded2, flid2);
 +
 +    var dfv1 = new DataFileValue(1000, 100, 20);
 +    var dfv2 = new DataFileValue(500, 50, 20);
 +    var dfv3 = new DataFileValue(4000, 400);
 +    var dfv4 = new DataFileValue(2000, 200);
 +
 +    var tabletFiles = Map.of(file1, dfv1, file2, dfv2, file3, dfv3, file4, 
dfv4);
 +
 +    var cid1 = ExternalCompactionId.generate(UUID.randomUUID());
 +    var cid2 = ExternalCompactionId.generate(UUID.randomUUID());
 +    var cid3 = ExternalCompactionId.generate(UUID.randomUUID());
 +
 +    var fateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
 +    var opid = TabletOperationId.from(TabletOperationType.SPLITTING, fateId);
 +    var tabletTime = MetadataTime.parse("L30");
 +    var flushID = OptionalLong.of(40);
 +    var availability = TabletAvailability.HOSTED;
 +    var lastLocation = TabletMetadata.Location.last("1.2.3.4:1234", 
"123456789");
 +    var suspendingTServer = SuspendingTServer.fromValue(new 
Value("1.2.3.4:5|56"));
 +
 +    String dir1 = "dir1";
 +    String dir2 = "dir2";
 +
 +    Manager manager = EasyMock.mock(Manager.class);
 +    ServerContext context = EasyMock.mock(ServerContext.class);
 +    EasyMock.expect(manager.getContext()).andReturn(context).atLeastOnce();
 +    Ample ample = EasyMock.mock(Ample.class);
 +    EasyMock.expect(context.getAmple()).andReturn(ample).atLeastOnce();
 +    Splitter splitter = EasyMock.mock(Splitter.class);
 +    EasyMock.expect(splitter.getCachedFileInfo(tableId, 
file1)).andReturn(newFileInfo("a", "z"));
 +    EasyMock.expect(splitter.getCachedFileInfo(tableId, 
file2)).andReturn(newFileInfo("a", "b"));
 +    EasyMock.expect(splitter.getCachedFileInfo(tableId, 
file3)).andReturn(newFileInfo("d", "f"));
 +    EasyMock.expect(splitter.getCachedFileInfo(tableId, 
file4)).andReturn(newFileInfo("d", "j"));
 +    EasyMock.expect(manager.getSplitter()).andReturn(splitter).atLeastOnce();
 +    
EasyMock.expect(manager.getSteadyTime()).andReturn(SteadyTime.from(100_000, 
TimeUnit.SECONDS))
 +        .atLeastOnce();
 +
 +    ServiceLock managerLock = EasyMock.mock(ServiceLock.class);
 +    
EasyMock.expect(context.getServiceLock()).andReturn(managerLock).anyTimes();
 +
 +    // Setup the metadata for the tablet that is going to split, set as many 
columns as possible on
 +    // it.
 +    TabletMetadata tabletMeta = EasyMock.mock(TabletMetadata.class);
 +    
EasyMock.expect(tabletMeta.getExtent()).andReturn(origExtent).atLeastOnce();
 +    
EasyMock.expect(tabletMeta.getOperationId()).andReturn(opid).atLeastOnce();
 +    EasyMock.expect(tabletMeta.getLocation()).andReturn(null).atLeastOnce();
 +    EasyMock.expect(tabletMeta.getLogs()).andReturn(List.of()).atLeastOnce();
 +    EasyMock.expect(tabletMeta.hasMerged()).andReturn(false).atLeastOnce();
 +    EasyMock.expect(tabletMeta.getCloned()).andReturn(null).atLeastOnce();
 +    Map<ExternalCompactionId,CompactionMetadata> compactions = 
EasyMock.mock(Map.class);
 +    EasyMock.expect(compactions.keySet()).andReturn(Set.of(cid1, cid2, 
cid3)).anyTimes();
 +    
EasyMock.expect(tabletMeta.getExternalCompactions()).andReturn(compactions).atLeastOnce();
 +    
EasyMock.expect(tabletMeta.getFilesMap()).andReturn(tabletFiles).atLeastOnce();
 +    EasyMock.expect(tabletMeta.getFiles()).andReturn(tabletFiles.keySet());
 +    SelectedFiles selectedFiles = EasyMock.mock(SelectedFiles.class);
 +    EasyMock.expect(selectedFiles.getFateId()).andReturn(null);
 +    
EasyMock.expect(tabletMeta.getSelectedFiles()).andReturn(selectedFiles).atLeastOnce();
 +    FateId ucfid1 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
 +    FateId ucfid2 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
 +    
EasyMock.expect(tabletMeta.getUserCompactionsRequested()).andReturn(Set.of(ucfid1,
 ucfid2))
 +        .atLeastOnce();
 +    FateId ucfid3 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
 +    EasyMock.expect(tabletMeta.getCompacted()).andReturn(Set.of(ucfid1, 
ucfid3)).atLeastOnce();
 +    EasyMock.expect(tabletMeta.getScans()).andReturn(List.of(file1, 
file2)).atLeastOnce();
 +    EasyMock.expect(tabletMeta.getTime()).andReturn(tabletTime).atLeastOnce();
 +    EasyMock.expect(tabletMeta.getFlushId()).andReturn(flushID).atLeastOnce();
 +    
EasyMock.expect(tabletMeta.getTabletAvailability()).andReturn(availability).atLeastOnce();
 +    EasyMock.expect(tabletMeta.getLoaded()).andReturn(loaded).atLeastOnce();
 +    
EasyMock.expect(tabletMeta.getHostingRequested()).andReturn(true).atLeastOnce();
 +    
EasyMock.expect(tabletMeta.getSuspend()).andReturn(suspendingTServer).atLeastOnce();
 +    
EasyMock.expect(tabletMeta.getLast()).andReturn(lastLocation).atLeastOnce();
 +    UnSplittableMetadata usm =
 +        UnSplittableMetadata.toUnSplittable(origExtent, 1000, 1001, 1002, 
tabletFiles.keySet());
 +    
EasyMock.expect(tabletMeta.getUnSplittable()).andReturn(usm).atLeastOnce();
 +
 +    EasyMock.expect(ample.readTablet(origExtent)).andReturn(tabletMeta);
 +
 +    Ample.ConditionalTabletsMutator tabletsMutator =
 +        EasyMock.mock(Ample.ConditionalTabletsMutator.class);
 +    
EasyMock.expect(ample.conditionallyMutateTablets()).andReturn(tabletsMutator).atLeastOnce();
 +
 +    // Setup the mutator for creating the first new tablet
 +    ConditionalTabletMutatorImpl tablet1Mutator = 
EasyMock.mock(ConditionalTabletMutatorImpl.class);
 +    
EasyMock.expect(tablet1Mutator.requireAbsentTablet()).andReturn(tablet1Mutator);
 +    
EasyMock.expect(tablet1Mutator.putOperation(opid)).andReturn(tablet1Mutator);
 +    
EasyMock.expect(tablet1Mutator.putDirName(dir1)).andReturn(tablet1Mutator);
 +    
EasyMock.expect(tablet1Mutator.putTime(tabletTime)).andReturn(tablet1Mutator);
 +    
EasyMock.expect(tablet1Mutator.putFlushId(flushID.getAsLong())).andReturn(tablet1Mutator);
 +    EasyMock.expect(tablet1Mutator.putPrevEndRow(newExtent1.prevEndRow()))
 +        .andReturn(tablet1Mutator);
 +    
EasyMock.expect(tablet1Mutator.putCompacted(ucfid1)).andReturn(tablet1Mutator);
 +    
EasyMock.expect(tablet1Mutator.putCompacted(ucfid3)).andReturn(tablet1Mutator);
 +    
EasyMock.expect(tablet1Mutator.putTabletAvailability(availability)).andReturn(tablet1Mutator);
 +    EasyMock.expect(tablet1Mutator.putBulkFile(loaded1.getTabletFile(), 
flid1))
 +        .andReturn(tablet1Mutator);
 +    EasyMock.expect(tablet1Mutator.putBulkFile(loaded2.getTabletFile(), 
flid2))
 +        .andReturn(tablet1Mutator);
 +    EasyMock.expect(tablet1Mutator.putFile(file1, new DataFileValue(333, 33, 
20)))
 +        .andReturn(tablet1Mutator);
 +    EasyMock.expect(tablet1Mutator.putFile(file2, 
dfv2)).andReturn(tablet1Mutator);
 +    // SplitInfo marked as system generated so should be set to ALWAYS (0 
delay)
 +    EasyMock
 +        .expect(tablet1Mutator.putTabletMergeability(
 +            TabletMergeabilityMetadata.always(SteadyTime.from(100_000, 
TimeUnit.SECONDS))))
 +        .andReturn(tablet1Mutator);
 +    tablet1Mutator.submit(EasyMock.anyObject());
 +    EasyMock.expectLastCall().once();
 +    
EasyMock.expect(tabletsMutator.mutateTablet(newExtent1)).andReturn(tablet1Mutator);
 +
 +    // Setup the mutator for creating the second new tablet
 +    ConditionalTabletMutatorImpl tablet2Mutator = 
EasyMock.mock(ConditionalTabletMutatorImpl.class);
 +    
EasyMock.expect(tablet2Mutator.requireAbsentTablet()).andReturn(tablet2Mutator);
 +    
EasyMock.expect(tablet2Mutator.putOperation(opid)).andReturn(tablet2Mutator);
 +    
EasyMock.expect(tablet2Mutator.putDirName(dir2)).andReturn(tablet2Mutator);
 +    
EasyMock.expect(tablet2Mutator.putTime(tabletTime)).andReturn(tablet2Mutator);
 +    
EasyMock.expect(tablet2Mutator.putFlushId(flushID.getAsLong())).andReturn(tablet2Mutator);
 +    EasyMock.expect(tablet2Mutator.putPrevEndRow(newExtent2.prevEndRow()))
 +        .andReturn(tablet2Mutator);
 +    
EasyMock.expect(tablet2Mutator.putCompacted(ucfid1)).andReturn(tablet2Mutator);
 +    
EasyMock.expect(tablet2Mutator.putCompacted(ucfid3)).andReturn(tablet2Mutator);
 +    
EasyMock.expect(tablet2Mutator.putTabletAvailability(availability)).andReturn(tablet2Mutator);
 +    // SplitInfo marked as system generated so should be set to ALWAYS (0 
delay)
 +    EasyMock
 +        .expect(tablet2Mutator.putTabletMergeability(
 +            TabletMergeabilityMetadata.always(SteadyTime.from(100_000, 
TimeUnit.SECONDS))))
 +        .andReturn(tablet2Mutator);
 +    EasyMock.expect(tablet2Mutator.putBulkFile(loaded1.getTabletFile(), 
flid1))
 +        .andReturn(tablet2Mutator);
 +    EasyMock.expect(tablet2Mutator.putBulkFile(loaded2.getTabletFile(), 
flid2))
 +        .andReturn(tablet2Mutator);
 +    EasyMock.expect(tablet2Mutator.putFile(file1, new DataFileValue(333, 33, 
20)))
 +        .andReturn(tablet2Mutator);
 +    EasyMock.expect(tablet2Mutator.putFile(file3, 
dfv3)).andReturn(tablet2Mutator);
 +    EasyMock.expect(tablet2Mutator.putFile(file4, new DataFileValue(1000, 
100)))
 +        .andReturn(tablet2Mutator);
 +    tablet2Mutator.submit(EasyMock.anyObject());
 +    EasyMock.expectLastCall().once();
 +    
EasyMock.expect(tabletsMutator.mutateTablet(newExtent2)).andReturn(tablet2Mutator);
 +
 +    // Setup the mutator for updating the existing tablet
 +    ConditionalTabletMutatorImpl tablet3Mutator = 
mock(ConditionalTabletMutatorImpl.class);
 +    
EasyMock.expect(tablet3Mutator.requireOperation(opid)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.requireAbsentLocation()).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.requireAbsentLogs()).andReturn(tablet3Mutator);
 +    EasyMock.expect(tablet3Mutator.putPrevEndRow(newExtent3.prevEndRow()))
 +        .andReturn(tablet3Mutator);
 +    EasyMock.expect(tablet3Mutator.putFile(file1, new DataFileValue(333, 33, 
20)))
 +        .andReturn(tablet3Mutator);
 +    EasyMock.expect(tablet3Mutator.putFile(file4, new DataFileValue(1000, 
100)))
 +        .andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteFile(file2)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteFile(file3)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteExternalCompaction(cid1)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteExternalCompaction(cid2)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteExternalCompaction(cid3)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteSelectedFiles()).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteUserCompactionRequested(ucfid1)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteUserCompactionRequested(ucfid2)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteScan(file1)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteScan(file2)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteHostingRequested()).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteSuspension()).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteLocation(lastLocation)).andReturn(tablet3Mutator);
 +    
EasyMock.expect(tablet3Mutator.deleteUnSplittable()).andReturn(tablet3Mutator);
 +    tablet3Mutator.submit(EasyMock.anyObject());
 +    EasyMock.expectLastCall().once();
 +    
EasyMock.expect(tabletsMutator.mutateTablet(origExtent)).andReturn(tablet3Mutator);
 +
 +    // setup processing of conditional mutations
-     Ample.ConditionalResult cr = EasyMock.niceMock(Ample.ConditionalResult.class);
++    Ample.ConditionalResult cr = EasyMock.createMock(Ample.ConditionalResult.class);
++    EasyMock.expect(cr.getExtent()).andReturn(origExtent).atLeastOnce();
 +    
EasyMock.expect(cr.getStatus()).andReturn(Ample.ConditionalResult.Status.ACCEPTED)
 +        .atLeastOnce();
 +    EasyMock.expect(tabletsMutator.process())
 +        .andReturn(Map.of(newExtent1, cr, newExtent2, cr, origExtent, 
cr)).atLeastOnce();
 +    tabletsMutator.close();
 +    EasyMock.expectLastCall().anyTimes();
 +
 +    EasyMock.replay(manager, context, ample, tabletMeta, splitter, 
tabletsMutator, tablet1Mutator,
 +        tablet2Mutator, tablet3Mutator, cr, compactions);
 +    // Now we can actually test the split code that writes the new tablets with a bunch of columns
 +    // in the original tablet
 +    SortedSet<Text> splits = new TreeSet<>(List.of(newExtent1.endRow(), 
newExtent2.endRow()));
 +    UpdateTablets updateTablets =
 +        new UpdateTablets(new SplitInfo(origExtent, splits, true), 
List.of(dir1, dir2));
 +    updateTablets.call(fateId, manager);
 +
 +    EasyMock.verify(manager, context, ample, tabletMeta, splitter, 
tabletsMutator, tablet1Mutator,
 +        tablet2Mutator, tablet3Mutator, cr, compactions);
 +  }
 +
 +  @Test
 +  public void testErrors() throws Exception {
 +    TableId tableId = TableId.of("123");
 +    KeyExtent origExtent = new KeyExtent(tableId, new Text("m"), null);
 +
 +    var fateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
 +    var opid = TabletOperationId.from(TabletOperationType.SPLITTING, fateId);
 +
 +    // Test splitting a tablet with a location
 +    var location = TabletMetadata.Location.future(new 
TServerInstance("1.2.3.4:1234", 123456789L));
 +    var tablet1 =
 +        
TabletMetadata.builder(origExtent).putOperation(opid).putLocation(location).build();
 +    var e = assertThrows(IllegalStateException.class, () -> 
testError(origExtent, tablet1, fateId));
 +    assertTrue(e.getMessage().contains(location.toString()));
 +
 +    // Test splitting a tablet without an operation id set
 +    var tablet2 = TabletMetadata.builder(origExtent).build(ColumnType.OPID);
 +    e = assertThrows(IllegalStateException.class, () -> testError(origExtent, 
tablet2, fateId));
 +    assertTrue(e.getMessage().contains("does not have expected operation id 
"));
 +    assertTrue(e.getMessage().contains("null"));
 +
 +    // Test splitting a tablet with an unexpected operation id
 +    var fateId2 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
 +    var opid2 = TabletOperationId.from(TabletOperationType.SPLITTING, 
fateId2);
 +    var tablet3 = 
TabletMetadata.builder(origExtent).putOperation(opid2).build();
 +    e = assertThrows(IllegalStateException.class, () -> testError(origExtent, 
tablet3, fateId));
 +    assertTrue(e.getMessage().contains("does not have expected operation id 
"));
 +    assertTrue(e.getMessage().contains(opid2.toString()));
 +
 +    // Test splitting a tablet with walogs
 +    var walog = LogEntry.fromPath("localhost+8020/" + UUID.randomUUID());
 +    var tablet4 = 
TabletMetadata.builder(origExtent).putOperation(opid).putWal(walog)
 +        .build(ColumnType.LOCATION);
 +    e = assertThrows(IllegalStateException.class, () -> testError(origExtent, 
tablet4, fateId));
 +    assertTrue(e.getMessage().contains("unexpectedly had walogs"));
 +    assertTrue(e.getMessage().contains(walog.toString()));
 +
 +    // Test splitting tablet with merged marker
 +    var tablet5 = 
TabletMetadata.builder(origExtent).putOperation(opid).setMerged()
 +        .build(ColumnType.LOCATION, ColumnType.LOGS);
 +    e = assertThrows(IllegalStateException.class, () -> testError(origExtent, 
tablet5, fateId));
 +    assertTrue(e.getMessage().contains("unexpectedly has a merged"));
 +
 +    // Test splitting tablet with cloned marker
 +    TabletMetadata tablet6 = EasyMock.mock(TabletMetadata.class);
 +    EasyMock.expect(tablet6.getExtent()).andReturn(origExtent).anyTimes();
 +    EasyMock.expect(tablet6.getOperationId()).andReturn(opid).anyTimes();
 +    EasyMock.expect(tablet6.getLocation()).andReturn(null).anyTimes();
 +    EasyMock.expect(tablet6.getLogs()).andReturn(List.of()).anyTimes();
 +    EasyMock.expect(tablet6.hasMerged()).andReturn(false);
 +    EasyMock.expect(tablet6.getCloned()).andReturn("OK").atLeastOnce();
 +    EasyMock.replay(tablet6);
 +    e = assertThrows(IllegalStateException.class, () -> testError(origExtent, 
tablet6, fateId));
 +    assertTrue(e.getMessage().contains("unexpectedly has a cloned"));
 +    EasyMock.verify(tablet6);
 +  }
 +
 +  private static void testError(KeyExtent origExtent, TabletMetadata tm1, 
FateId fateId)
 +      throws Exception {
 +    Manager manager = EasyMock.mock(Manager.class);
 +    ServerContext context = EasyMock.mock(ServerContext.class);
 +    EasyMock.expect(manager.getContext()).andReturn(context).atLeastOnce();
 +    Ample ample = EasyMock.mock(Ample.class);
 +    EasyMock.expect(context.getAmple()).andReturn(ample).atLeastOnce();
 +
 +    EasyMock.expect(ample.readTablet(origExtent)).andReturn(tm1);
 +
 +    EasyMock.replay(manager, context, ample);
 +    // Now we can actually test the split code that writes the new tablets with a bunch of columns
 +    // in the original tablet
 +    SortedSet<Text> splits = new TreeSet<>(List.of(new Text("c")));
 +    UpdateTablets updateTablets =
 +        new UpdateTablets(new SplitInfo(origExtent, splits, true), 
List.of("d1"));
 +    updateTablets.call(fateId, manager);
 +
 +    EasyMock.verify(manager, context, ample);
 +  }
 +}
diff --cc server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
index 04742d810b,1228ac1fbd..953bbd4cfa
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
@@@ -394,31 -396,10 +394,31 @@@ public class TabletServerResourceManage
      memMgmt = new MemoryManagementFramework();
      memMgmt.startThreads();
  
 +    var rootConditionalPool = ThreadPools.getServerThreadPools().createExecutorService(acuConf,
 +        Property.TSERV_CONDITIONAL_UPDATE_THREADS_ROOT, enableMetrics);
 +    modifyThreadPoolSizesAtRuntime(
 +        () -> context.getConfiguration().getCount(Property.TSERV_CONDITIONAL_UPDATE_THREADS_ROOT),
 +        TSERVER_CONDITIONAL_UPDATE_ROOT_POOL.poolName, rootConditionalPool);
 +
 +    var metaConditionalPool = ThreadPools.getServerThreadPools().createExecutorService(acuConf,
 +        Property.TSERV_CONDITIONAL_UPDATE_THREADS_META, enableMetrics);
 +    modifyThreadPoolSizesAtRuntime(
 +        () -> context.getConfiguration().getCount(Property.TSERV_CONDITIONAL_UPDATE_THREADS_META),
 +        TSERVER_CONDITIONAL_UPDATE_META_POOL.poolName, metaConditionalPool);
 +
 +    var userConditionalPool = ThreadPools.getServerThreadPools().createExecutorService(acuConf,
 +        Property.TSERV_CONDITIONAL_UPDATE_THREADS_USER, enableMetrics);
 +    modifyThreadPoolSizesAtRuntime(
 +        () -> context.getConfiguration().getCount(Property.TSERV_CONDITIONAL_UPDATE_THREADS_USER),
 +        TSERVER_CONDITIONAL_UPDATE_USER_POOL.poolName, userConditionalPool);
 +
 +    conditionalMutationExecutors = Map.of(Ample.DataLevel.ROOT, rootConditionalPool,
 +        Ample.DataLevel.METADATA, metaConditionalPool, Ample.DataLevel.USER, userConditionalPool);
 +
      // We can use the same map for both metadata and normal assignments since the keyspace (extent)
      // is guaranteed to be unique. Schedule the task once, the task will reschedule itself.
-     ThreadPools.watchCriticalScheduledTask(context.getScheduledExecutor().schedule(
-         new AssignmentWatcher(acuConf, context, activeAssignments), 5000, TimeUnit.MILLISECONDS));
+     ThreadPools.watchCriticalScheduledTask(context.getScheduledExecutor()
+         .schedule(new AssignmentWatcher(context, activeAssignments), 5000, TimeUnit.MILLISECONDS));
    }
  
    public int getOpenFiles() {
