This is an automated email from the ASF dual-hosted git repository.

ddanielr pushed a commit to branch elasticity
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit 9aabd60ef55d8054389f5c6c6a4833f86e796af4
Merge: fe8db50bc2 03934832f5
Author: Daniel Roberts <ddani...@gmail.com>
AuthorDate: Thu Jan 4 05:02:31 2024 +0000

    Merge branch 'main' into elasticity

 .../apache/accumulo/test/functional/MergeIT.java   | 59 ++++++++++++++++++++++
 1 file changed, 59 insertions(+)

diff --cc test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
index cf37fb3369,f63a2a99cd..856b778f19
--- a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
@@@ -23,7 -23,7 +23,8 @@@ import static org.apache.accumulo.test.
  import static org.apache.accumulo.test.util.FileMetadataUtil.verifyMergedMarkerCleared;
  import static org.junit.jupiter.api.Assertions.assertArrayEquals;
  import static org.junit.jupiter.api.Assertions.assertEquals;
+ import static org.junit.jupiter.api.Assertions.assertFalse;
 +import static org.junit.jupiter.api.Assertions.assertThrows;
  import static org.junit.jupiter.api.Assertions.assertTrue;
  import static org.junit.jupiter.api.Assertions.fail;
  
@@@ -34,10 -34,10 +35,12 @@@ import java.util.HashSet
  import java.util.List;
  import java.util.Map;
  import java.util.Map.Entry;
+ import java.util.Set;
  import java.util.SortedSet;
  import java.util.TreeSet;
+ import java.util.UUID;
 +import java.util.stream.Collectors;
 +import java.util.stream.IntStream;
  
  import org.apache.accumulo.core.client.Accumulo;
  import org.apache.accumulo.core.client.AccumuloClient;
@@@ -55,14 -53,18 +58,21 @@@ import org.apache.accumulo.core.data.Mu
  import org.apache.accumulo.core.data.Range;
  import org.apache.accumulo.core.data.TableId;
  import org.apache.accumulo.core.data.Value;
+ import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.metadata.MetadataTable;
+ import org.apache.accumulo.core.metadata.ReferencedTabletFile;
  import org.apache.accumulo.core.metadata.StoredTabletFile;
  import org.apache.accumulo.core.metadata.schema.DataFileValue;
+ import org.apache.accumulo.core.metadata.schema.ExternalCompactionId;
+ import org.apache.accumulo.core.metadata.schema.ExternalCompactionMetadata;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.HostingColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
  import org.apache.accumulo.core.security.Authorizations;
+ import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
+ import org.apache.accumulo.core.spi.compaction.CompactionKind;
  import org.apache.accumulo.core.util.FastFormat;
  import org.apache.accumulo.core.util.Merge;
+ import org.apache.accumulo.core.util.compaction.CompactionExecutorIdImpl;
  import org.apache.accumulo.harness.AccumuloClusterHarness;
  import org.apache.accumulo.test.TestIngest;
  import org.apache.accumulo.test.TestIngest.IngestParams;
@@@ -662,4 -579,53 +672,53 @@@ public class MergeIT extends AccumuloCl
        }
      }
    }
+ 
+   // Test that merge handles metadata from compactions
+   @Test
+   public void testCompactionMetadata() throws Exception {
+     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
+       String tableName = getUniqueNames(1)[0];
+       c.tableOperations().create(tableName);
+ 
+       var split = new Text("m");
+       c.tableOperations().addSplits(tableName, new TreeSet<>(List.of(split)));
+ 
+       TableId tableId = getServerContext().getTableId(tableName);
+ 
+       // add metadata from compactions to tablets prior to merge
+       try (var tabletsMutator = getServerContext().getAmple().mutateTablets()) {
+         for (var extent : List.of(new KeyExtent(tableId, split, null),
+             new KeyExtent(tableId, null, split))) {
+           var tablet = tabletsMutator.mutateTablet(extent);
+           ExternalCompactionId ecid = ExternalCompactionId.generate(UUID.randomUUID());
+ 
+           ReferencedTabletFile tmpFile =
+               ReferencedTabletFile.of(new Path("file:///accumulo/tables/t-0/b-0/c1.rf"));
+           CompactionExecutorId ceid = CompactionExecutorIdImpl.externalId("G1");
+           Set<StoredTabletFile> jobFiles =
+               Set.of(StoredTabletFile.of(new Path("file:///accumulo/tables/t-0/b-0/b2.rf")));
 -          ExternalCompactionMetadata ecMeta = new ExternalCompactionMetadata(jobFiles, jobFiles,
 -              tmpFile, "localhost:4444", CompactionKind.SYSTEM, (short) 2, ceid, false, false, 44L);
++          ExternalCompactionMetadata ecMeta = new ExternalCompactionMetadata(jobFiles, tmpFile,
++              "localhost:4444", CompactionKind.SYSTEM, (short) 2, ceid, false, 44L);
+           tablet.putExternalCompaction(ecid, ecMeta);
+           tablet.mutate();
+         }
+       }
+ 
+       // ensure data is in metadata table as expected
+       try (var tablets = getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+         for (var tablet : tablets) {
+           assertFalse(tablet.getExternalCompactions().isEmpty());
+         }
+       }
+ 
+       c.tableOperations().merge(tableName, null, null);
+ 
+       // ensure merge operation removes compaction entries
+       try (var tablets = getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+         for (var tablet : tablets) {
+           assertTrue(tablet.getExternalCompactions().isEmpty());
+         }
+       }
+     }
+   }
  }

Reply via email to