This is an automated email from the ASF dual-hosted git repository.

cshannon pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git
The following commit(s) were added to refs/heads/main by this push:
     new 999f25576c Test external compaction on table with fenced files (#3932)
999f25576c is described below

commit 999f25576ca1cdcbb2b5cc1cf59eb674241d7159
Author: Christopher L. Shannon <christopher.l.shan...@gmail.com>
AuthorDate: Tue Nov 7 07:49:42 2023 -0500

    Test external compaction on table with fenced files (#3932)
    
    This adds a test to verify that external compactions that run on a tablet
    with fenced files do not contain extra data outside of the fenced ranges
    after compaction.
    
    This addresses part of #3766
---
 .../test/compaction/ExternalCompaction_1_IT.java | 60 ++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/test/src/main/java/org/apache/accumulo/test/compaction/ExternalCompaction_1_IT.java b/test/src/main/java/org/apache/accumulo/test/compaction/ExternalCompaction_1_IT.java
index 57ffd0a36b..35630437ab 100644
--- a/test/src/main/java/org/apache/accumulo/test/compaction/ExternalCompaction_1_IT.java
+++ b/test/src/main/java/org/apache/accumulo/test/compaction/ExternalCompaction_1_IT.java
@@ -34,6 +34,8 @@ import static org.apache.accumulo.test.compaction.ExternalCompactionTestUtils.ge
 import static org.apache.accumulo.test.compaction.ExternalCompactionTestUtils.row;
 import static org.apache.accumulo.test.compaction.ExternalCompactionTestUtils.verify;
 import static org.apache.accumulo.test.compaction.ExternalCompactionTestUtils.writeData;
+import static org.apache.accumulo.test.util.FileMetadataUtil.countFencedFiles;
+import static org.apache.accumulo.test.util.FileMetadataUtil.splitFilesIntoRanges;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -46,6 +48,7 @@ import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
@@ -69,6 +72,7 @@ import org.apache.accumulo.core.client.admin.compaction.CompressionConfigurer;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.TableId;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.DevNull;
@@ -450,6 +454,62 @@ public class ExternalCompaction_1_IT extends SharedMiniClusterBase {
     }
   }
 
+  @Test
+  public void testExternalCompactionWithFencedFiles() throws Exception {
+    String[] names = this.getUniqueNames(2);
+    try (AccumuloClient client =
+        Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
+
+      String table1 = names[0];
+      createTable(client, table1, "cs1");
+
+      String table2 = names[1];
+      createTable(client, table2, "cs2");
+
+      writeData(client, table1);
+      writeData(client, table2);
+
+      // Verify that all data can be seen
+      verify(client, table1, 1, MAX_DATA);
+      verify(client, table2, 1, MAX_DATA);
+
+      // Split file in table1 into two files, each fenced off by 100 rows, for a total of 200
+      splitFilesIntoRanges(getCluster().getServerContext(), table1,
+          Set.of(new Range(new Text(row(100)), new Text(row(199))),
+              new Range(new Text(row(300)), new Text(row(399)))));
+      assertEquals(2, countFencedFiles(getCluster().getServerContext(), table1));
+
+      // Fence file in table2 to 600 rows
+      splitFilesIntoRanges(getCluster().getServerContext(), table2,
+          Set.of(new Range(new Text(row(200)), new Text(row(799)))));
+      assertEquals(1, countFencedFiles(getCluster().getServerContext(), table2));
+
+      // Verify that a subset of the data is now seen after fencing
+      verify(client, table1, 1, 200);
+      verify(client, table2, 1, 600);
+
+      getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
+      getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE1);
+      getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE2);
+
+      // Compact and verify previously fenced data didn't come back
+      compact(client, table1, 2, QUEUE1, true);
+      verify(client, table1, 2, 200);
+
+      SortedSet<Text> splits = new TreeSet<>();
+      splits.add(new Text(row(MAX_DATA / 2)));
+      client.tableOperations().addSplits(table2, splits);
+
+      // Compact and verify previously fenced data didn't come back
+      compact(client, table2, 3, QUEUE2, true);
+      verify(client, table2, 3, 600);
+
+      // Should be no more fenced files after compaction
+      assertEquals(0, countFencedFiles(getCluster().getServerContext(), table1));
+      assertEquals(0, countFencedFiles(getCluster().getServerContext(), table2));
+    }
+  }
+
   public static class FSelector implements CompactionSelector {
 
     @Override
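
The Range(Text, Text) constructor used in the test is inclusive on both ends, so the two fences on table1 ([100, 199] and [300, 399]) leave exactly 200 rows visible and the single fence on table2 ([200, 799]) leaves 600, which is what the verify(...) calls assert. The following minimal, self-contained sketch (not part of the commit) walks through that row math with the core Range/Key API; rowOf and maxData are hypothetical stand-ins for the test's row() helper and MAX_DATA constant.

import java.util.Set;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.hadoop.io.Text;

public class FencedRangeMath {

  // Hypothetical stand-in for ExternalCompactionTestUtils.row(int); the real
  // helper's exact row format may differ, but any zero-padded scheme works here.
  static Text rowOf(int i) {
    return new Text(String.format("row_%04d", i));
  }

  public static void main(String[] args) {
    int maxData = 1000; // assumed value of MAX_DATA, for illustration only

    // The two fences applied to table1 in the test: rows [100,199] and [300,399],
    // both ends inclusive.
    Set<Range> fences = Set.of(
        new Range(rowOf(100), rowOf(199)),
        new Range(rowOf(300), rowOf(399)));

    // Count the rows that stay visible once the file is fenced to these ranges.
    long visible = 0;
    for (int i = 0; i < maxData; i++) {
      Key key = new Key(rowOf(i));
      if (fences.stream().anyMatch(r -> r.contains(key))) {
        visible++;
      }
    }

    // 100 rows per range, two ranges -> 200, matching verify(client, table1, 1, 200)
    System.out.println("visible rows: " + visible);
  }
}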