This is an automated email from the ASF dual-hosted git repository.

cshannon pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 05950cd855 Update no-chop merge tests to disable compaction (#3775)
05950cd855 is described below

commit 05950cd855ddbafd782e1a039b6c0b2977c08cf8
Author: Christopher L. Shannon <christopher.l.shan...@gmail.com>
AuthorDate: Tue Sep 26 06:36:19 2023 -0400

    Update no-chop merge tests to disable compaction (#3775)
    
    Updates ITs to ensure that compactions will not run, as unexpected
    compactions could cause non-deterministic failures when verifying files
    and counts in the metadata table. This addresses part of #3766
---
 .../accumulo/test/functional/FileMetadataIT.java       | 18 ++++++++++++++----
 .../org/apache/accumulo/test/functional/MergeIT.java   | 17 ++++++++++++-----
 2 files changed, 26 insertions(+), 9 deletions(-)
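
For context, the core pattern both ITs adopt below is to create each test
table with table.majc.ratio raised high enough that system-initiated major
compactions effectively never trigger. A minimal, self-contained sketch of
that pattern follows; the client properties path and table name are
placeholders, not part of the commit:

    import java.util.Map;

    import org.apache.accumulo.core.client.Accumulo;
    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.admin.NewTableConfiguration;
    import org.apache.accumulo.core.conf.Property;

    public class DisableCompactionsSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder client setup; the ITs obtain their client from the
        // test harness instead.
        try (AccumuloClient client =
            Accumulo.newClient().from("/path/to/accumulo-client.properties").build()) {
          NewTableConfiguration ntc = new NewTableConfiguration();
          // A very high compaction ratio keeps the tablet server from
          // selecting files for system compactions, so file counts in the
          // metadata table stay deterministic while the test inspects them.
          ntc.setProperties(Map.of(Property.TABLE_MAJC_RATIO.getKey(), "9999"));
          client.tableOperations().create("exampleTable", ntc);
        }
      }
    }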

diff --git a/test/src/main/java/org/apache/accumulo/test/functional/FileMetadataIT.java b/test/src/main/java/org/apache/accumulo/test/functional/FileMetadataIT.java
index c642dbd052..47fd8da48e 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/FileMetadataIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/FileMetadataIT.java
@@ -24,6 +24,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
 import java.time.Duration;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.SortedSet;
 import java.util.TreeSet;
@@ -32,6 +33,7 @@ import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
@@ -120,7 +122,7 @@ public class FileMetadataIT extends AccumuloClusterHarness {
 
       final int rows = 10000;
       final String tableName = getUniqueNames(1)[0];
-      accumuloClient.tableOperations().create(tableName);
+      createTableAndDisableCompactions(accumuloClient, tableName);
       final TableId tableId =
           TableId.of(accumuloClient.tableOperations().tableIdMap().get(tableName));
 
@@ -194,7 +196,7 @@ public class FileMetadataIT extends AccumuloClusterHarness {
       int rowsPerRange = rows / ranges;
 
       final String tableName = getUniqueNames(1)[0];
-      accumuloClient.tableOperations().create(tableName);
+      createTableAndDisableCompactions(accumuloClient, tableName);
       final TableId tableId =
           TableId.of(accumuloClient.tableOperations().tableIdMap().get(tableName));
 
@@ -282,7 +284,7 @@ public class FileMetadataIT extends AccumuloClusterHarness {
 
       final int rows = 100000;
       final String tableName = getUniqueNames(1)[0];
-      accumuloClient.tableOperations().create(tableName);
+      createTableAndDisableCompactions(accumuloClient, tableName);
       final TableId tableId =
           TableId.of(accumuloClient.tableOperations().tableIdMap().get(tableName));
 
@@ -362,7 +364,7 @@ public class FileMetadataIT extends AccumuloClusterHarness {
       int rowsPerRange = rows / ranges;
 
       final String tableName = getUniqueNames(1)[0];
-      accumuloClient.tableOperations().create(tableName);
+      createTableAndDisableCompactions(accumuloClient, tableName);
       final TableId tableId =
           TableId.of(accumuloClient.tableOperations().tableIdMap().get(tableName));
 
@@ -475,4 +477,12 @@ public class FileMetadataIT extends AccumuloClusterHarness {
       assertTrue(e.getMessage().contains("Did not read expected number of rows. Saw 0"));
     }
   }
+
+  private static void createTableAndDisableCompactions(AccumuloClient c, String tableName)
+      throws Exception {
+    // disable compactions
+    NewTableConfiguration ntc = new NewTableConfiguration();
+    ntc.setProperties(Map.of(Property.TABLE_MAJC_RATIO.getKey(), "9999"));
+    c.tableOperations().create(tableName, ntc);
+  }
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
index cb3d5ecff7..c8e8f93de5 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
@@ -86,7 +86,7 @@ public class MergeIT extends AccumuloClusterHarness {
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
       var ntc = new NewTableConfiguration().withSplits(splits("a b c d e f g h i j k".split(" ")));
-      c.tableOperations().create(tableName, ntc);
+      createTableAndDisableCompactions(c, tableName, ntc);
       try (BatchWriter bw = c.createBatchWriter(tableName)) {
         for (String row : "a b c d e f g h i j k".split(" ")) {
           Mutation m = new Mutation(row);
@@ -106,7 +106,7 @@ public class MergeIT extends AccumuloClusterHarness {
       String tableName = getUniqueNames(1)[0];
       NewTableConfiguration ntc = new NewTableConfiguration()
           .withSplits(splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
-      c.tableOperations().create(tableName, ntc);
+      createTableAndDisableCompactions(c, tableName, ntc);
       try (BatchWriter bw = c.createBatchWriter(tableName)) {
         for (String row : "c e f y".split(" ")) {
           Mutation m = new Mutation(row);
@@ -128,7 +128,7 @@ public class MergeIT extends AccumuloClusterHarness {
   public void noChopMergeTest() throws Exception {
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
-      c.tableOperations().create(tableName);
+      createTableAndDisableCompactions(c, tableName, new NewTableConfiguration());
       final TableId tableId = TableId.of(c.tableOperations().tableIdMap().get(tableName));
 
       // First write 1000 rows to a file in the default tablet
@@ -196,7 +196,7 @@ public class MergeIT extends AccumuloClusterHarness {
   public void noChopMergeDeleteAcrossTablets() throws Exception {
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
-      c.tableOperations().create(tableName);
+      createTableAndDisableCompactions(c, tableName, new NewTableConfiguration());
       // disable compactions
       c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "9999");
       final TableId tableId = TableId.of(c.tableOperations().tableIdMap().get(tableName));
@@ -392,6 +392,13 @@ public class MergeIT extends AccumuloClusterHarness {
     }
   }
 
+  private static void createTableAndDisableCompactions(AccumuloClient c, String tableName,
+      NewTableConfiguration ntc) throws Exception {
+    // disable compactions
+    ntc.setProperties(Map.of(Property.TABLE_MAJC_RATIO.getKey(), "9999"));
+    c.tableOperations().create(tableName, ntc);
+  }
+
   public static void ingest(AccumuloClient accumuloClient, int rows, int offset, String tableName)
       throws Exception {
     IngestParams params = new IngestParams(accumuloClient.properties(), tableName, rows);
@@ -497,7 +504,7 @@ public class MergeIT extends AccumuloClusterHarness {
     if (!splitSet.isEmpty()) {
       ntc = ntc.withSplits(splitSet);
     }
-    client.tableOperations().create(table, ntc);
+    createTableAndDisableCompactions(client, table, ntc);
 
     HashSet<String> expected = new HashSet<>();
     try (BatchWriter bw = client.createBatchWriter(table)) {
