This is an automated email from the ASF dual-hosted git repository.

cshannon pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 410c54d182 Add tests for ranged files to CloneIT (#3925)
410c54d182 is described below

commit 410c54d1828aaa53ac222a8387b6eaa4bfc241a1
Author: Christopher L. Shannon <christopher.l.shan...@gmail.com>
AuthorDate: Fri Nov 17 12:28:14 2023 -0500

    Add tests for ranged files to CloneIT (#3925)
    
    This updates tests to verify that cloned tables work correctly with
    files that are fenced with non-infinite ranges.
    
    This addresses part of #3766
---
 test/pom.xml                                       |   4 +
 .../java/org/apache/accumulo/test/CloneIT.java     | 149 +++++++++++++--------
 2 files changed, 94 insertions(+), 59 deletions(-)

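Background for readers of the diff below: a data file is "fenced" when its tablet metadata entry carries a non-infinite Range, so only the keys that fall inside that range are read from the file. A minimal sketch of the two cases the new tests exercise (the class name and row values here are illustrative, not part of the commit):

    import org.apache.accumulo.core.data.Range;

    public class RangeSketch {
      public static void main(String[] args) {
        // An infinite range places no fence on a file: every key is visible.
        Range infinite = new Range();
        // A bounded range fences a file to the keys of a single row.
        Range fenced = new Range("row_0");
        System.out.println(infinite.isInfiniteStartKey()); // true
        System.out.println(fenced.isInfiniteStartKey());   // false
      }
    }
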
diff --git a/test/pom.xml b/test/pom.xml
index 9ec2a1a2b7..7fbc514f9b 100644
--- a/test/pom.xml
+++ b/test/pom.xml
@@ -202,6 +202,10 @@
       <groupId>org.junit.jupiter</groupId>
       <artifactId>junit-jupiter-engine</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-params</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-api</artifactId>
diff --git a/test/src/main/java/org/apache/accumulo/test/CloneIT.java b/test/src/main/java/org/apache/accumulo/test/CloneIT.java
index a55734bb36..74dad9787e 100644
--- a/test/src/main/java/org/apache/accumulo/test/CloneIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CloneIT.java
@@ -24,6 +24,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.HashSet;
 import java.util.Map.Entry;
+import java.util.stream.Stream;
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
@@ -31,6 +32,7 @@ import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.TableId;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.dataImpl.KeyExtent;
@@ -46,6 +48,11 @@ import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtensionContext;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.ArgumentsProvider;
+import org.junit.jupiter.params.provider.ArgumentsSource;
 
 public class CloneIT extends AccumuloClusterHarness {
 
@@ -76,8 +83,9 @@ public class CloneIT extends AccumuloClusterHarness {
     }
   }
 
-  @Test
-  public void testFilesChange() throws Exception {
+  @ParameterizedTest
+  @ArgumentsSource(RangeArgumentsProvider.class)
+  public void testFilesChange(Range range1, Range range2) throws Exception {
     String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
@@ -89,7 +97,7 @@ public class CloneIT extends AccumuloClusterHarness {
       ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0"));
      ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value("/default_tablet"));
       mut.put(DataFileColumnFamily.NAME.toString(),
-          getMetadata(filePrefix + "/default_tablet/0_0.rf"),
+          getMetadata(filePrefix + "/default_tablet/0_0.rf", range1),
           new DataFileValue(1, 200).encodeAsString());
 
       try (BatchWriter bw1 = client.createBatchWriter(tableName);
@@ -102,9 +110,9 @@ public class CloneIT extends AccumuloClusterHarness {
 
         Mutation mut2 = new Mutation(ke.toMetaRow());
         mut2.putDelete(DataFileColumnFamily.NAME.toString(),
-            getMetadata(filePrefix + "/default_tablet/0_0.rf"));
+            getMetadata(filePrefix + "/default_tablet/0_0.rf", range1));
         mut2.put(DataFileColumnFamily.NAME.toString(),
-            getMetadata(filePrefix + "/default_tablet/1_0.rf"),
+            getMetadata(filePrefix + "/default_tablet/1_0.rf", range2),
             new DataFileValue(2, 300).encodeAsString());
 
         bw1.addMutation(mut2);
@@ -131,13 +139,14 @@ public class CloneIT extends AccumuloClusterHarness {
         }
       }
       assertEquals(1, files.size());
-      assertTrue(files.contains(getMetadata(filePrefix + "/default_tablet/1_0.rf")));
+      assertTrue(files.contains(getMetadata(filePrefix + "/default_tablet/1_0.rf", range2)));
     }
   }
 
   // test split where files of children are the same
-  @Test
-  public void testSplit1() throws Exception {
+  @ParameterizedTest
+  @ArgumentsSource(RangeArgumentsProvider.class)
+  public void testSplit1(Range range) throws Exception {
     String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
 
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
@@ -147,16 +156,16 @@ public class CloneIT extends AccumuloClusterHarness {
       try (BatchWriter bw1 = client.createBatchWriter(tableName);
           BatchWriter bw2 = client.createBatchWriter(tableName)) {
         bw1.addMutation(createTablet("0", null, null, "/default_tablet",
-            filePrefix + "/default_tablet/0_0.rf"));
+            filePrefix + "/default_tablet/0_0.rf", range));
 
         bw1.flush();
 
        MetadataTableUtil.initializeClone(tableName, TableId.of("0"), TableId.of("1"), client, bw2);
 
+        bw1.addMutation(createTablet("0", "m", null, "/default_tablet",
+            filePrefix + "/default_tablet/0_0.rf", range));
         bw1.addMutation(
-            createTablet("0", "m", null, "/default_tablet", filePrefix + 
"/default_tablet/0_0.rf"));
-        bw1.addMutation(
-            createTablet("0", null, "m", "/t-1", filePrefix + 
"/default_tablet/0_0.rf"));
+            createTablet("0", null, "m", "/t-1", filePrefix + 
"/default_tablet/0_0.rf", range));
 
         bw1.flush();
 
@@ -179,13 +188,14 @@ public class CloneIT extends AccumuloClusterHarness {
       }
       assertEquals(1, count);
       assertEquals(1, files.size());
-      assertTrue(files.contains(getMetadata(filePrefix + "/default_tablet/0_0.rf")));
+      assertTrue(files.contains(getMetadata(filePrefix + "/default_tablet/0_0.rf", range)));
     }
   }
 
   // test split where files of children differ... like majc and split occurred
-  @Test
-  public void testSplit2() throws Exception {
+  @ParameterizedTest
+  @ArgumentsSource(RangeArgumentsProvider.class)
+  public void testSplit2(Range range) throws Exception {
     String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
@@ -194,17 +204,18 @@ public class CloneIT extends AccumuloClusterHarness {
       try (BatchWriter bw1 = client.createBatchWriter(tableName);
           BatchWriter bw2 = client.createBatchWriter(tableName)) {
         bw1.addMutation(createTablet("0", null, null, "/default_tablet",
-            filePrefix + "/default_tablet/0_0.rf"));
+            filePrefix + "/default_tablet/0_0.rf", range));
 
         bw1.flush();
 
        MetadataTableUtil.initializeClone(tableName, TableId.of("0"), TableId.of("1"), client, bw2);
 
-        bw1.addMutation(
-            createTablet("0", "m", null, "/default_tablet", filePrefix + 
"/default_tablet/1_0.rf"));
-        Mutation mut3 = createTablet("0", null, "m", "/t-1", filePrefix + 
"/default_tablet/1_0.rf");
+        bw1.addMutation(createTablet("0", "m", null, "/default_tablet",
+            filePrefix + "/default_tablet/1_0.rf", range));
+        Mutation mut3 =
+            createTablet("0", null, "m", "/t-1", filePrefix + 
"/default_tablet/1_0.rf", range);
         mut3.putDelete(DataFileColumnFamily.NAME.toString(),
-            getMetadata(filePrefix + "/default_tablet/0_0.rf"));
+            getMetadata(filePrefix + "/default_tablet/0_0.rf", range));
         bw1.addMutation(mut3);
 
         bw1.flush();
@@ -232,39 +243,41 @@ public class CloneIT extends AccumuloClusterHarness {
       }
       assertEquals(1, files.size());
       assertEquals(2, count);
-      assertTrue(files.contains(getMetadata(filePrefix + "/default_tablet/1_0.rf")));
+      assertTrue(files.contains(getMetadata(filePrefix + "/default_tablet/1_0.rf", range)));
     }
   }
 
-  private static Mutation deleteTablet(String tid, String endRow, String prevRow, String file) {
+  private static Mutation deleteTablet(String tid, String endRow, String prevRow, String file,
+      Range range) {
    KeyExtent ke = new KeyExtent(TableId.of(tid), endRow == null ? null : new Text(endRow),
         prevRow == null ? null : new Text(prevRow));
     Mutation mut = new Mutation(ke.toMetaRow());
     TabletColumnFamily.PREV_ROW_COLUMN.putDelete(mut);
     ServerColumnFamily.TIME_COLUMN.putDelete(mut);
     ServerColumnFamily.DIRECTORY_COLUMN.putDelete(mut);
-    mut.putDelete(DataFileColumnFamily.NAME.toString(), getMetadata(file));
+    mut.putDelete(DataFileColumnFamily.NAME.toString(), getMetadata(file, range));
 
     return mut;
   }
 
   private static Mutation createTablet(String tid, String endRow, String prevRow, String dir,
-      String file) {
+      String file, Range range) {
    KeyExtent ke = new KeyExtent(TableId.of(tid), endRow == null ? null : new Text(endRow),
         prevRow == null ? null : new Text(prevRow));
     Mutation mut = TabletColumnFamily.createPrevRowMutation(ke);
 
     ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0"));
     ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir));
-    mut.put(DataFileColumnFamily.NAME.toString(), getMetadata(file),
+    mut.put(DataFileColumnFamily.NAME.toString(), getMetadata(file, range),
         new DataFileValue(10, 200).encodeAsString());
 
     return mut;
   }
 
   // test two tablets splitting into four
-  @Test
-  public void testSplit3() throws Exception {
+  @ParameterizedTest
+  @ArgumentsSource(RangeArgumentsProvider.class)
+  public void testSplit3(Range range1, Range range2, Range range3) throws Exception {
     String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
@@ -272,17 +285,17 @@ public class CloneIT extends AccumuloClusterHarness {
 
       try (BatchWriter bw1 = client.createBatchWriter(tableName);
           BatchWriter bw2 = client.createBatchWriter(tableName)) {
-        bw1.addMutation(createTablet("0", "m", null, "/d1", filePrefix + 
"/d1/file1.rf"));
-        bw1.addMutation(createTablet("0", null, "m", "/d2", filePrefix + 
"/d2/file2.rf"));
+        bw1.addMutation(createTablet("0", "m", null, "/d1", filePrefix + 
"/d1/file1.rf", range1));
+        bw1.addMutation(createTablet("0", null, "m", "/d2", filePrefix + 
"/d2/file2.rf", range2));
 
         bw1.flush();
 
        MetadataTableUtil.initializeClone(tableName, TableId.of("0"), TableId.of("1"), client, bw2);
 
-        bw1.addMutation(createTablet("0", "f", null, "/d1", filePrefix + 
"/d1/file3.rf"));
-        bw1.addMutation(createTablet("0", "m", "f", "/d3", filePrefix + 
"/d1/file1.rf"));
-        bw1.addMutation(createTablet("0", "s", "m", "/d2", filePrefix + 
"/d2/file2.rf"));
-        bw1.addMutation(createTablet("0", null, "s", "/d4", filePrefix + 
"/d2/file2.rf"));
+        bw1.addMutation(createTablet("0", "f", null, "/d1", filePrefix + 
"/d1/file3.rf", range3));
+        bw1.addMutation(createTablet("0", "m", "f", "/d3", filePrefix + 
"/d1/file1.rf", range1));
+        bw1.addMutation(createTablet("0", "s", "m", "/d2", filePrefix + 
"/d2/file2.rf", range2));
+        bw1.addMutation(createTablet("0", null, "s", "/d4", filePrefix + 
"/d2/file2.rf", range2));
 
         bw1.flush();
 
@@ -306,14 +319,15 @@ public class CloneIT extends AccumuloClusterHarness {
       }
       assertEquals(2, count);
       assertEquals(2, files.size());
-      assertTrue(files.contains(getMetadata(filePrefix + "/d1/file1.rf")));
-      assertTrue(files.contains(getMetadata(filePrefix + "/d2/file2.rf")));
+      assertTrue(files.contains(getMetadata(filePrefix + "/d1/file1.rf", range1)));
+      assertTrue(files.contains(getMetadata(filePrefix + "/d2/file2.rf", range2)));
     }
   }
 
   // test cloned marker
-  @Test
-  public void testClonedMarker() throws Exception {
+  @ParameterizedTest
+  @ArgumentsSource(RangeArgumentsProvider.class)
+  public void testClonedMarker(Range range1, Range range2, Range range3) throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
       client.tableOperations().create(tableName);
@@ -321,22 +335,22 @@ public class CloneIT extends AccumuloClusterHarness {
 
       try (BatchWriter bw1 = client.createBatchWriter(tableName);
           BatchWriter bw2 = client.createBatchWriter(tableName)) {
-        bw1.addMutation(createTablet("0", "m", null, "/d1", filePrefix + 
"/d1/file1.rf"));
-        bw1.addMutation(createTablet("0", null, "m", "/d2", filePrefix + 
"/d2/file2.rf"));
+        bw1.addMutation(createTablet("0", "m", null, "/d1", filePrefix + 
"/d1/file1.rf", range1));
+        bw1.addMutation(createTablet("0", null, "m", "/d2", filePrefix + 
"/d2/file2.rf", range2));
 
         bw1.flush();
 
        MetadataTableUtil.initializeClone(tableName, TableId.of("0"), TableId.of("1"), client, bw2);
 
-        bw1.addMutation(deleteTablet("0", "m", null, filePrefix + 
"/d1/file1.rf"));
-        bw1.addMutation(deleteTablet("0", null, "m", filePrefix + 
"/d2/file2.rf"));
+        bw1.addMutation(deleteTablet("0", "m", null, filePrefix + 
"/d1/file1.rf", range1));
+        bw1.addMutation(deleteTablet("0", null, "m", filePrefix + 
"/d2/file2.rf", range2));
 
         bw1.flush();
 
-        bw1.addMutation(createTablet("0", "f", null, "/d1", filePrefix + 
"/d1/file3.rf"));
-        bw1.addMutation(createTablet("0", "m", "f", "/d3", filePrefix + 
"/d1/file1.rf"));
-        bw1.addMutation(createTablet("0", "s", "m", "/d2", filePrefix + 
"/d2/file3.rf"));
-        bw1.addMutation(createTablet("0", null, "s", "/d4", filePrefix + 
"/d4/file3.rf"));
+        bw1.addMutation(createTablet("0", "f", null, "/d1", filePrefix + 
"/d1/file3.rf", range3));
+        bw1.addMutation(createTablet("0", "m", "f", "/d3", filePrefix + 
"/d1/file1.rf", range1));
+        bw1.addMutation(createTablet("0", "s", "m", "/d2", filePrefix + 
"/d2/file3.rf", range3));
+        bw1.addMutation(createTablet("0", null, "s", "/d4", filePrefix + 
"/d4/file3.rf", range3));
 
         bw1.flush();
 
@@ -345,11 +359,11 @@ public class CloneIT extends AccumuloClusterHarness {
 
         assertEquals(1, rc);
 
-        bw1.addMutation(deleteTablet("0", "m", "f", filePrefix + 
"/d1/file1.rf"));
+        bw1.addMutation(deleteTablet("0", "m", "f", filePrefix + 
"/d1/file1.rf", range1));
 
         bw1.flush();
 
-        bw1.addMutation(createTablet("0", "m", "f", "/d3", filePrefix + 
"/d1/file3.rf"));
+        bw1.addMutation(createTablet("0", "m", "f", "/d3", filePrefix + 
"/d1/file3.rf", range3));
 
         bw1.flush();
 
@@ -371,15 +385,19 @@ public class CloneIT extends AccumuloClusterHarness {
       }
       assertEquals(3, count);
       assertEquals(3, files.size());
-      assertTrue(files.contains(getMetadata("hdfs://nn:8000/accumulo/tables/0/d1/file1.rf")));
-      assertTrue(files.contains(getMetadata("hdfs://nn:8000/accumulo/tables/0/d2/file3.rf")));
-      assertTrue(files.contains(getMetadata("hdfs://nn:8000/accumulo/tables/0/d4/file3.rf")));
+      assertTrue(
+          files.contains(getMetadata("hdfs://nn:8000/accumulo/tables/0/d1/file1.rf", range1)));
+      assertTrue(
+          files.contains(getMetadata("hdfs://nn:8000/accumulo/tables/0/d2/file3.rf", range3)));
+      assertTrue(
+          files.contains(getMetadata("hdfs://nn:8000/accumulo/tables/0/d4/file3.rf", range3)));
     }
   }
 
   // test two tablets splitting into four
-  @Test
-  public void testMerge() throws Exception {
+  @ParameterizedTest
+  @ArgumentsSource(RangeArgumentsProvider.class)
+  public void testMerge(Range range1, Range range2) throws Exception {
     String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
@@ -387,16 +405,17 @@ public class CloneIT extends AccumuloClusterHarness {
 
       try (BatchWriter bw1 = client.createBatchWriter(tableName);
           BatchWriter bw2 = client.createBatchWriter(tableName)) {
-        bw1.addMutation(createTablet("0", "m", null, "/d1", filePrefix + 
"/d1/file1.rf"));
-        bw1.addMutation(createTablet("0", null, "m", "/d2", filePrefix + 
"/d2/file2.rf"));
+        bw1.addMutation(createTablet("0", "m", null, "/d1", filePrefix + 
"/d1/file1.rf", range1));
+        bw1.addMutation(createTablet("0", null, "m", "/d2", filePrefix + 
"/d2/file2.rf", range2));
 
         bw1.flush();
 
        MetadataTableUtil.initializeClone(tableName, TableId.of("0"), TableId.of("1"), client, bw2);
 
-        bw1.addMutation(deleteTablet("0", "m", null, filePrefix + 
"/d1/file1.rf"));
-        Mutation mut = createTablet("0", null, null, "/d2", filePrefix + 
"/d2/file2.rf");
-        mut.put(DataFileColumnFamily.NAME.toString(), getMetadata(filePrefix + 
"/d1/file1.rf"),
+        bw1.addMutation(deleteTablet("0", "m", null, filePrefix + 
"/d1/file1.rf", range1));
+        Mutation mut = createTablet("0", null, null, "/d2", filePrefix + 
"/d2/file2.rf", range2);
+        mut.put(DataFileColumnFamily.NAME.toString(),
+            getMetadata(filePrefix + "/d1/file1.rf", range1),
             new DataFileValue(10, 200).encodeAsString());
         bw1.addMutation(mut);
 
@@ -408,7 +427,19 @@ public class CloneIT extends AccumuloClusterHarness {
     }
   }
 
-  private static String getMetadata(String file) {
-    return StoredTabletFile.of(new Path(file)).getMetadata();
+  private static String getMetadata(String file, Range range) {
+    return StoredTabletFile.of(new Path(file), range).getMetadata();
+  }
+
+  static class RangeArgumentsProvider implements ArgumentsProvider {
+
+    @Override
+    public Stream<? extends Arguments> provideArguments(ExtensionContext context) {
+      return Stream.of(
+          // Pass in up to 3 arguments of infinite ranges to test non-ranged files
+          Arguments.of(new Range(), new Range(), new Range()),
+          // For second run pass in up to 3 arguments with the first two non-infinite ranges
+          Arguments.of(new Range("row_0"), new Range("row_1"), new Range()));
+    }
   }
 }

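For anyone skimming the diff above, here is a minimal, self-contained sketch of the testing pattern it introduces (the class and method names below are hypothetical; CloneIT applies the same pattern against real tablet metadata). One ArgumentsProvider supplies three Range arguments per run, each test method declares only the Range parameters it needs, and getMetadata(file, range) encodes the fence into the file's metadata entry via StoredTabletFile.of(new Path(file), range).getMetadata().

    import java.util.stream.Stream;

    import org.apache.accumulo.core.data.Range;
    import org.junit.jupiter.api.extension.ExtensionContext;
    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.Arguments;
    import org.junit.jupiter.params.provider.ArgumentsProvider;
    import org.junit.jupiter.params.provider.ArgumentsSource;

    class RangedFileExampleTest {

      static class RangeArgumentsProvider implements ArgumentsProvider {
        @Override
        public Stream<? extends Arguments> provideArguments(ExtensionContext context) {
          return Stream.of(
              // Run 1: all infinite ranges, i.e. the pre-existing non-fenced case
              Arguments.of(new Range(), new Range(), new Range()),
              // Run 2: the first two files are fenced to a single row each
              Arguments.of(new Range("row_0"), new Range("row_1"), new Range()));
        }
      }

      // Declares two parameters; JUnit Jupiter ignores the provider's third
      // Range, which is why one provider can drive tests taking one, two,
      // or three ranges ("up to 3 arguments" in the provider comments).
      @ParameterizedTest
      @ArgumentsSource(RangeArgumentsProvider.class)
      void runsOncePerArgumentSet(Range range1, Range range2) {
        // Each invocation runs the same assertions under different fencing.
      }
    }

Sharing one provider keeps every test covering both the legacy (infinite) and fenced cases without duplicating test bodies; the test/pom.xml change adds the junit-jupiter-params artifact that supplies @ParameterizedTest.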