This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch 2.1
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/2.1 by this push:
     new c99e7c02a1 Add more test cases to bulk skip test (#5436)
c99e7c02a1 is described below

commit c99e7c02a18ce9d88add9dce92a451834aea5694
Author: Keith Turner <ktur...@apache.org>
AuthorDate: Mon Mar 31 10:23:44 2025 -0400

    Add more test cases to bulk skip test (#5436)
    
    Adds tablet gaps of size 1,2,3,...,9 tablets to the bulk import load
    plan in the skip test. The goal of this is to attempt to cover different
    edge cases with the bulk import skip property. For example, when the skip
    property is set to 4, the load plan will have gaps of 3, 4, and 5 tablets
    that it has to deal with.
---
 .../apache/accumulo/core/util/PeekingIterator.java |  4 +++
 .../test/functional/BulkNewMetadataSkipIT.java     | 39 +++++++++++++++++++---
 2 files changed, 38 insertions(+), 5 deletions(-)

diff --git 
a/core/src/main/java/org/apache/accumulo/core/util/PeekingIterator.java 
b/core/src/main/java/org/apache/accumulo/core/util/PeekingIterator.java
index 37d7e55277..dd49e90a66 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/PeekingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/PeekingIterator.java
@@ -59,6 +59,10 @@ public class PeekingIterator<E> implements Iterator<E> {
     return this;
   }
 
+  /**
+   * @return If this iterator has a next this will return what calling next() 
would return otherwise
+   *         returns null.
+   */
   public E peek() {
     if (!isInitialized) {
       throw new IllegalStateException("Iterator has not yet been initialized");
diff --git 
a/test/src/main/java/org/apache/accumulo/test/functional/BulkNewMetadataSkipIT.java
 
b/test/src/main/java/org/apache/accumulo/test/functional/BulkNewMetadataSkipIT.java
index 8f286d8f7c..5a29a1ed33 100644
--- 
a/test/src/main/java/org/apache/accumulo/test/functional/BulkNewMetadataSkipIT.java
+++ 
b/test/src/main/java/org/apache/accumulo/test/functional/BulkNewMetadataSkipIT.java
@@ -109,7 +109,7 @@ public class BulkNewMetadataSkipIT extends 
AccumuloClusterHarness {
   }
 
   @ParameterizedTest
-  @ValueSource(ints = {0, 0, 2, 4, 8, 16, 32, 64, 128})
+  @ValueSource(ints = {0, 1, 2, 4, 8, 16, 32, 64, 128})
   public void test(int skipDistance) throws Exception {
 
     final String tableName = getUniqueNames(1)[0] + "_" + skipDistance;
@@ -153,6 +153,15 @@ public class BulkNewMetadataSkipIT extends 
AccumuloClusterHarness {
       hashes.get(row(i)).add(h4);
     }
 
+    // create data that skips 0,1,2,..,9 tablets
+    int[] h5Rows = new int[] {50, 50 + 1, 50 + 1 + 2, 50 + 1 + 2 + 3, 50 + 1 + 
2 + 3 + 4,
+        50 + 1 + 2 + 3 + 4 + 5, 50 + 1 + 2 + 3 + 4 + 5 + 6, 50 + 1 + 2 + 3 + 4 
+ 5 + 6 + 7,
+        50 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, 50 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 
+ 9};
+    String h5 = writeNonContiguousData(fs, dir + "/f5.", aconf, h5Rows);
+    for (int i : h5Rows) {
+      hashes.get(row(i)).add(h5);
+    }
+
     final LoadPlan loadPlan =
         LoadPlan.builder().loadFileTo("f1.rf", RangeType.FILE, row(0), row(11))
             .loadFileTo("f2.rf", RangeType.TABLE, row(10), row(11))
@@ -163,7 +172,23 @@ public class BulkNewMetadataSkipIT extends 
AccumuloClusterHarness {
             .loadFileTo("f3.rf", RangeType.FILE, row(272), row(273))
             .loadFileTo("f4.rf", RangeType.FILE, row(300), row(301))
             .loadFileTo("f4.rf", RangeType.TABLE, row(671), row(672))
-            .loadFileTo("f4.rf", RangeType.TABLE, row(997), row(998)).build();
+            .loadFileTo("f4.rf", RangeType.TABLE, row(997), row(998))
+            .loadFileTo("f5.rf", RangeType.TABLE, row(49), row(50))
+            .loadFileTo("f5.rf", RangeType.TABLE, row(49 + 1), row(50 + 1))
+            .loadFileTo("f5.rf", RangeType.TABLE, row(49 + 1 + 2), row(50 + 1 
+ 2))
+            .loadFileTo("f5.rf", RangeType.TABLE, row(49 + 1 + 2 + 3), row(50 
+ 1 + 2 + 3))
+            .loadFileTo("f5.rf", RangeType.TABLE, row(49 + 1 + 2 + 3 + 4), 
row(50 + 1 + 2 + 3 + 4))
+            .loadFileTo("f5.rf", RangeType.TABLE, row(49 + 1 + 2 + 3 + 4 + 5),
+                row(50 + 1 + 2 + 3 + 4 + 5))
+            .loadFileTo("f5.rf", RangeType.TABLE, row(49 + 1 + 2 + 3 + 4 + 5 + 
6),
+                row(50 + 1 + 2 + 3 + 4 + 5 + 6))
+            .loadFileTo("f5.rf", RangeType.TABLE, row(49 + 1 + 2 + 3 + 4 + 5 + 
6 + 7),
+                row(50 + 1 + 2 + 3 + 4 + 5 + 6 + 7))
+            .loadFileTo("f5.rf", RangeType.TABLE, row(49 + 1 + 2 + 3 + 4 + 5 + 
6 + 7 + 8),
+                row(50 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8))
+            .loadFileTo("f5.rf", RangeType.TABLE, row(49 + 1 + 2 + 3 + 4 + 5 + 
6 + 7 + 8 + 9),
+                row(50 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9))
+            .build();
 
     try (AccumuloClient c = 
Accumulo.newClient().from(getClientProps()).build()) {
 
@@ -183,8 +208,13 @@ public class BulkNewMetadataSkipIT extends 
AccumuloClusterHarness {
 
       
c.tableOperations().importDirectory(dir).to(tableName).plan(loadPlan).load();
 
-      verifyData(c, tableName, new int[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 
11, 13, 199, 200, 204,
-          272, 273, 300, 301, 672, 998}, false);
+      verifyData(c, tableName,
+          new int[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 50, 50 + 1, 50 
+ 1 + 2,
+              50 + 1 + 2 + 3, 50 + 1 + 2 + 3 + 4, 50 + 1 + 2 + 3 + 4 + 5,
+              50 + 1 + 2 + 3 + 4 + 5 + 6, 50 + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+              50 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, 50 + 1 + 2 + 3 + 4 + 5 + 6 + 
7 + 8 + 9, 199, 200,
+              204, 272, 273, 300, 301, 672, 998},
+          false);
       verifyMetadata(c, tableName, hashes);
     }
   }
@@ -217,5 +247,4 @@ public class BulkNewMetadataSkipIT extends 
AccumuloClusterHarness {
       }
     }
   }
-
 }

Reply via email to