This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch elasticity
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit 5c420f42ebfb91c33cc1da4080d61966f6cb0ad0
Merge: afe9a8a87e 6abb607d12
Author: Keith Turner <ktur...@apache.org>
AuthorDate: Thu Nov 30 16:55:06 2023 -0500

    Merge branch 'main' into elasticity

 .../apache/accumulo/test/functional/BulkNewIT.java | 135 +++++++++++++++++++++
 1 file changed, 135 insertions(+)

diff --cc test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java
index 3e63bed845,6a0448749c..bbdb091fd3
--- a/test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java
@@@ -22,6 -23,8 +23,7 @@@ import static org.apache.accumulo.core.
  import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.LOADED;
  import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW;
  import static org.junit.jupiter.api.Assertions.assertEquals;
 -import static org.junit.jupiter.api.Assertions.assertFalse;
+ import static org.junit.jupiter.api.Assertions.assertNull;
  import static org.junit.jupiter.api.Assertions.assertThrows;
  import static org.junit.jupiter.api.Assertions.assertTrue;
  
@@@ -48,24 -53,28 +51,32 @@@ import java.util.stream.Collectors
  import org.apache.accumulo.core.client.Accumulo;
  import org.apache.accumulo.core.client.AccumuloClient;
  import org.apache.accumulo.core.client.AccumuloException;
+ import org.apache.accumulo.core.client.AccumuloSecurityException;
+ import org.apache.accumulo.core.client.MutationsRejectedException;
  import org.apache.accumulo.core.client.Scanner;
+ import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.admin.CompactionConfig;
  import org.apache.accumulo.core.client.admin.NewTableConfiguration;
  import org.apache.accumulo.core.client.admin.TimeType;
 +import org.apache.accumulo.core.clientImpl.ClientContext;
  import org.apache.accumulo.core.conf.AccumuloConfiguration;
  import org.apache.accumulo.core.conf.Property;
  import org.apache.accumulo.core.data.Key;
  import org.apache.accumulo.core.data.LoadPlan;
  import org.apache.accumulo.core.data.LoadPlan.RangeType;
+ import org.apache.accumulo.core.data.Mutation;
  import org.apache.accumulo.core.data.TableId;
  import org.apache.accumulo.core.data.Value;
+ import org.apache.accumulo.core.data.constraints.Constraint;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
  import org.apache.accumulo.core.file.FileOperations;
  import org.apache.accumulo.core.file.FileSKVWriter;
  import org.apache.accumulo.core.file.rfile.RFile;
+ import org.apache.accumulo.core.metadata.MetadataTable;
+ import org.apache.accumulo.core.metadata.StoredTabletFile;
  import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
+ import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 +import org.apache.accumulo.core.metadata.schema.MetadataTime;
  import org.apache.accumulo.core.metadata.schema.TabletMetadata;
  import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
  import org.apache.accumulo.core.security.Authorizations;
@@@ -511,29 -521,43 +528,56 @@@ public class BulkNewIT extends SharedMi
      }
    }
  
 +  @Test
 +  public void testManyFiles() throws Exception {
 +    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
 +      String dir = getDir("/testBulkFile-");
 +      FileSystem fs = getCluster().getFileSystem();
 +      fs.mkdirs(new Path(dir));
 +
 +      addSplits(c, tableName, "5000");
 +
 +      for (int i = 0; i < 100; i++) {
 +        writeData(dir + "/f" + i + ".", aconf, i * 100, (i + 1) * 100 - 1);
 +      }
 +
 +      c.tableOperations().importDirectory(dir).to(tableName).load();
 +
 +      verifyData(c, tableName, 0, 100 * 100 - 1, false);
 +
 +      c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
 +
 +      verifyData(c, tableName, 0, 100 * 100 - 1, false);
 +    }
 +  }
 +
+   @Test
+   public void testExceptionInMetadataUpdate() throws Exception {
+     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
+ 
 -      // after setting this up, bulk imports should never succeed on a tablet server
++      // after setting this up, bulk imports should fail
+       setupBulkConstraint(getPrincipal(), c);
+ 
+       String dir = getDir("/testExceptionInMetadataUpdate-");
+ 
 -      String h1 = writeData(dir + "/f1.", aconf, 0, 333);
 -
 -      var executor = Executors.newSingleThreadExecutor();
 -      // With the constraint configured that makes tservers throw an exception on bulk import, the
 -      // bulk import should never succeed. So run the bulk import in another thread.
 -      var future = executor.submit(() -> {
 -        c.tableOperations().importDirectory(dir).to(tableName).load();
 -        return null;
 -      });
 -
 -      Thread.sleep(10000);
++      writeData(dir + "/f1.", aconf, 0, 333);
+ 
 -      // the bulk import should not be done
 -      assertFalse(future.isDone());
++      // operation should fail with the constraint on the table
++      assertThrows(AccumuloException.class,
++          () -> c.tableOperations().importDirectory(dir).to(tableName).load());
+ 
 -      // remove the constraint which should allow the bulk import running in the background thread
 -      // to complete
+       removeBulkConstraint(getPrincipal(), c);
+ 
 -      // wait for the future to complete and ensure it had no exceptions
 -      future.get();
++      // should succeed after removing the constraint
++      String h1 = writeData(dir + "/f1.", aconf, 0, 333);
++      c.tableOperations().importDirectory(dir).to(tableName).load();
+ 
+       // verify the data was bulk imported
+       verifyData(c, tableName, 0, 333, false);
+       verifyMetadata(c, tableName, Map.of("null", Set.of(h1)));
+     }
+   }
+ 
    private void addSplits(AccumuloClient client, String tableName, String splitString)
        throws Exception {
      SortedSet<Text> splits = new TreeSet<>();

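[Editor's note] The merged testExceptionInMetadataUpdate test relies on setupBulkConstraint and removeBulkConstraint helpers that fall outside this hunk. Below is a minimal sketch of the mechanism, assuming a hypothetical NoBulkConstraint class attached to the metadata table via TableOperations.addConstraint; the commit's actual helpers may differ.

import java.util.List;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.constraints.Constraint;
import org.apache.accumulo.core.metadata.MetadataTable;

// Hypothetical illustration only -- not the commit's actual helper class.
public class NoBulkConstraint implements Constraint {

  @Override
  public String getViolationDescription(short violationCode) {
    return "rejecting metadata updates to force bulk import failure";
  }

  @Override
  public List<Short> check(Environment env, Mutation mutation) {
    // Assumption: rejecting every mutation is enough to make the bulk
    // import's metadata update fail; the commit's real constraint may be
    // more selective (e.g. only rejecting bulk "loaded" column updates).
    return List.of((short) 1);
  }

  // Roughly what a setupBulkConstraint-style helper could do: attach the
  // constraint to the metadata table and return its number for later removal.
  public static int attach(AccumuloClient client) throws Exception {
    return client.tableOperations().addConstraint(MetadataTable.NAME,
        NoBulkConstraint.class.getName());
  }

  // ...and a removeBulkConstraint-style helper would undo it:
  public static void detach(AccumuloClient client, int num) throws Exception {
    client.tableOperations().removeConstraint(MetadataTable.NAME, num);
  }
}

With such a constraint attached, the synchronous importDirectory(...).load() call fails with an AccumuloException instead of hanging, which is why the merged test can use assertThrows rather than polling a background thread as the old elasticity-side code did.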