This is an automated email from the ASF dual-hosted git repository.

ddanielr pushed a commit to branch elasticity
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/elasticity by this push:
     new e6799c9cf7 Fix CompactionConfigChangeIT (#4060)
e6799c9cf7 is described below

commit e6799c9cf7e3196e7af0aea3e679b07925503f2c
Author: Daniel Roberts <ddani...@gmail.com>
AuthorDate: Wed Dec 13 09:40:21 2023 -0500

    Fix CompactionConfigChangeIT (#4060)
    
    Modified the IT to use external compactors
---
 .../test/compaction/CompactionConfigChangeIT.java  | 35 ++++++++++++++--------
 1 file changed, 22 insertions(+), 13 deletions(-)

diff --git a/test/src/main/java/org/apache/accumulo/test/compaction/CompactionConfigChangeIT.java b/test/src/main/java/org/apache/accumulo/test/compaction/CompactionConfigChangeIT.java
index 956d3bac67..22e8438695 100644
--- a/test/src/main/java/org/apache/accumulo/test/compaction/CompactionConfigChangeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/compaction/CompactionConfigChangeIT.java
@@ -33,14 +33,32 @@ import org.apache.accumulo.core.clientImpl.ClientContext;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.test.functional.SlowIterator;
 import org.apache.accumulo.test.util.Wait;
-import org.junit.jupiter.api.Disabled;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.junit.jupiter.api.Test;
 
-@Disabled // ELASTICITY_TODO
 public class CompactionConfigChangeIT extends AccumuloClusterHarness {
 
+  @Override
+  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+
+    cfg.getClusterServerConfiguration().addCompactorResourceGroup("e4", 1);
+    cfg.getClusterServerConfiguration().addCompactorResourceGroup("e5", 1);
+
+    cfg.setProperty(Property.COMPACTION_SERVICE_PREFIX.getKey() + "cs1.planner",
+        DefaultCompactionPlanner.class.getName());
+    cfg.setProperty(Property.COMPACTION_SERVICE_PREFIX.getKey() + "cs1.planner.opts.executors",
+        ("[{'name':'small','type':'external','maxSize':'2M', 'group': 'e1'},"
+            + "{'name':'medium','type':'external','maxSize':'128M', 'group':'e2'},"
+            + "{'name':'large','type':'external','group':'e3'}]").replaceAll("'", "\""));
+
+    // use raw local file system so walogs sync and flush will work
+    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+  }
+
   public static long countFiles(AccumuloClient client, String table, String fileNamePrefix)
       throws Exception {
     var ctx = ((ClientContext) client);
@@ -58,15 +76,6 @@ public class CompactionConfigChangeIT extends AccumuloClusterHarness {
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       final String table = getUniqueNames(1)[0];
 
-      client.instanceOperations().setProperty(
-          Property.COMPACTION_SERVICE_PREFIX.getKey() + "cs1.planner",
-          DefaultCompactionPlanner.class.getName());
-      client.instanceOperations().setProperty(
-          Property.COMPACTION_SERVICE_PREFIX.getKey() + "cs1.planner.opts.executors",
-          ("[{'name':'small','type':'internal','maxSize':'2M','numThreads':2},"
-              + "{'name':'medium','type':'internal','maxSize':'128M','numThreads':2},"
-              + "{'name':'large','type':'internal','numThreads':2}]").replaceAll("'", "\""));
-
       createTable(client, table, "cs1", 100);
 
       ExternalCompactionTestUtils.writeData(client, table, MAX_DATA);
@@ -94,8 +103,8 @@ public class CompactionConfigChangeIT extends AccumuloClusterHarness {
       // with running compactions.
       client.instanceOperations().setProperty(
           Property.COMPACTION_SERVICE_PREFIX.getKey() + "cs1.planner.opts.executors",
-          ("[{'name':'little','type':'internal','maxSize':'128M','numThreads':8},"
-              + "{'name':'big','type':'internal','numThreads':2}]").replaceAll("'", "\""));
+          ("[{'name':'little','type':'external','maxSize':'128M','group':'e4'},"
+              + "{'name':'big','type':'external','group':'e5'}]").replaceAll("'", "\""));
 
       Wait.waitFor(() -> countFiles(client, table, "F") == 0, 60000);
 

Reply via email to