Repository: accumulo-testing
Updated Branches:
  refs/heads/master b81229d78 -> fc3ddfc4f


http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/security/Validate.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/security/Validate.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/security/Validate.java
index 8ad104a..32c815e 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/security/Validate.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/security/Validate.java
@@ -26,7 +26,7 @@ import org.apache.accumulo.core.client.security.SecurityErrorCode;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.slf4j.Logger;
@@ -34,12 +34,12 @@ import org.slf4j.Logger;
 public class Validate extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     validate(state, env, log);
   }
 
-  public static void validate(State state, Environment env, Logger log) throws Exception {
-    Connector conn = env.getConnector();
+  public static void validate(State state, RandWalkEnv env, Logger log) throws Exception {
+    Connector conn = env.getAccumuloConnector();
 
     boolean tableExists = WalkingSecurity.get(state, env).getTableExists();
     boolean cloudTableExists = conn.tableOperations().list().contains(WalkingSecurity.get(state, env).getTableName());
@@ -53,9 +53,9 @@ public class Validate extends Test {
 
     Properties props = new Properties();
     props.setProperty("target", "system");
-    Authenticate.authenticate(env.getUserName(), env.getToken(), state, env, props);
+    Authenticate.authenticate(env.getAccumuloUserName(), env.getToken(), state, env, props);
     props.setProperty("target", "table");
-    Authenticate.authenticate(env.getUserName(), env.getToken(), state, env, props);
+    Authenticate.authenticate(env.getAccumuloUserName(), env.getToken(), state, env, props);
 
     for (String user : new String[] {WalkingSecurity.get(state, env).getSysUserName(), WalkingSecurity.get(state, env).getTabUserName()}) {
       for (SystemPermission sp : SystemPermission.values()) {

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/security/WalkingSecurity.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/security/WalkingSecurity.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/security/WalkingSecurity.java
index 302d6ec..7cf0ec5 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/security/WalkingSecurity.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/security/WalkingSecurity.java
@@ -47,7 +47,7 @@ import org.apache.accumulo.server.security.SecurityOperation;
 import org.apache.accumulo.server.security.handler.Authenticator;
 import org.apache.accumulo.server.security.handler.Authorizor;
 import org.apache.accumulo.server.security.handler.PermissionHandler;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.hadoop.fs.FileSystem;
 import org.slf4j.Logger;
@@ -58,7 +58,7 @@ import org.slf4j.LoggerFactory;
  */
 public class WalkingSecurity extends SecurityOperation implements Authorizor, Authenticator, PermissionHandler {
   State state = null;
-  Environment env = null;
+  RandWalkEnv env = null;
   private static final Logger log = LoggerFactory.getLogger(WalkingSecurity.class);
 
   private static final String tableName = "SecurityTableName";
@@ -82,7 +82,7 @@ public class WalkingSecurity extends SecurityOperation implements Authorizor, Au
     super(context, author, authent, pm);
   }
 
-  public WalkingSecurity(State state2, Environment env2) {
+  public WalkingSecurity(State state2, RandWalkEnv env2) {
     super(new AccumuloServerContext(new ServerConfigurationFactory(HdfsZooInstance.getInstance())));
     this.state = state2;
     this.env = env2;
@@ -91,7 +91,7 @@ public class WalkingSecurity extends SecurityOperation implements Authorizor, Au
     permHandle = this;
   }
 
-  public static WalkingSecurity get(State state, Environment env) {
+  public static WalkingSecurity get(State state, RandWalkEnv env) {
     if (instance == null || instance.state != state) {
       instance = new WalkingSecurity(state, env);
       state.set(tableExists, Boolean.toString(false));
@@ -359,11 +359,11 @@ public class WalkingSecurity extends SecurityOperation implements Authorizor, Au
   }
 
   public TCredentials getSysCredentials() {
-    return new Credentials(getSysUserName(), getSysToken()).toThrift(this.env.getInstance());
+    return new Credentials(getSysUserName(), getSysToken()).toThrift(this.env.getAccumuloInstance());
   }
 
   public TCredentials getTabCredentials() {
-    return new Credentials(getTabUserName(), getTabToken()).toThrift(this.env.getInstance());
+    return new Credentials(getTabUserName(), getTabToken()).toThrift(this.env.getAccumuloInstance());
   }
 
   public AuthenticationToken getSysToken() {

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/BatchVerify.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/BatchVerify.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/BatchVerify.java
index dc59b05..fc1f202 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/BatchVerify.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/BatchVerify.java
@@ -30,7 +30,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.io.Text;
@@ -38,7 +38,7 @@ import org.apache.hadoop.io.Text;
 public class BatchVerify extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
 
     Random rand = new Random();
 
@@ -50,7 +50,7 @@ public class BatchVerify extends Test {
       numVerify = numWrites / 4;
     }
 
-    Connector conn = env.getConnector();
+    Connector conn = env.getAccumuloConnector();
     BatchScanner scanner = conn.createBatchScanner(state.getString("seqTableName"), new Authorizations(), 2);
 
     try {

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/Commit.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/Commit.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/Commit.java
index 6865557..7ecd063 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/Commit.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/Commit.java
@@ -18,14 +18,14 @@ package org.apache.accumulo.testing.core.randomwalk.sequential;
 
 import java.util.Properties;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class Commit extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
 
     env.getMultiTableBatchWriter().flush();
 

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/MapRedVerify.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/MapRedVerify.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/MapRedVerify.java
index e17c98d..95b7d32 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/MapRedVerify.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/MapRedVerify.java
@@ -26,7 +26,7 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.testing.core.TestProps;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.util.ToolRunner;
@@ -34,16 +34,16 @@ import org.apache.hadoop.util.ToolRunner;
 public class MapRedVerify extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
 
     String[] args = new String[6];
-    args[0] = env.getUserName();
-    args[1] = env.getPassword();
+    args[0] = env.getAccumuloUserName();
+    args[1] = env.getAccumuloPassword();
     if (null == args[1]) {
-      args[1] = env.getKeytab();
+      args[1] = env.getAccumuloKeytab();
     }
     args[2] = state.getString("seqTableName");
-    args[3] = env.getInstance().getInstanceName();
+    args[3] = env.getAccumuloInstance().getInstanceName();
     args[4] = env.getConfigProperty(TestProps.ZOOKEEPERS);
     args[5] = args[2] + "_MR";
 
@@ -52,7 +52,7 @@ public class MapRedVerify extends Test {
       return;
     }
 
-    Scanner outputScanner = env.getConnector().createScanner(args[7], Authorizations.EMPTY);
+    Scanner outputScanner = env.getAccumuloConnector().createScanner(args[7], Authorizations.EMPTY);
     outputScanner.setRange(new Range());
 
     int count = 0;
@@ -71,7 +71,7 @@ public class MapRedVerify extends Test {
     }
 
     log.debug("Dropping table: " + args[5]);
-    Connector conn = env.getConnector();
+    Connector conn = env.getAccumuloConnector();
     conn.tableOperations().delete(args[5]);
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/SequentialFixture.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/SequentialFixture.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/SequentialFixture.java
index de8af18..e6372d7 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/SequentialFixture.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/SequentialFixture.java
@@ -24,7 +24,7 @@ import org.apache.accumulo.core.client.MultiTableBatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.Fixture;
 import org.apache.accumulo.testing.core.randomwalk.State;
 
@@ -33,10 +33,10 @@ public class SequentialFixture extends Fixture {
   String seqTableName;
 
   @Override
-  public void setUp(State state, Environment env) throws Exception {
+  public void setUp(State state, RandWalkEnv env) throws Exception {
 
-    Connector conn = env.getConnector();
-    Instance instance = env.getInstance();
+    Connector conn = env.getAccumuloConnector();
+    Instance instance = env.getAccumuloInstance();
 
     String hostname = InetAddress.getLocalHost().getHostName().replaceAll("[-.]", "_");
 
@@ -57,7 +57,7 @@ public class SequentialFixture extends Fixture {
   }
 
   @Override
-  public void tearDown(State state, Environment env) throws Exception {
+  public void tearDown(State state, RandWalkEnv env) throws Exception {
     // We have resources we need to clean up
     if (env.isMultiTableBatchWriterInitialized()) {
       MultiTableBatchWriter mtbw = env.getMultiTableBatchWriter();
@@ -73,7 +73,7 @@ public class SequentialFixture extends Fixture {
 
     log.debug("Dropping tables: " + seqTableName);
 
-    Connector conn = env.getConnector();
+    Connector conn = env.getAccumuloConnector();
 
     conn.tableOperations().delete(seqTableName);
   }

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/Write.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/Write.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/Write.java
index 80e0e38..9394e28 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/Write.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/sequential/Write.java
@@ -23,7 +23,7 @@ import java.util.Properties;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.io.Text;
@@ -31,7 +31,7 @@ import org.apache.hadoop.io.Text;
 public class Write extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
 
     BatchWriter bw = env.getMultiTableBatchWriter().getBatchWriter(state.getString("seqTableName"));
 

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/BulkInsert.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/BulkInsert.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/BulkInsert.java
index 76b9ef6..f28bf8f 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/BulkInsert.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/BulkInsert.java
@@ -36,9 +36,8 @@ import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.conf.Configuration;
@@ -96,7 +95,7 @@ public class BulkInsert extends Test {
   }
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
 
     String indexTableName = (String) state.get("indexTableName");
     String dataTableName = (String) state.get("docTableName");
@@ -139,14 +138,14 @@ public class BulkInsert extends Test {
     fs.delete(new Path(rootDir), true);
   }
 
-  private void bulkImport(FileSystem fs, State state, Environment env, String tableName, String rootDir, String prefix) throws Exception {
+  private void bulkImport(FileSystem fs, State state, RandWalkEnv env, String tableName, String rootDir, String prefix) throws Exception {
     while (true) {
       String bulkDir = rootDir + "/" + prefix + "_bulk";
       String failDir = rootDir + "/" + prefix + "_failure";
       Path failPath = new Path(failDir);
       fs.delete(failPath, true);
       fs.mkdirs(failPath);
-      env.getConnector().tableOperations().importDirectory(tableName, bulkDir, failDir, true);
+      env.getAccumuloConnector().tableOperations().importDirectory(tableName, bulkDir, failDir, true);
 
       FileStatus[] failures = fs.listStatus(failPath);
       if (failures != null && failures.length > 0) {
@@ -164,12 +163,12 @@ public class BulkInsert extends Test {
     }
   }
 
-  private void sort(State state, Environment env, FileSystem fs, String tableName, String seqFile, String outputDir, String workDir, int maxSplits)
+  private void sort(State state, RandWalkEnv env, FileSystem fs, String tableName, String seqFile, String outputDir, String workDir, int maxSplits)
       throws Exception {
 
     PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(workDir + "/splits.txt"))), false, UTF_8.name());
 
-    Connector conn = env.getConnector();
+    Connector conn = env.getAccumuloConnector();
 
     Collection<Text> splits = conn.tableOperations().listSplits(tableName, maxSplits);
     for (Text split : splits)

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/CloneIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/CloneIndex.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/CloneIndex.java
index c47d2a8..fb01d34 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/CloneIndex.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/CloneIndex.java
@@ -20,22 +20,22 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Properties;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class CloneIndex extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
 
     String indexTableName = (String) state.get("indexTableName");
     String tmpIndexTableName = indexTableName + "_tmp";
 
     long t1 = System.currentTimeMillis();
-    env.getConnector().tableOperations().flush(indexTableName, null, null, true);
+    env.getAccumuloConnector().tableOperations().flush(indexTableName, null, null, true);
     long t2 = System.currentTimeMillis();
-    env.getConnector().tableOperations().clone(indexTableName, tmpIndexTableName, false, new HashMap<String,String>(), new HashSet<String>());
+    env.getAccumuloConnector().tableOperations().clone(indexTableName, tmpIndexTableName, false, new HashMap<String,String>(), new HashSet<String>());
     long t3 = System.currentTimeMillis();
 
     log.debug("Cloned " + tmpIndexTableName + " from " + indexTableName + " 
flush: " + (t2 - t1) + "ms clone: " + (t3 - t2) + "ms");

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Commit.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Commit.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Commit.java
index 06e8b44..32bb6cf 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Commit.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Commit.java
@@ -18,14 +18,14 @@ package org.apache.accumulo.testing.core.randomwalk.shard;
 
 import java.util.Properties;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class Commit extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     env.getMultiTableBatchWriter().flush();
     log.debug("Committed inserts ");
   }

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/CompactFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/CompactFilter.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/CompactFilter.java
index eacd36b..4fe6641 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/CompactFilter.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/CompactFilter.java
@@ -30,7 +30,7 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.io.Text;
@@ -41,7 +41,7 @@ import org.apache.hadoop.io.Text;
 public class CompactFilter extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     String indexTableName = (String) state.get("indexTableName");
     String docTableName = (String) state.get("docTableName");
     Random rand = (Random) state.get("rand");
@@ -57,7 +57,7 @@ public class CompactFilter extends Test {
     documentFilters.add(is);
 
     long t1 = System.currentTimeMillis();
-    env.getConnector().tableOperations().compact(docTableName, null, null, documentFilters, true, true);
+    env.getAccumuloConnector().tableOperations().compact(docTableName, null, null, documentFilters, true, true);
     long t2 = System.currentTimeMillis();
     long t3 = t2 - t1;
 
@@ -69,12 +69,12 @@ public class CompactFilter extends Test {
     indexFilters.add(is);
 
     t1 = System.currentTimeMillis();
-    env.getConnector().tableOperations().compact(indexTableName, null, null, indexFilters, true, true);
+    env.getAccumuloConnector().tableOperations().compact(indexTableName, null, null, indexFilters, true, true);
     t2 = System.currentTimeMillis();
 
     log.debug("Filtered documents using compaction iterators " + regex + " " + 
(t3) + " " + (t2 - t1));
 
-    BatchScanner bscanner = env.getConnector().createBatchScanner(docTableName, new Authorizations(), 10);
+    BatchScanner bscanner = env.getAccumuloConnector().createBatchScanner(docTableName, new Authorizations(), 10);
 
     List<Range> ranges = new ArrayList<>();
     for (int i = 0; i < 16; i++) {

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Delete.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Delete.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Delete.java
index e2c8bea..6cd24ca 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Delete.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Delete.java
@@ -23,14 +23,14 @@ import java.util.Random;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class Delete extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     String indexTableName = (String) state.get("indexTableName");
     String dataTableName = (String) state.get("docTableName");
     int numPartitions = (Integer) state.get("numPartitions");

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/DeleteSomeDocs.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/DeleteSomeDocs.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/DeleteSomeDocs.java
index 2b790bd..4f6361e 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/DeleteSomeDocs.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/DeleteSomeDocs.java
@@ -27,7 +27,7 @@ import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
@@ -35,7 +35,7 @@ import org.apache.accumulo.testing.core.randomwalk.Test;
 public class DeleteSomeDocs extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     // delete documents where the document id matches a given pattern from doc and index table
     // using the batch deleter
 
@@ -51,7 +51,7 @@ public class DeleteSomeDocs extends Test {
 
     String pattern = patterns.get(rand.nextInt(patterns.size()));
     BatchWriterConfig bwc = new BatchWriterConfig();
-    BatchDeleter ibd = env.getConnector().createBatchDeleter(indexTableName, Authorizations.EMPTY, 8, bwc);
+    BatchDeleter ibd = env.getAccumuloConnector().createBatchDeleter(indexTableName, Authorizations.EMPTY, 8, bwc);
     ibd.setRanges(Collections.singletonList(new Range()));
 
     IteratorSetting iterSettings = new IteratorSetting(100, RegExFilter.class);
@@ -63,7 +63,7 @@ public class DeleteSomeDocs extends Test {
 
     ibd.close();
 
-    BatchDeleter dbd = env.getConnector().createBatchDeleter(dataTableName, Authorizations.EMPTY, 8, bwc);
+    BatchDeleter dbd = env.getAccumuloConnector().createBatchDeleter(dataTableName, Authorizations.EMPTY, 8, bwc);
     dbd.setRanges(Collections.singletonList(new Range()));
 
     iterSettings = new IteratorSetting(100, RegExFilter.class);

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/DeleteWord.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/DeleteWord.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/DeleteWord.java
index 544c35e..b380dde 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/DeleteWord.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/DeleteWord.java
@@ -29,7 +29,7 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.io.Text;
@@ -42,7 +42,7 @@ import org.apache.hadoop.io.Text;
 public class DeleteWord extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     String indexTableName = (String) state.get("indexTableName");
     String docTableName = (String) state.get("docTableName");
     int numPartitions = (Integer) state.get("numPartitions");
@@ -51,7 +51,7 @@ public class DeleteWord extends Test {
     String wordToDelete = Insert.generateRandomWord(rand);
 
     // use index to find all documents containing word
-    Scanner scanner = env.getConnector().createScanner(indexTableName, Authorizations.EMPTY);
+    Scanner scanner = env.getAccumuloConnector().createScanner(indexTableName, Authorizations.EMPTY);
     scanner.fetchColumnFamily(new Text(wordToDelete));
 
     ArrayList<Range> documentsToDelete = new ArrayList<>();
@@ -61,7 +61,7 @@ public class DeleteWord extends Test {
 
     if (documentsToDelete.size() > 0) {
       // use a batch scanner to fetch all documents
-      BatchScanner bscanner = env.getConnector().createBatchScanner(docTableName, Authorizations.EMPTY, 8);
+      BatchScanner bscanner = env.getAccumuloConnector().createBatchScanner(docTableName, Authorizations.EMPTY, 8);
       bscanner.setRanges(documentsToDelete);
 
       BatchWriter ibw = env.getMultiTableBatchWriter().getBatchWriter(indexTableName);

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/ExportIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/ExportIndex.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/ExportIndex.java
index 0e4853d..5f521d3 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/ExportIndex.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/ExportIndex.java
@@ -26,8 +26,7 @@ import java.util.Map.Entry;
 import java.util.Properties;
 
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.fs.FileSystem;
@@ -41,7 +40,7 @@ import org.apache.hadoop.io.Text;
 public class ExportIndex extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
 
     String indexTableName = (String) state.get("indexTableName");
     String tmpIndexTableName = indexTableName + "_tmp";
@@ -55,16 +54,16 @@ public class ExportIndex extends Test {
     fs.delete(new Path("/tmp/shard_export/" + tmpIndexTableName), true);
 
     // disable splits, so that splits can be compared later w/o worrying about one table splitting and the other not
-    env.getConnector().tableOperations().setProperty(indexTableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "20G");
+    env.getAccumuloConnector().tableOperations().setProperty(indexTableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "20G");
 
     long t1 = System.currentTimeMillis();
 
-    env.getConnector().tableOperations().flush(indexTableName, null, null, true);
-    env.getConnector().tableOperations().offline(indexTableName);
+    env.getAccumuloConnector().tableOperations().flush(indexTableName, null, null, true);
+    env.getAccumuloConnector().tableOperations().offline(indexTableName);
 
     long t2 = System.currentTimeMillis();
 
-    env.getConnector().tableOperations().exportTable(indexTableName, exportDir);
+    env.getAccumuloConnector().tableOperations().exportTable(indexTableName, exportDir);
 
     long t3 = System.currentTimeMillis();
 
@@ -81,34 +80,34 @@ public class ExportIndex extends Test {
 
     long t4 = System.currentTimeMillis();
 
-    env.getConnector().tableOperations().online(indexTableName);
-    env.getConnector().tableOperations().importTable(tmpIndexTableName, copyDir);
+    env.getAccumuloConnector().tableOperations().online(indexTableName);
+    env.getAccumuloConnector().tableOperations().importTable(tmpIndexTableName, copyDir);
 
     long t5 = System.currentTimeMillis();
 
     fs.delete(new Path(exportDir), true);
     fs.delete(new Path(copyDir), true);
 
-    HashSet<Text> splits1 = new HashSet<>(env.getConnector().tableOperations().listSplits(indexTableName));
-    HashSet<Text> splits2 = new HashSet<>(env.getConnector().tableOperations().listSplits(tmpIndexTableName));
+    HashSet<Text> splits1 = new HashSet<>(env.getAccumuloConnector().tableOperations().listSplits(indexTableName));
+    HashSet<Text> splits2 = new HashSet<>(env.getAccumuloConnector().tableOperations().listSplits(tmpIndexTableName));
 
     if (!splits1.equals(splits2))
       throw new Exception("Splits not equals " + indexTableName + " " + 
tmpIndexTableName);
 
     HashMap<String,String> props1 = new HashMap<>();
-    for (Entry<String,String> entry : env.getConnector().tableOperations().getProperties(indexTableName))
+    for (Entry<String,String> entry : env.getAccumuloConnector().tableOperations().getProperties(indexTableName))
       props1.put(entry.getKey(), entry.getValue());
 
     HashMap<String,String> props2 = new HashMap<>();
-    for (Entry<String,String> entry : env.getConnector().tableOperations().getProperties(tmpIndexTableName))
+    for (Entry<String,String> entry : env.getAccumuloConnector().tableOperations().getProperties(tmpIndexTableName))
       props2.put(entry.getKey(), entry.getValue());
 
     if (!props1.equals(props2))
       throw new Exception("Props not equals " + indexTableName + " " + 
tmpIndexTableName);
 
     // unset the split threshold
-    env.getConnector().tableOperations().removeProperty(indexTableName, Property.TABLE_SPLIT_THRESHOLD.getKey());
-    env.getConnector().tableOperations().removeProperty(tmpIndexTableName, Property.TABLE_SPLIT_THRESHOLD.getKey());
+    env.getAccumuloConnector().tableOperations().removeProperty(indexTableName, Property.TABLE_SPLIT_THRESHOLD.getKey());
+    env.getAccumuloConnector().tableOperations().removeProperty(tmpIndexTableName, Property.TABLE_SPLIT_THRESHOLD.getKey());
 
     log.debug("Imported " + tmpIndexTableName + " from " + indexTableName + " 
flush: " + (t2 - t1) + "ms export: " + (t3 - t2) + "ms copy:" + (t4 - t3)
         + "ms import:" + (t5 - t4) + "ms");

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Flush.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Flush.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Flush.java
index e6ac574..f8d5183 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Flush.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Flush.java
@@ -19,14 +19,14 @@ package org.apache.accumulo.testing.core.randomwalk.shard;
 import java.util.Properties;
 import java.util.Random;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class Flush extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     String indexTableName = (String) state.get("indexTableName");
     String dataTableName = (String) state.get("docTableName");
     Random rand = (Random) state.get("rand");
@@ -38,7 +38,7 @@ public class Flush extends Test {
     else
       table = dataTableName;
 
-    env.getConnector().tableOperations().flush(table, null, null, true);
+    env.getAccumuloConnector().tableOperations().flush(table, null, null, true);
     log.debug("Flushed " + table);
   }
 

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Grep.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Grep.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Grep.java
index 5892626..b8a79e5 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Grep.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Grep.java
@@ -31,7 +31,7 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.IntersectingIterator;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.io.Text;
@@ -39,7 +39,7 @@ import org.apache.hadoop.io.Text;
 public class Grep extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     // pick a few randoms words... grep for those words and search the index
     // ensure both return the same set of documents
 
@@ -53,7 +53,7 @@ public class Grep extends Test {
       words[i] = new Text(Insert.generateRandomWord(rand));
     }
 
-    BatchScanner bs = env.getConnector().createBatchScanner(indexTableName, Authorizations.EMPTY, 16);
+    BatchScanner bs = env.getAccumuloConnector().createBatchScanner(indexTableName, Authorizations.EMPTY, 16);
     IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class.getName());
     IntersectingIterator.setColumnFamilies(ii, words);
     bs.addScanIterator(ii);
@@ -67,7 +67,7 @@ public class Grep extends Test {
 
     bs.close();
 
-    bs = env.getConnector().createBatchScanner(dataTableName, Authorizations.EMPTY, 16);
+    bs = env.getAccumuloConnector().createBatchScanner(dataTableName, Authorizations.EMPTY, 16);
 
     for (int i = 0; i < words.length; i++) {
       IteratorSetting more = new IteratorSetting(20 + i, "ii" + i, RegExFilter.class);

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Insert.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Insert.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Insert.java
index 1b15323..9f480eb 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Insert.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Insert.java
@@ -26,7 +26,7 @@ import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
@@ -37,7 +37,7 @@ public class Insert extends Test {
   static final int MAX_WORDS_PER_DOC = 3000;
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     String indexTableName = (String) state.get("indexTableName");
     String dataTableName = (String) state.get("docTableName");
     int numPartitions = (Integer) state.get("numPartitions");

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Merge.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Merge.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Merge.java
index 106ab3b..afd36fd 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Merge.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Merge.java
@@ -21,7 +21,7 @@ import java.util.Properties;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.io.Text;
@@ -29,16 +29,16 @@ import org.apache.hadoop.io.Text;
 public class Merge extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     String indexTableName = (String) state.get("indexTableName");
 
-    Collection<Text> splits = env.getConnector().tableOperations().listSplits(indexTableName);
+    Collection<Text> splits = env.getAccumuloConnector().tableOperations().listSplits(indexTableName);
     SortedSet<Text> splitSet = new TreeSet<>(splits);
     log.debug("merging " + indexTableName);
-    env.getConnector().tableOperations().merge(indexTableName, null, null);
+    env.getAccumuloConnector().tableOperations().merge(indexTableName, null, null);
     org.apache.accumulo.core.util.Merge merge = new org.apache.accumulo.core.util.Merge();
-    merge.mergomatic(env.getConnector(), indexTableName, null, null, 256 * 1024 * 1024, true);
-    splits = env.getConnector().tableOperations().listSplits(indexTableName);
+    merge.mergomatic(env.getAccumuloConnector(), indexTableName, null, null, 256 * 1024 * 1024, true);
+    splits = env.getAccumuloConnector().tableOperations().listSplits(indexTableName);
     if (splits.size() > splitSet.size()) {
       // throw an exception so that the test will die and no further changes to the table will occur...
       // this way the table is left as is for debugging.

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Reindex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Reindex.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Reindex.java
index ac0c872..95fa6b8 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Reindex.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Reindex.java
@@ -26,14 +26,14 @@ import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class Reindex extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     String indexTableName = (String) state.get("indexTableName");
     String tmpIndexTableName = indexTableName + "_tmp";
     String docTableName = (String) state.get("docTableName");
@@ -43,8 +43,8 @@ public class Reindex extends Test {
 
     ShardFixture.createIndexTable(this.log, state, env, "_tmp", rand);
 
-    Scanner scanner = env.getConnector().createScanner(docTableName, Authorizations.EMPTY);
-    BatchWriter tbw = env.getConnector().createBatchWriter(tmpIndexTableName, new BatchWriterConfig());
+    Scanner scanner = env.getAccumuloConnector().createScanner(docTableName, Authorizations.EMPTY);
+    BatchWriter tbw = env.getAccumuloConnector().createBatchWriter(tmpIndexTableName, new BatchWriterConfig());
 
     int count = 0;
 

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Search.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Search.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Search.java
index c07397d..440469f 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Search.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Search.java
@@ -31,7 +31,7 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.IntersectingIterator;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.io.Text;
@@ -39,7 +39,7 @@ import org.apache.hadoop.io.Text;
 public class Search extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     String indexTableName = (String) state.get("indexTableName");
     String dataTableName = (String) state.get("docTableName");
 
@@ -69,7 +69,7 @@ public class Search extends Test {
 
     log.debug("Looking up terms " + searchTerms + " expect to find " + docID);
 
-    BatchScanner bs = env.getConnector().createBatchScanner(indexTableName, Authorizations.EMPTY, 10);
+    BatchScanner bs = env.getAccumuloConnector().createBatchScanner(indexTableName, Authorizations.EMPTY, 10);
     IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
     IntersectingIterator.setColumnFamilies(ii, columns);
     bs.addScanIterator(ii);
@@ -90,8 +90,8 @@ public class Search extends Test {
       throw new Exception("Did not see doc " + docID + " in index.  terms:" + 
searchTerms + " " + indexTableName + " " + dataTableName);
   }
 
-  static Entry<Key,Value> findRandomDocument(State state, Environment env, String dataTableName, Random rand) throws Exception {
-    Scanner scanner = env.getConnector().createScanner(dataTableName, Authorizations.EMPTY);
+  static Entry<Key,Value> findRandomDocument(State state, RandWalkEnv env, String dataTableName, Random rand) throws Exception {
+    Scanner scanner = env.getAccumuloConnector().createScanner(dataTableName, Authorizations.EMPTY);
     scanner.setBatchSize(1);
     scanner.setRange(new Range(Integer.toString(rand.nextInt(0xfffffff), 16), null));
 

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/ShardFixture.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/ShardFixture.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/ShardFixture.java
index 3462442..2d83326 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/ShardFixture.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/ShardFixture.java
@@ -25,7 +25,7 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MultiTableBatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.Fixture;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.hadoop.io.Text;
@@ -49,8 +49,8 @@ public class ShardFixture extends Fixture {
     return splits;
   }
 
-  static void createIndexTable(Logger log, State state, Environment env, String suffix, Random rand) throws Exception {
-    Connector conn = env.getConnector();
+  static void createIndexTable(Logger log, State state, RandWalkEnv env, String suffix, Random rand) throws Exception {
+    Connector conn = env.getAccumuloConnector();
     String name = (String) state.get("indexTableName") + suffix;
     int numPartitions = (Integer) state.get("numPartitions");
     boolean enableCache = (Boolean) state.get("cacheIndex");
@@ -73,7 +73,7 @@ public class ShardFixture extends Fixture {
   }
 
   @Override
-  public void setUp(State state, Environment env) throws Exception {
+  public void setUp(State state, RandWalkEnv env) throws Exception {
     String hostname = InetAddress.getLocalHost().getHostName().replaceAll("[-.]", "_");
     String pid = env.getPid();
 
@@ -88,7 +88,7 @@ public class ShardFixture extends Fixture {
     state.set("rand", rand);
     state.set("nextDocID", Long.valueOf(0));
 
-    Connector conn = env.getConnector();
+    Connector conn = env.getAccumuloConnector();
 
     createIndexTable(this.log, state, env, "", rand);
 
@@ -110,7 +110,7 @@ public class ShardFixture extends Fixture {
   }
 
   @Override
-  public void tearDown(State state, Environment env) throws Exception {
+  public void tearDown(State state, RandWalkEnv env) throws Exception {
     // We have resources we need to clean up
     if (env.isMultiTableBatchWriterInitialized()) {
       MultiTableBatchWriter mtbw = env.getMultiTableBatchWriter();
@@ -124,7 +124,7 @@ public class ShardFixture extends Fixture {
       env.resetMultiTableBatchWriter();
     }
 
-    Connector conn = env.getConnector();
+    Connector conn = env.getAccumuloConnector();
 
     log.info("Deleting index and doc tables");
 

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Split.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Split.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Split.java
index bef5104..d150e99 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Split.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/Split.java
@@ -20,7 +20,7 @@ import java.util.Properties;
 import java.util.Random;
 import java.util.SortedSet;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 import org.apache.hadoop.io.Text;
@@ -28,14 +28,14 @@ import org.apache.hadoop.io.Text;
 public class Split extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
     String indexTableName = (String) state.get("indexTableName");
     int numPartitions = (Integer) state.get("numPartitions");
     Random rand = (Random) state.get("rand");
 
     SortedSet<Text> splitSet = ShardFixture.genSplits(numPartitions, rand.nextInt(numPartitions) + 1, "%06x");
     log.debug("adding splits " + indexTableName);
-    env.getConnector().tableOperations().addSplits(indexTableName, splitSet);
+    env.getAccumuloConnector().tableOperations().addSplits(indexTableName, splitSet);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/VerifyIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/VerifyIndex.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/VerifyIndex.java
index caba1d7..cd1a1cd 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/VerifyIndex.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/shard/VerifyIndex.java
@@ -25,21 +25,21 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class VerifyIndex extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
 
     String indexTableName = (String) state.get("indexTableName");
     String tmpIndexTableName = indexTableName + "_tmp";
 
     // scan new and old index and verify identical
-    Scanner indexScanner1 = env.getConnector().createScanner(tmpIndexTableName, Authorizations.EMPTY);
-    Scanner indexScanner2 = env.getConnector().createScanner(indexTableName, Authorizations.EMPTY);
+    Scanner indexScanner1 = env.getAccumuloConnector().createScanner(tmpIndexTableName, Authorizations.EMPTY);
+    Scanner indexScanner2 = env.getAccumuloConnector().createScanner(indexTableName, Authorizations.EMPTY);
 
     Iterator<Entry<Key,Value>> iter = indexScanner2.iterator();
 
@@ -64,8 +64,8 @@ public class VerifyIndex extends Test {
 
     log.debug("Verified " + count + " index entries ");
 
-    env.getConnector().tableOperations().delete(indexTableName);
-    env.getConnector().tableOperations().rename(tmpIndexTableName, indexTableName);
+    env.getAccumuloConnector().tableOperations().delete(indexTableName);
+    env.getAccumuloConnector().tableOperations().rename(tmpIndexTableName, indexTableName);
   }
 
 }
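
The verification step above walks two scanners in lockstep; a minimal sketch of that comparison, assuming the usual imports (java.util.Iterator, java.util.Map.Entry, and the org.apache.accumulo.core.data types shown in the diff). The method and variable names are illustrative, not the committed code:

    // Compare two scans entry by entry; keys are compared through the column
    // visibility since timestamps may legitimately differ between the copies.
    static long verifyIdentical(Scanner expected, Scanner actual) {
      Iterator<Entry<Key,Value>> i1 = expected.iterator();
      Iterator<Entry<Key,Value>> i2 = actual.iterator();
      long count = 0;
      while (i1.hasNext() && i2.hasNext()) {
        Entry<Key,Value> e1 = i1.next();
        Entry<Key,Value> e2 = i2.next();
        if (!e1.getKey().equals(e2.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS))
          throw new RuntimeException("keys differ " + e1.getKey() + " " + e2.getKey());
        if (!e1.getValue().equals(e2.getValue()))
          throw new RuntimeException("values differ at " + e1.getKey());
        count++;
      }
      if (i1.hasNext() || i2.hasNext())
        throw new RuntimeException("scans have different lengths after " + count + " entries");
      return count;
    }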

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/CreateTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/CreateTable.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/CreateTable.java
index df1df59..1282993 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/CreateTable.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/CreateTable.java
@@ -19,12 +19,12 @@ package org.apache.accumulo.testing.core.randomwalk.unit;
 
 import java.util.Properties;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class CreateTable extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {}
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {}
 }

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/DeleteTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/DeleteTable.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/DeleteTable.java
index f7226cb..af90276 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/DeleteTable.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/DeleteTable.java
@@ -18,12 +18,12 @@ package org.apache.accumulo.testing.core.randomwalk.unit;
 
 import java.util.Properties;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class DeleteTable extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {}
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {}
 }

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Ingest.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Ingest.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Ingest.java
index 5681402..1b37baf 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Ingest.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Ingest.java
@@ -18,12 +18,12 @@ package org.apache.accumulo.testing.core.randomwalk.unit;
 
 import java.util.Properties;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class Ingest extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {}
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {}
 }

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Scan.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Scan.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Scan.java
index c677cf9..8f6bdae 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Scan.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Scan.java
@@ -18,12 +18,12 @@ package org.apache.accumulo.testing.core.randomwalk.unit;
 
 import java.util.Properties;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class Scan extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {}
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {}
 }

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Verify.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Verify.java b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Verify.java
index 95acf4f..4dd5f29 100644
--- a/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Verify.java
+++ b/core/src/main/java/org/apache/accumulo/testing/core/randomwalk/unit/Verify.java
@@ -18,12 +18,12 @@ package org.apache.accumulo.testing.core.randomwalk.unit;
 
 import java.util.Properties;
 
-import org.apache.accumulo.testing.core.randomwalk.Environment;
+import org.apache.accumulo.testing.core.randomwalk.RandWalkEnv;
 import org.apache.accumulo.testing.core.randomwalk.State;
 import org.apache.accumulo.testing.core.randomwalk.Test;
 
 public class Verify extends Test {
 
   @Override
-  public void visit(State state, Environment env, Properties props) throws Exception {}
+  public void visit(State state, RandWalkEnv env, Properties props) throws Exception {}
 }

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/core/src/test/java/org/apache/accumulo/testing/core/randomwalk/ReplicationRandomWalkIT.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/testing/core/randomwalk/ReplicationRandomWalkIT.java b/core/src/test/java/org/apache/accumulo/testing/core/randomwalk/ReplicationRandomWalkIT.java
index c288bd7..6a6e713 100644
--- a/core/src/test/java/org/apache/accumulo/testing/core/randomwalk/ReplicationRandomWalkIT.java
+++ b/core/src/test/java/org/apache/accumulo/testing/core/randomwalk/ReplicationRandomWalkIT.java
@@ -43,19 +43,19 @@ public class ReplicationRandomWalkIT extends ConfigurableMacBase {
   public void runReplicationRandomWalkStep() throws Exception {
     Replication r = new Replication();
 
-    Environment env = new Environment(new Properties()) {
+    RandWalkEnv env = new RandWalkEnv(new Properties()) {
       @Override
-      public String getUserName() {
+      public String getAccumuloUserName() {
         return "root";
       }
 
       @Override
-      public String getPassword() {
+      public String getAccumuloPassword() {
         return ROOT_PASSWORD;
       }
 
       @Override
-      public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
+      public Connector getAccumuloConnector() throws AccumuloException, AccumuloSecurityException {
         return ReplicationRandomWalkIT.this.getConnector();
       }
 

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/libexec/analyze-missing.pl
----------------------------------------------------------------------
diff --git a/libexec/analyze-missing.pl b/libexec/analyze-missing.pl
new file mode 100755
index 0000000..5cce1b1
--- /dev/null
+++ b/libexec/analyze-missing.pl
@@ -0,0 +1,127 @@
+#! /usr/bin/env perl
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+use POSIX qw(strftime);
+
+if(scalar(@ARGV) != 4){
+	print "Usage : analyze-missing.pl <accumulo home> <continuous log dir> <user> <pass> \n";
+       exit(1);
+}
+
+$ACCUMULO_HOME=$ARGV[0];
+$CONTINUOUS_LOG_DIR=$ARGV[1];
+$USER=$ARGV[2];
+$PASS=$ARGV[3];
+
+
+@missing = `grep MIS $CONTINUOUS_LOG_DIR/*.err`;
+
+
+
+for $miss (@missing) {
+       chomp($miss);
+       ($file, $type, $time, $row) = split(/[: ]/, $miss);
+
+       substr($file, -3, 3, "out");
+
+       $prevRowLine = `grep -B 1 $row $file | grep SRQ | grep -v $row`;
+
+       @prla = split(/\s+/, $prevRowLine);
+       $prevRow = $prla[2];
+#      print $prevRow."\n";
+
+       $aScript = `mktemp /tmp/miss_script.XXXXXXXXXX`;
+       chomp($aScript);
+       open(AS, ">$aScript") || die;
+
+       print AS "table ci\n";
+       print AS "scan -b $prevRow -e $prevRow\n";
+       print AS "scan -b $row -e $row\n";
+       print AS "quit\n";
+       close(AS);
+
+       $exist = 0;
+       $ingestIDSame = 0;
+       $ingestId = "";
+       $count = 0;
+
+	@entries = `$ACCUMULO_HOME/bin/accumulo shell -u $USER -p $PASS -f $aScript | grep $row`;
+       system("rm $aScript");
+
+       for $entry (@entries){
+               chomp($entry);
+               @entryA = split(/[: ]+/, $entry);
+               if($entryA[0] eq $row){
+                       $exist = 1;
+
+                       if($entryA[4] eq $ingestId){
+                               $ingestIDSame = 1;
+                       }
+               }else{
+                       $ingestId = $entryA[4];
+                       $count = hex($entryA[5]);
+               }
+       }
+
+
+       #look in ingest logs
+       @ingestLogs = `ls  $CONTINUOUS_LOG_DIR/*ingest*.out`;
+       @flushTimes = ();
+       chomp(@ingestLogs);
+       for $ingestLog (@ingestLogs){
+               open(IL, "<$ingestLog") || die;
+               
+
+               while($firstLine = <IL>){
+                       chomp($firstLine);
+                       if($firstLine =~ /UUID.*/){
+                               last;
+                       }
+               }
+
+               @iinfo = split(/\s+/,$firstLine);
+               if($iinfo[2] eq $ingestId){
+                       while($line = <IL>){
+                               if($line =~ /FLUSH (\d+) \d+ \d+ (\d+) \d+/){
+                                       push(@flushTimes, $1);
+                                       if(scalar(@flushTimes) > 3){
+                                               shift(@flushTimes);
+                                       }
+                                       if($count < $2){
+                                               last;
+                                       }
+                               }
+                       }
+               }
+               
+               
+
+               close(IL);
+       
+               if(scalar(@flushTimes) > 0){
+                       last;
+               }
+       } 
+
+       $its0 = strftime "%m/%d/%Y_%H:%M:%S", gmtime($flushTimes[0]/1000);
+       $its1 = strftime "%m/%d/%Y_%H:%M:%S", gmtime($flushTimes[1]/1000);
+       $mts = strftime "%m/%d/%Y_%H:%M:%S", gmtime($time/1000);
+
+	print "$row $exist $ingestIDSame $prevRow $ingestId   $its0   $its1   $mts\n";
+}
+
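
analyze-missing.pl checks row presence by scripting the accumulo shell; for reference, an equivalent point lookup through the Java client would look roughly like this. A hedged sketch only: connector setup is assumed and not shown, the "ci" table name is taken from the generated shell script above, and closing the scanner assumes a client version where ScannerBase is closeable.

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.security.Authorizations;

    // Return true if any entry exists in the given row of the table.
    static boolean rowExists(Connector conn, String table, String row) throws Exception {
      Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
      try {
        scanner.setRange(Range.exact(row));
        return scanner.iterator().hasNext();
      } finally {
        scanner.close();
      }
    }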

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/libexec/datanode-agitator.pl
----------------------------------------------------------------------
diff --git a/libexec/datanode-agitator.pl b/libexec/datanode-agitator.pl
new file mode 100755
index 0000000..a98bb66
--- /dev/null
+++ b/libexec/datanode-agitator.pl
@@ -0,0 +1,140 @@
+#! /usr/bin/env perl
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+use POSIX qw(strftime);
+use Cwd qw();
+
+if(scalar(@ARGV) != 5 && scalar(@ARGV) != 3){
+  print "Usage : datanode-agitator.pl <min sleep before kill in minutes>[:max sleep before kill in minutes] <min sleep before restart in minutes>[:max sleep before restart in minutes] HADOOP_PREFIX [<min kill> <max kill>]\n";
+  exit(1);
+}
+
+my $ACCUMULO_HOME;
+if( defined $ENV{'ACCUMULO_HOME'} ){
+  $ACCUMULO_HOME = $ENV{'ACCUMULO_HOME'};
+} else {
+  $cwd=Cwd::cwd();
+  $ACCUMULO_HOME=$cwd . '/../../..';
+}
+$HADOOP_PREFIX=$ARGV[2];
+
+print "ACCUMULO_HOME=$ACCUMULO_HOME\n";
+print "HADOOP_PREFIX=$HADOOP_PREFIX\n";
+
+@sleeprange1 = split(/:/, $ARGV[0]);
+$sleep1 = $sleeprange1[0];
+
+@sleeprange2 = split(/:/, $ARGV[1]);
+$sleep2 = $sleeprange2[0];
+
+if (scalar(@sleeprange1) > 1) {
+  $sleep1max = $sleeprange1[1] + 1;
+} else {
+  $sleep1max = $sleep1;
+}
+
+if ($sleep1 > $sleep1max) {
+  die("sleep1 > sleep1max $sleep1 > $sleep1max");
+}
+
+if (scalar(@sleeprange2) > 1) {
+  $sleep2max = $sleeprange2[1] + 1;
+} else {
+  $sleep2max = $sleep2;
+}
+
+if($sleep2 > $sleep2max){
+  die("sleep2 > sleep2max $sleep2 > $sleep2max");
+}
+
+if(defined $ENV{'ACCUMULO_CONF_DIR'}){
+  $ACCUMULO_CONF_DIR = $ENV{'ACCUMULO_CONF_DIR'};
+}else{
+  $ACCUMULO_CONF_DIR = $ACCUMULO_HOME . '/conf';
+}
+
+if(scalar(@ARGV) == 5){
+  $minKill = $ARGV[3];
+  $maxKill = $ARGV[4];
+}else{
+  $minKill = 1;
+  $maxKill = 1;
+}
+
+if($minKill > $maxKill){
+  die("minKill > maxKill $minKill > $maxKill");
+}
+
+@tserversRaw = `cat $ACCUMULO_CONF_DIR/tservers`;
+chomp(@tserversRaw);
+
+for $tserver (@tserversRaw){
+  if($tserver eq "" || substr($tserver,0,1) eq "#"){
+    next;
+  }
+
+  push(@tservers, $tserver);
+}
+
+
+if(scalar(@tservers) < $maxKill){
+  print STDERR "WARN setting maxKill to ".scalar(@tservers)."\n";
+  $maxKill = scalar(@tservers);
+}
+
+if ($minKill > $maxKill){
+  print STDERR "WARN setting minKill to equal maxKill\n";
+  $minKill = $maxKill;
+}
+
+while(1){
+
+  $numToKill = int(rand($maxKill - $minKill + 1)) + $minKill;
+  %killed = ();
+  $server = "";
+
+  for($i = 0; $i < $numToKill; $i++){
+    while($server eq "" || defined $killed{$server}){
+      $index = int(rand(scalar(@tservers)));
+      $server = $tservers[$index];
+    }
+
+    $killed{$server} = 1;
+
+    $t = strftime "%Y%m%d %H:%M:%S", localtime;
+
+    print STDERR "$t Killing datanode on $server\n";
+    system("ssh $server \"pkill -9 -f '[p]roc_datanode'\"");
+  }
+
+  $nextsleep2 = int(rand($sleep2max - $sleep2)) + $sleep2;
+  sleep($nextsleep2 * 60);
+
+  foreach $restart (keys %killed) {
+
+    $t = strftime "%Y%m%d %H:%M:%S", localtime;
+
+    print STDERR "$t Starting datanode on $restart\n";
+    # We can just start as we're the HDFS user
+    system("ssh $restart '$HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode'");
+  }
+
+  $nextsleep1 = int(rand($sleep1max - $sleep1)) + $sleep1;
+  sleep($nextsleep1 * 60);
+}
+

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/libexec/hdfs-agitator.pl
----------------------------------------------------------------------
diff --git a/libexec/hdfs-agitator.pl b/libexec/hdfs-agitator.pl
new file mode 100755
index 0000000..85eab32
--- /dev/null
+++ b/libexec/hdfs-agitator.pl
@@ -0,0 +1,217 @@
+#! /usr/bin/env perl
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+use strict;
+use warnings;
+use POSIX qw(strftime);
+use Getopt::Long;
+use Pod::Usage;
+
+my $help = 0;
+my $man = 0;
+my $sleep = 10;
+my $superuser = 'hdfs';
+my $hdfsCmd;
+if( defined $ENV{'HADOOP_PREFIX'} ){
+  $hdfsCmd = $ENV{'HADOOP_PREFIX'} . '/share/hadoop/hdfs/bin/hdfs';
+}
+my $sudo;
+my $nameservice;
+
+GetOptions('help|?' => \$help, 'man' => \$man, 'sleep=i' => \$sleep, 'nameservice=s' => \$nameservice, 'superuser=s' => \$superuser, 'hdfs-cmd=s' => \$hdfsCmd, 'sudo:s' => \$sudo) or pod2usage(2);
+pod2usage(-exitval => 0, -verbose => 1) if $help;
+pod2usage(-exitval => 0, -verbose => 2) if $man;
+pod2usage(-exitval => 1, -verbose => 1, -message => '$HADOOP_PREFIX not defined and no hdfs-cmd given. Please use --hdfs-cmd to specify where your hdfs cli is.') if not defined $hdfsCmd;
+pod2usage(-exitval => 1, -verbose => 1, -message => "Your specified hdfs cli '$hdfsCmd' is not executable.") if not -x $hdfsCmd;
+if( defined $sudo and "" eq $sudo ){
+  $sudo = `which sudo`;
+  pod2usage(-exitval => 1, -verbose => 1, -message => "Error attempting to find the sudo command, please specify it with --sudo /path/to/sudo") if 0 != $?;
+  chomp($sudo);
+}
+if( defined $sudo ){
+  pod2usage(-exitval => 1, -verbose => 1, -message => "Your specified sudo command '$sudo' is not executable.") if not -x $sudo;
+}
+
+my $needsudo = defined $sudo;
+my $haadmin = "$hdfsCmd haadmin";
+if($needsudo) {
+  $haadmin = "$sudo -u $superuser $haadmin";
+  print STDERR "Starting HDFS agitator, configured to fail over every $sleep minutes. Will run hdfs command '$hdfsCmd' as user '$superuser' via '$sudo'.\n";
+} else {
+  print STDERR "Starting HDFS agitator, configured to fail over every $sleep minutes. Will run hdfs command '$hdfsCmd' as the current user.\n";
+}
+while(1){
+  sleep($sleep * 60);
+  my $t = strftime "%Y%m%d %H:%M:%S", localtime;
+  my @failServices;
+  if( defined $nameservice ){
+    @failServices = ($nameservice);
+  } else {
+    my $nameservicesRaw = `$hdfsCmd getconf -confKey dfs.nameservices`;
+    if(0 != $?) {
+      print STDERR "$t HDFS CLI failed. Please see --help to set it correctly\n";
+      exit(1);
+    }
+    chomp($nameservicesRaw);
+    my @nameservices = split(/,/, $nameservicesRaw);
+    if(1 > scalar(@nameservices)) {
+      print STDERR "$t No HDFS NameServices found. Are you sure you're running in HA?\n";
+      exit(1);
+    }
+    if(rand(1) < .5){
+      my $serviceToFail = $nameservices[int(rand(scalar(@nameservices)))];
+      print STDERR "$t Failing over nameservice $serviceToFail\n";
+      @failServices = ($serviceToFail);
+    } else {
+      print STDERR "$t Failing over all nameservices\n";
+      @failServices = @nameservices;
+    }
+  }
+  for my $toFail (@failServices){
+    my $namenodesRaw = `$hdfsCmd getconf -confKey dfs.ha.namenodes.$toFail`;
+    if(0 != $?) {
+      print STDERR "$t HDFS CLI failed to look up namenodes in service $toFail.\n";
+      exit(1);
+    }
+    chomp($namenodesRaw);
+    my @namenodes = split(/,/, $namenodesRaw);
+    if(2 > scalar(@namenodes)) {
+      print STDERR "$t WARN NameService $toFail does not have at least 2 namenodes according to the HDFS configuration, skipping.\n";
+      next;
+    }
+    my $active;
+    for my $namenode (@namenodes){
+      my $status = `$haadmin -ns $toFail -getServiceState $namenode`;
+      if(0 != $?) {
+        if($needsudo) {
+          print STDERR "$t WARN Error while attempting to get the service state of $toFail :: $namenode\n";
+          $status = 'error';
+        } else {
+          print STDERR "$t WARN Current user may not run the HDFS haadmin utility, attempting to sudo to the $superuser user.\n";
+          $needsudo = 1;
+          if(not defined $sudo) {
+            $sudo = `which sudo`;
+            pod2usage(-exitval => 1, -verbose => 1, -message => "Error attempting to find the sudo command, please specify it with --sudo") if 0 != $?;
+            chomp($sudo);
+            pod2usage(-exitval => 1, -verbose => 1, -message => "The sudo command '$sudo' is not executable. Please specify sudo with --sudo") if not -x $sudo;
+          }
+          $haadmin = "$sudo -u $superuser $haadmin";
+          redo;
+        }
+      }
+      chomp($status);
+      if( 'active' eq $status ){
+        $active = $namenode;
+        last;
+      }
+    }
+    if( defined $active ){
+      my @standby = grep { $_ ne $active } @namenodes;
+      my $newActive = $standby[int(rand(scalar(@standby)))];
+      print STDERR "$t Transitioning nameservice $toFail from $active to $newActive\n";
+      my $cmd = "$haadmin -ns $toFail -failover $active $newActive";
+      print "$t $cmd\n";
+      system($cmd);
+    } else {
+      my $newActive = $namenodes[int(rand(scalar(@namenodes)))];
+      print STDERR "$t WARN nameservice $toFail did not have an active namenode. Transitioning a random namenode to active. This will fail if HDFS is configured for automatic failover.\n";
+      my $cmd = "$haadmin -ns $toFail -transitionToActive $newActive";
+      print "$t $cmd\n";
+      system($cmd);
+    }
+  }
+}
+__END__
+
+=head1 NAME
+
+hdfs-agitator - causes HDFS to failover
+
+=head1 DESCRIPTION
+
+Sleeps for a configurable amount of time, then causes a NameNode failover in
+one or more HDFS NameServices. If a given NameService does not have an Active
+NameNode when it comes time to fail over, a random standby is promoted.
+
+Only works on HDFS versions that support HA configurations and the haadmin
+command. In order to function, the user running this script must be able to
+use the haadmin command. This requires access to an HDFS superuser. By default,
+it will attempt to sudo to perform calls.
+
+=head1 SYNOPSIS
+
+hdfs-agitator [options]
+
+  Options:
+    --help         Brief help message
+    --man          Full documentation
+    --sleep        Time to sleep between failovers in minutes. Default 10
+    --superuser    HDFS superuser. Default 'hdfs'
+    --hdfs-cmd     hdfs command path. Default '$HADOOP_PREFIX/share/hadoop/hdfs/bin/hdfs'
+    --nameservice  Limit failovers to specified nameservice. Default all nameservices
+    --sudo         Command to call to sudo to the HDFS superuser. Default 'sudo' if needed.
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--sleep>
+
+Sleep the given number of minutes between attempts to fail over nameservices.
+
+=item B<--nameservice>
+
+Limit failover attempts to the given nameservice. By default, we attempt to
+list all known nameservices and choose either one or all of them to fail over
+in a given cycle.
+
+=item B<--superuser>
+
+An HDFS superuser capable of running the haadmin command. Defaults to "hdfs".
+
+=item B<--hdfs-cmd>
+
+Path to the HDFS cli. Will be used both for non-administrative commands (e.g.
+listing the nameservices and serviceids in a given nameservice) and admin-only
+actions such as checking status and failing over.
+
+Defaults to using $HADOOP_PREFIX.
+
+=item B<--sudo>
+
+Command to allow us to act as the given HDFS superuser. By default we assume
+the current user can run HDFS administrative commands. When this argument is
+specified, we will attempt to act as the HDFS superuser instead. If given an
+argument, it will be called like sudo, i.e. "sudo -u $superuser $cmd". Defaults
+to "sudo" on the shell's path.
+
+=back
+
+=head1 SEE ALSO
+
+See the Apache Hadoop documentation on configuring HDFS HA
+
+=over 8
+
+=item B<HA with QJM>
+
+http://hadoop.apache.org/docs/r2.2.0/hadoop-yarn/hadoop-yarn-site/HDFSHighAvailabilityWithQJM.html#Administrative_commands
+
+=item B<HA with NFS>
+
+http://hadoop.apache.org/docs/r2.2.0/hadoop-yarn/hadoop-yarn-site/HDFSHighAvailabilityWithNFS.html#Administrative_commands
+
+=back

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/libexec/master-agitator.pl
----------------------------------------------------------------------
diff --git a/libexec/master-agitator.pl b/libexec/master-agitator.pl
new file mode 100755
index 0000000..d87f17e
--- /dev/null
+++ b/libexec/master-agitator.pl
@@ -0,0 +1,92 @@
+#! /usr/bin/env perl
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+use POSIX qw(strftime);
+use Cwd qw();
+
+if(scalar(@ARGV) != 2){
+	print "Usage : master-agitator.pl <sleep before kill in minutes> <sleep before start in minutes>\n";
+       exit(1);
+}
+
+my $ACCUMULO_HOME;
+if( defined $ENV{'ACCUMULO_HOME'} ){
+  $ACCUMULO_HOME = $ENV{'ACCUMULO_HOME'};
+} else {
+  $cwd=Cwd::cwd();
+  $ACCUMULO_HOME=$cwd . '/../../..';
+}
+
+if(defined $ENV{'ACCUMULO_CONF_DIR'}){
+        $ACCUMULO_CONF_DIR = $ENV{'ACCUMULO_CONF_DIR'};
+}else{
+       $ACCUMULO_CONF_DIR = $ACCUMULO_HOME . '/conf';
+}
+
+$sleep1 = $ARGV[0];
+$sleep2 = $ARGV[1];
+
+@mastersRaw = `cat $ACCUMULO_CONF_DIR/masters`;
+chomp(@mastersRaw);
+
+for $master (@mastersRaw){
+       if($master eq "" || substr($master,0,1) eq "#"){
+               next;
+       }
+
+       push(@masters, $master);
+}
+
+
+while(1){
+       sleep($sleep1 * 60);
+       $t = strftime "%Y%m%d %H:%M:%S", localtime;
+       if(rand(1) < .5){
+               $masterNodeToWack = $masters[int(rand(scalar(@masters)))];
+               print STDERR "$t Killing master on $masterNodeToWack\n";
+		$cmd = "ssh $masterNodeToWack \"pkill -f '[ ]org.apache.accumulo.start.*master'\"";
+               print "$t $cmd\n";
+               system($cmd);
+       }else{
+               print STDERR "$t Killing all masters\n";
+		$cmd = "pssh -h $ACCUMULO_CONF_DIR/masters \"pkill -f '[ ]org.apache.accumulo.start.*master'\" < /dev/null";
+               print "$t $cmd\n";
+               system($cmd);
+
+               $file = '';
+               if (-e "$ACCUMULO_CONF_DIR/gc") {
+                       $file = 'gc';
+               } else {
+                       $file = 'masters';
+               }
+
+		$cmd = "pssh -h $ACCUMULO_CONF_DIR/$file \"pkill -f '[ ]org.apache.accumulo.start.*gc'\" < /dev/null";
+               print "$t $cmd\n";
+               system($cmd);
+       }
+
+       sleep($sleep2 * 60);
+       $t = strftime "%Y%m%d %H:%M:%S", localtime;
+       print STDERR "$t Running start-all\n";
+
+	$cmd = "pssh -h $ACCUMULO_CONF_DIR/masters \"$ACCUMULO_HOME/bin/accumulo-service master start\" < /dev/null";
+       print "$t $cmd\n";
+       system($cmd);
+}
+
+

http://git-wip-us.apache.org/repos/asf/accumulo-testing/blob/fc3ddfc4/libexec/tserver-agitator.pl
----------------------------------------------------------------------
diff --git a/libexec/tserver-agitator.pl b/libexec/tserver-agitator.pl
new file mode 100755
index 0000000..de29e3a
--- /dev/null
+++ b/libexec/tserver-agitator.pl
@@ -0,0 +1,134 @@
+#! /usr/bin/env perl
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+use POSIX qw(strftime);
+use Cwd qw();
+
+if(scalar(@ARGV) != 4 && scalar(@ARGV) != 2){
+  print "Usage : tserver-agitator.pl <min sleep before kill in minutes>[:max sleep before kill in minutes] <min sleep before tup in minutes>[:max sleep before tup in minutes] [<min kill> <max kill>]\n";
+  exit(1);
+}
+
+my $ACCUMULO_HOME;
+if( defined $ENV{'ACCUMULO_HOME'} ){
+  $ACCUMULO_HOME = $ENV{'ACCUMULO_HOME'};
+} else {
+  $cwd=Cwd::cwd();
+  $ACCUMULO_HOME=$cwd . '/../../..';
+}
+
+print "ACCUMULO_HOME=$ACCUMULO_HOME\n";
+
+@sleeprange1 = split(/:/, $ARGV[0]);
+$sleep1 = $sleeprange1[0];
+
+@sleeprange2 = split(/:/, $ARGV[1]);
+$sleep2 = $sleeprange2[0];
+
+if (scalar(@sleeprange1) > 1) {
+  $sleep1max = $sleeprange1[1] + 1;
+} else {
+  $sleep1max = $sleep1;
+}
+
+if ($sleep1 > $sleep1max) {
+  die("sleep1 > sleep1max $sleep1 > $sleep1max");
+}
+
+if (scalar(@sleeprange2) > 1) {
+  $sleep2max = $sleeprange2[1] + 1;
+} else {
+  $sleep2max = $sleep2;
+}
+
+if($sleep2 > $sleep2max){
+  die("sleep2 > sleep2max $sleep2 > $sleep2max");
+}
+
+if(defined $ENV{'ACCUMULO_CONF_DIR'}){
+  $ACCUMULO_CONF_DIR = $ENV{'ACCUMULO_CONF_DIR'};
+}else{
+  $ACCUMULO_CONF_DIR = $ACCUMULO_HOME . '/conf';
+}
+
+if(scalar(@ARGV) == 4){
+  $minKill = $ARGV[2];
+  $maxKill = $ARGV[3];
+}else{
+  $minKill = 1;
+  $maxKill = 1;
+}
+
+if($minKill > $maxKill){
+  die("minKill > maxKill $minKill > $maxKill");
+}
+
+@tserversRaw = `cat $ACCUMULO_CONF_DIR/tservers`;
+chomp(@tserversRaw);
+
+for $tserver (@tserversRaw){
+  if($tserver eq "" || substr($tserver,0,1) eq "#"){
+    next;
+  }
+
+  push(@tservers, $tserver);
+}
+
+
+if(scalar(@tservers) < $maxKill){
+  print STDERR "WARN setting maxKill to ".scalar(@tservers)."\n";
+  $maxKill = scalar(@tservers);
+}
+
+if ($minKill > $maxKill){
+  print STDERR "WARN setting minKill to equal maxKill\n";
+  $minKill = $maxKill;
+}
+
+while(1){
+
+  $numToKill = int(rand($maxKill - $minKill + 1)) + $minKill;
+  %killed = ();
+  $server = "";
+
+  for($i = 0; $i < $numToKill; $i++){
+    while($server eq "" || defined $killed{$server}){
+      $index = int(rand(scalar(@tservers)));
+      $server = $tservers[$index];
+    }
+
+    $killed{$server} = 1;
+
+    $t = strftime "%Y%m%d %H:%M:%S", localtime;
+
+    print STDERR "$t Killing tserver on $server\n";
+    # We're the accumulo user, just run the command
+    system("ssh $server '$ACCUMULO_HOME/bin/accumulo-service tserver stop'");
+  }
+
+  $nextsleep2 = int(rand($sleep2max - $sleep2)) + $sleep2;
+  sleep($nextsleep2 * 60);
+  $t = strftime "%Y%m%d %H:%M:%S", localtime;
+  print STDERR "$t Running tup\n";
+  # restart the tservers as the accumulo user
+  system("$ACCUMULO_HOME/libexec/cluster.sh start-tservers");
+
+  $nextsleep1 = int(rand($sleep1max - $sleep1)) + $sleep1;
+  sleep($nextsleep1 * 60);
+}
+
