http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/src/main/java/org/apache/accumulo/test/continuous/TimeBinner.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/TimeBinner.java b/test/src/main/java/org/apache/accumulo/test/continuous/TimeBinner.java
deleted file mode 100644
index 6ecb14e..0000000
--- a/test/src/main/java/org/apache/accumulo/test/continuous/TimeBinner.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.continuous;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.cli.ClientOpts.TimeConverter;
-import org.apache.accumulo.core.cli.Help;
-
-import com.beust.jcommander.Parameter;
-
-public class TimeBinner {
-
-  enum Operation {
-    AVG, SUM, MIN, MAX, COUNT, CUMULATIVE, AMM, // avg,min,max
-    AMM_HACK1 // special case
-  }
-
-  private static class DoubleWrapper {
-    double d;
-  }
-
-  private static DoubleWrapper get(long l, HashMap<Long,DoubleWrapper> m, double init) {
-    DoubleWrapper dw = m.get(l);
-    if (dw == null) {
-      dw = new DoubleWrapper();
-      dw.d = init;
-      m.put(l, dw);
-    }
-    return dw;
-  }
-
-  static class Opts extends Help {
-    @Parameter(names = "--period", description = "period", converter = TimeConverter.class, required = true)
-    long period = 0;
-    @Parameter(names = "--timeColumn", description = "time column", required = true)
-    int timeColumn = 0;
-    @Parameter(names = "--dataColumn", description = "data column", required = true)
-    int dataColumn = 0;
-    @Parameter(names = "--operation", description = "one of: AVG, SUM, MIN, MAX, COUNT", required = true)
-    String operation;
-    @Parameter(names = "--dateFormat", description = "a SimpleDateFormat string that describes the date format")
-    String dateFormat = "MM/dd/yy-HH:mm:ss";
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(TimeBinner.class.getName(), args);
-
-    Operation operation = Operation.valueOf(opts.operation);
-    SimpleDateFormat sdf = new SimpleDateFormat(opts.dateFormat);
-
-    BufferedReader in = new BufferedReader(new InputStreamReader(System.in, UTF_8));
-
-    String line = null;
-
-    HashMap<Long,DoubleWrapper> aggregation1 = new HashMap<>();
-    HashMap<Long,DoubleWrapper> aggregation2 = new HashMap<>();
-    HashMap<Long,DoubleWrapper> aggregation3 = new HashMap<>();
-    HashMap<Long,DoubleWrapper> aggregation4 = new HashMap<>();
-
-    while ((line = in.readLine()) != null) {
-
-      try {
-        String tokens[] = line.split("\\s+");
-
-        long time = (long) Double.parseDouble(tokens[opts.timeColumn]);
-        double data = Double.parseDouble(tokens[opts.dataColumn]);
-
-        time = (time / opts.period) * opts.period;
-
-        switch (operation) {
-          case AMM_HACK1: {
-            if (opts.dataColumn < 2) {
-              throw new IllegalArgumentException("--dataColumn must be at least 2");
-            }
-            double data_min = Double.parseDouble(tokens[opts.dataColumn - 2]);
-            double data_max = Double.parseDouble(tokens[opts.dataColumn - 1]);
-
-            updateMin(time, aggregation3, data, data_min);
-            updateMax(time, aggregation4, data, data_max);
-
-            increment(time, aggregation1, data);
-            increment(time, aggregation2, 1);
-            break;
-          }
-          case AMM: {
-            updateMin(time, aggregation3, data, data);
-            updateMax(time, aggregation4, data, data);
-
-            increment(time, aggregation1, data);
-            increment(time, aggregation2, 1);
-            break;
-          }
-          case AVG: {
-            increment(time, aggregation1, data);
-            increment(time, aggregation2, 1);
-            break;
-          }
-          case MAX: {
-            updateMax(time, aggregation1, data, data);
-            break;
-          }
-          case MIN: {
-            updateMin(time, aggregation1, data, data);
-            break;
-          }
-          case COUNT: {
-            increment(time, aggregation1, 1);
-            break;
-          }
-          case SUM:
-          case CUMULATIVE: {
-            increment(time, aggregation1, data);
-            break;
-          }
-        }
-
-      } catch (Exception e) {
-        System.err.println("Failed to process line : " + line + "  " + e.getMessage());
-      }
-    }
-
-    TreeMap<Long,DoubleWrapper> sorted = new TreeMap<>(aggregation1);
-
-    Set<Entry<Long,DoubleWrapper>> es = sorted.entrySet();
-
-    double cumulative = 0;
-    for (Entry<Long,DoubleWrapper> entry : es) {
-      String value;
-
-      switch (operation) {
-        case AMM_HACK1:
-        case AMM: {
-          DoubleWrapper countdw = aggregation2.get(entry.getKey());
-          value = "" + (entry.getValue().d / countdw.d) + " " + aggregation3.get(entry.getKey()).d + " " + aggregation4.get(entry.getKey()).d;
-          break;
-        }
-        case AVG: {
-          DoubleWrapper countdw = aggregation2.get(entry.getKey());
-          value = "" + (entry.getValue().d / countdw.d);
-          break;
-        }
-        case CUMULATIVE: {
-          cumulative += entry.getValue().d;
-          value = "" + cumulative;
-          break;
-        }
-        default:
-          value = "" + entry.getValue().d;
-      }
-
-      System.out.println(sdf.format(new Date(entry.getKey())) + " " + value);
-    }
-
-  }
-
-  private static void increment(long time, HashMap<Long,DoubleWrapper> aggregation, double amount) {
-    get(time, aggregation, 0).d += amount;
-  }
-
-  private static void updateMax(long time, HashMap<Long,DoubleWrapper> aggregation, double data, double new_max) {
-    DoubleWrapper maxdw = get(time, aggregation, Double.NEGATIVE_INFINITY);
-    if (data > maxdw.d)
-      maxdw.d = new_max;
-  }
-
-  private static void updateMin(long time, HashMap<Long,DoubleWrapper> aggregation, double data, double new_min) {
-    DoubleWrapper mindw = get(time, aggregation, Double.POSITIVE_INFINITY);
-    if (data < mindw.d)
-      mindw.d = new_min;
-  }
-}

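For context on the class removed above: TimeBinner's bucketing relies on truncating integer division, `time = (time / opts.period) * opts.period`, which snaps each timestamp to the start of its period so every reading in the same window aggregates under one key. A minimal standalone sketch of the idiom (class name and sample values are illustrative, not part of the patch):

    // Truncating division aligns each timestamp to the start of its bin,
    // mirroring TimeBinner's "time = (time / period) * period".
    public class BinDemo {
      public static void main(String[] args) {
        long period = 60_000L; // 1-minute bins, in milliseconds
        long[] times = {120_500L, 130_200L, 185_000L};
        for (long t : times) {
          long bin = (t / period) * period;
          System.out.println(t + " -> " + bin); // first two land in bin 120000
        }
      }
    }
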
http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/src/main/java/org/apache/accumulo/test/continuous/UndefinedAnalyzer.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/UndefinedAnalyzer.java b/test/src/main/java/org/apache/accumulo/test/continuous/UndefinedAnalyzer.java
deleted file mode 100644
index bcd35d8..0000000
--- a/test/src/main/java/org/apache/accumulo/test/continuous/UndefinedAnalyzer.java
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.continuous;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FilenameFilter;
-import java.io.InputStreamReader;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.cli.BatchScannerOpts;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.server.cli.ClientOnDefaultTable;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * BUGS This code does not handle the fact that these files could include log events from previous months. It therefore assumes all dates are in the current
- * month. One solution might be to skip log files that haven't been touched in the last month, but that doesn't prevent newer files from containing old dates.
- *
- */
-public class UndefinedAnalyzer {
-
-  static class UndefinedNode {
-
-    public UndefinedNode(String undef2, String ref2) {
-      this.undef = undef2;
-      this.ref = ref2;
-    }
-
-    String undef;
-    String ref;
-  }
-
-  static class IngestInfo {
-
-    Map<String,TreeMap<Long,Long>> flushes = new HashMap<>();
-
-    public IngestInfo(String logDir) throws Exception {
-      File dir = new File(logDir);
-      File[] ingestLogs = dir.listFiles(new FilenameFilter() {
-        @Override
-        public boolean accept(File dir, String name) {
-          return name.endsWith("ingest.out");
-        }
-      });
-
-      if (ingestLogs != null) {
-        for (File log : ingestLogs) {
-          parseLog(log);
-        }
-      }
-    }
-
-    private void parseLog(File log) throws Exception {
-      BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(log), UTF_8));
-      String line;
-      TreeMap<Long,Long> tm = null;
-      try {
-        while ((line = reader.readLine()) != null) {
-          if (!line.startsWith("UUID"))
-            continue;
-          String[] tokens = line.split("\\s");
-          String time = tokens[1];
-          String uuid = tokens[2];
-
-          if (flushes.containsKey(uuid)) {
-            System.err.println("WARN Duplicate uuid " + log);
-            return;
-          }
-
-          tm = new TreeMap<>(Collections.reverseOrder());
-          tm.put(0l, Long.parseLong(time));
-          flushes.put(uuid, tm);
-          break;
-
-        }
-        if (tm == null) {
-          System.err.println("WARN Bad ingest log " + log);
-          return;
-        }
-
-        while ((line = reader.readLine()) != null) {
-          String[] tokens = line.split("\\s");
-
-          if (!tokens[0].equals("FLUSH"))
-            continue;
-
-          String time = tokens[1];
-          String count = tokens[4];
-
-          tm.put(Long.parseLong(count), Long.parseLong(time));
-        }
-      } finally {
-        reader.close();
-      }
-    }
-
-    Iterator<Long> getTimes(String uuid, long count) {
-      TreeMap<Long,Long> tm = flushes.get(uuid);
-
-      if (tm == null)
-        return null;
-
-      return tm.tailMap(count).values().iterator();
-    }
-  }
-
-  static class TabletAssignment {
-    String tablet;
-    String endRow;
-    String prevEndRow;
-    String server;
-    long time;
-
-    TabletAssignment(String tablet, String er, String per, String server, long time) {
-      this.tablet = tablet;
-      this.endRow = er;
-      this.prevEndRow = per;
-      this.server = server;
-      this.time = time;
-    }
-
-    public boolean contains(String row) {
-      return prevEndRow.compareTo(row) < 0 && endRow.compareTo(row) >= 0;
-    }
-  }
-
-  static class TabletHistory {
-
-    List<TabletAssignment> assignments = new ArrayList<>();
-
-    TabletHistory(String tableId, String acuLogDir) throws Exception {
-      File dir = new File(acuLogDir);
-      File[] masterLogs = dir.listFiles(new FilenameFilter() {
-        @Override
-        public boolean accept(File dir, String name) {
-          return name.matches("master.*debug.log.*");
-        }
-      });
-
-      SimpleDateFormat sdf = new SimpleDateFormat("dd HH:mm:ss,SSS yyyy MM");
-      String currentYear = (Calendar.getInstance().get(Calendar.YEAR)) + "";
-      String currentMonth = (Calendar.getInstance().get(Calendar.MONTH) + 1) + "";
-
-      if (masterLogs != null) {
-        for (File masterLog : masterLogs) {
-
-          BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(masterLog), UTF_8));
-          String line;
-          try {
-            while ((line = reader.readLine()) != null) {
-              if (line.contains("TABLET_LOADED")) {
-                String[] tokens = line.split("\\s+");
-                String tablet = tokens[8];
-                String server = tokens[10];
-
-                int pos1 = -1;
-                int pos2 = -1;
-                int pos3 = -1;
-
-                for (int i = 0; i < tablet.length(); i++) {
-                  if (tablet.charAt(i) == '<' || tablet.charAt(i) == ';') {
-                    if (pos1 == -1) {
-                      pos1 = i;
-                    } else if (pos2 == -1) {
-                      pos2 = i;
-                    } else {
-                      pos3 = i;
-                    }
-                  }
-                }
-
-                if (pos1 > 0 && pos2 > 0 && pos3 == -1) {
-                  String tid = tablet.substring(0, pos1);
-                  String endRow = tablet.charAt(pos1) == '<' ? "8000000000000000" : tablet.substring(pos1 + 1, pos2);
-                  String prevEndRow = tablet.charAt(pos2) == '<' ? "" : tablet.substring(pos2 + 1);
-                  if (tid.equals(tableId)) {
-                    // System.out.println(" "+server+" "+tid+" "+endRow+" "+prevEndRow);
-                    Date date = sdf.parse(tokens[0] + " " + tokens[1] + " " + currentYear + " " + currentMonth);
-                    // System.out.println(" "+date);
-
-                    assignments.add(new TabletAssignment(tablet, endRow, prevEndRow, server, date.getTime()));
-
-                  }
-                } else if (!tablet.startsWith("!0")) {
-                  System.err.println("Cannot parse tablet " + tablet);
-                }
-
-              }
-            }
-          } finally {
-            reader.close();
-          }
-        }
-      }
-    }
-
-    TabletAssignment findMostRecentAssignment(String row, long time1, long time2) {
-
-      long latest = Long.MIN_VALUE;
-      TabletAssignment ret = null;
-
-      for (TabletAssignment assignment : assignments) {
-        if (assignment.contains(row) && assignment.time <= time2 && assignment.time > latest) {
-          latest = assignment.time;
-          ret = assignment;
-        }
-      }
-
-      return ret;
-    }
-  }
-
-  static class Opts extends ClientOnDefaultTable {
-    @Parameter(names = "--logdir", description = "directory containing the log files", required = true)
-    String logDir;
-
-    Opts() {
-      super("ci");
-    }
-  }
-
-  /**
-   * Class to analyze undefined references and accumulo logs to isolate the time/tablet where data was lost.
-   */
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    BatchScannerOpts bsOpts = new BatchScannerOpts();
-    opts.parseArgs(UndefinedAnalyzer.class.getName(), args, bsOpts);
-
-    List<UndefinedNode> undefs = new ArrayList<>();
-
-    BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, UTF_8));
-    String line;
-    while ((line = reader.readLine()) != null) {
-      String[] tokens = line.split("\\s");
-      String undef = tokens[0];
-      String ref = tokens[1];
-
-      undefs.add(new UndefinedNode(undef, ref));
-    }
-
-    Connector conn = opts.getConnector();
-    BatchScanner bscanner = conn.createBatchScanner(opts.getTableName(), opts.auths, bsOpts.scanThreads);
-    bscanner.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
-    List<Range> refs = new ArrayList<>();
-
-    for (UndefinedNode undefinedNode : undefs)
-      refs.add(new Range(new Text(undefinedNode.ref)));
-
-    bscanner.setRanges(refs);
-
-    HashMap<String,List<String>> refInfo = new HashMap<>();
-
-    for (Entry<Key,Value> entry : bscanner) {
-      String ref = entry.getKey().getRow().toString();
-      List<String> vals = refInfo.get(ref);
-      if (vals == null) {
-        vals = new ArrayList<>();
-        refInfo.put(ref, vals);
-      }
-
-      vals.add(entry.getValue().toString());
-    }
-
-    bscanner.close();
-
-    IngestInfo ingestInfo = new IngestInfo(opts.logDir);
-    TabletHistory tabletHistory = new TabletHistory(Tables.getTableId(conn.getInstance(), opts.getTableName()), opts.logDir);
-
-    SimpleDateFormat sdf = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
-
-    for (UndefinedNode undefinedNode : undefs) {
-
-      List<String> refVals = refInfo.get(undefinedNode.ref);
-      if (refVals != null) {
-        for (String refVal : refVals) {
-          TabletAssignment ta = null;
-
-          String[] tokens = refVal.split(":");
-
-          String uuid = tokens[0];
-          String count = tokens[1];
-
-          String t1 = "";
-          String t2 = "";
-
-          Iterator<Long> times = ingestInfo.getTimes(uuid, Long.parseLong(count, 16));
-          if (times != null) {
-            if (times.hasNext()) {
-              long time2 = times.next();
-              t2 = sdf.format(new Date(time2));
-              if (times.hasNext()) {
-                long time1 = times.next();
-                t1 = sdf.format(new Date(time1));
-                ta = tabletHistory.findMostRecentAssignment(undefinedNode.undef, time1, time2);
-              }
-            }
-          }
-
-          if (ta == null)
-            System.out.println(undefinedNode.undef + " " + undefinedNode.ref + " " + uuid + " " + t1 + " " + t2);
-          else
-            System.out.println(undefinedNode.undef + " " + undefinedNode.ref + " " + ta.tablet + " " + ta.server + " " + uuid + " " + t1 + " " + t2);
-
-        }
-      } else {
-        System.out.println(undefinedNode.undef + " " + undefinedNode.ref);
-      }
-
-    }
-
-  }
-
-}

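The master-log parsing in TabletHistory above depends on Accumulo's extent string encoding, `tableId;endRow;prevEndRow`, where `<` stands in for an unbounded row. A minimal sketch of that decoding step (class name and sample extents are illustrative, not from the patch):

    // Locates the two delimiters and splits an extent string the same way
    // TabletHistory does; '<' in a row slot means that bound is unbounded.
    public class ExtentDemo {
      public static void main(String[] args) {
        for (String tablet : new String[] {"3;7fff;3fff", "3;7fff<"}) {
          int pos1 = -1, pos2 = -1;
          for (int i = 0; i < tablet.length(); i++) {
            if (tablet.charAt(i) == '<' || tablet.charAt(i) == ';') {
              if (pos1 == -1) pos1 = i;
              else if (pos2 == -1) pos2 = i;
            }
          }
          String tid = tablet.substring(0, pos1);
          String endRow = tablet.charAt(pos1) == '<' ? "8000000000000000" : tablet.substring(pos1 + 1, pos2);
          String prevEndRow = tablet.charAt(pos2) == '<' ? "" : tablet.substring(pos2 + 1);
          System.out.println(tid + " (" + prevEndRow + ", " + endRow + "]");
        }
      }
    }
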
http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java b/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java
new file mode 100644
index 0000000..42910af
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.performance;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.UUID;
+import java.util.zip.CRC32;
+import java.util.zip.Checksum;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOnDefaultTable;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.trace.CountSampler;
+import org.apache.accumulo.core.trace.Trace;
+import org.apache.accumulo.core.util.FastFormat;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+
+public class ContinuousIngest {
+
+  private static final byte[] EMPTY_BYTES = new byte[0];
+
+  private static List<ColumnVisibility> visibilities;
+
+  private static void initVisibilities(ContinuousOpts opts) throws Exception {
+    if (opts.visFile == null) {
+      visibilities = Collections.singletonList(new ColumnVisibility());
+      return;
+    }
+
+    visibilities = new ArrayList<>();
+
+    FileSystem fs = FileSystem.get(new Configuration());
+    BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(opts.visFile)), UTF_8));
+
+    String line;
+
+    while ((line = in.readLine()) != null) {
+      visibilities.add(new ColumnVisibility(line));
+    }
+
+    in.close();
+  }
+
+  private static ColumnVisibility getVisibility(Random rand) {
+    return visibilities.get(rand.nextInt(visibilities.size()));
+  }
+
+  public static void main(String[] args) throws Exception {
+
+    ContinuousOpts opts = new ContinuousOpts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    ClientOnDefaultTable clientOpts = new ClientOnDefaultTable("ci");
+    clientOpts.parseArgs(ContinuousIngest.class.getName(), args, bwOpts, opts);
+
+    initVisibilities(opts);
+
+    if (opts.min < 0 || opts.max < 0 || opts.max <= opts.min) {
+      throw new IllegalArgumentException("bad min and max");
+    }
+    Connector conn = clientOpts.getConnector();
+
+    if (!conn.tableOperations().exists(clientOpts.getTableName())) {
+      throw new TableNotFoundException(null, clientOpts.getTableName(), "Consult the README and create the table before starting ingest.");
+    }
+
+    BatchWriter bw = conn.createBatchWriter(clientOpts.getTableName(), bwOpts.getBatchWriterConfig());
+    bw = Trace.wrapAll(bw, new CountSampler(1024));
+
+    Random r = new Random();
+
+    byte[] ingestInstanceId = UUID.randomUUID().toString().getBytes(UTF_8);
+
+    System.out.printf("UUID %d %s%n", System.currentTimeMillis(), new String(ingestInstanceId, UTF_8));
+
+    long count = 0;
+    final int flushInterval = 1000000;
+    final int maxDepth = 25;
+
+    // always want to point back to flushed data. This way the previous item should
+    // always exist in accumulo when verifying data. To do this make insert N point
+    // back to the row from insert (N - flushInterval). The array below is used to keep
+    // track of this.
+    long prevRows[] = new long[flushInterval];
+    long firstRows[] = new long[flushInterval];
+    int firstColFams[] = new int[flushInterval];
+    int firstColQuals[] = new int[flushInterval];
+
+    long lastFlushTime = System.currentTimeMillis();
+
+    out: while (true) {
+      // generate first set of nodes
+      ColumnVisibility cv = getVisibility(r);
+
+      for (int index = 0; index < flushInterval; index++) {
+        long rowLong = genLong(opts.min, opts.max, r);
+        prevRows[index] = rowLong;
+        firstRows[index] = rowLong;
+
+        int cf = r.nextInt(opts.maxColF);
+        int cq = r.nextInt(opts.maxColQ);
+
+        firstColFams[index] = cf;
+        firstColQuals[index] = cq;
+
+        Mutation m = genMutation(rowLong, cf, cq, cv, ingestInstanceId, count, null, r, opts.checksum);
+        count++;
+        bw.addMutation(m);
+      }
+
+      lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
+      if (count >= opts.num)
+        break out;
+
+      // generate subsequent sets of nodes that link to previous set of nodes
+      for (int depth = 1; depth < maxDepth; depth++) {
+        for (int index = 0; index < flushInterval; index++) {
+          long rowLong = genLong(opts.min, opts.max, r);
+          byte[] prevRow = genRow(prevRows[index]);
+          prevRows[index] = rowLong;
+          Mutation m = genMutation(rowLong, r.nextInt(opts.maxColF), r.nextInt(opts.maxColQ), cv, ingestInstanceId, count, prevRow, r, opts.checksum);
+          count++;
+          bw.addMutation(m);
+        }
+
+        lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
+        if (count >= opts.num)
+          break out;
+      }
+
+      // create one big linked list; this makes all of the first inserts
+      // point to something
+      for (int index = 0; index < flushInterval - 1; index++) {
+        Mutation m = genMutation(firstRows[index], firstColFams[index], firstColQuals[index], cv, ingestInstanceId, count, genRow(prevRows[index + 1]), r,
+            opts.checksum);
+        count++;
+        bw.addMutation(m);
+      }
+      lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
+      if (count >= opts.num)
+        break out;
+    }
+
+    bw.close();
+    clientOpts.stopTracing();
+  }
+
+  private static long flush(BatchWriter bw, long count, final int flushInterval, long lastFlushTime) throws MutationsRejectedException {
+    long t1 = System.currentTimeMillis();
+    bw.flush();
+    long t2 = System.currentTimeMillis();
+    System.out.printf("FLUSH %d %d %d %d %d%n", t2, (t2 - lastFlushTime), (t2 - t1), count, flushInterval);
+    lastFlushTime = t2;
+    return lastFlushTime;
+  }
+
+  public static Mutation genMutation(long rowLong, int cfInt, int cqInt, ColumnVisibility cv, byte[] ingestInstanceId, long count, byte[] prevRow, Random r,
+      boolean checksum) {
+    // Adler32 is supposed to be faster, but according to Wikipedia it is not good for small data, so CRC32 is used instead.
+    CRC32 cksum = null;
+
+    byte[] rowString = genRow(rowLong);
+
+    byte[] cfString = FastFormat.toZeroPaddedString(cfInt, 4, 16, EMPTY_BYTES);
+    byte[] cqString = FastFormat.toZeroPaddedString(cqInt, 4, 16, EMPTY_BYTES);
+
+    if (checksum) {
+      cksum = new CRC32();
+      cksum.update(rowString);
+      cksum.update(cfString);
+      cksum.update(cqString);
+      cksum.update(cv.getExpression());
+    }
+
+    Mutation m = new Mutation(new Text(rowString));
+
+    m.put(new Text(cfString), new Text(cqString), cv, createValue(ingestInstanceId, count, prevRow, cksum));
+    return m;
+  }
+
+  public static final long genLong(long min, long max, Random r) {
+    return ((r.nextLong() & 0x7fffffffffffffffl) % (max - min)) + min;
+  }
+
+  static final byte[] genRow(long min, long max, Random r) {
+    return genRow(genLong(min, max, r));
+  }
+
+  static final byte[] genRow(long rowLong) {
+    return FastFormat.toZeroPaddedString(rowLong, 16, 16, EMPTY_BYTES);
+  }
+
+  private static Value createValue(byte[] ingestInstanceId, long count, byte[] prevRow, Checksum cksum) {
+    int dataLen = ingestInstanceId.length + 16 + (prevRow == null ? 0 : prevRow.length) + 3;
+    if (cksum != null)
+      dataLen += 8;
+    byte val[] = new byte[dataLen];
+    System.arraycopy(ingestInstanceId, 0, val, 0, ingestInstanceId.length);
+    int index = ingestInstanceId.length;
+    val[index++] = ':';
+    int added = FastFormat.toZeroPaddedString(val, index, count, 16, 16, EMPTY_BYTES);
+    if (added != 16)
+      throw new RuntimeException(" " + added);
+    index += 16;
+    val[index++] = ':';
+    if (prevRow != null) {
+      System.arraycopy(prevRow, 0, val, index, prevRow.length);
+      index += prevRow.length;
+    }
+
+    val[index++] = ':';
+
+    if (cksum != null) {
+      cksum.update(val, 0, index);
+      cksum.getValue();
+      FastFormat.toZeroPaddedString(val, index, cksum.getValue(), 8, 16, EMPTY_BYTES);
+    }
+
+    // System.out.println("val "+new String(val));
+
+    return new Value(val);
+  }
+}

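The value written by createValue above lays out `ingestInstanceId:count:prevRow:` (plus an optional zero-padded hex CRC32 suffix), with the count stored as 16 hex digits; UndefinedAnalyzer later splits such values on `:` to recover the uuid and count. A minimal sketch of decoding that layout (class name and the sample value are illustrative):

    // Splits a continuous-ingest value into its colon-delimited fields and
    // parses the hex count, matching the layout produced by createValue.
    public class ValueDemo {
      public static void main(String[] args) {
        String val = "cafe0123-fake-uuid:0000000000000a1b:00000000075bcd15:";
        String[] parts = val.split(":", -1);
        String uuid = parts[0];
        long count = Long.parseLong(parts[1], 16); // counts are hex on the wire
        String prevRow = parts[2]; // empty for nodes with no predecessor
        System.out.println(uuid + " count=" + count + " prev=" + prevRow);
      }
    }
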
http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/src/main/java/org/apache/accumulo/test/performance/ContinuousOpts.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/ContinuousOpts.java b/test/src/main/java/org/apache/accumulo/test/performance/ContinuousOpts.java
new file mode 100644
index 0000000..8c14064
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/performance/ContinuousOpts.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.performance;
+
+import java.io.IOException;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.log4j.FileAppender;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+
+import com.beust.jcommander.IStringConverter;
+import com.beust.jcommander.Parameter;
+
+/**
+ * Common CLI arguments for the Continuous Ingest suite.
+ */
+public class ContinuousOpts {
+
+  public static class DebugConverter implements IStringConverter<String> {
+    @Override
+    public String convert(String debugLog) {
+      Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
+      logger.setLevel(Level.TRACE);
+      logger.setAdditivity(false);
+      try {
+        logger.addAppender(new FileAppender(new PatternLayout("%d{dd HH:mm:ss,SSS} [%-8c{2}] %-5p: %m%n"), debugLog, true));
+      } catch (IOException ex) {
+        throw new RuntimeException(ex);
+      }
+      return debugLog;
+    }
+  }
+
+  public static class ShortConverter implements IStringConverter<Short> {
+    @Override
+    public Short convert(String value) {
+      return Short.valueOf(value);
+    }
+  }
+
+  @Parameter(names = "--min", description = "lowest random row number to use")
+  long min = 0;
+
+  @Parameter(names = "--max", description = "maximum random row number to use")
+  long max = Long.MAX_VALUE;
+
+  @Parameter(names = "--debugLog", description = "file to write debugging output", converter = DebugConverter.class)
+  String debugLog = null;
+
+  @Parameter(names = "--num", description = "the number of entries to ingest")
+  long num = Long.MAX_VALUE;
+
+  @Parameter(names = "--maxColF", description = "maximum column family value to use", converter = ShortConverter.class)
+  short maxColF = Short.MAX_VALUE;
+
+  @Parameter(names = "--maxColQ", description = "maximum column qualifier value to use", converter = ShortConverter.class)
+  short maxColQ = Short.MAX_VALUE;
+
+  @Parameter(names = "--addCheckSum", description = "turn on checksums")
+  boolean checksum = false;
+
+  @Parameter(names = "--visibilities", description = "read the visibilities to ingest with from a file")
+  String visFile = null;
+}

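Since ContinuousOpts is a plain jcommander-annotated bean, it can be exercised on its own. A minimal sketch of parsing a few of the flags above (the demo class and argument values are illustrative, and it assumes the demo sits in the same package so the package-private fields are readable):

    import com.beust.jcommander.JCommander;

    // Parses a sample command line into ContinuousOpts; ShortConverter
    // handles --maxColF, and --addCheckSum is a boolean switch.
    public class OptsDemo {
      public static void main(String[] args) {
        ContinuousOpts opts = new ContinuousOpts();
        new JCommander(opts, "--num", "1000000", "--maxColF", "128", "--addCheckSum");
        System.out.println(opts.num + " " + opts.maxColF + " " + opts.checksum);
      }
    }
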
http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java b/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
index efc03dd..4feebd3 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
@@ -31,7 +31,6 @@ import org.apache.accumulo.minicluster.ServerType;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.test.categories.MiniClusterOnlyTests;
 import org.apache.accumulo.test.categories.PerformanceTests;
-import org.apache.accumulo.test.continuous.ContinuousIngest;
 import org.apache.accumulo.test.functional.ConfigurableMacBase;
 import org.apache.accumulo.test.mrit.IntegrationTestMapReduce;
 import org.apache.hadoop.conf.Configuration;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/src/main/java/org/apache/accumulo/test/scalability/Ingest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/scalability/Ingest.java b/test/src/main/java/org/apache/accumulo/test/scalability/Ingest.java
index 7c04000..1c99cce 100644
--- a/test/src/main/java/org/apache/accumulo/test/scalability/Ingest.java
+++ b/test/src/main/java/org/apache/accumulo/test/scalability/Ingest.java
@@ -29,7 +29,7 @@ import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.test.continuous.ContinuousIngest;
+import org.apache.accumulo.test.performance.ContinuousIngest;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/README.md
----------------------------------------------------------------------
diff --git a/test/system/continuous/README.md b/test/system/continuous/README.md
deleted file mode 100644
index 31ee4bd..0000000
--- a/test/system/continuous/README.md
+++ /dev/null
@@ -1,103 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at 
- 
-    http://www.apache.org/licenses/LICENSE-2.0
- 
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Continuous Query and Ingest
-===========================
-
-This directory contains a suite of scripts for placing continuous query and
-ingest load on accumulo.  The purpose of these scripts is two-fold. First,
-place continuous load on accumulo to see if it breaks.  Second, collect
-statistics in order to understand how accumulo behaves.  To run these scripts
-copy all of the `.example` files and modify them.  You can put these scripts in
-the current directory or define a `CONTINUOUS_CONF_DIR` where the files will be
-read from. These scripts rely on `pssh`. Before running any script you may need
-to use `pssh` to create the log directory on each machine (if you want it local).
-Also, create the table "ci" before running. You can run
-`org.apache.accumulo.test.continuous.GenSplits` to generate split points for a
-continuous ingest table.
-
-The following ingest scripts insert data into accumulo that will form a random
-graph.
-
-> $ start-ingest.sh  
-> $ stop-ingest.sh
-
-The following query scripts randomly walk the graph created by the ingesters.
-Each walker produces detailed statistics on query/scan times.
-
-> $ start-walkers.sh  
-> $ stop-walkers.sh
-
-The following scripts start and stop batch walkers.
-
-> $ start-batchwalkers.sh  
-> $ stop-batchwalkers.sh
-
-And the following scripts start and stop scanners.
-
-> $ start-scanners.sh
-> $ stop-scanners.sh
-
-In addition to placing continuous load, the following scripts start and stop a
-service that continually collects statistics about accumulo and HDFS.
-
-> $ start-stats.sh  
-> $ stop-stats.sh
-
-Optionally, start the agitator to periodically kill the tabletserver and/or datanode
-process(es) on random nodes. You can run this script as root and it will properly start
-processes as the user you configured in `continuous-env.sh` (`HDFS_USER` for the Datanode and
-`ACCUMULO_USER` for Accumulo processes). If you run it as yourself and the `HDFS_USER` and
-`ACCUMULO_USER` values are the same as your user, the agitator will not change users. In
-the case where you run the agitator as a non-privileged user that isn't the same as `HDFS_USER`
-or `ACCUMULO_USER`, the agitator will attempt to `sudo` to these users, which relies on correct
-configuration of sudo. Also, be sure that your `HDFS_USER` has password-less `ssh` configured.
-
-> $ start-agitator.sh  
-> $ stop-agitator.sh
-
-Start all three of these services and let them run for a few hours. Then run
-`report.pl` to generate a simple HTML report containing plots and histograms
-showing what has transpired.
-
-A MapReduce job to verify all data created by continuous ingest can be run
-with the following command.  Before running the command modify the `VERIFY_*`
-variables in `continuous-env.sh` if needed.  Do not run ingest while running this
-command, as this will cause erroneous reporting of UNDEFINED nodes. The MapReduce
-job will scan a reference after it has scanned the definition.
-
-> $ run-verify.sh
-
-Each entry, except for the first batch of entries, inserted by continuous
-ingest references a previously flushed entry.  Since we are referencing flushed
-entries, they should always exist.  The MapReduce job checks that all
-referenced entries exist.  If it finds any that do not exist it will increment
-the UNDEFINED counter and emit the referenced but undefined node.  The MapReduce
-job produces two other counts: REFERENCED and UNREFERENCED.  It is
-expected that these two counts are non-zero.  REFERENCED counts nodes that are
-defined and referenced.  UNREFERENCED counts nodes that are defined and
-unreferenced; these are the latest nodes inserted.
-
-To stress accumulo, run the following script, which starts a MapReduce job
-that reads and writes to your continuous ingest table.  This MapReduce job
-will write out an entry for every entry in the table (except for ones created
-by the MapReduce job itself). Stop ingest before running this MapReduce job.
-Do not run more than one instance of this MapReduce job concurrently against a
-table.
-
-> $ run-moru.sh
-

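To make the verification counters described in the README concrete, here is a minimal sketch of the bookkeeping over a toy table of node-to-referenced-node pairs (all names and data are illustrative, not from the verify job itself):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Toy version of the verify counters: UNDEFINED nodes are referenced but
    // never written; UNREFERENCED nodes are written but nothing points at them.
    public class VerifyDemo {
      public static void main(String[] args) {
        Map<String,String> table = new HashMap<>(); // node -> node it references
        table.put("a", "");   // "" means first-generation, references nothing
        table.put("b", "a");
        table.put("c", "b");
        table.put("d", "x");  // "x" was lost: referenced but never defined
        Set<String> referenced = new HashSet<>();
        for (String ref : table.values())
          if (!ref.isEmpty()) referenced.add(ref);
        long undefined = referenced.stream().filter(r -> !table.containsKey(r)).count();
        long refd = table.keySet().stream().filter(referenced::contains).count();
        long unrefd = table.size() - refd;
        // prints UNDEFINED=1 REFERENCED=2 UNREFERENCED=2
        System.out.println("UNDEFINED=" + undefined + " REFERENCED=" + refd + " UNREFERENCED=" + unrefd);
      }
    }
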
http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/analyze-missing.pl
----------------------------------------------------------------------
diff --git a/test/system/continuous/analyze-missing.pl b/test/system/continuous/analyze-missing.pl
deleted file mode 100755
index 5cce1b1..0000000
--- a/test/system/continuous/analyze-missing.pl
+++ /dev/null
@@ -1,127 +0,0 @@
-#! /usr/bin/env perl
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-use POSIX qw(strftime);
-
-if(scalar(@ARGV) != 4){
-       print "Usage : analyze-missing.pl <accumulo home> <continuous log dir> <user> <pass> \n";
-       exit(1);
-}
-
-$ACCUMULO_HOME=$ARGV[0];
-$CONTINUOUS_LOG_DIR=$ARGV[1];
-$USER=$ARGV[2];
-$PASS=$ARGV[3];
-
-
-@missing = `grep MIS $CONTINUOUS_LOG_DIR/*.err`;
-
-
-
-for $miss (@missing) {
-       chomp($miss);
-       ($file, $type, $time, $row) = split(/[: ]/, $miss);
-
-       substr($file, -3, 3, "out");
-
-       $prevRowLine = `grep -B 1 $row $file | grep SRQ | grep -v $row`;
-
-       @prla = split(/\s+/, $prevRowLine);
-       $prevRow = $prla[2];
-#      print $prevRow."\n";
-
-       $aScript = `mktemp /tmp/miss_script.XXXXXXXXXX`;
-       chomp($aScript);
-       open(AS, ">$aScript") || die;
-
-       print AS "table ci\n";
-       print AS "scan -b $prevRow -e $prevRow\n";
-       print AS "scan -b $row -e $row\n";
-       print AS "quit\n";
-       close(AS);
-
-       $exist = 0;
-       $ingestIDSame = 0;
-       $ingestId = "";
-       $count = 0;
-
-       @entries = `$ACCUMULO_HOME/bin/accumulo shell -u $USER -p $PASS -f $aScript | grep $row`;
-       system("rm $aScript");
-
-       for $entry (@entries){
-               chomp($entry);
-               @entryA = split(/[: ]+/, $entry);
-               if($entryA[0] eq $row){
-                       $exist = 1;
-
-                       if($entryA[4] eq $ingestId){
-                               $ingestIDSame = 1;
-                       }
-               }else{
-                       $ingestId = $entryA[4];
-                       $count = hex($entryA[5]);
-               }
-       }
-
-
-       #look in ingest logs
-       @ingestLogs = `ls  $CONTINUOUS_LOG_DIR/*ingest*.out`;
-       @flushTimes = ();
-       chomp(@ingestLogs);
-       for $ingestLog (@ingestLogs){
-               open(IL, "<$ingestLog") || die;
-               
-
-               while($firstLine = <IL>){
-                       chomp($firstLine);
-                       if($firstLine =~ /UUID.*/){
-                               last;
-                       }
-               }
-
-               @iinfo = split(/\s+/,$firstLine);
-               if($iinfo[2] eq $ingestId){
-                       while($line = <IL>){
-                               if($line =~ /FLUSH (\d+) \d+ \d+ (\d+) \d+/){
-                                       push(@flushTimes, $1);
-                                       if(scalar(@flushTimes) > 3){
-                                               shift(@flushTimes);
-                                       }
-                                       if($count < $2){
-                                               last;
-                                       }
-                               }
-                       }
-               }
-               
-               
-
-               close(IL);
-       
-               if(scalar(@flushTimes) > 0){
-                       last;
-               }
-       } 
-
-       $its0 = strftime "%m/%d/%Y_%H:%M:%S", gmtime($flushTimes[0]/1000);
-       $its1 = strftime "%m/%d/%Y_%H:%M:%S", gmtime($flushTimes[1]/1000);
-       $mts = strftime "%m/%d/%Y_%H:%M:%S", gmtime($time/1000);
-
-       print "$row $exist $ingestIDSame $prevRow $ingestId   $its0   $its1   $mts\n";
-}
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/analyze-missing.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/analyze-missing.sh b/test/system/continuous/analyze-missing.sh
deleted file mode 100755
index e2cfbb1..0000000
--- a/test/system/continuous/analyze-missing.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#! /usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-CONTINUOUS_CONF_DIR=${CONTINUOUS_CONF_DIR:-$ACCUMULO_HOME/test/system/continuous/}
-. "$CONTINUOUS_CONF_DIR/continuous-env.sh"
-
-./analyze-missing.pl "$ACCUMULO_HOME" "$CONTINUOUS_LOG_DIR" "$USER" "$PASS"
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/batch_walkers.txt.example
----------------------------------------------------------------------
diff --git a/test/system/continuous/batch_walkers.txt.example b/test/system/continuous/batch_walkers.txt.example
deleted file mode 100644
index 63fb8bb..0000000
--- a/test/system/continuous/batch_walkers.txt.example
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-localhost

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/continuous-env.sh.example
----------------------------------------------------------------------
diff --git a/test/system/continuous/continuous-env.sh.example b/test/system/continuous/continuous-env.sh.example
deleted file mode 100644
index 0abd8c3..0000000
--- a/test/system/continuous/continuous-env.sh.example
+++ /dev/null
@@ -1,131 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# set this to an absolute path that exists on every machine
-# Inherit values from environment if they are already set.
-HADOOP_HOME=${HADOOP_HOME:-/opt/hadoop}
-HADOOP_PREFIX=${HADOOP_PREFIX:-$HADOOP_HOME}
-ACCUMULO_HOME=${ACCUMULO_HOME:-/opt/accumulo}
-ACCUMULO_CONF_DIR=${ACCUMULO_CONF_DIR:-$ACCUMULO_HOME/conf}
-JAVA_HOME=${JAVA_HOME:-/opt/java}
-ZOOKEEPER_HOME=${ZOOKEEPER_HOME:-/opt/zookeeper}
-
-CONTINUOUS_LOG_DIR=$ACCUMULO_HOME/test/system/continuous/logs
-INSTANCE_NAME=instance
-ZOO_KEEPERS=zhost1,zhost2
-ACCUMULO_USER=$(whoami)
-HDFS_USER=$(whoami)
-USER=user
-PASS=pass
-TABLE=ci
-
-#set debug to on to enable logging of accumulo client debugging
-DEBUG_INGEST=off
-DEBUG_WALKER=off
-DEBUG_BATCH_WALKER=off
-DEBUG_SCANNER=off
-
-#the number of entries each client should write
-NUM=9223372036854775807
-
-#the minimum random row to generate
-MIN=0
-
-#the maximum random row to generate
-MAX=9223372036854775807
-
-#the maximum number of random column families to generate
-MAX_CF=32767
-
-#the maximum number of random column qualifiers to generate
-MAX_CQ=32767
-
-#an optional file in hdfs containing visibilities.  If left blank, then column
-#visibility will not be set.  If specified then a random line will be selected
-#from the file and used for column visibility for each linked list.
-VISIBILITIES=''
-
-#the max memory (in bytes) each ingester will use to buffer writes
-MAX_MEM=100000000
-
-#the maximum time (in millis) each ingester will buffer data
-MAX_LATENCY=600000
-
-#the number of threads each ingester will use to write data
-NUM_THREADS=4
-
-#the amount of time (in millis) to sleep between each query
-SLEEP_TIME=10
-
-#an optional file in hdfs containing lines of comma-separated auths.  If
-#specified, walkers will randomly select lines from this file and use that to
-#set auths.
-AUTHS=''
-
-#determines if checksums are generated; may want to turn off when performance testing
-CHECKSUM=true
-
-#the amount of time (in minutes) the agitator should sleep before killing tservers
-TSERVER_KILL_SLEEP_TIME=20
-
-#the amount of time (in minutes) the agitator should sleep after killing
-# before restarting tservers
-TSERVER_RESTART_SLEEP_TIME=10
-
-#the minimum and maximum number of tservers the agitator will kill at once
-TSERVER_MIN_KILL=1
-TSERVER_MAX_KILL=1
-
-#the amount of time (in minutes) the agitator should sleep before killing datanodes
-DATANODE_KILL_SLEEP_TIME=20
-
-#the amount of time (in minutes) the agitator should sleep after killing
-# before restarting datanodes
-DATANODE_RESTART_SLEEP_TIME=10
-
-#the minimum and maximum number of datanodes the agitator will kill at once
-DATANODE_MIN_KILL=1
-DATANODE_MAX_KILL=1
-
-#time in minutes between killing masters
-MASTER_KILL_SLEEP_TIME=60
-MASTER_RESTART_SLEEP_TIME=2
-
-#Do we want to perturb HDFS? Only works on HDFS versions with HA, i.e. Hadoop 2
-# AGITATE_HDFS=true
-AGITATE_HDFS=false
-AGITATE_HDFS_SLEEP_TIME=10
-AGITATE_HDFS_SUPERUSER=hdfs
-AGITATE_HDFS_COMMAND="${HADOOP_PREFIX:-/usr/lib/hadoop}/share/hadoop/hdfs/bin/hdfs"
-AGITATE_HDFS_SUDO=$(which sudo)
-
-#settings for the verification map reduce job
-VERIFY_OUT=/tmp/continuous_verify
-VERIFY_MAX_MAPS=64
-VERIFY_REDUCERS=64
-SCAN_OFFLINE=false
-#comma separated list of auths to use for verify
-VERIFY_AUTHS=''
-
-#settings related to the batch walker
-# sleep in seconds
-BATCH_WALKER_SLEEP=1800
-BATCH_WALKER_BATCH_SIZE=10000
-BATCH_WALKER_THREADS=8
-
-#settings related to scanners
-# sleep in seconds
-SCANNER_SLEEP_TIME=10
-SCANNER_ENTRIES=5000

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/datanode-agitator.pl
----------------------------------------------------------------------
diff --git a/test/system/continuous/datanode-agitator.pl b/test/system/continuous/datanode-agitator.pl
deleted file mode 100755
index a98bb66..0000000
--- a/test/system/continuous/datanode-agitator.pl
+++ /dev/null
@@ -1,140 +0,0 @@
-#! /usr/bin/env perl
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-use POSIX qw(strftime);
-use Cwd qw();
-
-if(scalar(@ARGV) != 5 && scalar(@ARGV) != 3){
-  print "Usage : datanode-agitator.pl <min sleep before kill in minutes>[:max sleep before kill in minutes] <min sleep before restart in minutes>[:max sleep before restart in minutes] HADOOP_PREFIX [<min kill> <max kill>]\n";
-  exit(1);
-}
-
-my $ACCUMULO_HOME;
-if( defined $ENV{'ACCUMULO_HOME'} ){
-  $ACCUMULO_HOME = $ENV{'ACCUMULO_HOME'};
-} else {
-  $cwd=Cwd::cwd();
-  $ACCUMULO_HOME=$cwd . '/../../..';
-}
-$HADOOP_PREFIX=$ARGV[2];
-
-print "ACCUMULO_HOME=$ACCUMULO_HOME\n";
-print "HADOOP_PREFIX=$HADOOP_PREFIX\n";
-
-@sleeprange1 = split(/:/, $ARGV[0]);
-$sleep1 = $sleeprange1[0];
-
-@sleeprange2 = split(/:/, $ARGV[1]);
-$sleep2 = $sleeprange2[0];
-
-if (scalar(@sleeprange1) > 1) {
-  $sleep1max = $sleeprange1[1] + 1;
-} else {
-  $sleep1max = $sleep1;
-}
-
-if ($sleep1 > $sleep1max) {
-  die("sleep1 > sleep1max $sleep1 > $sleep1max");
-}
-
-if (scalar(@sleeprange2) > 1) {
-  $sleep2max = $sleeprange2[1] + 1;
-} else {
-  $sleep2max = $sleep2;
-}
-
-if($sleep2 > $sleep2max){
-  die("sleep2 > sleep2max $sleep2 > $sleep2max");
-}
-
-if(defined $ENV{'ACCUMULO_CONF_DIR'}){
-  $ACCUMULO_CONF_DIR = $ENV{'ACCUMULO_CONF_DIR'};
-}else{
-  $ACCUMULO_CONF_DIR = $ACCUMULO_HOME . '/conf';
-}
-
-if(scalar(@ARGV) == 5){
-  $minKill = $ARGV[3];
-  $maxKill = $ARGV[4];
-}else{
-  $minKill = 1;
-  $maxKill = 1;
-}
-
-if($minKill > $maxKill){
-  die("minKill > maxKill $minKill > $maxKill");
-}
-
-@tserversRaw = `cat $ACCUMULO_CONF_DIR/tservers`;
-chomp(@tserversRaw);
-
-for $tserver (@tserversRaw){
-  if($tserver eq "" || substr($tserver,0,1) eq "#"){
-    next;
-  }
-
-  push(@tservers, $tserver);
-}
-
-
-if(scalar(@tservers) < $maxKill){
-  print STDERR "WARN setting maxKill to ".scalar(@tservers)."\n";
-  $maxKill = scalar(@tservers);
-}
-
-if ($minKill > $maxKill){
-  print STDERR "WARN setting minKill to equal maxKill\n";
-  $minKill = $maxKill;
-}
-
-while(1){
-
-  $numToKill = int(rand($maxKill - $minKill + 1)) + $minKill;
-  %killed = ();
-  $server = "";
-
-  for($i = 0; $i < $numToKill; $i++){
-    while($server eq "" || $killed{$server} != undef){
-      $index = int(rand(scalar(@tservers)));
-      $server = $tservers[$index];
-    }
-
-    $killed{$server} = 1;
-
-    $t = strftime "%Y%m%d %H:%M:%S", localtime;
-
-    print STDERR "$t Killing datanode on $server\n";
-    system("ssh $server \"pkill -9 -f '[p]roc_datanode'\"");
-  }
-
-  $nextsleep2 = int(rand($sleep2max - $sleep2)) + $sleep2;
-  sleep($nextsleep2 * 60);
-
-  foreach $restart (keys %killed) {
-
-    $t = strftime "%Y%m%d %H:%M:%S", localtime;
-
-    print STDERR "$t Starting datanode on $restart\n";
-    # We can just start as we're the HDFS user
-    system("ssh $restart '$HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode'");
-  }
-
-  $nextsleep1 = int(rand($sleep1max - $sleep1)) + $sleep1;
-  sleep($nextsleep1 * 60);
-}
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/hdfs-agitator.pl
----------------------------------------------------------------------
diff --git a/test/system/continuous/hdfs-agitator.pl b/test/system/continuous/hdfs-agitator.pl
deleted file mode 100755
index 85eab32..0000000
--- a/test/system/continuous/hdfs-agitator.pl
+++ /dev/null
@@ -1,217 +0,0 @@
-#! /usr/bin/env perl
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-use strict;
-use warnings;
-use POSIX qw(strftime);
-use Getopt::Long;
-use Pod::Usage;
-
-my $help = 0;
-my $man = 0;
-my $sleep = 10;
-my $superuser = 'hdfs';
-my $hdfsCmd;
-if( defined $ENV{'HADOOP_PREFIX'} ){
-  $hdfsCmd = $ENV{'HADOOP_PREFIX'} . '/share/hadoop/hdfs/bin/hdfs';
-}
-my $sudo;
-my $nameservice;
-
-GetOptions('help|?' => \$help, 'man' => \$man, 'sleep=i' => \$sleep, 'nameservice=s' => \$nameservice, 'superuser=s' => \$superuser, 'hdfs-cmd=s' => \$hdfsCmd, 'sudo:s' => \$sudo) or pod2usage(2);
-pod2usage(-exitval => 0, -verbose => 1) if $help;
-pod2usage(-exitval => 0, -verbose => 2) if $man;
-pod2usage(-exitval => 1, -verbose => 1, -message => '$HADOOP_PREFIX not defined and no hdfs-cmd given. Please use --hdfs-cmd to specify where your hdfs CLI is.') if not defined $hdfsCmd;
-pod2usage(-exitval => 1, -verbose => 1, -message => "Your specified hdfs CLI '$hdfsCmd' is not executable.") if not -x $hdfsCmd;
-if( defined $sudo and "" eq $sudo ){
-  $sudo = `which sudo`;
-  pod2usage(-exitval => 1, -verbose => 1, -message => "Error attempting to find the sudo command, please specify it with --sudo /path/to/sudo") if 0 != $?;
-  chomp($sudo);
-}
-if( defined $sudo ){
-  pod2usage(-exitval => 1, -verbose => 1, -message => "Your specified sudo command '$sudo' is not executable.") if not -x $sudo;
-}
-
-my $needsudo = defined $sudo;
-my $haadmin = "$hdfsCmd haadmin";
-if($needsudo) {
-  $haadmin = "$sudo -u $superuser $haadmin";
-  print STDERR "Starting HDFS agitator, configured to fail over every $sleep 
minutes. will run hdfs command '$hdfsCmd' as user '$superuser' via '$sudo'.\n";
-} else {
-  print STDERR "Starting HDFS agitator, configured to fail over every $sleep 
minutes. will run hdfs command '$hdfsCmd' as the current user.\n";
-}
-while(1){
-  sleep($sleep * 60);
-  my $t = strftime "%Y%m%d %H:%M:%S", localtime;
-  my @failServices;
-  if( defined $nameservice ){
-    @failServices = ($nameservice);
-  } else {
-    my $nameservicesRaw = `$hdfsCmd getconf -confKey dfs.nameservices`;
-    if(0 != $?) {
-      print STDERR "$t HDFS CLI failed. please see --help to set it 
correctly\n";
-      exit(1);
-    }
-    chomp($nameservicesRaw);
-    my @nameservices = split(/,/, $nameservicesRaw);
-    if(1 > scalar(@nameservices)) {
-      print STDERR "$t No HDFS NameServices found. Are you sure you're running 
in HA?\n";
-      exit(1);
-    }
-    if(rand(1) < .5){
-      my $serviceToFail = $nameservices[int(rand(scalar(@nameservices)))];
-      print STDERR "$t Failing over nameservice $serviceToFail\n";
-      @failServices = ($serviceToFail);
-    } else {
-      print STDERR "$t Failing over all nameservices\n";
-      @failServices = @nameservices;
-    }
-  }
-  for my $toFail (@failServices){
-    my $namenodesRaw = `$hdfsCmd getconf -confKey dfs.ha.namenodes.$toFail`;
-    if(0 != $?) {
-      print STDERR "$t HDFS CLI failed to look up namenodes in service 
$toFail.\n";
-      exit(1);
-    }
-    chomp($namenodesRaw);
-    my @namenodes = split(/,/, $namenodesRaw);
-    if(2 > scalar(@namenodes)) {
-      print STDERR "$t WARN NameService $toFail does not have at least 2 
namenodes according to the HDFS configuration, skipping.\n";
-      next;
-    }
-    my $active;
-    for my $namenode (@namenodes){
-      my $status = `$haadmin -ns $toFail -getServiceState $namenode`;
-      if(0 != $?) {
-        if($needsudo) {
-          print STDERR "$t WARN Error while attempting to get the service 
state of $toFail :: $namenode\n";
-          $status = 'error';
-        } else {
-          print STDERR "$t WARN Current user may not run the HDFS haadmin 
utility, attempting to sudo to the $superuser user.\n";
-          $needsudo = 1;
-          if(not defined $sudo) {
-            $sudo = `which sudo`;
-            pod2usage(-exitval => 1, -verbose => 1, -message => "Error attempting to find the sudo command, please specify it with --sudo") if 0 != $?;
-            chomp($sudo);
-            pod2usage(-exitval => 1, -verbose => 1, -message => "The sudo command '$sudo' is not executable. Please specify sudo with --sudo") if not -x $sudo;
-          }
-          $haadmin = "$sudo -u $superuser $haadmin";
-          redo;
-        }
-      }
-      chomp($status);
-      if( 'active' eq $status ){
-        $active = $namenode;
-        last;
-      }
-    }
-    if( defined $active ){
-      my @standby = grep { $_ ne $active } @namenodes;
-      my $newActive = $standby[int(rand(scalar(@standby)))];
-      print STDERR "$t Transitioning nameservice $toFail from $active to 
$newActive\n";
-      my $cmd = "$haadmin -ns $toFail -failover $active $newActive";
-      print "$t $cmd\n";
-      system($cmd);
-    } else {
-      my $newActive = $namenodes[int(rand(scalar(@namenodes)))];
-      print STDERR "$t WARN nameservice $toFail did not have an active 
namenode. Transitioning a random namenode to active. This will fail if HDFS is 
configured for automatic failover.\n";
-      my $cmd = "$haadmin -ns $toFail -transitionToActive $newActive";
-      print "$t $cmd\n";
-      system($cmd);
-    }
-  }
-}
-__END__
-
-=head1 NAME
-
-hdfs-agitator - causes HDFS NameNode failovers
-
-=head1 DESCRIPTION
-
-Sleeps for a configurable amount of time, then causes a NameNode failover in one
-or more HDFS NameServices. If a given NameService does not have an Active
-NameNode when it comes time to fail over, a random standby is promoted.
-
-Only works on HDFS versions that support HA configurations and the haadmin
-command. In order to function, the user running this script must be able to
-use the haadmin command. This requires access to an HDFS superuser. By default,
-it will attempt to sudo to perform calls.
-
-=head1 SYNOPSIS
-
-hdfs-agitator [options]
-
-  Options:
-    --help         Brief help message
-    --man          Full documentation
-    --sleep        Time to sleep between failovers in minutes. Default 10
-    --superuser    HDFS superuser. Default 'hdfs'
-    --hdfs-cmd     hdfs command path. Default '$HADOOP_PREFIX/share/hadoop/hdfs/bin/hdfs'
-    --nameservice  Limit failovers to specified nameservice. Default all nameservices
-    --sudo         command to call to sudo to the HDFS superuser. Default 'sudo' if needed.
-
-=head1 OPTIONS
-
-=over 8
-
-=item B<--sleep>
-
-Sleep the given number of minutes between attempts to fail over nameservices.
-
-=item B<--nameservice>
-
-Limit failover attempts to the given nameservice. By default, we attempt to list
-all known nameservices and choose either one or all of them to fail over in a
-given cycle.
-
-=item B<--superuser>
-
-An HDFS superuser capable of running the haadmin command. Defaults to "hdfs".
-
-=item B<--hdfs-cmd>
-
-Path to the HDFS cli. Will be used both for non-administrative commands (e.g.
-listing the nameservices and serviceids in a given nameservice) and admin-only
-actions such as checking status and failing over.
-
-Defaults to using $HADOOP_PREFIX.
-
-=item B<--sudo>
-
-Command to allow us to act as the given HDFS superuser. By default we assume
-the current user can run HDFS administrative commands. When this argument is
-specified we will instead attempt to use the HDFS superuser. If given an
-argument, it will be called like sudo, i.e. "sudo -u $superuser $cmd".
-Defaults to "sudo" on the shell's path.
-
-=back
-
-=head1 SEE ALSO
-
-See the Apache Hadoop documentation on configuring HDFS HA
-
-=over 8
-
-=item B<HA with QJM>
-
-http://hadoop.apache.org/docs/r2.2.0/hadoop-yarn/hadoop-yarn-site/HDFSHighAvailabilityWithQJM.html#Administrative_commands
-
-=item B<HA with NFS>
-
-http://hadoop.apache.org/docs/r2.2.0/hadoop-yarn/hadoop-yarn-site/HDFSHighAvailabilityWithNFS.html#Administrative_commands
-
-=back

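(For reference, per the SYNOPSIS above, a sample run that triggers a failover
every 15 minutes against a single nameservice, sudoing to the hdfs superuser;
the nameservice ID "ns1" is illustrative.)

  ./hdfs-agitator.pl --sleep 15 --nameservice ns1 --superuser hdfs --sudo
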
http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/ingesters.txt.example
----------------------------------------------------------------------
diff --git a/test/system/continuous/ingesters.txt.example b/test/system/continuous/ingesters.txt.example
deleted file mode 100644
index b66d790..0000000
--- a/test/system/continuous/ingesters.txt.example
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-host1
-host2

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/master-agitator.pl
----------------------------------------------------------------------
diff --git a/test/system/continuous/master-agitator.pl b/test/system/continuous/master-agitator.pl
deleted file mode 100755
index d87f17e..0000000
--- a/test/system/continuous/master-agitator.pl
+++ /dev/null
@@ -1,92 +0,0 @@
-#! /usr/bin/env perl
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-use POSIX qw(strftime);
-use Cwd qw();
-
-if(scalar(@ARGV) != 2){
-       print "Usage : master-agitator.pl <sleep before kill in minutes> <sleep 
before start in minutes>\n";
-       exit(1);
-}
-
-my $ACCUMULO_HOME;
-if( defined $ENV{'ACCUMULO_HOME'} ){
-  $ACCUMULO_HOME = $ENV{'ACCUMULO_HOME'};
-} else {
-  $cwd=Cwd::cwd();
-  $ACCUMULO_HOME=$cwd . '/../../..';
-}
-
-if(defined $ENV{'ACCUMULO_CONF_DIR'}){
-        $ACCUMULO_CONF_DIR = $ENV{'ACCUMULO_CONF_DIR'};
-}else{
-       $ACCUMULO_CONF_DIR = $ACCUMULO_HOME . '/conf';
-}
-
-$sleep1 = $ARGV[0];
-$sleep2 = $ARGV[1];
-
-@mastersRaw = `cat $ACCUMULO_CONF_DIR/masters`;
-chomp(@mastersRaw);
-
-for $master (@mastersRaw){
-       if($master eq "" || substr($master,0,1) eq "#"){
-               next;
-       }
-
-       push(@masters, $master);
-}
-
-
-while(1){
-       sleep($sleep1 * 60);
-       $t = strftime "%Y%m%d %H:%M:%S", localtime;
-       if(rand(1) < .5){
-               $masterNodeToWack = $masters[int(rand(scalar(@masters)))];
-               print STDERR "$t Killing master on $masterNodeToWack\n";
-               $cmd = "ssh $masterNodeToWack \"pkill -f '[ 
]org.apache.accumulo.start.*master'\"";
-               print "$t $cmd\n";
-               system($cmd);
-       }else{
-               print STDERR "$t Killing all masters\n";
-               $cmd = "pssh -h $ACCUMULO_CONF_DIR/masters \"pkill -f '[ 
]org.apache.accumulo.start.*master'\" < /dev/null";
-               print "$t $cmd\n";
-               system($cmd);
-
-               $file = '';
-               if (-e "$ACCUMULO_CONF_DIR/gc") {
-                       $file = 'gc';
-               } else {
-                       $file = 'masters';
-               }
-
-               $cmd = "pssh -h $ACCUMULO_CONF_DIR/$file \"pkill -f '[ 
]org.apache.accumulo.start.*gc'\" < /dev/null";
-               print "$t $cmd\n";
-               system($cmd);
-       }
-
-       sleep($sleep2 * 60);
-       $t = strftime "%Y%m%d %H:%M:%S", localtime;
-       print STDERR "$t Running start-all\n";
-
-       $cmd = "pssh -h $ACCUMULO_CONF_DIR/masters 
\"$ACCUMULO_HOME/bin/accumulo-service master start\" < /dev/null";
-       print "$t $cmd\n";
-       system($cmd);
-}
-
-

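(For reference, per its usage string above, the master agitator took two plain
sleep times in minutes; the values here are illustrative.)

  # Each cycle: sleep 10 min, kill one or all masters (and GC), sleep 2 min, restart.
  ./master-agitator.pl 10 2
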
http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/report.pl
----------------------------------------------------------------------
diff --git a/test/system/continuous/report.pl b/test/system/continuous/report.pl
deleted file mode 100755
index d1902b6..0000000
--- a/test/system/continuous/report.pl
+++ /dev/null
@@ -1,120 +0,0 @@
-#! /usr/bin/env perl
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-use POSIX qw(strftime);
-
-if(scalar(@ARGV) == 2 && $ARGV[0] eq "-bt"){
-       $BIN_TIME=$ARGV[1];
-}elsif(scalar(@ARGV) == 0){
-       $BIN_TIME=900;
-}else{
-       print "Usage : report.pl [-bt <bin time>]\n";
-       exit;
-}
-
-
-$LOG_DIR = "logs";
-$ACCUMULO_HOME="../../..";
-$REPORT_DIR = strftime "report_%Y%m%d%H%M", localtime;
-
-mkdir("$REPORT_DIR");
-
-open (HTML, ">$REPORT_DIR/report.html");
-
-print HTML "<html><body>\n";
-
-$misCount = `grep MIS $LOG_DIR/*_walk.err | wc -l`;
-
-if($misCount > 0){
-       print HTML "<HR width=50% size=4>\n";
-       print HTML "<center><P><B color=red>WARNING : The walkers saw missing 
nodes, this should not happen</B><P></center>\n";
-       print HTML "<HR width=50% size=4>\n";
-}
-
-plot("cat $LOG_DIR/*_stats.out", $BIN_TIME, 0, 2, "AVG", "entries", "Entries 
over time");
-plot("cat $LOG_DIR/*_stats.out", $BIN_TIME, 0, 3, "AMM", "ingest_rate", 
"Ingest rate over time");
-plot("egrep 'SRQ|FSR' $LOG_DIR/*_walk.out", $BIN_TIME, 1, 3, "AMM", 
"query_latency", "Row lookup latency (in milliseconds) over time");
-plot("egrep 'SRQ|FSR' $LOG_DIR/*_walk.out", 3600, 1, 3, "COUNT", 
"query_count", "# rows looked up in each hour");
-plot("grep 'BRQ' $LOG_DIR/*_batch_walk.out", $BIN_TIME, 1, 5, "AMM", 
"batch_walk_rate", "batch walkers average lookup rate" );
-plot("cat $LOG_DIR/*_stats.out", $BIN_TIME, 0, 10, "AVG", "tablets", "Table 
tablets online over time");
-plot("cat $LOG_DIR/*_stats.out", $BIN_TIME, 0, 25, "AMM_HACK1", 
"files_per_tablet", "Files per tablet");
-plot("cat $LOG_DIR/*_stats.out", $BIN_TIME, 0, 1, "AVG", "tservers", "Tablet 
servers over time");
-plot("cat $LOG_DIR/*_stats.out", $BIN_TIME, 0, 11, "AVG", "du", "HDFS usage 
over time");
-plot("cat $LOG_DIR/*_stats.out", $BIN_TIME, 0, 12, "AVG", "dirs", "HDFS # dirs 
over time");
-plot("cat $LOG_DIR/*_stats.out", $BIN_TIME, 0, 13, "AVG", "files", "HDFS # 
files over time");
-plot("cat $LOG_DIR/*_stats.out", $BIN_TIME, 0, 17, "AVG", "maps", "# map task 
over time");
-plot("cat $LOG_DIR/*_stats.out", $BIN_TIME, 0, 19, "AVG", "reduces", "# reduce 
task over time");
-
-print HTML "<P><h2>Config</h2>\n";
-print HTML "<UL>\n";
-for $config_file (glob("$LOG_DIR/*_config.out")){
-       @path = split(/\//,$config_file);
-       $file_name = $path[-1];
-       system("cp $config_file $REPORT_DIR/$file_name");
-       print HTML "<li><a href='$file_name'>$file_name</a>\n";
-}
-print HTML "</UL>\n";
-
-
-print HTML "<P><h2>Lookup times histogram</h2>\n";
-print HTML "<pre>\n";
-print HTML `cat $LOG_DIR/*_walk.out | $ACCUMULO_HOME/bin/accumulo org.apache.accumulo.test.continuous.PrintScanTimeHistogram`;
-print HTML "</pre>\n";
-
-print HTML "</body></html>\n";
-close(HTML);
-
-sub plot {
-       my $cmd = shift(@_);
-       my $period = shift(@_);
-       my $time_col = shift(@_);
-       my $data_col = shift(@_);
-       my $op = shift(@_);
-       my $output = shift(@_);
-       my $title = shift(@_);
-
-       system("$cmd | $ACCUMULO_HOME/bin/accumulo 
org.apache.accumulo.test.continuous.TimeBinner --period $period --timeColumn 
$time_col --dataColumn $data_col --operation $op --dateFormat MM/dd/yy-HH:mm:ss 
> $REPORT_DIR/$output.dat");
-       gnuplot("$REPORT_DIR/$output.dat", "$REPORT_DIR/$output.png", $op eq 
"AMM" || $op eq "AMM_HACK1");
-
-       print HTML "<P><h2>$title</h2><img src='$output.png'>\n";
-}
-
-sub gnuplot {
-       my $input = shift(@_);
-       my $output = shift(@_);
-       my $yerr = shift(@_);
-
-       open(GNUP, "|gnuplot > $output");       
-
-       print GNUP "set xdata time\n";
-       print GNUP "set timefmt \"%m/%d/%y-%H:%M:%S\"\n";
-       print GNUP "set format x \"%m/%d\"\n";
-       print GNUP "set offsets 1,1,1,1\n";
-       print GNUP "set size 1.25,1.25\n";
-       print GNUP "set terminal png\n";
-       if($yerr){
-               print GNUP "plot \"$input\" using 1:2:3:4 with yerrorlines\n";
-       }else{
-               print GNUP "plot \"$input\" using 1:2\n";
-       }
-
-       close(GNUP);
-}
-       
-
-

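(For reference, report.pl read the walker/ingester logs from ./logs and
required gnuplot on the path; its only flag overrode the default bin time
of 900 passed to TimeBinner --period.)

  ./report.pl -bt 600
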
http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/run-moru.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/run-moru.sh b/test/system/continuous/run-moru.sh
deleted file mode 100755
index 3c73ddb..0000000
--- a/test/system/continuous/run-moru.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#! /usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Start: Resolve Script Directory
-SOURCE="${BASH_SOURCE[0]}"
-while [[ -h "${SOURCE}" ]]; do # resolve $SOURCE until the file is no longer a 
symlink
-   bin=$( cd -P "$( dirname "${SOURCE}" )" && pwd )
-   SOURCE="$(readlink "${SOURCE}")"
-   [[ "${SOURCE}" != /* ]] && SOURCE="${bin}/${SOURCE}" # if $SOURCE was a 
relative symlink, we need to resolve it relative to the path where the symlink 
file was located
-done
-bin=$( cd -P "$( dirname "${SOURCE}" )" && pwd )
-script=$( basename "${SOURCE}" )
-# Stop: Resolve Script Directory
-
-CONTINUOUS_CONF_DIR=${CONTINUOUS_CONF_DIR:-${bin}}
-. $CONTINUOUS_CONF_DIR/continuous-env.sh
-. $ACCUMULO_CONF_DIR/accumulo-env.sh
-
-SERVER_LIBJAR="$ACCUMULO_HOME/lib/accumulo-test.jar"
-
-"$ACCUMULO_HOME/contrib/tool.sh" "$SERVER_LIBJAR" 
org.apache.accumulo.test.continuous.ContinuousMoru -libjars "$SERVER_LIBJAR" -i 
"$INSTANCE_NAME" -z "$ZOO_KEEPERS" -u "$USER" -p "$PASS" --table "$TABLE" --min 
"$MIN" --max "$MAX" --maxColF "$MAX_CF" --maxColQ "$MAX_CQ" --batchMemory 
"$MAX_MEM" --batchLatency "$MAX_LATENCY" --batchThreads "$NUM_THREADS" 
--maxMappers "$VERIFY_MAX_MAPS"
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/run-verify.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/run-verify.sh b/test/system/continuous/run-verify.sh
deleted file mode 100755
index aa56643..0000000
--- a/test/system/continuous/run-verify.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#! /usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Start: Resolve Script Directory
-SOURCE="${BASH_SOURCE[0]}"
-while [[ -h "${SOURCE}" ]]; do # resolve $SOURCE until the file is no longer a 
symlink
-   bin=$( cd -P "$( dirname "${SOURCE}" )" && pwd )
-   SOURCE=$(readlink "${SOURCE}")
-   [[ "${SOURCE}" != /* ]] && SOURCE="${bin}/${SOURCE}" # if $SOURCE was a 
relative symlink, we need to resolve it relative to the path where the symlink 
file was located
-done
-bin=$( cd -P "$( dirname "${SOURCE}" )" && pwd )
-script=$( basename "${SOURCE}" )
-# Stop: Resolve Script Directory
-
-CONTINUOUS_CONF_DIR=${CONTINUOUS_CONF_DIR:-${bin}}
-. $CONTINUOUS_CONF_DIR/continuous-env.sh
-. $ACCUMULO_CONF_DIR/accumulo-env.sh
-
-SERVER_LIBJAR="$ACCUMULO_HOME/lib/accumulo-test.jar"
-
-AUTH_OPT="";
-[[ -n $VERIFY_AUTHS ]] && AUTH_OPT="--auths $VERIFY_AUTHS"
-
-SCAN_OPT=--offline
-[[ $SCAN_OFFLINE == false ]] && SCAN_OPT=
-
-"$ACCUMULO_HOME/contrib/tool.sh" "$SERVER_LIBJAR" 
org.apache.accumulo.test.continuous.ContinuousVerify 
-Dmapreduce.job.reduce.slowstart.completedmaps=0.95 -libjars "$SERVER_LIBJAR" 
"$AUTH_OPT" -i "$INSTANCE_NAME" -z "$ZOO_KEEPERS" -u "$USER" -p "$PASS" --table 
"$TABLE" --output "$VERIFY_OUT" --maxMappers "$VERIFY_MAX_MAPS" --reducers 
"$VERIFY_REDUCERS" "$SCAN_OPT"

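(For reference, run-verify.sh pulled its settings from continuous-env.sh; a
minimal sketch of the verify-related entries, with illustrative values only:)

  VERIFY_OUT=/tmp/continuous_verify   # output dir for the verify MapReduce job
  VERIFY_MAX_MAPS=64
  VERIFY_REDUCERS=16
  SCAN_OFFLINE=true                   # false scans the online table instead
  VERIFY_AUTHS=''                     # optional scan authorizations
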
http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/scanners.txt.example
----------------------------------------------------------------------
diff --git a/test/system/continuous/scanners.txt.example b/test/system/continuous/scanners.txt.example
deleted file mode 100644
index 63fb8bb..0000000
--- a/test/system/continuous/scanners.txt.example
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-localhost

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/start-agitator.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/start-agitator.sh b/test/system/continuous/start-agitator.sh
deleted file mode 100755
index a44cd83..0000000
--- a/test/system/continuous/start-agitator.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#! /usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Start: Resolve Script Directory
-SOURCE="${BASH_SOURCE[0]}"
-while [[ -h "${SOURCE}" ]]; do # resolve $SOURCE until the file is no longer a 
symlink
-   bin=$( cd -P "$( dirname "${SOURCE}" )" && pwd )
-   SOURCE=$(readlink "${SOURCE}")
-   [[ "${SOURCE}" != /* ]] && SOURCE="${bin}/${SOURCE}" # if $SOURCE was a 
relative symlink, we need to resolve it relative to the path where the symlink 
file was located
-done
-bin=$( cd -P "$( dirname "${SOURCE}" )" && pwd )
-script=$( basename "${SOURCE}" )
-# Stop: Resolve Script Directory
-
-CONTINUOUS_CONF_DIR=${CONTINUOUS_CONF_DIR:-${bin}}
-. "$CONTINUOUS_CONF_DIR/continuous-env.sh"
-
-mkdir -p "$CONTINUOUS_LOG_DIR"
-
-LOG_BASE="${CONTINUOUS_LOG_DIR}/$(date +%Y%m%d%H%M%S)_$(hostname)"
-
-# Start agitators for datanodes, tservers, and the master
-[[ -n $AGITATOR_USER ]] || AGITATOR_USER=$(whoami)
-if [[ $AGITATOR_USER == root ]]; then
-  echo "Running master-agitator and tserver-agitator as $ACCUMULO_USER using su. Running datanode-agitator as $HDFS_USER using su."
-
-  # Change to the correct user if started as root
-  su -c "nohup ${bin}/master-agitator.pl $MASTER_KILL_SLEEP_TIME 
$MASTER_RESTART_SLEEP_TIME >${LOG_BASE}_master-agitator.out 
2>${LOG_BASE}_master-agitator.err &" -m - "$ACCUMULO_USER"
-
-  su -c "nohup ${bin}/tserver-agitator.pl $TSERVER_KILL_SLEEP_TIME 
$TSERVER_RESTART_SLEEP_TIME $TSERVER_MIN_KILL $TSERVER_MAX_KILL 
>${LOG_BASE}_tserver-agitator.out 2>${LOG_BASE}_tserver-agitator.err &" -m - 
"$ACCUMULO_USER"
-
-  su -c "nohup ${bin}/datanode-agitator.pl $DATANODE_KILL_SLEEP_TIME 
$DATANODE_RESTART_SLEEP_TIME $HADOOP_PREFIX $DATANODE_MIN_KILL 
$DATANODE_MAX_KILL >${LOG_BASE}_datanode-agitator.out 
2>${LOG_BASE}_datanode-agitator.err &" -m - "$HDFS_USER"
-
-elif [[ $AGITATOR_USER == "$ACCUMULO_USER" ]]; then
-  echo "Running master-agitator and tserver-agitator as $AGITATOR_USER Running 
datanode-agitator as $HDFS_USER using sudo."
-  # Just run the master-agitator if we're the accumulo user
-  nohup "${bin}/master-agitator.pl" "$MASTER_KILL_SLEEP_TIME" 
"$MASTER_RESTART_SLEEP_TIME" >"${LOG_BASE}_master-agitator.out" 
2>"${LOG_BASE}_master-agitator.err" &
-
-  nohup "${bin}/tserver-agitator.pl" "$TSERVER_KILL_SLEEP_TIME" 
"$TSERVER_RESTART_SLEEP_TIME" "$TSERVER_MIN_KILL" "$TSERVER_MAX_KILL" 
>"${LOG_BASE}_tserver-agitator.out" 2>"${LOG_BASE}_tserver-agitator.err" &
-
-  sudo -u "$HDFS_USER" nohup "${bin}/datanode-agitator.pl" 
"$DATANODE_KILL_SLEEP_TIME" "$DATANODE_RESTART_SLEEP_TIME" "$HADOOP_PREFIX" 
"$DATANODE_MIN_KILL" "$DATANODE_MAX_KILL" >"${LOG_BASE}_datanode-agitator.out" 
2>"${LOG_BASE}_datanode-agitator.err" &
-
-else
-  echo "Running master-agitator and tserver-agitator as $ACCUMULO_USER using 
sudo. Running datanode-agitator as $HDFS_USER using sudo."
-
-  # Not root, and not the accumulo user, hope you can sudo to it
-  sudo -u "$ACCUMULO_USER" "nohup ${bin}/master-agitator.pl 
$MASTER_KILL_SLEEP_TIME $MASTER_RESTART_SLEEP_TIME 
>${LOG_BASE}_master-agitator.out 2>${LOG_BASE}_master-agitator.err &"
-
-  sudo -u "$ACCUMULO_USER" "nohup ${bin}/tserver-agitator.pl 
$TSERVER_KILL_SLEEP_TIME $TSERVER_RESTART_SLEEP_TIME $TSERVER_MIN_KILL 
$TSERVER_MAX_KILL >${LOG_BASE}_tserver-agitator.out 
2>${LOG_BASE}_tserver-agitator.err &"
-
-  sudo -u "$HDFS_USER" "nohup ${bin}/datanode-agitator.pl 
$DATANODE_KILL_SLEEP_TIME $DATANODE_RESTART_SLEEP_TIME $HADOOP_PREFIX 
$DATANODE_MIN_KILL $DATANODE_MAX_KILL >${LOG_BASE}_datanode-agitator.out 
2>${LOG_BASE}_datanode-agitator.err &" -m - "$HDFS_USER"
-
-fi
-
-if ${AGITATE_HDFS:-false} ; then
-  AGITATOR_LOG=${LOG_BASE}_hdfs-agitator
-  sudo -u "$AGITATE_HDFS_SUPERUSER" nohup "${bin}/hdfs-agitator.pl" --sleep 
"${AGITATE_HDFS_SLEEP_TIME}" --hdfs-cmd "${AGITATE_HDFS_COMMAND}" --superuser 
"${AGITATE_HDFS_SUPERUSER}" >"${AGITATOR_LOG}.out" 2>"${AGITATOR_LOG}.err" &
-fi

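(For reference, start-agitator.sh drew all of its knobs from continuous-env.sh;
a minimal sketch of the agitation-related entries, with illustrative values only:)

  AGITATOR_USER=accumulo       # root or another user triggers the su/sudo branches above
  ACCUMULO_USER=accumulo
  HDFS_USER=hdfs
  MASTER_KILL_SLEEP_TIME=60
  MASTER_RESTART_SLEEP_TIME=2
  TSERVER_KILL_SLEEP_TIME=20
  TSERVER_RESTART_SLEEP_TIME=10
  TSERVER_MIN_KILL=1
  TSERVER_MAX_KILL=1
  DATANODE_KILL_SLEEP_TIME=20
  DATANODE_RESTART_SLEEP_TIME=10
  DATANODE_MIN_KILL=1
  DATANODE_MAX_KILL=1
  AGITATE_HDFS=false           # true additionally launches hdfs-agitator.pl
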
http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/start-batchwalkers.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/start-batchwalkers.sh b/test/system/continuous/start-batchwalkers.sh
deleted file mode 100755
index 7d4efff..0000000
--- a/test/system/continuous/start-batchwalkers.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#! /usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Start: Resolve Script Directory
-SOURCE="${BASH_SOURCE[0]}"
-while [[ -h "${SOURCE}" ]]; do # resolve $SOURCE until the file is no longer a 
symlink
-   bin=$( cd -P "$( dirname "${SOURCE}" )" && pwd )
-   SOURCE=$(readlink "${SOURCE}")
-   [[ "${SOURCE}" != /* ]] && SOURCE="${bin}/${SOURCE}" # if $SOURCE was a 
relative symlink, we need to resolve it relative to the path where the symlink 
file was located
-done
-bin=$( cd -P "$( dirname "${SOURCE}" )" && pwd )
-script=$( basename "${SOURCE}" )
-# Stop: Resolve Script Directory
-
-CONTINUOUS_CONF_DIR=${CONTINUOUS_CONF_DIR:-${bin}}
-. "$CONTINUOUS_CONF_DIR/continuous-env.sh"
-
-DEBUG_OPT=''
-if [[ $DEBUG_BATCH_WALKER == on ]] ; then
-       DEBUG_OPT="--debug $CONTINUOUS_LOG_DIR/\`date 
+%Y%m%d%H%M%S\`_\`hostname\`_batch_walk.log";
-fi
-
-AUTH_OPT=''
-[[ -n $AUTHS ]] && AUTH_OPT="--auths \"$AUTHS\""
-
-pssh -h "$CONTINUOUS_CONF_DIR/batch_walkers.txt" "mkdir -p 
$CONTINUOUS_LOG_DIR; nohup $ACCUMULO_HOME/bin/accumulo 
org.apache.accumulo.test.continuous.ContinuousBatchWalker $DEBUG_OPT $AUTH_OPT 
-i $INSTANCE_NAME -z $ZOO_KEEPERS -u $USER -p $PASS --table $TABLE --min $MIN 
--max $MAX --sleep $BATCH_WALKER_SLEEP --numToScan $BATCH_WALKER_BATCH_SIZE 
--scanThreads $BATCH_WALKER_THREADS >$CONTINUOUS_LOG_DIR/\`date 
+%Y%m%d%H%M%S\`_\`hostname\`_batch_walk.out 2>$CONTINUOUS_LOG_DIR/\`date 
+%Y%m%d%H%M%S\`_\`hostname\`_batch_walk.err &" < /dev/null
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/39830635/test/system/continuous/start-ingest.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/start-ingest.sh b/test/system/continuous/start-ingest.sh
deleted file mode 100755
index 8cc7d07..0000000
--- a/test/system/continuous/start-ingest.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#! /usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Start: Resolve Script Directory
-SOURCE="${BASH_SOURCE[0]}"
-while [[ -h "${SOURCE}" ]]; do # resolve $SOURCE until the file is no longer a 
symlink
-   bin=$( cd -P "$( dirname "${SOURCE}" )" && pwd )
-   SOURCE=$(readlink "${SOURCE}")
-   [[ "${SOURCE}" != /* ]] && SOURCE="${bin}/${SOURCE}" # if $SOURCE was a 
relative symlink, we need to resolve it relative to the path where the symlink 
file was located
-done
-bin=$( cd -P "$( dirname "${SOURCE}" )" && pwd )
-script=$( basename "${SOURCE}" )
-# Stop: Resolve Script Directory
-
-CONTINUOUS_CONF_DIR=${CONTINUOUS_CONF_DIR:-${bin}}
-. "$CONTINUOUS_CONF_DIR/continuous-env.sh"
-
-DEBUG_OPT=''
-if [[ $DEBUG_INGEST == on ]] ; then
-       DEBUG_OPT="--debug $CONTINUOUS_LOG_DIR/\`date 
+%Y%m%d%H%M%S\`_\`hostname\`_ingest.log";
-fi
-
-VIS_OPT=''
-[[ -n $VISIBILITIES ]] && VIS_OPT="--visibilities \"$VISIBILITIES\""
-
-CHECKSUM_OPT='--addCheckSum'
-[[ $CHECKSUM == false ]] && CHECKSUM_OPT=''
-
-pssh -h "$CONTINUOUS_CONF_DIR/ingesters.txt" "mkdir -p $CONTINUOUS_LOG_DIR; 
nohup $ACCUMULO_HOME/bin/accumulo 
org.apache.accumulo.test.continuous.ContinuousIngest $DEBUG_OPT $VIS_OPT -i 
$INSTANCE_NAME -z $ZOO_KEEPERS -u $USER -p $PASS --table $TABLE --num $NUM 
--min $MIN --max $MAX --maxColF $MAX_CF --maxColQ $MAX_CQ --batchMemory 
$MAX_MEM --batchLatency $MAX_LATENCY --batchThreads $NUM_THREADS $CHECKSUM_OPT 
>$CONTINUOUS_LOG_DIR/\`date +%Y%m%d%H%M%S\`_\`hostname\`_ingest.out 
2>$CONTINUOUS_LOG_DIR/\`date +%Y%m%d%H%M%S\`_\`hostname\`_ingest.err &" < 
/dev/null
-
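
(For reference, start-ingest.sh fanned ContinuousIngest out via pssh to every
host in ingesters.txt; the knobs it forwarded came from continuous-env.sh, a
minimal sketch with illustrative values only:)

  TABLE=ci
  NUM=9223372036854775807   # entries each ingester writes before exiting
  MIN=0
  MAX=9223372036854775807   # row id range
  MAX_CF=32767
  MAX_CQ=32767
  NUM_THREADS=4
  CHECKSUM=true             # false drops the --addCheckSum flag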

Reply via email to