http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java b/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
deleted file mode 100644
index b86fcfe..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.MonitorUtil;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.harness.AccumuloITBase;
-import org.apache.accumulo.minicluster.MiniAccumuloCluster;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ZooKeeperBindException;
-import org.apache.accumulo.test.util.CertUtils;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Before;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * General Integration-Test base class that provides access to a {@link MiniAccumuloCluster} for testing. Tests using these typically do very disruptive things
- * to the instance, and require specific configuration. Most tests don't need this level of control and should extend {@link AccumuloClusterHarness} instead.
- */
-public class ConfigurableMacBase extends AccumuloITBase {
-  public static final Logger log = LoggerFactory.getLogger(ConfigurableMacBase.class);
-
-  protected MiniAccumuloClusterImpl cluster;
-
-  protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {}
-
-  protected void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {}
-
-  protected static final String ROOT_PASSWORD = "testRootPassword1";
-
-  public static void configureForEnvironment(MiniAccumuloConfigImpl cfg, Class<?> testClass, File folder) {
-    if ("true".equals(System.getProperty("org.apache.accumulo.test.functional.useSslForIT"))) {
-      configureForSsl(cfg, folder);
-    }
-    if ("true".equals(System.getProperty("org.apache.accumulo.test.functional.useCredProviderForIT"))) {
-      cfg.setUseCredentialProvider(true);
-    }
-  }
-
-  protected static void configureForSsl(MiniAccumuloConfigImpl cfg, File sslDir) {
-    Map<String,String> siteConfig = cfg.getSiteConfig();
-    if ("true".equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
-      // already enabled; don't mess with it
-      return;
-    }
-
-    // create parent directories, and ensure sslDir is empty
-    assertTrue(sslDir.mkdirs() || sslDir.isDirectory());
-    FileUtils.deleteQuietly(sslDir);
-    assertTrue(sslDir.mkdir());
-
-    File rootKeystoreFile = new File(sslDir, "root-" + cfg.getInstanceName() + ".jks");
-    File localKeystoreFile = new File(sslDir, "local-" + cfg.getInstanceName() + ".jks");
-    File publicTruststoreFile = new File(sslDir, "public-" + cfg.getInstanceName() + ".jks");
-    final String rootKeystorePassword = "root_keystore_password", truststorePassword = "truststore_password";
-    try {
-      new CertUtils(Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue(), "o=Apache Accumulo,cn=MiniAccumuloCluster", "RSA", 2048, "sha1WithRSAEncryption")
-          .createAll(rootKeystoreFile, localKeystoreFile, publicTruststoreFile, cfg.getInstanceName(), rootKeystorePassword, cfg.getRootPassword(),
-              truststorePassword);
-    } catch (Exception e) {
-      throw new RuntimeException("error creating MAC keystore", e);
-    }
-
-    siteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
-    siteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), localKeystoreFile.getAbsolutePath());
-    siteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), cfg.getRootPassword());
-    siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), publicTruststoreFile.getAbsolutePath());
-    siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
-    cfg.setSiteConfig(siteConfig);
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    createMiniAccumulo();
-    Exception lastException = null;
-    for (int i = 0; i < 3; i++) {
-      try {
-        cluster.start();
-        return;
-      } catch (ZooKeeperBindException e) {
-        lastException = e;
-        log.warn("Failed to start MiniAccumuloCluster, presumably due to ZooKeeper issues", lastException);
-        Thread.sleep(3000);
-        createMiniAccumulo();
-      }
-    }
-    throw new RuntimeException("Failed to start MiniAccumuloCluster after three attempts", lastException);
-  }
-
-  private void createMiniAccumulo() throws Exception {
-    // createTestDir will give us an empty directory, we don't need to clean it up ourselves
-    File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName());
-    MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, ROOT_PASSWORD);
-    String nativePathInDevTree = NativeMapIT.nativeMapLocation().getAbsolutePath();
-    String nativePathInMapReduce = new File(System.getProperty("user.dir")).toString();
-    cfg.setNativeLibPaths(nativePathInDevTree, nativePathInMapReduce);
-    cfg.setProperty(Property.GC_FILE_ARCHIVE, Boolean.TRUE.toString());
-    Configuration coreSite = new Configuration(false);
-    configure(cfg, coreSite);
-    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
-    configureForEnvironment(cfg, getClass(), getSslDir(baseDir));
-    cluster = new MiniAccumuloClusterImpl(cfg);
-    if (coreSite.size() > 0) {
-      File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml");
-      if (csFile.exists())
-        throw new RuntimeException(csFile + " already exists");
-
-      OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "core-site.xml")));
-      coreSite.writeXml(out);
-      out.close();
-    }
-    beforeClusterStart(cfg);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (cluster != null)
-      try {
-        cluster.stop();
-      } catch (Exception e) {
-        // ignored
-      }
-  }
-
-  protected MiniAccumuloClusterImpl getCluster() {
-    return cluster;
-  }
-
-  protected Connector getConnector() throws AccumuloException, AccumuloSecurityException {
-    return getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
-  }
-
-  protected Process exec(Class<?> clazz, String... args) throws IOException {
-    return getCluster().exec(clazz, args);
-  }
-
-  protected String getMonitor() throws KeeperException, InterruptedException {
-    Instance instance = new ZooKeeperInstance(getCluster().getClientConfig());
-    return MonitorUtil.getLocation(instance);
-  }
-
-  protected ClientConfiguration getClientConfig() throws Exception {
-    return new ClientConfiguration(getCluster().getConfig().getClientConfFile());
-  }
-
-}
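
For context, tests built on this base class override configure(...) to tune the MiniAccumuloCluster before it starts, then talk to the running instance through getConnector(). A minimal sketch, assuming the imports shown in the file above; the class name, table name, and property value here are illustrative, not part of this commit:

    public class ExampleConfigurableIT extends ConfigurableMacBase {
      @Override
      protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
        // illustrative tuning: a single tablet server and a short major compaction delay
        cfg.setNumTservers(1);
        cfg.setProperty(Property.TSERV_MAJC_DELAY, "1s");
      }

      @Test
      public void createsTable() throws Exception {
        Connector c = getConnector(); // root connector to the cluster started in setUp()
        c.tableOperations().create("exampleTable");
      }
    }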

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
deleted file mode 100644
index 4ef4a61..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.ConstraintViolationSummary;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
-import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ConstraintIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(ConstraintIT.class);
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 30;
-  }
-
-  @Test
-  public void run() throws Exception {
-    String[] tableNames = getUniqueNames(3);
-    Connector c = getConnector();
-    for (String table : tableNames) {
-      c.tableOperations().create(table);
-      c.tableOperations().addConstraint(table, NumericValueConstraint.class.getName());
-      c.tableOperations().addConstraint(table, AlphaNumKeyConstraint.class.getName());
-    }
-
-    // A static sleep to just let ZK do its thing
-    Thread.sleep(10 * 1000);
-
-    // Then check that the client has at least gotten the updates
-    for (String table : tableNames) {
-      log.debug("Checking constraints on {}", table);
-      Map<String,Integer> constraints = c.tableOperations().listConstraints(table);
-      while (!constraints.containsKey(NumericValueConstraint.class.getName()) || !constraints.containsKey(AlphaNumKeyConstraint.class.getName())) {
-        log.debug("Failed to verify constraints. Sleeping and retrying");
-        Thread.sleep(2000);
-        constraints = c.tableOperations().listConstraints(table);
-      }
-      log.debug("Verified all constraints on {}", table);
-    }
-
-    log.debug("Verified constraints on all tables. Running tests");
-
-    test1(tableNames[0]);
-
-    test2(tableNames[1], false);
-    test2(tableNames[2], true);
-  }
-
-  private void test1(String tableName) throws Exception {
-    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
-    Mutation mut1 = new Mutation(new Text("r1"));
-    mut1.put(new Text("cf1"), new Text("cq1"), new Value("123".getBytes(UTF_8)));
-
-    bw.addMutation(mut1);
-
-    // should not throw any exceptions
-    bw.close();
-
-    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
-    // create a mutation with a non numeric value
-    Mutation mut2 = new Mutation(new Text("r1"));
-    mut2.put(new Text("cf1"), new Text("cq1"), new Value("123a".getBytes(UTF_8)));
-
-    bw.addMutation(mut2);
-
-    boolean sawMRE = false;
-
-    try {
-      bw.close();
-      // should not get here
-      throw new Exception("Test failed, constraint did not catch bad mutation");
-    } catch (MutationsRejectedException mre) {
-      sawMRE = true;
-
-      // verify constraint violation summary
-      List<ConstraintViolationSummary> cvsl = mre.getConstraintViolationSummaries();
-
-      if (cvsl.size() != 1) {
-        throw new Exception("Unexpected constraints");
-      }
-
-      for (ConstraintViolationSummary cvs : cvsl) {
-        if (!cvs.constrainClass.equals(NumericValueConstraint.class.getName())) {
-          throw new Exception("Unexpected constraint class " + cvs.constrainClass);
-        }
-
-        if (cvs.numberOfViolatingMutations != 1) {
-          throw new Exception("Unexpected # violating mutations " + cvs.numberOfViolatingMutations);
-        }
-      }
-    }
-
-    if (!sawMRE) {
-      throw new Exception("Did not see MutationsRejectedException");
-    }
-
-    // verify mutation did not go through
-    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
-    scanner.setRange(new Range(new Text("r1")));
-
-    Iterator<Entry<Key,Value>> iter = scanner.iterator();
-    Entry<Key,Value> entry = iter.next();
-
-    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes(UTF_8)))) {
-      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
-    }
-
-    if (iter.hasNext()) {
-      entry = iter.next();
-      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
-    }
-
-    // remove the numeric value constraint
-    getConnector().tableOperations().removeConstraint(tableName, 2);
-    UtilWaitThread.sleep(1000);
-
-    // now should be able to add a non numeric value
-    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-    bw.addMutation(mut2);
-    bw.close();
-
-    // verify mutation went through
-    iter = scanner.iterator();
-    entry = iter.next();
-
-    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes(UTF_8)))) {
-      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
-    }
-
-    if (iter.hasNext()) {
-      entry = iter.next();
-      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
-    }
-
-    // add a constraint that references a non-existent class
-    getConnector().tableOperations().setProperty(tableName, Property.TABLE_CONSTRAINT_PREFIX + "1", "com.foobar.nonExistantClass");
-    UtilWaitThread.sleep(1000);
-
-    // add a mutation
-    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
-    Mutation mut3 = new Mutation(new Text("r1"));
-    mut3.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
-
-    bw.addMutation(mut3);
-
-    sawMRE = false;
-
-    try {
-      bw.close();
-      // should not get here
-      throw new Exception("Test failed, mutation went through when table had bad constraints");
-    } catch (MutationsRejectedException mre) {
-      sawMRE = true;
-    }
-
-    if (!sawMRE) {
-      throw new Exception("Did not see MutationsRejectedException");
-    }
-
-    // verify the mutation did not go through
-    iter = scanner.iterator();
-    entry = iter.next();
-
-    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes(UTF_8)))) {
-      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
-    }
-
-    if (iter.hasNext()) {
-      entry = iter.next();
-      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
-    }
-
-    // remove the bad constraint
-    getConnector().tableOperations().removeConstraint(tableName, 1);
-    UtilWaitThread.sleep(1000);
-
-    // try the mutation again
-    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-    bw.addMutation(mut3);
-    bw.close();
-
-    // verify it went through
-    iter = scanner.iterator();
-    entry = iter.next();
-
-    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("foo".getBytes(UTF_8)))) {
-      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
-    }
-
-    if (iter.hasNext()) {
-      entry = iter.next();
-      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
-    }
-  }
-
-  private Mutation newMut(String row, String cf, String cq, String val) {
-    Mutation mut1 = new Mutation(new Text(row));
-    mut1.put(new Text(cf), new Text(cq), new Value(val.getBytes(UTF_8)));
-    return mut1;
-  }
-
-  private void test2(String table, boolean doFlush) throws Exception {
-    // test sending multiple mutations with multiple constraint violations... all of the non-violating mutations
-    // should go through
-    int numericErrors = 2;
-
-    BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
-    bw.addMutation(newMut("r1", "cf1", "cq1", "123"));
-    bw.addMutation(newMut("r1", "cf1", "cq2", "I'm a bad value"));
-    if (doFlush) {
-      try {
-        bw.flush();
-        throw new Exception("Didn't find a bad mutation");
-      } catch (MutationsRejectedException mre) {
-        // ignored
-        try {
-          bw.close();
-        } catch (MutationsRejectedException ex) {
-          // ignored
-        }
-        bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
-        numericErrors = 1;
-      }
-    }
-    bw.addMutation(newMut("r1", "cf1", "cq3", "I'm a naughty value"));
-    bw.addMutation(newMut("@bad row@", "cf1", "cq2", "456"));
-    bw.addMutation(newMut("r1", "cf1", "cq4", "789"));
-
-    boolean sawMRE = false;
-
-    try {
-      bw.close();
-      // should not get here
-      throw new Exception("Test failed, constraint did not catch bad mutation");
-    } catch (MutationsRejectedException mre) {
-      System.out.println(mre);
-
-      sawMRE = true;
-
-      // verify constraint violation summary
-      List<ConstraintViolationSummary> cvsl = mre.getConstraintViolationSummaries();
-
-      if (cvsl.size() != 2) {
-        throw new Exception("Unexpected constraints");
-      }
-
-      HashMap<String,Integer> expected = new HashMap<String,Integer>();
-
-      expected.put("org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", numericErrors);
-      expected.put("org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint", 1);
-
-      for (ConstraintViolationSummary cvs : cvsl) {
-        if (expected.get(cvs.constrainClass) != cvs.numberOfViolatingMutations) {
-          throw new Exception("Unexpected " + cvs.constrainClass + " " + cvs.numberOfViolatingMutations);
-        }
-      }
-    }
-
-    if (!sawMRE) {
-      throw new Exception("Did not see MutationsRejectedException");
-    }
-
-    Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
-
-    Iterator<Entry<Key,Value>> iter = scanner.iterator();
-
-    Entry<Key,Value> entry = iter.next();
-
-    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes(UTF_8)))) {
-      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
-    }
-
-    entry = iter.next();
-
-    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-        || !entry.getKey().getColumnQualifier().equals(new Text("cq4")) || !entry.getValue().equals(new Value("789".getBytes(UTF_8)))) {
-      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
-    }
-
-    if (iter.hasNext()) {
-      entry = iter.next();
-      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
-    }
-
-  }
-
-}
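
The pattern exercised above is that constraint violations only surface when buffered mutations are flushed, as a MutationsRejectedException carrying per-constraint summaries. A condensed sketch of that pattern, assuming a Connector named conn and the imports from the file above; the table name and value are illustrative:

    BatchWriter bw = conn.createBatchWriter("exampleTable", new BatchWriterConfig());
    Mutation m = new Mutation(new Text("r1"));
    m.put(new Text("cf1"), new Text("cq1"), new Value("not-a-number".getBytes(UTF_8)));
    bw.addMutation(m);
    try {
      bw.close(); // flushes; NumericValueConstraint would reject this mutation here
    } catch (MutationsRejectedException mre) {
      for (ConstraintViolationSummary cvs : mre.getConstraintViolationSummaries()) {
        System.out.println(cvs.constrainClass + " rejected " + cvs.numberOfViolatingMutations + " mutation(s)");
      }
    }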

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
deleted file mode 100644
index b2373e6..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class CreateAndUseIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 4 * 60;
-  }
-
-  private static SortedSet<Text> splits;
-
-  @BeforeClass
-  public static void createData() throws Exception {
-    splits = new TreeSet<Text>();
-
-    for (int i = 1; i < 256; i++) {
-      splits.add(new Text(String.format("%08x", i << 8)));
-    }
-  }
-
-  @Test
-  public void verifyDataIsPresent() throws Exception {
-    Text cf = new Text("cf1");
-    Text cq = new Text("cq1");
-
-    String tableName = getUniqueNames(1)[0];
-    getConnector().tableOperations().create(tableName);
-    getConnector().tableOperations().addSplits(tableName, splits);
-    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
-    for (int i = 1; i < 257; i++) {
-      Mutation m = new Mutation(new Text(String.format("%08x", (i << 8) - 16)));
-      m.put(cf, cq, new Value(Integer.toString(i).getBytes(UTF_8)));
-
-      bw.addMutation(m);
-    }
-
-    bw.close();
-    Scanner scanner1 = getConnector().createScanner(tableName, Authorizations.EMPTY);
-
-    int ei = 1;
-
-    for (Entry<Key,Value> entry : scanner1) {
-      Assert.assertEquals(String.format("%08x", (ei << 8) - 16), entry.getKey().getRow().toString());
-      Assert.assertEquals(Integer.toString(ei), entry.getValue().toString());
-
-      ei++;
-    }
-
-    Assert.assertEquals("Did not see expected number of rows", 257, ei);
-  }
-
-  @Test
-  public void createTableAndScan() throws Exception {
-    String table2 = getUniqueNames(1)[0];
-    getConnector().tableOperations().create(table2);
-    getConnector().tableOperations().addSplits(table2, splits);
-    Scanner scanner2 = getConnector().createScanner(table2, Authorizations.EMPTY);
-    int count = 0;
-    for (Entry<Key,Value> entry : scanner2) {
-      if (entry != null)
-        count++;
-    }
-
-    if (count != 0) {
-      throw new Exception("Did not see expected number of entries, count = " + count);
-    }
-  }
-
-  @Test
-  public void createTableAndBatchScan() throws Exception {
-    ArrayList<Range> ranges = new ArrayList<Range>();
-    for (int i = 1; i < 257; i++) {
-      ranges.add(new Range(new Text(String.format("%08x", (i << 8) - 16))));
-    }
-
-    String table3 = getUniqueNames(1)[0];
-    getConnector().tableOperations().create(table3);
-    getConnector().tableOperations().addSplits(table3, splits);
-    BatchScanner bs = getConnector().createBatchScanner(table3, Authorizations.EMPTY, 3);
-    bs.setRanges(ranges);
-    Iterator<Entry<Key,Value>> iter = bs.iterator();
-    int count = Iterators.size(iter);
-    bs.close();
-
-    Assert.assertEquals("Did not expect to find any entries", 0, count);
-  }
-
-}
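
These tests pre-split the table through TableOperations.addSplits, which takes a SortedSet of Text split points. A condensed sketch matching the split points built in createData() above, assuming a Connector named conn; the table name is illustrative:

    SortedSet<Text> splits = new TreeSet<Text>();
    for (int i = 1; i < 256; i++) {
      splits.add(new Text(String.format("%08x", i << 8)));
    }
    conn.tableOperations().create("exampleTable");
    conn.tableOperations().addSplits("exampleTable", splits);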

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
deleted file mode 100644
index 79151ee..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Test;
-
-public class CreateManyScannersIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  @Test
-  public void run() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    for (int i = 0; i < 100000; i++) {
-      c.createScanner(tableName, Authorizations.EMPTY);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
deleted file mode 100644
index b383d0a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class CredentialsIT extends AccumuloClusterHarness {
-
-  private boolean saslEnabled;
-  private String username;
-  private String password;
-  private Instance inst;
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Before
-  public void createLocalUser() throws AccumuloException, AccumuloSecurityException {
-    Connector conn = getConnector();
-    inst = conn.getInstance();
-
-    ClientConfiguration clientConf = cluster.getClientConfig();
-    ClusterUser user = getUser(0);
-    username = user.getPrincipal();
-    saslEnabled = clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false);
-    // Create the user if it doesn't exist
-    Set<String> users = conn.securityOperations().listLocalUsers();
-    if (!users.contains(username)) {
-      PasswordToken passwdToken = null;
-      if (!saslEnabled) {
-        password = user.getPassword();
-        passwdToken = new PasswordToken(password);
-      }
-      conn.securityOperations().createLocalUser(username, passwdToken);
-    }
-  }
-
-  @After
-  public void deleteLocalUser() throws Exception {
-    if (saslEnabled) {
-      ClusterUser root = getAdminUser();
-      UserGroupInformation.loginUserFromKeytab(root.getPrincipal(), root.getKeytab().getAbsolutePath());
-    }
-    getConnector().securityOperations().dropLocalUser(username);
-  }
-
-  @Test
-  public void testConnectorWithDestroyedToken() throws Exception {
-    AuthenticationToken token = getUser(0).getToken();
-    assertFalse(token.isDestroyed());
-    token.destroy();
-    assertTrue(token.isDestroyed());
-    try {
-      inst.getConnector("non_existent_user", token);
-      fail();
-    } catch (AccumuloSecurityException e) {
-      assertTrue(e.getSecurityErrorCode().equals(SecurityErrorCode.TOKEN_EXPIRED));
-    }
-  }
-
-  @Test
-  public void testDestroyTokenBeforeRPC() throws Exception {
-    AuthenticationToken token = getUser(0).getToken();
-    Connector userConnector = inst.getConnector(username, token);
-    Scanner scanner = userConnector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    assertFalse(token.isDestroyed());
-    token.destroy();
-    assertTrue(token.isDestroyed());
-    try {
-      Iterator<Entry<Key,Value>> iter = scanner.iterator();
-      while (iter.hasNext())
-        fail();
-      fail();
-    } catch (Exception e) {
-      assertTrue(e instanceof RuntimeException);
-      assertTrue(e.getCause() instanceof AccumuloSecurityException);
-      assertTrue(AccumuloSecurityException.class.cast(e.getCause()).getSecurityErrorCode().equals(SecurityErrorCode.TOKEN_EXPIRED));
-    }
-  }
-
-}
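
Both tests hinge on AuthenticationToken being destroyable: after destroy() is called, any RPC attempted with that token is expected to fail with SecurityErrorCode.TOKEN_EXPIRED. A minimal sketch; the password is illustrative:

    AuthenticationToken token = new PasswordToken("secret");
    assertFalse(token.isDestroyed());
    token.destroy(); // wipes the credential material held in memory
    assertTrue(token.isDestroyed());
    // a subsequent instance.getConnector(user, token) call, or a scan on a
    // connector built from this token, should fail with TOKEN_EXPIRED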

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
deleted file mode 100644
index 2650c89..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-
-import java.util.Map;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Iterators;
-
-public class DeleteEverythingIT extends AccumuloClusterHarness {
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    Map<String,String> siteConfig = cfg.getSiteConfig();
-    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1s");
-    cfg.setSiteConfig(siteConfig);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  private String majcDelay;
-
-  @Before
-  public void updateMajcDelay() throws Exception {
-    Connector c = getConnector();
-    majcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
-    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1s");
-    if (getClusterType() == ClusterType.STANDALONE) {
-      // Gotta wait for the cluster to get out of the default sleep value
-      Thread.sleep(AccumuloConfiguration.getTimeInMillis(majcDelay));
-    }
-  }
-
-  @After
-  public void resetMajcDelay() throws Exception {
-    Connector c = getConnector();
-    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
-  }
-
-  @Test
-  public void run() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-    Mutation m = new Mutation(new Text("foo"));
-    m.put(new Text("bar"), new Text("1910"), new Value("5".getBytes(UTF_8)));
-    bw.addMutation(m);
-    bw.flush();
-
-    getConnector().tableOperations().flush(tableName, null, null, true);
-
-    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
-
-    m = new Mutation(new Text("foo"));
-    m.putDelete(new Text("bar"), new Text("1910"));
-    bw.addMutation(m);
-    bw.flush();
-
-    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
-    scanner.setRange(new Range());
-    int count = Iterators.size(scanner.iterator());
-    assertEquals("count == " + count, 0, count);
-    getConnector().tableOperations().flush(tableName, null, null, true);
-
-    getConnector().tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
-    UtilWaitThread.sleep(4000);
-
-    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
-
-    bw.close();
-
-    count = Iterables.size(scanner);
-
-    if (count != 0)
-      throw new Exception("count == " + count);
-  }
-}
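
The flush-then-compact sequence above is the usual way to push data through the tablet server's file lifecycle in a test: flush(..., true) blocks until in-memory data is minor-compacted to an RFile, and lowering table.compaction.major.ratio to 1.0 encourages a major compaction that can drop the deleted entries. A sketch, assuming a Connector named conn; the table name is illustrative:

    conn.tableOperations().flush("exampleTable", null, null, true); // wait for the flush
    conn.tableOperations().setProperty("exampleTable", Property.TABLE_MAJC_RATIO.getKey(), "1.0");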

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
deleted file mode 100644
index 79c4e60..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.accumulo.cluster.AccumuloCluster;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts.Password;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.TestRandomDeletes;
-import org.apache.accumulo.test.VerifyIngest;
-import org.junit.Test;
-
-import com.google.common.base.Charsets;
-
-public class DeleteIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void test() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    AuthenticationToken token = getAdminToken();
-    if (token instanceof KerberosToken) {
-      deleteTest(c, getCluster(), getAdminPrincipal(), null, tableName, getAdminUser().getKeytab().getAbsolutePath());
-    } else if (token instanceof PasswordToken) {
-      PasswordToken passwdToken = (PasswordToken) token;
-      deleteTest(c, getCluster(), getAdminPrincipal(), new String(passwdToken.getPassword(), Charsets.UTF_8), tableName, null);
-    }
-  }
-
-  public static void deleteTest(Connector c, AccumuloCluster cluster, String user, String password, String tableName, String keytab) throws Exception {
-    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
-    TestIngest.Opts opts = new TestIngest.Opts();
-    vopts.setTableName(tableName);
-    opts.setTableName(tableName);
-    vopts.rows = opts.rows = 1000;
-    vopts.cols = opts.cols = 1;
-    vopts.random = opts.random = 56;
-
-    assertTrue("Expected one of password or keytab", null != password || null != keytab);
-    if (null != password) {
-      assertNull("Given password, expected null keytab", keytab);
-      Password passwd = new Password(password);
-      opts.setPassword(passwd);
-      opts.setPrincipal(user);
-      vopts.setPassword(passwd);
-      vopts.setPrincipal(user);
-    }
-    if (null != keytab) {
-      assertNull("Given keytab, expect null password", password);
-      ClientConfiguration clientConfig = cluster.getClientConfig();
-      opts.updateKerberosCredentials(clientConfig);
-      vopts.updateKerberosCredentials(clientConfig);
-    }
-
-    BatchWriterOpts BWOPTS = new BatchWriterOpts();
-    TestIngest.ingest(c, opts, BWOPTS);
-
-    String[] args = null;
-
-    assertTrue("Expected one of password or keytab", null != password || null != keytab);
-    if (null != password) {
-      assertNull("Given password, expected null keytab", keytab);
-      args = new String[] {"-u", user, "-p", password, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--table", tableName};
-    }
-    if (null != keytab) {
-      assertNull("Given keytab, expect null password", password);
-      args = new String[] {"-u", user, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--table", tableName, "--keytab", keytab};
-    }
-
-    assertEquals(0, cluster.getClusterControl().exec(TestRandomDeletes.class, args));
-    TestIngest.ingest(c, opts, BWOPTS);
-    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
deleted file mode 100644
index e4a8451..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-public class DeleteRowsIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 10 * 60;
-  }
-
-  private static final Logger log = LoggerFactory.getLogger(DeleteRowsIT.class);
-
-  private static final int ROWS_PER_TABLET = 10;
-  private static final String[] LETTERS = new String[] {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
-      "u", "v", "w", "x", "y", "z"};
-  static final SortedSet<Text> SPLITS = new TreeSet<Text>();
-  static {
-    for (String alpha : LETTERS) {
-      SPLITS.add(new Text(alpha));
-    }
-  }
-  static final List<String> ROWS = new ArrayList<String>(Arrays.asList(LETTERS));
-  static {
-    // put data on first and last tablet
-    ROWS.add("A");
-    ROWS.add("{");
-  }
-
-  @Test(timeout = 5 * 60 * 1000)
-  public void testDeleteAllRows() throws Exception {
-    Connector c = getConnector();
-    String[] tableNames = this.getUniqueNames(20);
-    for (String tableName : tableNames) {
-      c.tableOperations().create(tableName);
-      c.tableOperations().deleteRows(tableName, null, null);
-      Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
-      assertEquals(0, Iterators.size(scanner.iterator()));
-    }
-  }
-
-  @Test
-  public void testManyRows() throws Exception {
-    // Delete ranges of rows, and verify the tablets are removed.
-    int i = 0;
-    // Eliminate whole tablets
-    String tableName = getUniqueNames(1)[0];
-    testSplit(tableName + i++, "f", "h", "abcdefijklmnopqrstuvwxyz", 260);
-    // Eliminate whole tablets, partial first tablet
-    testSplit(tableName + i++, "f1", "h", "abcdeff1ijklmnopqrstuvwxyz", 262);
-    // Eliminate whole tablets, partial last tablet
-    testSplit(tableName + i++, "f", "h1", "abcdefijklmnopqrstuvwxyz", 258);
-    // Eliminate whole tablets, partial first and last tablet
-    testSplit(tableName + i++, "f1", "h1", "abcdeff1ijklmnopqrstuvwxyz", 260);
-    // Eliminate one tablet
-    testSplit(tableName + i++, "f", "g", "abcdefhijklmnopqrstuvwxyz", 270);
-    // Eliminate partial tablet, matches start split
-    testSplit(tableName + i++, "f", "f1", "abcdefghijklmnopqrstuvwxyz", 278);
-    // Eliminate partial tablet, matches end split
-    testSplit(tableName + i++, "f1", "g", "abcdeff1hijklmnopqrstuvwxyz", 272);
-    // Eliminate tablets starting at -inf
-    testSplit(tableName + i++, null, "h", "ijklmnopqrstuvwxyz", 200);
-    // Eliminate tablets ending at +inf
-    testSplit(tableName + i++, "t", null, "abcdefghijklmnopqrst", 200);
-    // Eliminate some rows inside one tablet
-    testSplit(tableName + i++, "t0", "t2", "abcdefghijklmnopqrstt0uvwxyz", 278);
-    // Eliminate some rows in the first tablet
-    testSplit(tableName + i++, null, "A1", "abcdefghijklmnopqrstuvwxyz", 278);
-    // Eliminate some rows in the last tablet
-    testSplit(tableName + i++, "{1", null, "abcdefghijklmnopqrstuvwxyz{1", 272);
-    // Delete everything
-    testSplit(tableName + i++, null, null, "", 0);
-  }
-
-  private void testSplit(String table, String start, String end, String result, int entries) throws Exception {
-    // Put a bunch of rows on each tablet
-    Connector c = getConnector();
-    c.tableOperations().create(table);
-    BatchWriter bw = c.createBatchWriter(table, null);
-    for (String row : ROWS) {
-      for (int j = 0; j < ROWS_PER_TABLET; j++) {
-        Mutation m = new Mutation(row + j);
-        m.put("cf", "cq", "value");
-        bw.addMutation(m);
-      }
-    }
-    bw.flush();
-    bw.close();
-    // Split the table
-    c.tableOperations().addSplits(table, SPLITS);
-
-    Text startText = start == null ? null : new Text(start);
-    Text endText = end == null ? null : new Text(end);
-    c.tableOperations().deleteRows(table, startText, endText);
-    Collection<Text> remainingSplits = c.tableOperations().listSplits(table);
-    StringBuilder sb = new StringBuilder();
-    // See that whole tablets are removed
-    for (Text split : remainingSplits)
-      sb.append(split.toString());
-    assertEquals(result, sb.toString());
-    // See that the rows are really deleted
-    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
-    int count = 0;
-    for (Entry<Key,Value> entry : scanner) {
-      Text row = entry.getKey().getRow();
-      assertTrue((startText == null || row.compareTo(startText) <= 0) || (endText == null || row.compareTo(endText) > 0));
-      assertTrue(startText != null || endText != null);
-      count++;
-    }
-    log.info("Finished table " + table);
-    assertEquals(entries, count);
-  }
-
-}
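
The semantics exercised above: TableOperations.deleteRows removes the rows in (start, end] (start exclusive, end inclusive), and a null bound leaves that side of the range open, so null/null truncates the table. A condensed sketch, assuming a Connector named conn; the table name is illustrative:

    // drops rows r with "f" < r <= "h"; tablets wholly inside the range go away too
    conn.tableOperations().deleteRows("exampleTable", new Text("f"), new Text("h"));
    // a null start and end delete every row in the table
    conn.tableOperations().deleteRows("exampleTable", null, null);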

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
deleted file mode 100644
index dcc3124..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-// attempt to reproduce ACCUMULO-315
-public class DeleteRowsSplitIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 4 * 60;
-  }
-
-  private static final Logger log = LoggerFactory.getLogger(DeleteRowsSplitIT.class);
-
-  private static final String LETTERS = "abcdefghijklmnopqrstuvwxyz";
-  static final SortedSet<Text> SPLITS = new TreeSet<Text>();
-  static final List<String> ROWS = new ArrayList<String>();
-  static {
-    for (byte b : LETTERS.getBytes(UTF_8)) {
-      SPLITS.add(new Text(new byte[] {b}));
-      ROWS.add(new String(new byte[] {b}, UTF_8));
-    }
-  }
-
-  @Test
-  public void run() throws Exception {
-    // Delete ranges of rows, and verify they are removed
-    // Do this while adding many splits
-    final String tableName = getUniqueNames(1)[0];
-    final Connector conn = getConnector();
-
-    // Eliminate whole tablets
-    for (int test = 0; test < 10; test++) {
-      // create a table
-      log.info("Test " + test);
-      conn.tableOperations().create(tableName);
-
-      // put some data in it
-      fillTable(conn, tableName);
-
-      // generate a random delete range
-      final Text start = new Text();
-      final Text end = new Text();
-      generateRandomRange(start, end);
-
-      // initiate the delete range
-      final boolean fail[] = {false};
-      Thread t = new Thread() {
-        @Override
-        public void run() {
-          try {
-            // split the table
-            final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end.toString() + "\0"));
-            conn.tableOperations().addSplits(tableName, afterEnd);
-          } catch (Exception ex) {
-            log.error("Exception", ex);
-            synchronized (fail) {
-              fail[0] = true;
-            }
-          }
-        }
-      };
-      t.start();
-
-      UtilWaitThread.sleep(test * 2);
-
-      conn.tableOperations().deleteRows(tableName, start, end);
-
-      t.join();
-      synchronized (fail) {
-        assertTrue(!fail[0]);
-      }
-
-      // scan the table
-      Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
-      for (Entry<Key,Value> entry : scanner) {
-        Text row = entry.getKey().getRow();
-        assertTrue(row.compareTo(start) <= 0 || row.compareTo(end) > 0);
-      }
-
-      // delete the table
-      conn.tableOperations().delete(tableName);
-    }
-  }
-
-  private void generateRandomRange(Text start, Text end) {
-    List<String> bunch = new ArrayList<String>(ROWS);
-    Collections.shuffle(bunch);
-    if (bunch.get(0).compareTo(bunch.get(1)) < 0) {
-      start.set(bunch.get(0));
-      end.set(bunch.get(1));
-    } else {
-      start.set(bunch.get(1));
-      end.set(bunch.get(0));
-    }
-  }
-
-  private void fillTable(Connector conn, String table) throws Exception {
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
-    for (String row : ROWS) {
-      Mutation m = new Mutation(row);
-      m.put("cf", "cq", "value");
-      bw.addMutation(m);
-    }
-    bw.close();
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
deleted file mode 100644
index 7c94163..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertFalse;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.Future;
-
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.SimpleThreadPool;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-// ACCUMULO-2361
-public class DeleteTableDuringSplitIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 15 * 60;
-  }
-
-  @Test
-  public void test() throws Exception {
-    // 96 invocations, 8 at a time
-    int batches = 12, batchSize = 8;
-    String[] tableNames = getUniqueNames(batches * batchSize);
-    // make a bunch of tables
-    for (String tableName : tableNames) {
-      getConnector().tableOperations().create(tableName);
-    }
-    final SortedSet<Text> splits = new TreeSet<Text>();
-    for (byte i = 0; i < 100; i++) {
-      splits.add(new Text(new byte[] {0, 0, i}));
-    }
-
-    List<Future<?>> results = new ArrayList<Future<?>>();
-    List<Runnable> tasks = new ArrayList<Runnable>();
-    SimpleThreadPool es = new SimpleThreadPool(batchSize * 2, "concurrent-api-requests");
-    for (String tableName : tableNames) {
-      final String finalName = tableName;
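-      // queue a split task and a delayed delete for each table so the delete lands mid-split (ACCUMULO-2361)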
-      tasks.add(new Runnable() {
-        @Override
-        public void run() {
-          try {
-            getConnector().tableOperations().addSplits(finalName, splits);
-          } catch (TableNotFoundException ex) {
-            // expected, ignore
-          } catch (Exception ex) {
-            throw new RuntimeException(finalName, ex);
-          }
-        }
-      });
-      tasks.add(new Runnable() {
-        @Override
-        public void run() {
-          try {
-            UtilWaitThread.sleep(500);
-            getConnector().tableOperations().delete(finalName);
-          } catch (Exception ex) {
-            throw new RuntimeException(ex);
-          }
-        }
-      });
-    }
-    Iterator<Runnable> itr = tasks.iterator();
-    for (int batch = 0; batch < batches; batch++) {
-      for (int i = 0; i < batchSize; i++) {
-        Future<?> f = es.submit(itr.next());
-        results.add(f);
-        f = es.submit(itr.next());
-        results.add(f);
-      }
-      for (Future<?> f : results) {
-        f.get();
-      }
-      results.clear();
-    }
-    // Shut down the ES
-    List<Runnable> queued = es.shutdownNow();
-    Assert.assertTrue("Had more tasks to run", queued.isEmpty());
-    Assert.assertFalse("Had more tasks that needed to be submitted", 
itr.hasNext());
-    for (String tableName : tableNames) {
-      assertFalse(getConnector().tableOperations().exists(tableName));
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java
deleted file mode 100644
index ca8003a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.EnumSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.junit.Test;
-
-// ACCUMULO-2880
-public class DeletedTablesDontFlushIT extends SharedMiniClusterBase {
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  @Test
-  public void test() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
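-    // a minc-scoped SlowIterator makes any flush crawl; the quick delete below shows the deleted table is never flushed (ACCUMULO-2880)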
-    IteratorSetting setting = new IteratorSetting(100, SlowIterator.class);
-    SlowIterator.setSleepTime(setting, 1000);
-    c.tableOperations().attachIterator(tableName, setting, EnumSet.of(IteratorScope.minc));
-    // let the configuration change propagate through ZooKeeper
-    UtilWaitThread.sleep(1000);
-
-    Mutation m = new Mutation("xyzzy");
-    for (int i = 0; i < 100; i++) {
-      m.put("cf", "" + i, new Value(new byte[] {}));
-    }
-    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-    bw.addMutation(m);
-    bw.close();
-    // should go fast
-    c.tableOperations().delete(tableName);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DurabilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DurabilityIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DurabilityIT.java
deleted file mode 100644
index 49e004f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DurabilityIT.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-public class DurabilityIT extends ConfigurableMacBase {
-  private static final Logger log = LoggerFactory.getLogger(DurabilityIT.class);
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
-    cfg.setNumTservers(1);
-  }
-
-  static final long N = 100000;
-
-  private String[] init() throws Exception {
-    String[] tableNames = getUniqueNames(4);
-    Connector c = getConnector();
-    TableOperations tableOps = c.tableOperations();
-    createTable(tableNames[0]);
-    createTable(tableNames[1]);
-    createTable(tableNames[2]);
-    createTable(tableNames[3]);
-    // default is sync
-    tableOps.setProperty(tableNames[1], Property.TABLE_DURABILITY.getKey(), "flush");
-    tableOps.setProperty(tableNames[2], Property.TABLE_DURABILITY.getKey(), "log");
-    tableOps.setProperty(tableNames[3], Property.TABLE_DURABILITY.getKey(), "none");
-    return tableNames;
-  }
-
-  private void cleanup(String[] tableNames) throws Exception {
-    Connector c = getConnector();
-    for (String tableName : tableNames) {
-      c.tableOperations().delete(tableName);
-    }
-  }
-
-  private void createTable(String tableName) throws Exception {
-    TableOperations tableOps = getConnector().tableOperations();
-    tableOps.create(tableName);
-  }
-
-  @Test(timeout = 2 * 60 * 1000)
-  public void testWriteSpeed() throws Exception {
-    TableOperations tableOps = getConnector().tableOperations();
-    String tableNames[] = init();
-    // write some gunk, then delete the table so it cannot skew the timings of the runs that follow
-    // sync
-    long t0 = writeSome(tableNames[0], N);
-    tableOps.delete(tableNames[0]);
-    // flush
-    long t1 = writeSome(tableNames[1], N);
-    tableOps.delete(tableNames[1]);
-    // log
-    long t2 = writeSome(tableNames[2], N);
-    tableOps.delete(tableNames[2]);
-    // none
-    long t3 = writeSome(tableNames[3], N);
-    tableOps.delete(tableNames[3]);
-    System.out.println(String.format("sync %d flush %d log %d none %d", t0, t1, t2, t3));
-    assertTrue("flush should be faster than sync", t0 > t1);
-    assertTrue("log should be faster than flush", t1 > t2);
-    assertTrue("no durability should be faster than log", t2 > t3);
-  }
-
-  @Test(timeout = 4 * 60 * 1000)
-  public void testSync() throws Exception {
-    String tableNames[] = init();
-    // sync table should lose nothing
-    writeSome(tableNames[0], N);
-    restartTServer();
-    assertEquals(N, readSome(tableNames[0]));
-    cleanup(tableNames);
-  }
-
-  @Test(timeout = 4 * 60 * 1000)
-  public void testFlush() throws Exception {
-    String tableNames[] = init();
-    // flush table won't lose anything since we're not losing power/dfs
-    writeSome(tableNames[1], N);
-    restartTServer();
-    assertEquals(N, readSome(tableNames[1]));
-    cleanup(tableNames);
-  }
-
-  @Test(timeout = 4 * 60 * 1000)
-  public void testLog() throws Exception {
-    String tableNames[] = init();
-    // we're probably going to lose something with the log setting
-    writeSome(tableNames[2], N);
-    restartTServer();
-    long numResults = readSome(tableNames[2]);
-    assertTrue("Expected " + N + " >= " + numResults, N >= numResults);
-    cleanup(tableNames);
-  }
-
-  @Test(timeout = 4 * 60 * 1000)
-  public void testNone() throws Exception {
-    String tableNames[] = init();
-    // probably won't get any data back without logging
-    writeSome(tableNames[3], N);
-    restartTServer();
-    long numResults = readSome(tableNames[3]);
-    assertTrue("Expected " + N + " >= " + numResults, N >= numResults);
-    cleanup(tableNames);
-  }
-
-  @Test(timeout = 4 * 60 * 1000)
-  public void testIncreaseDurability() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "none");
-    writeSome(tableName, N);
-    restartTServer();
-    long numResults = readSome(tableName);
-    assertTrue("Expected " + N + " >= " + numResults, N >= numResults);
-    c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "sync");
-    writeSome(tableName, N);
-    restartTServer();
-    assertEquals(N, readSome(tableName));
-  }
-
-  private static Map<String,String> map(Iterable<Entry<String,String>> entries) {
-    Map<String,String> result = new HashMap<String,String>();
-    for (Entry<String,String> entry : entries) {
-      result.put(entry.getKey(), entry.getValue());
-    }
-    return result;
-  }
-
-  @Test(timeout = 4 * 60 * 1000)
-  public void testMetaDurability() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.instanceOperations().setProperty(Property.TABLE_DURABILITY.getKey(), "none");
-    Map<String,String> props = map(c.tableOperations().getProperties(MetadataTable.NAME));
-    assertEquals("sync", props.get(Property.TABLE_DURABILITY.getKey()));
-    c.tableOperations().create(tableName);
-    props = map(c.tableOperations().getProperties(tableName));
-    assertEquals("none", props.get(Property.TABLE_DURABILITY.getKey()));
-    restartTServer();
-    assertTrue(c.tableOperations().exists(tableName));
-  }
-
-  private long readSome(String table) throws Exception {
-    return Iterators.size(getConnector().createScanner(table, Authorizations.EMPTY).iterator());
-  }
-
-  private void restartTServer() throws Exception {
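-    // kill the lone tablet server, then restart so recovery replays only what the durability level persisted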
-    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
-      cluster.killProcess(ServerType.TABLET_SERVER, proc);
-    }
-    cluster.start();
-  }
-
-  private long writeSome(String table, long count) throws Exception {
-    int iterations = 5;
-    long[] attempts = new long[iterations];
-    for (int attempt = 0; attempt < iterations; attempt++) {
-      long now = System.currentTimeMillis();
-      Connector c = getConnector();
-      BatchWriter bw = c.createBatchWriter(table, null);
-      for (int i = 1; i < count + 1; i++) {
-        Mutation m = new Mutation("" + i);
-        m.put("", "", "");
-        bw.addMutation(m);
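-        // flush roughly every 1% of the rows so the durability setting is exercised throughout the run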
-        if (i % (Math.max(1, count / 100)) == 0) {
-          bw.flush();
-        }
-      }
-      bw.close();
-      attempts[attempt] = System.currentTimeMillis() - now;
-    }
-    Arrays.sort(attempts);
-    log.info("Attempt durations: {}", Arrays.toString(attempts));
-    // Return the median duration
-    return attempts[2];
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
deleted file mode 100644
index 2251d4b..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.fail;
-
-import java.util.Map;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.master.thrift.MasterClientService;
-import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
-import org.apache.accumulo.core.master.thrift.TableInfo;
-import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class DynamicThreadPoolsIT extends AccumuloClusterHarness {
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setNumTservers(1);
-    Map<String,String> siteConfig = cfg.getSiteConfig();
-    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
-    cfg.setSiteConfig(siteConfig);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 4 * 60;
-  }
-
-  private String majcDelay;
-
-  @Before
-  public void updateMajcDelay() throws Exception {
-    Connector c = getConnector();
-    majcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
-    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
-    if (getClusterType() == ClusterType.STANDALONE) {
-      Thread.sleep(AccumuloConfiguration.getTimeInMillis(majcDelay));
-    }
-  }
-
-  @After
-  public void resetMajcDelay() throws Exception {
-    Connector c = getConnector();
-    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
-  }
-
-  @Test
-  public void test() throws Exception {
-    final String[] tables = getUniqueNames(15);
-    String firstTable = tables[0];
-    Connector c = getConnector();
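-    // raise the cap on concurrent major compactions; the tserver should grow its compaction thread pool on the fly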
-    c.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "5");
-    TestIngest.Opts opts = new TestIngest.Opts();
-    opts.rows = 500 * 1000;
-    opts.createTable = true;
-    opts.setTableName(firstTable);
-    ClientConfiguration clientConf = cluster.getClientConfig();
-    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      opts.updateKerberosCredentials(clientConf);
-    } else {
-      opts.setPrincipal(getAdminPrincipal());
-    }
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
-    c.tableOperations().flush(firstTable, null, null, true);
-    for (int i = 1; i < tables.length; i++)
-      c.tableOperations().clone(firstTable, tables[i], true, null, null);
-    UtilWaitThread.sleep(11 * 1000); // time between checks of the thread pool sizes
-    Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
-    for (int i = 1; i < tables.length; i++)
-      c.tableOperations().compact(tables[i], null, null, true, false);
-    for (int i = 0; i < 30; i++) {
-      int count = 0;
-      MasterClientService.Iface client = null;
-      MasterMonitorInfo stats = null;
-      try {
-        client = MasterClient.getConnectionWithRetry(new ClientContext(c.getInstance(), creds, clientConf));
-        stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
-      } finally {
-        if (client != null)
-          MasterClient.close(client);
-      }
-      for (TabletServerStatus server : stats.tServerInfo) {
-        for (TableInfo table : server.tableMap.values()) {
-          count += table.majors.running;
-        }
-      }
-      System.out.println("count " + count);
-      if (count > 3)
-        return;
-      UtilWaitThread.sleep(500);
-    }
-    fail("Could not observe higher number of threads after changing the 
config");
-  }
-}
