Merge branch '1.7' into 1.8

Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a4afd1bf
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a4afd1bf
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a4afd1bf

Branch: refs/heads/1.8
Commit: a4afd1bfd73cd0425b110dd20cefb47ea82341d1
Parents: a4fdcf2 cead397
Author: Christopher Tubbs <ctubb...@apache.org>
Authored: Thu Sep 1 14:46:40 2016 -0400
Committer: Christopher Tubbs <ctubb...@apache.org>
Committed: Thu Sep 1 14:46:40 2016 -0400

----------------------------------------------------------------------
 TESTING.md                                      | 26 +++++++++++----
 maven-plugin/src/it/plugin-test/pom.xml         | 12 +++++++
 pom.xml                                         | 33 ++++++--------------
 .../harness/AccumuloClusterHarness.java         |  4 +--
 .../accumulo/harness/SharedMiniClusterBase.java |  4 +--
 .../org/apache/accumulo/test/InMemoryMapIT.java | 11 +++++--
 .../org/apache/accumulo/test/NamespacesIT.java  |  4 +--
 .../org/apache/accumulo/test/ShellServerIT.java |  4 +++
 .../test/categories/AnyClusterTest.java         | 25 ---------------
 .../test/categories/MiniClusterOnlyTest.java    | 24 --------------
 .../test/categories/MiniClusterOnlyTests.java   | 22 +++++++++++++
 .../StandaloneCapableClusterTests.java          | 23 ++++++++++++++
 .../accumulo/test/categories/SunnyDayTests.java | 23 ++++++++++++++
 .../accumulo/test/functional/ClassLoaderIT.java |  4 +--
 .../test/functional/ConfigurableMacBase.java    |  4 +--
 .../accumulo/test/functional/ExamplesIT.java    |  4 +++
 .../accumulo/test/functional/KerberosIT.java    |  4 +--
 .../test/functional/KerberosProxyIT.java        |  4 +--
 .../test/functional/KerberosRenewalIT.java      |  4 +--
 .../accumulo/test/functional/NativeMapIT.java   |  3 ++
 .../accumulo/test/functional/PermissionsIT.java |  4 +--
 .../accumulo/test/functional/ReadWriteIT.java   |  4 +++
 .../accumulo/test/functional/TableIT.java       |  4 +--
 .../accumulo/test/proxy/TCompactProxyIT.java    |  7 +++--
 .../test/replication/CyclicReplicationIT.java   |  9 +++---
 .../test/replication/KerberosReplicationIT.java |  4 +--
 .../accumulo/test/start/KeywordStartIT.java     |  3 ++
 27 files changed, 167 insertions(+), 110 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a4afd1bf/TESTING.md
----------------------------------------------------------------------
diff --cc TESTING.md
index 9799397,c3a9e06..34c8073
--- a/TESTING.md
+++ b/TESTING.md
@@@ -79,25 -91,10 +91,27 @@@ Use of a standalone cluster can be enab
 providing a Java properties file on the Maven command line. The use of a properties file is recommended since it is
 typically a fixed file per standalone cluster you want to run the tests against.
 
- These tests will always run during the `integration-test` lifecycle phase using `mvn verify`.
+ These tests will run by default during the `integration-test` lifecycle phase using `mvn verify`.
+ To execute only these tests, use `mvn verify -Dfailsafe.groups=org.apache.accumulo.test.categories.StandaloneCapableClusterTests`.
+ To execute everything except these tests, use `mvn verify -Dfailsafe.excludedGroups=org.apache.accumulo.test.categories.StandaloneCapableClusterTests`.
  
 +### Performance tests
 +
 +Performance tests refer to a small subset of integration tests which are not activated by default. These tests allow
 +developers to write tests which specifically exercise expected performance which may be dependent on the available
 +resources of the host machine. Normal integration tests should be capable of running anywhere with a lower-bound on
 +available memory.
 +
 +These tests are designated using the JUnit Category annotation with the `PerformanceTest` interface in the
 +accumulo-test module. See the `PerformanceTest` interface for more information on how to use this to write your
 +own performance test.
 +
 +To invoke the performance tests, activate the `performanceTests` Maven profile in addition to the integration-test
 +or verify Maven lifecycle. For example, `mvn verify -PperformanceTests` would invoke all of the integration tests:
 +both normal integration tests and the performance tests. There is presently no way to invoke only the performance
 +tests without the rest of the integration tests.
 +
 +
  ## Configuration for Standalone clusters
  
  The following properties can be used to configure a standalone cluster:
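
For reference, a JUnit category is nothing more than a marker type passed to the @Category annotation. Below is a minimal sketch of a test opting into the performance category; the `org.apache.accumulo.test.PerformanceTest` name comes from this commit's pom.xml, while the test class itself is invented for illustration:

    import org.apache.accumulo.test.PerformanceTest;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    // Hypothetical IT; only the category wiring reflects this commit.
    @Category(PerformanceTest.class)
    public class ExamplePerformanceIT {
      @Test
      public void timingSensitiveWorkload() throws Exception {
        // exercise behavior whose acceptable runtime depends on host resources
      }
    }

The failsafe plugin then selects or skips such tests via the groups/excludedGroups options shown above.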

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a4afd1bf/maven-plugin/src/it/plugin-test/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a4afd1bf/pom.xml
----------------------------------------------------------------------
diff --cc pom.xml
index 8106dff,62f58bb..afd134b
--- a/pom.xml
+++ b/pom.xml
@@@ -116,12 -115,6 +116,8 @@@
      <url>https://builds.apache.org/view/A-D/view/Accumulo/</url>
    </ciManagement>
    <properties>
-     <accumulo.anyClusterTests>org.apache.accumulo.test.categories.AnyClusterTest</accumulo.anyClusterTests>
-     <accumulo.it.excludedGroups>${accumulo.performanceTests}</accumulo.it.excludedGroups>
-     <accumulo.it.groups>${accumulo.anyClusterTests},${accumulo.miniclusterTests}</accumulo.it.groups>
-     <accumulo.miniclusterTests>org.apache.accumulo.test.categories.MiniClusterOnlyTest</accumulo.miniclusterTests>
 +    <!-- Interface used to separate tests with JUnit category -->
 +    <accumulo.performanceTests>org.apache.accumulo.test.PerformanceTest</accumulo.performanceTests>
      <!-- used for filtering the java source with the current version -->
      <accumulo.release.version>${project.version}</accumulo.release.version>
      <assembly.tarLongFileMode>posix</assembly.tarLongFileMode>
@@@ -133,6 -124,8 +129,8 @@@
      <eclipseFormatterStyle>${project.parent.basedir}/contrib/Eclipse-Accumulo-Codestyle.xml</eclipseFormatterStyle>
      <!-- extra release args for testing -->
      <extraReleaseArgs />
  -    <failsafe.excludedGroups />
 ++    <failsafe.excludedGroups>${accumulo.performanceTests}</failsafe.excludedGroups>
 +     <failsafe.groups />
      <!-- findbugs-maven-plugin won't work on jdk8 or later; set to 3.0.0 or newer -->
      <findbugs.version>3.0.3</findbugs.version>
      <!-- surefire/failsafe plugin option -->
@@@ -149,14 -142,18 +147,16 @@@
      <maven.min-version>3.0.5</maven.min-version>
      <!-- surefire/failsafe plugin option -->
      <maven.test.redirectTestOutputToFile>true</maven.test.redirectTestOutputToFile>
 -    <powermock.version>1.6.4</powermock.version>
 +    <powermock.version>1.6.5</powermock.version>
      <!-- surefire/failsafe plugin option -->
      <reuseForks>false</reuseForks>
 -    <sealJars>false</sealJars>
 -    <!-- overwritten in hadoop profiles -->
 -    <slf4j.version>1.7.5</slf4j.version>
 +    <slf4j.version>1.7.21</slf4j.version>
      <sourceReleaseAssemblyDescriptor>source-release-tar</sourceReleaseAssemblyDescriptor>
+     <surefire.excludedGroups />
      <surefire.failIfNoSpecifiedTests>false</surefire.failIfNoSpecifiedTests>
+     <surefire.groups />
      <!-- Thrift version -->
 -    <thrift.version>0.9.1</thrift.version>
 +    <thrift.version>0.9.3</thrift.version>
      <!-- ZooKeeper version -->
      <zookeeper.version>3.4.6</zookeeper.version>
    </properties>
@@@ -1345,9 -1320,45 +1345,9 @@@
        <!-- Minimal testing profile. (a.k.a. SunnyDay) -->
        <id>sunny</id>
        <properties>
-         <it.test>ReadWriteIT,SimpleProxyIT,ExamplesIT,ShellServerIT</it.test>
+         <failsafe.groups>org.apache.accumulo.test.categories.SunnyDayTests</failsafe.groups>
        </properties>
      </profile>
 -    <!-- profile for our default Hadoop build
 -         unfortunately, has to duplicate one of our
 -         specified profiles. see MNG-3328 -->
 -    <profile>
 -      <id>hadoop-default</id>
 -      <activation>
 -        <property>
 -          <name>!hadoop.profile</name>
 -        </property>
 -      </activation>
 -      <properties>
 -        <!-- Denotes intention and allows the enforcer plugin to pass when
 -             the user is relying on default behavior; won't work to activate profile -->
 -        <hadoop.profile>2</hadoop.profile>
 -        <hadoop.version>2.2.0</hadoop.version>
 -        <httpclient.version>3.1</httpclient.version>
 -        <slf4j.version>1.7.5</slf4j.version>
 -      </properties>
 -    </profile>
 -    <!-- profile for building against Hadoop 2.x
 -     XXX Since this is the default, make sure to sync hadoop-default when changing.
 -    Activate using: mvn -Dhadoop.profile=2 -->
 -    <profile>
 -      <id>hadoop-2</id>
 -      <activation>
 -        <property>
 -          <name>hadoop.profile</name>
 -          <value>2</value>
 -        </property>
 -      </activation>
 -      <properties>
 -        <hadoop.version>2.2.0</hadoop.version>
 -        <httpclient.version>3.1</httpclient.version>
 -        <slf4j.version>1.7.5</slf4j.version>
 -      </properties>
 -    </profile>
      <profile>
        <id>jdk8</id>
        <activation>
@@@ -1371,33 -1382,6 +1371,34 @@@
        </build>
      </profile>
      <profile>
 +      <id>performanceTests</id>
 +      <build>
 +        <pluginManagement>
 +          <plugins>
 +            <!-- Add an additional execution for performance tests -->
 +            <plugin>
 +              <groupId>org.apache.maven.plugins</groupId>
 +              <artifactId>maven-failsafe-plugin</artifactId>
 +              <executions>
 +                <execution>
 +                  <!-- Run only the performance tests -->
 +                  <id>run-performance-tests</id>
 +                  <goals>
 +                    <goal>integration-test</goal>
 +                    <goal>verify</goal>
 +                  </goals>
 +                  <configuration>
++                    <excludedGroups />
 +                    <groups>${accumulo.performanceTests}</groups>
 +                  </configuration>
 +                </execution>
 +              </executions>
 +            </plugin>
 +          </plugins>
 +        </pluginManagement>
 +      </build>
 +    </profile>
 +    <profile>
        <id>aggregate-javadocs</id>
        <build>
          <pluginManagement>
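
With the sunny profile now keyed to `failsafe.groups` rather than an `it.test` name list, a test is included in the minimal pass by carrying the category. A hedged sketch with an invented test class (only the SunnyDayTests category comes from this commit):

    import org.apache.accumulo.test.categories.SunnyDayTests;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    // Any IT annotated this way would be picked up by `mvn verify -Psunny`.
    @Category(SunnyDayTests.class)
    public class ExampleSunnyDayIT {
      @Test
      public void quickSmokeCheck() throws Exception {
        // a fast, reliable check suitable for the minimal test pass
      }
    }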

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a4afd1bf/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
index 70d8dc7,0000000..4cdaf60
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
+++ b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
@@@ -1,341 -1,0 +1,341 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.harness;
 +
 +import static com.google.common.base.Preconditions.checkState;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static org.junit.Assert.fail;
 +
 +import java.io.IOException;
 +
 +import org.apache.accumulo.cluster.AccumuloCluster;
 +import org.apache.accumulo.cluster.ClusterControl;
 +import org.apache.accumulo.cluster.ClusterUser;
 +import org.apache.accumulo.cluster.ClusterUsers;
 +import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
 +import org.apache.accumulo.core.client.ClientConfiguration;
 +import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.admin.SecurityOperations;
 +import org.apache.accumulo.core.client.admin.TableOperations;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.KerberosToken;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.security.TablePermission;
 +import org.apache.accumulo.harness.conf.AccumuloClusterConfiguration;
 +import org.apache.accumulo.harness.conf.AccumuloClusterPropertyConfiguration;
 +import org.apache.accumulo.harness.conf.AccumuloMiniClusterConfiguration;
 +import org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
- import org.apache.accumulo.test.categories.AnyClusterTest;
++import org.apache.accumulo.test.categories.StandaloneCapableClusterTests;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.security.UserGroupInformation;
 +import org.junit.After;
 +import org.junit.AfterClass;
 +import org.junit.Assume;
 +import org.junit.Before;
 +import org.junit.BeforeClass;
 +import org.junit.experimental.categories.Category;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +/**
 + * General Integration-Test base class that provides access to an Accumulo instance for testing. This instance could be MAC or a standalone instance.
 + */
- @Category(AnyClusterTest.class)
++@Category(StandaloneCapableClusterTests.class)
 +public abstract class AccumuloClusterHarness extends AccumuloITBase implements MiniClusterConfigurationCallback, ClusterUsers {
 +  private static final Logger log = LoggerFactory.getLogger(AccumuloClusterHarness.class);
 +  private static final String TRUE = Boolean.toString(true);
 +
 +  public static enum ClusterType {
 +    MINI, STANDALONE;
 +
 +    public boolean isDynamic() {
 +      return this == MINI;
 +    }
 +  }
 +
 +  private static boolean initialized = false;
 +
 +  protected static AccumuloCluster cluster;
 +  protected static ClusterType type;
 +  protected static AccumuloClusterPropertyConfiguration clusterConf;
 +  protected static TestingKdc krb;
 +
 +  @BeforeClass
 +  public static void setUp() throws Exception {
 +    clusterConf = AccumuloClusterPropertyConfiguration.get();
 +    type = clusterConf.getClusterType();
 +
 +    if (ClusterType.MINI == type && TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION))) {
 +      krb = new TestingKdc();
 +      krb.start();
 +      log.info("MiniKdc started");
 +    }
 +
 +    initialized = true;
 +  }
 +
 +  @AfterClass
 +  public static void tearDownKdc() throws Exception {
 +    if (null != krb) {
 +      krb.stop();
 +    }
 +  }
 +
 +  /**
 +   * The {@link TestingKdc} used for this {@link AccumuloCluster}. Might be null.
 +   */
 +  public static TestingKdc getKdc() {
 +    return krb;
 +  }
 +
 +  @Before
 +  public void setupCluster() throws Exception {
 +    // Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
 +    Assume.assumeTrue(canRunTest(type));
 +
 +    switch (type) {
 +      case MINI:
 +        MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
 +        // Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
 +        MiniAccumuloClusterImpl impl = miniClusterHarness.create(this, getAdminToken(), krb);
 +        cluster = impl;
 +        // MAC makes a ClientConf for us, just set it
 +        ((AccumuloMiniClusterConfiguration) clusterConf).setClientConf(impl.getClientConfig());
 +        // Login as the "root" user
 +        if (null != krb) {
 +          ClusterUser rootUser = krb.getRootUser();
 +          // Log in the 'client' user
 +          UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
 +        }
 +        break;
 +      case STANDALONE:
 +        StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
 +        ClientConfiguration clientConf = conf.getClientConf();
 +        StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers(),
 +            conf.getAccumuloServerUser());
 +        // If these are provided in the configuration, pass them into the cluster
 +        standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
 +        standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
 +        standaloneCluster.setServerAccumuloConfDir(conf.getServerAccumuloConfDir());
 +        standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
 +
 +        // For SASL, we need to get the Hadoop configuration files as well otherwise UGI will log in as SIMPLE instead of KERBEROS
 +        Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
 +        if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
 +          UserGroupInformation.setConfiguration(hadoopConfiguration);
 +          // Login as the admin user to start the tests
 +          UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(), conf.getAdminKeytab().getAbsolutePath());
 +        }
 +
 +        // Set the implementation
 +        cluster = standaloneCluster;
 +        break;
 +      default:
 +        throw new RuntimeException("Unhandled type");
 +    }
 +
 +    if (type.isDynamic()) {
 +      cluster.start();
 +    } else {
 +      log.info("Removing tables which appear to be from a previous test run");
 +      cleanupTables();
 +      log.info("Removing users which appear to be from a previous test run");
 +      cleanupUsers();
 +    }
 +
 +    switch (type) {
 +      case MINI:
 +        if (null != krb) {
 +          final String traceTable = Property.TRACE_TABLE.getDefaultValue();
 +          final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
 +
 +          // Login as the trace user
 +          UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
 +
 +          // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
 +          UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
 +          Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());
 +
 +          // Then, log back in as the "root" user and do the grant
 +          UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
 +          conn = getConnector();
 +
 +          // Create the trace table
 +          conn.tableOperations().create(traceTable);
 +
 +          // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
 +          // to have the ability to read, write and alter the trace table
 +          conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
 +          conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
 +          conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
 +        }
 +        break;
 +      default:
 +        // do nothing
 +    }
 +  }
 +
 +  public void cleanupTables() throws Exception {
 +    final String tablePrefix = this.getClass().getSimpleName() + "_";
 +    final TableOperations tops = getConnector().tableOperations();
 +    for (String table : tops.list()) {
 +      if (table.startsWith(tablePrefix)) {
 +        log.debug("Removing table {}", table);
 +        tops.delete(table);
 +      }
 +    }
 +  }
 +
 +  public void cleanupUsers() throws Exception {
 +    final String userPrefix = this.getClass().getSimpleName();
 +    final SecurityOperations secOps = getConnector().securityOperations();
 +    for (String user : secOps.listLocalUsers()) {
 +      if (user.startsWith(userPrefix)) {
 +        log.info("Dropping local user {}", user);
 +        secOps.dropLocalUser(user);
 +      }
 +    }
 +  }
 +
 +  @After
 +  public void teardownCluster() throws Exception {
 +    if (null != cluster) {
 +      if (type.isDynamic()) {
 +        cluster.stop();
 +      } else {
 +        log.info("Removing tables which appear to be from the current test");
 +        cleanupTables();
 +        log.info("Removing users which appear to be from the current test");
 +        cleanupUsers();
 +      }
 +    }
 +  }
 +
 +  public static AccumuloCluster getCluster() {
 +    checkState(initialized);
 +    return cluster;
 +  }
 +
 +  public static ClusterControl getClusterControl() {
 +    checkState(initialized);
 +    return cluster.getClusterControl();
 +  }
 +
 +  public static ClusterType getClusterType() {
 +    checkState(initialized);
 +    return type;
 +  }
 +
 +  public static String getAdminPrincipal() {
 +    checkState(initialized);
 +    return clusterConf.getAdminPrincipal();
 +  }
 +
 +  public static AuthenticationToken getAdminToken() {
 +    checkState(initialized);
 +    return clusterConf.getAdminToken();
 +  }
 +
 +  @Override
 +  public ClusterUser getAdminUser() {
 +    switch (type) {
 +      case MINI:
 +        if (null == krb) {
 +          PasswordToken passwordToken = (PasswordToken) getAdminToken();
 +          return new ClusterUser(getAdminPrincipal(), new String(passwordToken.getPassword(), UTF_8));
 +        }
 +        return krb.getRootUser();
 +      case STANDALONE:
 +        return new ClusterUser(getAdminPrincipal(), ((StandaloneAccumuloClusterConfiguration) clusterConf).getAdminKeytab());
 +      default:
 +        throw new RuntimeException("Unknown cluster type");
 +    }
 +  }
 +
 +  @Override
 +  public ClusterUser getUser(int offset) {
 +    switch (type) {
 +      case MINI:
 +        if (null != krb) {
 +          // Defer to the TestingKdc when kerberos is on so we can get the keytab instead of a password
 +          return krb.getClientPrincipal(offset);
 +        } else {
 +          // Come up with a mostly unique name
 +          String principal = getClass().getSimpleName() + "_" + testName.getMethodName() + "_" + offset;
 +          // Username and password are the same
 +          return new ClusterUser(principal, principal);
 +        }
 +      case STANDALONE:
 +        return ((StandaloneAccumuloCluster) cluster).getUser(offset);
 +      default:
 +        throw new RuntimeException("Unknown cluster type");
 +    }
 +  }
 +
 +  public static FileSystem getFileSystem() throws IOException {
 +    checkState(initialized);
 +    return cluster.getFileSystem();
 +  }
 +
 +  public static AccumuloClusterConfiguration getClusterConfiguration() {
 +    checkState(initialized);
 +    return clusterConf;
 +  }
 +
 +  public Connector getConnector() {
 +    try {
 +      String princ = getAdminPrincipal();
 +      AuthenticationToken token = getAdminToken();
 +      log.debug("Creating connector as {} with {}", princ, token);
 +      return cluster.getConnector(princ, token);
 +    } catch (Exception e) {
 +      log.error("Could not connect to Accumulo", e);
 +      fail("Could not connect to Accumulo: " + e.getMessage());
 +
 +      throw new RuntimeException("Could not connect to Accumulo", e);
 +    }
 +  }
 +
 +  // TODO Really don't want this here. Will ultimately need to abstract configuration method away from MAConfig
 +  // and change over to something more generic
 +  @Override
 +  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {}
 +
 +  /**
 +   * A test may not be capable of running against a given AccumuloCluster. Implementations can override this method to advertise that they cannot (or perhaps do
 +   * not) want to run the test.
 +   */
 +  public boolean canRunTest(ClusterType type) {
 +    return true;
 +  }
 +
 +  /**
 +   * Tries to give a reasonable directory which can be used to create temporary files for the test. Makes a basic attempt to create the directory if it does not
 +   * already exist.
 +   *
 +   * @return A directory which can be expected to exist on the Cluster's FileSystem
 +   */
 +  public Path getUsableDir() throws IllegalArgumentException, IOException {
 +    return cluster.getTemporaryPath();
 +  }
 +}
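
As the harness above shows, setupCluster() gates every test with Assume.assumeTrue(canRunTest(type)). A sketch of a subclass that advertises it can only run on a MiniAccumuloCluster (the class and test body are illustrative, not part of this commit):

    import org.apache.accumulo.harness.AccumuloClusterHarness;
    import org.junit.Test;

    public class ExampleMiniOnlyIT extends AccumuloClusterHarness {
      @Override
      public boolean canRunTest(ClusterType type) {
        // setupCluster() turns a false return into a JUnit assumption failure (skip)
        return ClusterType.MINI == type;
      }

      @Test
      public void createsATable() throws Exception {
        // prefix the table with the class name so the harness cleanup finds it
        getConnector().tableOperations().create(getClass().getSimpleName() + "_table");
      }
    }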

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a4afd1bf/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
index 0e486da,0000000..406f9c0
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
+++ b/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
@@@ -1,207 -1,0 +1,207 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.harness;
 +
 +import static org.junit.Assert.assertTrue;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.util.Random;
 +
 +import org.apache.accumulo.cluster.ClusterUser;
 +import org.apache.accumulo.cluster.ClusterUsers;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.KerberosToken;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.security.TablePermission;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
- import org.apache.accumulo.test.categories.MiniClusterOnlyTest;
++import org.apache.accumulo.test.categories.MiniClusterOnlyTests;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 +import org.apache.hadoop.security.UserGroupInformation;
 +import org.junit.experimental.categories.Category;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +/**
 + * Convenience class which starts a single MAC instance for a test to leverage.
 + *
 + * There isn't a good way to build this off of the {@link AccumuloClusterHarness} (as would be the logical place) because we need to start the
 + * MiniAccumuloCluster in a static BeforeClass-annotated method. Because it is static and invoked before any other BeforeClass methods in the implementation,
 + * the actual test classes can't expose any information to tell the base class that it is to perform the one-MAC-per-class semantics.
 + *
 + * Implementations of this class must be sure to invoke {@link #startMiniCluster()} or {@link #startMiniClusterWithConfig(MiniClusterConfigurationCallback)} in
 + * a method annotated with the {@link org.junit.BeforeClass} JUnit annotation and {@link #stopMiniCluster()} in a method annotated with the
 + * {@link org.junit.AfterClass} JUnit annotation.
 + */
- @Category(MiniClusterOnlyTest.class)
++@Category(MiniClusterOnlyTests.class)
 +public abstract class SharedMiniClusterBase extends AccumuloITBase implements ClusterUsers {
 +  private static final Logger log = LoggerFactory.getLogger(SharedMiniClusterBase.class);
 +  public static final String TRUE = Boolean.toString(true);
 +
 +  private static String principal = "root";
 +  private static String rootPassword;
 +  private static AuthenticationToken token;
 +  private static MiniAccumuloClusterImpl cluster;
 +  private static TestingKdc krb;
 +
 +  /**
 +   * Starts a MiniAccumuloCluster instance with the default configuration.
 +   */
 +  public static void startMiniCluster() throws Exception {
 +    startMiniClusterWithConfig(MiniClusterConfigurationCallback.NO_CALLBACK);
 +  }
 +
 +  /**
 +   * Starts a MiniAccumuloCluster instance with the default configuration but also provides the caller the opportunity to update the configuration before the
 +   * MiniAccumuloCluster is started.
 +   *
 +   * @param miniClusterCallback
 +   *          A callback to configure the minicluster before it is started.
 +   */
 +  public static void startMiniClusterWithConfig(MiniClusterConfigurationCallback miniClusterCallback) throws Exception {
 +    File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
 +    assertTrue(baseDir.mkdirs() || baseDir.isDirectory());
 +
 +    // Make a shared MAC instance instead of spinning up one per test method
 +    MiniClusterHarness harness = new MiniClusterHarness();
 +
 +    if (TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION))) {
 +      krb = new TestingKdc();
 +      krb.start();
 +      // Enabled krb auth
 +      Configuration conf = new Configuration(false);
 +      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
 +      UserGroupInformation.setConfiguration(conf);
 +      // Login as the client
 +      ClusterUser rootUser = krb.getRootUser();
 +      // Get the krb token
 +      UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
 +      token = new KerberosToken();
 +    } else {
 +      rootPassword = "rootPasswordShared1";
 +      token = new PasswordToken(rootPassword);
 +    }
 +
 +    cluster = harness.create(SharedMiniClusterBase.class.getName(), System.currentTimeMillis() + "_" + new Random().nextInt(Short.MAX_VALUE), token,
 +        miniClusterCallback, krb);
 +    cluster.start();
 +
 +    if (null != krb) {
 +      final String traceTable = Property.TRACE_TABLE.getDefaultValue();
 +      final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
 +      // Login as the trace user
 +      // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
 +      UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
 +      Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());
 +
 +      // Then, log back in as the "root" user and do the grant
 +      UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
 +      conn = cluster.getConnector(principal, token);
 +
 +      // Create the trace table
 +      conn.tableOperations().create(traceTable);
 +
 +      // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
 +      // to have the ability to read, write and alter the trace table
 +      conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
 +      conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
 +      conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
 +    }
 +  }
 +
 +  /**
 +   * Stops the MiniAccumuloCluster and related services if they are running.
 +   */
 +  public static void stopMiniCluster() throws Exception {
 +    if (null != cluster) {
 +      try {
 +        cluster.stop();
 +      } catch (Exception e) {
 +        log.error("Failed to stop minicluster", e);
 +      }
 +    }
 +    if (null != krb) {
 +      try {
 +        krb.stop();
 +      } catch (Exception e) {
 +        log.error("Failed to stop KDC", e);
 +      }
 +    }
 +  }
 +
 +  public static String getRootPassword() {
 +    return rootPassword;
 +  }
 +
 +  public static AuthenticationToken getToken() {
 +    if (token instanceof KerberosToken) {
 +      try {
 +        UserGroupInformation.loginUserFromKeytab(getPrincipal(), krb.getRootUser().getKeytab().getAbsolutePath());
 +      } catch (IOException e) {
 +        throw new RuntimeException("Failed to login", e);
 +      }
 +    }
 +    return token;
 +  }
 +
 +  public static String getPrincipal() {
 +    return principal;
 +  }
 +
 +  public static MiniAccumuloClusterImpl getCluster() {
 +    return cluster;
 +  }
 +
 +  public static File getMiniClusterDir() {
 +    return cluster.getConfig().getDir();
 +  }
 +
 +  public static Connector getConnector() {
 +    try {
 +      return getCluster().getConnector(principal, getToken());
 +    } catch (Exception e) {
 +      throw new RuntimeException(e);
 +    }
 +  }
 +
 +  public static TestingKdc getKdc() {
 +    return krb;
 +  }
 +
 +  @Override
 +  public ClusterUser getAdminUser() {
 +    if (null == krb) {
 +      return new ClusterUser(getPrincipal(), getRootPassword());
 +    } else {
 +      return krb.getRootUser();
 +    }
 +  }
 +
 +  @Override
 +  public ClusterUser getUser(int offset) {
 +    if (null == krb) {
 +      String user = SharedMiniClusterBase.class.getName() + "_" + testName.getMethodName() + "_" + offset;
 +      // Password is the username
 +      return new ClusterUser(user, user);
 +    } else {
 +      return krb.getClientPrincipal(offset);
 +    }
 +  }
 +}
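
Per the class javadoc above, implementations must drive the lifecycle themselves from static JUnit hooks. A minimal sketch (the test class is invented; the start/stop calls are the ones defined above):

    import org.apache.accumulo.harness.SharedMiniClusterBase;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class ExampleSharedMacIT extends SharedMiniClusterBase {
      @BeforeClass
      public static void setup() throws Exception {
        startMiniCluster(); // one shared MAC for every test method in this class
      }

      @AfterClass
      public static void teardown() throws Exception {
        stopMiniCluster();
      }

      @Test
      public void usesSharedCluster() throws Exception {
        getConnector().tableOperations().create("example");
      }
    }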

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a4afd1bf/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
index 8c1bc07,0000000..fc8945b
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
@@@ -1,367 -1,0 +1,372 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test;
 +
- import com.google.common.collect.ImmutableSet;
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.fail;
++
 +import java.io.File;
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.conf.ConfigurationCopy;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.ArrayByteSequence;
 +import org.apache.accumulo.core.data.ByteSequence;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
++import org.apache.accumulo.test.categories.SunnyDayTests;
 +import org.apache.accumulo.test.functional.NativeMapIT;
 +import org.apache.accumulo.tserver.InMemoryMap;
 +import org.apache.accumulo.tserver.MemKey;
 +import org.apache.accumulo.tserver.NativeMap;
 +import org.apache.hadoop.io.Text;
- import static org.junit.Assert.assertEquals;
- import static org.junit.Assert.fail;
 +import org.junit.BeforeClass;
 +import org.junit.Rule;
 +import org.junit.Test;
++import org.junit.experimental.categories.Category;
 +import org.junit.rules.TemporaryFolder;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
++import com.google.common.collect.ImmutableSet;
++
 +/**
 + * Integration Test for https://issues.apache.org/jira/browse/ACCUMULO-4148
 + * <p>
 + * User had a problem writing one Mutation with multiple KV pairs that had the same key. Doing so should write out all pairs in all mutations with a unique id. In
 + * typical operation, you would only see the last one when scanning. User had a combiner on the table, and they noticed that when using InMemoryMap with
 + * NativeMapWrapper, only the last KV pair was ever written. When InMemoryMap used DefaultMap, all KV pairs were added and the behavior worked as expected.
 + *
 + * This IT inserts a variety of Mutations with and without the same KV pairs and then inspects the result of InMemoryMap mutate, looking for the unique id stored with
 + * each key. This unique id, shown as mc= in the MemKey toString, was originally used for scan Isolation. Writing the same key multiple times in the same
 + * mutation is a secondary use case, discussed in https://issues.apache.org/jira/browse/ACCUMULO-227. In addition to NativeMapWrapper and DefaultMap,
 + * LocalityGroupMap was added in https://issues.apache.org/jira/browse/ACCUMULO-112.
 + *
 + * This test has to be an IT in accumulo-test, because libaccumulo is built in the 'integration-test' phase of accumulo-native, which currently runs right before
 + * accumulo-test. The tests for DefaultMap could move to a unit test in tserver, but they are here for convenience of viewing both at the same time.
 + */
++@Category(SunnyDayTests.class)
 +public class InMemoryMapIT {
 +
 +  private static final Logger log = LoggerFactory.getLogger(InMemoryMapIT.class);
 +
 +  @Rule
 +  public TemporaryFolder tempFolder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
 +
 +  @BeforeClass
 +  public static void ensureNativeLibrary() throws FileNotFoundException {
 +    File nativeMapLocation = NativeMapIT.nativeMapLocation();
 +    log.debug("Native map location " + nativeMapLocation);
 +    NativeMap.loadNativeLib(Collections.singletonList(nativeMapLocation));
 +    if (!NativeMap.isLoaded()) {
 +      fail("Missing the native library from " + nativeMapLocation.getAbsolutePath() + "\nYou need to build the libaccumulo binary first. "
 +          + "\nTry running 'mvn clean install -Dit.test=InMemoryMapIT -Dtest=foo -DfailIfNoTests=false -Dfindbugs.skip -Dcheckstyle.skip'");
 +      // afterwards, you can run the following
 +      // mvn clean verify -Dit.test=InMemoryMapIT -Dtest=foo -DfailIfNoTests=false -Dfindbugs.skip -Dcheckstyle.skip -pl :accumulo-test
 +    }
 +    log.debug("Native map loaded");
 +
 +  }
 +
 +  @Test
 +  public void testOneMutationOneKey() {
 +    Mutation m = new Mutation("a");
 +    m.put(new Text("1cf"), new Text("1cq"), new Value("vala".getBytes()));
 +
 +    assertEquivalentMutate(m);
 +  }
 +
 +  @Test
 +  public void testOneMutationManyKeys() throws IOException {
 +    Mutation m = new Mutation("a");
 +    for (int i = 1; i < 6; i++) {
 +      m.put(new Text("2cf" + i), new Text("2cq" + i), new Value(Integer.toString(i).getBytes()));
 +    }
 +
 +    assertEquivalentMutate(m);
 +  }
 +
 +  @Test
 +  public void testOneMutationManySameKeys() {
 +    Mutation m = new Mutation("a");
 +    for (int i = 1; i <= 5; i++) {
 +      // same keys
 +      m.put(new Text("3cf"), new Text("3cq"), new Value(Integer.toString(i).getBytes()));
 +    }
 +
 +    assertEquivalentMutate(m);
 +  }
 +
 +  @Test
 +  public void testMultipleMutationsOneKey() {
 +    Mutation m1 = new Mutation("a");
 +    m1.put(new Text("4cf"), new Text("4cq"), new Value("vala".getBytes()));
 +    Mutation m2 = new Mutation("b");
 +    m2.put(new Text("4cf"), new Text("4cq"), new Value("vala".getBytes()));
 +
 +    assertEquivalentMutate(Arrays.asList(m1, m2));
 +  }
 +
 +  @Test
 +  public void testMultipleMutationsSameOneKey() {
 +    Mutation m1 = new Mutation("a");
 +    m1.put(new Text("5cf"), new Text("5cq"), new Value("vala".getBytes()));
 +    Mutation m2 = new Mutation("a");
 +    m2.put(new Text("5cf"), new Text("5cq"), new Value("vala".getBytes()));
 +
 +    assertEquivalentMutate(Arrays.asList(m1, m2));
 +  }
 +
 +  @Test
 +  public void testMutlipleMutationsMultipleKeys() {
 +    Mutation m1 = new Mutation("a");
 +    for (int i = 1; i < 6; i++) {
 +      m1.put(new Text("6cf" + i), new Text("6cq" + i), new Value(Integer.toString(i).getBytes()));
 +    }
 +    Mutation m2 = new Mutation("b");
 +    for (int i = 1; i < 3; i++) {
 +      m2.put(new Text("6cf" + i), new Text("6cq" + i), new Value(Integer.toString(i).getBytes()));
 +    }
 +
 +    assertEquivalentMutate(Arrays.asList(m1, m2));
 +  }
 +
 +  @Test
 +  public void testMultipleMutationsMultipleSameKeys() {
 +    Mutation m1 = new Mutation("a");
 +    for (int i = 1; i < 3; i++) {
 +      m1.put(new Text("7cf"), new Text("7cq"), new Value(Integer.toString(i).getBytes()));
 +    }
 +    Mutation m2 = new Mutation("a");
 +    for (int i = 1; i < 4; i++) {
 +      m2.put(new Text("7cf"), new Text("7cq"), new Value(Integer.toString(i).getBytes()));
 +    }
 +
 +    assertEquivalentMutate(Arrays.asList(m1, m2));
 +  }
 +
 +  @Test
 +  public void testMultipleMutationsMultipleKeysSomeSame() {
 +    Mutation m1 = new Mutation("a");
 +    for (int i = 1; i < 2; i++) {
 +      m1.put(new Text("8cf"), new Text("8cq"), new Value(Integer.toString(i).getBytes()));
 +    }
 +    for (int i = 1; i < 3; i++) {
 +      m1.put(new Text("8cf" + i), new Text("8cq" + i), new Value(Integer.toString(i).getBytes()));
 +    }
 +    for (int i = 1; i < 2; i++) {
 +      m1.put(new Text("8cf" + i), new Text("8cq" + i), new Value(Integer.toString(i).getBytes()));
 +    }
 +    Mutation m2 = new Mutation("a");
 +    for (int i = 1; i < 3; i++) {
 +      m2.put(new Text("8cf"), new Text("8cq"), new Value(Integer.toString(i).getBytes()));
 +    }
 +    for (int i = 1; i < 4; i++) {
 +      m2.put(new Text("8cf" + i), new Text("8cq" + i), new Value(Integer.toString(i).getBytes()));
 +    }
 +    Mutation m3 = new Mutation("b");
 +    for (int i = 1; i < 3; i++) {
 +      m3.put(new Text("8cf" + i), new Text("8cq" + i), new Value(Integer.toString(i).getBytes()));
 +    }
 +
 +    assertEquivalentMutate(Arrays.asList(m1, m2, m3));
 +  }
 +
 +  private void assertEquivalentMutate(Mutation m) {
 +    assertEquivalentMutate(Collections.singletonList(m));
 +  }
 +
 +  private void assertEquivalentMutate(List<Mutation> mutations) {
 +    InMemoryMap defaultMap = null;
 +    InMemoryMap nativeMapWrapper = null;
 +    InMemoryMap localityGroupMap = null;
 +    InMemoryMap localityGroupMapWithNative = null;
 +
 +    try {
 +      Map<String,String> defaultMapConfig = new HashMap<>();
 +      defaultMapConfig.put(Property.TSERV_NATIVEMAP_ENABLED.getKey(), "false");
 +      defaultMapConfig.put(Property.TSERV_MEMDUMP_DIR.getKey(), tempFolder.newFolder().getAbsolutePath());
 +      defaultMapConfig.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "");
 +      Map<String,String> nativeMapConfig = new HashMap<>();
 +      nativeMapConfig.put(Property.TSERV_NATIVEMAP_ENABLED.getKey(), "true");
 +      nativeMapConfig.put(Property.TSERV_MEMDUMP_DIR.getKey(), tempFolder.newFolder().getAbsolutePath());
 +      nativeMapConfig.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "");
 +      Map<String,String> localityGroupConfig = new HashMap<>();
 +      localityGroupConfig.put(Property.TSERV_NATIVEMAP_ENABLED.getKey(), "false");
 +      localityGroupConfig.put(Property.TSERV_MEMDUMP_DIR.getKey(), tempFolder.newFolder().getAbsolutePath());
 +      Map<String,String> localityGroupNativeConfig = new HashMap<>();
 +      localityGroupNativeConfig.put(Property.TSERV_NATIVEMAP_ENABLED.getKey(), "true");
 +      localityGroupNativeConfig.put(Property.TSERV_MEMDUMP_DIR.getKey(), tempFolder.newFolder().getAbsolutePath());
 +
 +      defaultMap = new InMemoryMap(new ConfigurationCopy(defaultMapConfig));
 +      nativeMapWrapper = new InMemoryMap(new ConfigurationCopy(nativeMapConfig));
 +      localityGroupMap = new InMemoryMap(updateConfigurationForLocalityGroups(new ConfigurationCopy(localityGroupConfig)));
 +      localityGroupMapWithNative = new InMemoryMap(updateConfigurationForLocalityGroups(new ConfigurationCopy(localityGroupNativeConfig)));
 +    } catch (Exception e) {
 +      log.error("Error getting new InMemoryMap ", e);
 +      fail(e.getMessage());
 +    }
 +
 +    // ensure the maps are correct type
 +    assertEquals("Not a DefaultMap", InMemoryMap.TYPE_DEFAULT_MAP, defaultMap.getMapType());
 +    assertEquals("Not a NativeMapWrapper", InMemoryMap.TYPE_NATIVE_MAP_WRAPPER, nativeMapWrapper.getMapType());
 +    assertEquals("Not a LocalityGroupMap", InMemoryMap.TYPE_LOCALITY_GROUP_MAP, localityGroupMap.getMapType());
 +    assertEquals("Not a LocalityGroupMap with native", InMemoryMap.TYPE_LOCALITY_GROUP_MAP_NATIVE, localityGroupMapWithNative.getMapType());
 +
 +    defaultMap.mutate(mutations);
 +    nativeMapWrapper.mutate(mutations);
 +    localityGroupMap.mutate(mutations);
 +    localityGroupMapWithNative.mutate(mutations);
 +
 +    // let's use the transitive property to assert all four are equivalent
 +    assertMutatesEquivalent(mutations, defaultMap, nativeMapWrapper);
 +    assertMutatesEquivalent(mutations, defaultMap, localityGroupMap);
 +    assertMutatesEquivalent(mutations, defaultMap, localityGroupMapWithNative);
 +  }
 +
 +  /**
 +   * Assert that a set of mutations mutate to equivalent map in both of the InMemoryMaps.
 +   * <p>
 +   * In this case, equivalent means 3 things.
 +   * <ul>
 +   * <li>The size of both maps generated is equal to the number of key value pairs in all mutations passed</li>
 +   * <li>The size of the map generated from the first InMemoryMap equals the size of the map generated from the second</li>
 +   * <li>Each key value pair in each mutated map has a unique id (kvCount)</li>
 +   * </ul>
 +   *
 +   * @param mutations
 +   *          List of mutations
 +   * @param imm1
 +   *          InMemoryMap to compare
 +   * @param imm2
 +   *          InMemoryMap to compare
 +   */
 +  private void assertMutatesEquivalent(List<Mutation> mutations, InMemoryMap imm1, InMemoryMap imm2) {
 +    int mutationKVPairs = countKVPairs(mutations);
 +
 +    List<MemKey> memKeys1 = getArrayOfMemKeys(imm1);
 +    List<MemKey> memKeys2 = getArrayOfMemKeys(imm2);
 +
 +    assertEquals("Not all key value pairs included: " + dumpInMemoryMap(imm1, memKeys1), mutationKVPairs, memKeys1.size());
 +    assertEquals("InMemoryMaps differ in size: " + dumpInMemoryMap(imm1, memKeys1) + "\n" + dumpInMemoryMap(imm2, memKeys2), memKeys1.size(), memKeys2.size());
 +    assertEquals("InMemoryMap did not have distinct kvCounts " + dumpInMemoryMap(imm1, memKeys1), mutationKVPairs, getUniqKVCount(memKeys1));
 +    assertEquals("InMemoryMap did not have distinct kvCounts " + dumpInMemoryMap(imm2, memKeys2), mutationKVPairs, getUniqKVCount(memKeys2));
 +
 +  }
 +
 +  private int countKVPairs(List<Mutation> mutations) {
 +    int count = 0;
 +    for (Mutation m : mutations) {
 +      count += m.size();
 +    }
 +    return count;
 +  }
 +
 +  private List<MemKey> getArrayOfMemKeys(InMemoryMap imm) {
 +    SortedKeyValueIterator<Key,Value> skvi = imm.compactionIterator();
 +
 +    List<MemKey> memKeys = new ArrayList<>();
 +    try {
 +      skvi.seek(new Range(), new ArrayList<ByteSequence>(), false); // everything
 +      while (skvi.hasTop()) {
 +        memKeys.add((MemKey) skvi.getTopKey());
 +        skvi.next();
 +      }
 +    } catch (IOException ex) {
 +      log.error("Error getting memkeys", ex);
 +      throw new RuntimeException(ex);
 +    }
 +
 +    return memKeys;
 +  }
 +
 +  private String dumpInMemoryMap(InMemoryMap map, List<MemKey> memkeys) {
 +    StringBuilder sb = new StringBuilder();
 +    sb.append("InMemoryMap type ");
 +    sb.append(map.getMapType());
 +    sb.append("\n");
 +
 +    for (MemKey mk : memkeys) {
 +      sb.append("  ");
 +      sb.append(mk.toString());
 +      sb.append("\n");
 +    }
 +
 +    return sb.toString();
 +  }
 +
 +  private int getUniqKVCount(List<MemKey> memKeys) {
 +    List<Integer> kvCounts = new ArrayList<>();
 +    for (MemKey m : memKeys) {
 +      kvCounts.add(m.getKVCount());
 +    }
 +    return ImmutableSet.copyOf(kvCounts).size();
 +  }
 +
 +  private ConfigurationCopy updateConfigurationForLocalityGroups(ConfigurationCopy configuration) {
 +    Map<String,Set<ByteSequence>> locGroups = getLocalityGroups();
 +    StringBuilder enabledLGs = new StringBuilder();
 +
 +    for (Entry<String,Set<ByteSequence>> entry : locGroups.entrySet()) {
 +      if (enabledLGs.length() > 0) {
 +        enabledLGs.append(",");
 +      }
 +
 +      StringBuilder value = new StringBuilder();
 +      for (ByteSequence bytes : entry.getValue()) {
 +        if (value.length() > 0) {
 +          value.append(",");
 +        }
 +        value.append(new String(bytes.toArray()));
 +      }
 +      configuration.set("table.group." + entry.getKey(), value.toString());
 +      enabledLGs.append(entry.getKey());
 +    }
 +    configuration.set(Property.TABLE_LOCALITY_GROUPS, enabledLGs.toString());
 +    return configuration;
 +  }
 +
 +  private Map<String,Set<ByteSequence>> getLocalityGroups() {
 +    Map<String,Set<ByteSequence>> locgro = new HashMap<>();
 +    locgro.put("a", newCFSet("cf", "cf2"));
 +    locgro.put("b", newCFSet("cf3", "cf4"));
 +    return locgro;
 +  }
 +
 +  // from InMemoryMapTest
 +  private Set<ByteSequence> newCFSet(String... cfs) {
 +    HashSet<ByteSequence> cfSet = new HashSet<>();
 +    for (String cf : cfs) {
 +      cfSet.add(new ArrayByteSequence(cf));
 +    }
 +    return cfSet;
 +  }
 +
 +}
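
The ACCUMULO-4148 scenario this IT covers reduces to a single mutation carrying the same key twice; a condensed sketch of the expectation (values invented):

    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.Text;

    class DuplicateKeySketch {
      static Mutation duplicateKeyMutation() {
        Mutation m = new Mutation("row");
        m.put(new Text("cf"), new Text("cq"), new Value("v1".getBytes()));
        m.put(new Text("cf"), new Text("cq"), new Value("v2".getBytes()));
        // m.size() == 2: after InMemoryMap.mutate(), both pairs must survive with
        // distinct kvCounts, whichever map implementation backs the InMemoryMap.
        return m;
      }
    }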
