http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
deleted file mode 100644
index 3fcbcfb..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.master.thrift.MasterClientService;
-import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
-import org.apache.accumulo.core.master.thrift.TableInfo;
-import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class SimpleBalancerFairnessIT extends ConfigurableMacBase {
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setProperty(Property.TSERV_MAXMEM, "10K");
-    cfg.setProperty(Property.TSERV_MAJC_DELAY, "0");
-    cfg.setMemory(ServerType.TABLET_SERVER, cfg.getMemory(ServerType.TABLET_SERVER) * 3, MemoryUnit.BYTE);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 10 * 60;
-  }
-
-  @Test
-  public void simpleBalancerFairness() throws Exception {
-    Connector c = getConnector();
-    c.tableOperations().create("test_ingest");
-    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
-    c.tableOperations().create("unused");
-    TreeSet<Text> splits = TestIngest.getSplitPoints(0, 10000000, 500);
-    log.info("Creating " + splits.size() + " splits");
-    c.tableOperations().addSplits("unused", splits);
-    List<String> tservers = c.instanceOperations().getTabletServers();
-    TestIngest.Opts opts = new TestIngest.Opts();
-    opts.rows = 50000;
-    opts.setPrincipal("root");
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
-    c.tableOperations().flush("test_ingest", null, null, false);
-    UtilWaitThread.sleep(45 * 1000);
-    Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
-    ClientContext context = new ClientContext(c.getInstance(), creds, getClientConfig());
-
-    MasterMonitorInfo stats = null;
-    int unassignedTablets = 1;
-    for (int i = 0; unassignedTablets > 0 && i < 10; i++) {
-      MasterClientService.Iface client = null;
-      try {
-        client = MasterClient.getConnectionWithRetry(context);
-        stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
-      } finally {
-        if (client != null)
-          MasterClient.close(client);
-      }
-      unassignedTablets = stats.getUnassignedTablets();
-      if (unassignedTablets > 0) {
-        log.info("Found " + unassignedTablets + " unassigned tablets, sleeping 3 seconds for tablet assignment");
-        Thread.sleep(3000);
-      }
-    }
-
-    assertEquals("Unassigned tablets were not assigned within 30 seconds", 0, unassignedTablets);
-
-    // Compute online tablets per tserver
-    List<Integer> counts = new ArrayList<Integer>();
-    for (TabletServerStatus server : stats.tServerInfo) {
-      int count = 0;
-      for (TableInfo table : server.tableMap.values()) {
-        count += table.onlineTablets;
-      }
-      counts.add(count);
-    }
-    assertTrue("Expected to have at least two TabletServers", counts.size() > 1);
-    for (int i = 1; i < counts.size(); i++) {
-      int diff = Math.abs(counts.get(0) - counts.get(i));
-      assertTrue("Expected difference in tablets to be less than or equal to " + tservers.size() + " but was " + diff + ". Counts " + counts,
-          diff <= tservers.size());
-    }
-  }
-
-}
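The fairness assertion in the deleted test reduces to a spread check: no tablet server's online-tablet count may differ from the first server's count by more than the number of tablet servers. A minimal standalone sketch of that check (class and method names here are hypothetical, not part of the deleted test or the Accumulo API):

import java.util.Arrays;
import java.util.List;

public class BalanceSpreadCheck {
  // Returns true when every server's tablet count is within `tolerance`
  // of the first server's count, mirroring the loop in the test above.
  static boolean isBalanced(List<Integer> countsPerServer, int tolerance) {
    for (int i = 1; i < countsPerServer.size(); i++) {
      if (Math.abs(countsPerServer.get(0) - countsPerServer.get(i)) > tolerance)
        return false;
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(isBalanced(Arrays.asList(50, 52, 49), 3)); // true
    System.out.println(isBalanced(Arrays.asList(90, 10, 50), 3)); // false
  }
}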
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
deleted file mode 100644
index 8cece0b..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-/**
- * This test recreates issue ACCUMULO-516. Until that issue is fixed this test should time out.
- */
-public class SparseColumnFamilyIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  @Test
-  public void sparseColumnFamily() throws Exception {
-    String scftt = getUniqueNames(1)[0];
-    Connector c = getConnector();
-    c.tableOperations().create(scftt);
-
-    BatchWriter bw = c.createBatchWriter(scftt, new BatchWriterConfig());
-
-    // create file in the tablet that has mostly column family 0, with a few entries for column family 1
-
-    bw.addMutation(nm(0, 1, 0));
-    for (int i = 1; i < 99999; i++) {
-      bw.addMutation(nm(i * 2, 0, i));
-    }
-    bw.addMutation(nm(99999 * 2, 1, 99999));
-    bw.flush();
-
-    c.tableOperations().flush(scftt, null, null, true);
-
-    // create a file that has column family 1 and 0 interleaved
-    for (int i = 0; i < 100000; i++) {
-      bw.addMutation(nm(i * 2 + 1, i % 2 == 0 ? 0 : 1, i));
-    }
-    bw.close();
-
-    c.tableOperations().flush(scftt, null, null, true);
-
-    Scanner scanner = c.createScanner(scftt, Authorizations.EMPTY);
-
-    for (int i = 0; i < 200; i++) {
-
-      // every time we search for column family 1, it will scan the entire file
-      // that has mostly column family 0 until the bug is fixed
-      scanner.setRange(new Range(String.format("%06d", i), null));
-      scanner.clearColumns();
-      scanner.setBatchSize(3);
-      scanner.fetchColumnFamily(new Text(String.format("%03d", 1)));
-
-      Iterator<Entry<Key,Value>> iter = scanner.iterator();
-      if (iter.hasNext()) {
-        Entry<Key,Value> entry = iter.next();
-        if (!"001".equals(entry.getKey().getColumnFamilyData().toString())) {
-          throw new Exception();
-        }
-      }
-    }
-  }
-
-  private Mutation nm(int row, int cf, int val) {
-    Mutation m = new Mutation(String.format("%06d", row));
-    m.put(String.format("%03d", cf), "", "" + val);
-    return m;
-  }
-}
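The scan pattern this test exercises, fetching a rare column family out of files dominated by another family, is also the textbook case for locality groups, which physically partition families into separate file sections. A hedged sketch of that mitigation, assuming an existing Connector c and a populated table; the group name "sparse" and table name are illustrative:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.io.Text;

// ... inside a method with an org.apache.accumulo.core.client.Connector c:
Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
groups.put("sparse", Collections.singleton(new Text("001"))); // isolate the rare family
c.tableOperations().setLocalityGroups("mytable", groups);
// rewrite existing files so the grouping takes effect on disk
c.tableOperations().compact("mytable", null, null, true, true);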
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
deleted file mode 100644
index 49cd2aa..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.InstanceOperations;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.util.CheckForMetadataProblems;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-
-public class SplitIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(SplitIT.class);
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setProperty(Property.TSERV_MAXMEM, "5K");
-    cfg.setProperty(Property.TSERV_MAJC_DELAY, "100ms");
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 4 * 60;
-  }
-
-  private String tservMaxMem, tservMajcDelay;
-
-  @Before
-  public void alterConfig() throws Exception {
-    Assume.assumeTrue(ClusterType.MINI == getClusterType());
-
-    InstanceOperations iops = getConnector().instanceOperations();
-    Map<String,String> config = iops.getSystemConfiguration();
-    tservMaxMem = config.get(Property.TSERV_MAXMEM.getKey());
-    tservMajcDelay = config.get(Property.TSERV_MAJC_DELAY.getKey());
-
-    if (!tservMajcDelay.equals("100ms")) {
-      iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
-    }
-
-    // Property.TSERV_MAXMEM can't be altered on a running server
-    boolean restarted = false;
-    if (!tservMaxMem.equals("5K")) {
-      iops.setProperty(Property.TSERV_MAXMEM.getKey(), "5K");
-      getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
-      getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
-      restarted = true;
-    }
-
-    // If we restarted the tservers, we don't need to re-wait for the majc delay
-    if (!restarted) {
-      long millis = AccumuloConfiguration.getTimeInMillis(tservMajcDelay);
-      log.info("Waiting for majc delay period: {}ms", millis);
-      Thread.sleep(millis);
-      log.info("Finished waiting for majc delay period");
-    }
-  }
-
-  @After
-  public void resetConfig() throws Exception {
-    if (null != tservMaxMem) {
-      log.info("Resetting {}={}", Property.TSERV_MAXMEM.getKey(), tservMaxMem);
-      getConnector().instanceOperations().setProperty(Property.TSERV_MAXMEM.getKey(), tservMaxMem);
-      tservMaxMem = null;
-      getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
-      getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
-    }
-    if (null != tservMajcDelay) {
-      log.info("Resetting {}={}", Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
-      getConnector().instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
-      tservMajcDelay = null;
-    }
-  }
-
-  @Test
-  public void tabletShouldSplit() throws Exception {
-    Connector c = getConnector();
-    String table = getUniqueNames(1)[0];
-    c.tableOperations().create(table);
-    c.tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "256K");
-    c.tableOperations().setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
-    TestIngest.Opts opts = new TestIngest.Opts();
-    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
-    opts.rows = 100000;
-    opts.setTableName(table);
-
-    ClientConfiguration clientConfig = cluster.getClientConfig();
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      opts.updateKerberosCredentials(clientConfig);
-      vopts.updateKerberosCredentials(clientConfig);
-    } else {
-      opts.setPrincipal(getAdminPrincipal());
-      vopts.setPrincipal(getAdminPrincipal());
-    }
-
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
-    vopts.rows = opts.rows;
-    vopts.setTableName(table);
-    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-    while (c.tableOperations().listSplits(table).size() < 10) {
-      UtilWaitThread.sleep(15 * 1000);
-    }
-    String id = c.tableOperations().tableIdMap().get(table);
-    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    KeyExtent extent = new KeyExtent(new Text(id), null, null);
-    s.setRange(extent.toMetadataRange());
-    MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
-    int count = 0;
-    int shortened = 0;
-    for (Entry<Key,Value> entry : s) {
-      extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
-      if (extent.getEndRow() != null && extent.getEndRow().toString().length() < 14)
-        shortened++;
-      count++;
-    }
-
-    assertTrue("Shortened should be greater than zero: " + shortened, shortened > 0);
-    assertTrue("Count should be greater than 10: " + count, count > 10);
-
-    String[] args;
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      ClusterUser rootUser = getAdminUser();
-      args = new String[] {"-i", cluster.getInstanceName(), "-u", rootUser.getPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-z",
-          cluster.getZooKeepers()};
-    } else {
-      PasswordToken token = (PasswordToken) getAdminToken();
-      args = new String[] {"-i", cluster.getInstanceName(), "-u", "root", "-p", new String(token.getPassword(), Charsets.UTF_8), "-z", cluster.getZooKeepers()};
-    }
-
-    assertEquals(0, getCluster().getClusterControl().exec(CheckForMetadataProblems.class, args));
-  }
-
-  @Test
-  public void interleaveSplit() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
-    c.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
-    UtilWaitThread.sleep(5 * 1000);
-    ReadWriteIT.interleaveTest(c, tableName);
-    UtilWaitThread.sleep(5 * 1000);
-    int numSplits = c.tableOperations().listSplits(tableName).size();
-    while (numSplits <= 20) {
-      log.info("Waiting for splits to happen");
-      Thread.sleep(2000);
-      numSplits = c.tableOperations().listSplits(tableName).size();
-    }
-    assertTrue("Expected more than 20 splits, saw " + numSplits, numSplits > 20);
-  }
-
-  @Test
-  public void deleteSplit() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
-    ClientConfiguration clientConfig = getCluster().getClientConfig();
-    String password = null, keytab = null;
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      keytab = getAdminUser().getKeytab().getAbsolutePath();
-    } else {
-      password = new String(((PasswordToken) getAdminToken()).getPassword(), Charsets.UTF_8);
-    }
-    DeleteIT.deleteTest(c, getCluster(), getAdminPrincipal(), password, tableName, keytab);
-    c.tableOperations().flush(tableName, null, null, true);
-    for (int i = 0; i < 5; i++) {
-      UtilWaitThread.sleep(10 * 1000);
-      if (c.tableOperations().listSplits(tableName).size() > 20)
-        break;
-    }
-    assertTrue(c.tableOperations().listSplits(tableName).size() > 20);
-  }
-
-}
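Both interleaveSplit and deleteSplit above poll listSplits() in open-ended or fixed-count loops. A small bounded-wait helper makes such checks fail fast when splits never arrive; this is a sketch against the same Connector API the tests use, and the helper itself is hypothetical:

import org.apache.accumulo.core.client.Connector;

public class SplitWait {
  // Poll until the table has at least `min` splits or `timeoutMs` elapses;
  // returns the final split count so callers can assert on it.
  static int waitForSplits(Connector c, String table, int min, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int n = c.tableOperations().listSplits(table).size();
    while (n < min && System.currentTimeMillis() < deadline) {
      Thread.sleep(2000);
      n = c.tableOperations().listSplits(table).size();
    }
    return n;
  }
}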
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
deleted file mode 100644
index 4d13e2a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.ScannerImpl;
-import org.apache.accumulo.core.client.impl.Writer;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.file.rfile.RFile;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
-import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
-import org.apache.accumulo.server.AccumuloServerContext;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
-import org.apache.accumulo.server.fs.FileRef;
-import org.apache.accumulo.server.master.state.Assignment;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.tablets.TabletTime;
-import org.apache.accumulo.server.util.FileUtil;
-import org.apache.accumulo.server.util.MasterMetadataUtil;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.zookeeper.TransactionWatcher;
-import org.apache.accumulo.server.zookeeper.ZooLock;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.tserver.TabletServer;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Multimap;
-
-public class SplitRecoveryIT extends ConfigurableMacBase {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  private KeyExtent nke(String table, String endRow, String prevEndRow) {
-    return new KeyExtent(new Text(table), endRow == null ? null : new Text(endRow), prevEndRow == null ? null : new Text(prevEndRow));
-  }
-
-  private void run() throws Exception {
-    Instance inst = HdfsZooInstance.getInstance();
-    AccumuloServerContext c = new AccumuloServerContext(new ServerConfigurationFactory(inst));
-    String zPath = ZooUtil.getRoot(inst) + "/testLock";
-    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    zoo.putPersistentData(zPath, new byte[0], NodeExistsPolicy.OVERWRITE);
-    ZooLock zl = new ZooLock(zPath);
-    boolean gotLock = zl.tryLock(new LockWatcher() {
-
-      @Override
-      public void lostLock(LockLossReason reason) {
-        System.exit(-1);
-
-      }
-
-      @Override
-      public void unableToMonitorLockNode(Throwable e) {
-        System.exit(-1);
-      }
-    }, "foo".getBytes(UTF_8));
-
-    if (!gotLock) {
-      System.err.println("Failed to get lock " + zPath);
-    }
-
-    // run test for a table with one tablet
-    runSplitRecoveryTest(c, 0, "sp", 0, zl, nke("foo0", null, null));
-    runSplitRecoveryTest(c, 1, "sp", 0, zl, nke("foo1", null, null));
-
-    // run test for tables with two tablets, run test on first and last tablet
-    runSplitRecoveryTest(c, 0, "k", 0, zl, nke("foo2", "m", null), nke("foo2", null, "m"));
-    runSplitRecoveryTest(c, 1, "k", 0, zl, nke("foo3", "m", null), nke("foo3", null, "m"));
-    runSplitRecoveryTest(c, 0, "o", 1, zl, nke("foo4", "m", null), nke("foo4", null, "m"));
-    runSplitRecoveryTest(c, 1, "o", 1, zl, nke("foo5", "m", null), nke("foo5", null, "m"));
-
-    // run test for table w/ three tablets, run test on middle tablet
-    runSplitRecoveryTest(c, 0, "o", 1, zl, nke("foo6", "m", null), nke("foo6", "r", "m"), nke("foo6", null, "r"));
-    runSplitRecoveryTest(c, 1, "o", 1, zl, nke("foo7", "m", null), nke("foo7", "r", "m"), nke("foo7", null, "r"));
-
-    // run test for table w/ three tablets, run test on first
-    runSplitRecoveryTest(c, 0, "g", 0, zl, nke("foo8", "m", null), nke("foo8", "r", "m"), nke("foo8", null, "r"));
-    runSplitRecoveryTest(c, 1, "g", 0, zl, nke("foo9", "m", null), nke("foo9", "r", "m"), nke("foo9", null, "r"));
-
-    // run test for table w/ three tablets, run test on last tablet
-    runSplitRecoveryTest(c, 0, "w", 2, zl, nke("fooa", "m", null), nke("fooa", "r", "m"), nke("fooa", null, "r"));
-    runSplitRecoveryTest(c, 1, "w", 2, zl, nke("foob", "m", null), nke("foob", "r", "m"), nke("foob", null, "r"));
-  }
-
-  private void runSplitRecoveryTest(AccumuloServerContext context, int failPoint, String mr, int extentToSplit, ZooLock zl, KeyExtent... extents)
-      throws Exception {
-
-    Text midRow = new Text(mr);
-
-    SortedMap<FileRef,DataFileValue> splitMapFiles = null;
-
-    for (int i = 0; i < extents.length; i++) {
-      KeyExtent extent = extents[i];
-
-      String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
-      MetadataTableUtil.addTablet(extent, tdir, context, TabletTime.LOGICAL_TIME_ID, zl);
-      SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
-      mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
-
-      if (i == extentToSplit) {
-        splitMapFiles = mapFiles;
-      }
-      int tid = 0;
-      TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
-      MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", context, zl);
-    }
-
-    KeyExtent extent = extents[extentToSplit];
-
-    KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
-    KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
-
-    splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
-  }
-
-  private void splitPartiallyAndRecover(AccumuloServerContext context, KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio,
-      SortedMap<FileRef,DataFileValue> mapFiles, Text midRow, String location, int steps, ZooLock zl) throws Exception {
-
-    SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
-    SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
-    List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
-
-    MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes,
-        highDatafileSizes, highDatafilesToRemove);
-
-    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, context, zl);
-    TServerInstance instance = new TServerInstance(location, zl.getSessionId());
-    Writer writer = MetadataTableUtil.getMetadataTable(context);
-    Assignment assignment = new Assignment(high, instance);
-    Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
-    assignment.server.putFutureLocation(m);
-    writer.update(m);
-
-    if (steps >= 1) {
-      Multimap<Long,FileRef> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, extent);
-      MasterMetadataUtil.addNewTablet(context, low, "/lowDir", instance, lowDatafileSizes, bulkFiles, TabletTime.LOGICAL_TIME_ID + "0", -1l, -1l, zl);
-    }
-    if (steps >= 2) {
-      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context, zl);
-    }
-
-    TabletServer.verifyTabletInformation(context, high, instance, null, "127.0.0.1:0", zl);
-
-    if (steps >= 1) {
-      ensureTabletHasNoUnexpectedMetadataEntries(context, low, lowDatafileSizes);
-      ensureTabletHasNoUnexpectedMetadataEntries(context, high, highDatafileSizes);
-
-      Multimap<Long,FileRef> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, low);
-      Multimap<Long,FileRef> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, high);
-
-      if (!lowBulkFiles.equals(highBulkFiles)) {
-        throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
-      }
-
-      if (lowBulkFiles.size() == 0) {
-        throw new Exception(" no bulk files " + low);
-      }
-    } else {
-      ensureTabletHasNoUnexpectedMetadataEntries(context, extent, mapFiles);
-    }
-  }
-
-  private void ensureTabletHasNoUnexpectedMetadataEntries(AccumuloServerContext context, KeyExtent extent, SortedMap<FileRef,DataFileValue> expectedMapFiles)
-      throws Exception {
-    Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
-    scanner.setRange(extent.toMetadataRange());
-
-    HashSet<ColumnFQ> expectedColumns = new HashSet<ColumnFQ>();
-    expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
-    expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
-    expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
-    expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
-
-    HashSet<Text> expectedColumnFamilies = new HashSet<Text>();
-    expectedColumnFamilies.add(DataFileColumnFamily.NAME);
-    expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
-    expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
-    expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
-    expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
-
-    Iterator<Entry<Key,Value>> iter = scanner.iterator();
-    while (iter.hasNext()) {
-      Key key = iter.next().getKey();
-
-      if (!key.getRow().equals(extent.getMetadataEntry())) {
-        throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
-      }
-
-      if (expectedColumnFamilies.contains(key.getColumnFamily())) {
-        continue;
-      }
-
-      if (expectedColumns.remove(new ColumnFQ(key))) {
-        continue;
-      }
-
-      throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
-    }
-    System.out.println("expectedColumns " + expectedColumns);
-    if (expectedColumns.size() > 0) {
-      throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
-    }
-
-    SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, context);
-    verifySame(expectedMapFiles, fixedMapFiles);
-  }
-
-  private void verifySame(SortedMap<FileRef,DataFileValue> datafileSizes, SortedMap<FileRef,DataFileValue> fixedDatafileSizes) throws Exception {
-
-    if (!datafileSizes.keySet().containsAll(fixedDatafileSizes.keySet()) || !fixedDatafileSizes.keySet().containsAll(datafileSizes.keySet())) {
-      throw new Exception("Key sets not the same " + datafileSizes.keySet() + " != " + fixedDatafileSizes.keySet());
-    }
-
-    for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
-      DataFileValue dfv = entry.getValue();
-      DataFileValue otherDfv = fixedDatafileSizes.get(entry.getKey());
-
-      if (!dfv.equals(otherDfv)) {
-        throw new Exception(entry.getKey() + " dfv not equal " + dfv + " " + otherDfv);
-      }
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    new SplitRecoveryIT().run();
-  }
-
-  @Test
-  public void test() throws Exception {
-    assertEquals(0, exec(SplitRecoveryIT.class).waitFor());
-  }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SslIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SslIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SslIT.java
deleted file mode 100644
index 13248d0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SslIT.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.Test;
-
-/**
- * Do a selection of ITs with SSL turned on that cover a range of different connection scenarios. Note that you can run *all* the ITs against SSL-enabled mini
- * clusters with `mvn verify -DuseSslForIT`
- *
- */
-public class SslIT extends ConfigurableMacBase {
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 6 * 60;
-  }
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    super.configure(cfg, hadoopCoreSite);
-    configureForSsl(cfg, getSslDir(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName())));
-  }
-
-  @Test
-  public void binary() throws AccumuloException, AccumuloSecurityException, Exception {
-    String tableName = getUniqueNames(1)[0];
-    getConnector().tableOperations().create(tableName);
-    BinaryIT.runTest(getConnector(), tableName);
-  }
-
-  @Test
-  public void concurrency() throws Exception {
-    ConcurrencyIT.runTest(getConnector(), getUniqueNames(1)[0]);
-  }
-
-  @Test
-  public void adminStop() throws Exception {
-    ShutdownIT.runAdminStopTest(getConnector(), getCluster());
-  }
-
-  @Test
-  public void bulk() throws Exception {
-    BulkIT.runTest(getConnector(), FileSystem.getLocal(new Configuration(false)), new Path(getCluster().getConfig().getDir().getAbsolutePath(), "tmp"), "root",
-        getUniqueNames(1)[0], this.getClass().getName(), testName.getMethodName());
-  }
-
-  @Test
-  public void mapReduce() throws Exception {
-    MapReduceIT.runTest(getConnector(), getCluster());
-  }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java
deleted file mode 100644
index bb00b19..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Map;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-/**
- * Run all the same tests as SslIT, but with client auth turned on.
- *
- * All the methods are overridden just to make it easier to run individual tests from an IDE.
- *
- */
-public class SslWithClientAuthIT extends SslIT {
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    super.configure(cfg, hadoopCoreSite);
-    Map<String,String> site = cfg.getSiteConfig();
-    site.put(Property.INSTANCE_RPC_SSL_CLIENT_AUTH.getKey(), "true");
-    cfg.setSiteConfig(site);
-  }
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 8 * 60;
-  }
-
-  @Override
-  @Test
-  public void binary() throws AccumuloException, AccumuloSecurityException, Exception {
-    super.binary();
-  }
-
-  @Override
-  @Test
-  public void concurrency() throws Exception {
-    super.concurrency();
-  }
-
-  @Override
-  @Test
-  public void adminStop() throws Exception {
-    super.adminStop();
-  }
-
-  @Override
-  @Test
-  public void bulk() throws Exception {
-    super.bulk();
-  }
-
-  @Override
-  @Test
-  public void mapReduce() throws Exception {
-    super.mapReduce();
-  }
-}
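For reference, a client reaches an SSL-enabled instance like the ones these tests stand up through ClientConfiguration. This is a sketch against the 1.7-era client API the tests target; the instance name, ZooKeeper host, truststore path, and credentials are placeholder values:

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

// ... inside a method; all values below are placeholders:
ClientConfiguration clientConf = ClientConfiguration.loadDefault()
    .withInstance("miniInstance")
    .withZkHosts("localhost:2181")
    .withSsl(true)
    .withTruststore("/path/to/truststore.jks"); // client must trust the cluster's CA
Connector conn = new ZooKeeperInstance(clientConf).getConnector("root", new PasswordToken("secret"));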
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
deleted file mode 100644
index 57a8a6f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.start.TestMain;
-import org.junit.Test;
-
-public class StartIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 30;
-  }
-
-  @Test
-  public void test() throws Exception {
-    ClusterControl control = getCluster().getClusterControl();
-
-    assertNotEquals(0, control.exec(TestMain.class, new String[] {"exception"}));
-    assertEquals(0, control.exec(TestMain.class, new String[] {"success"}));
-    assertNotEquals(0, control.exec(TestMain.class, new String[0]));
-  }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
deleted file mode 100644
index a4678a7..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
+++ /dev/null
@@ -1,108 +0,0 @@
-package org.apache.accumulo.test.functional;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.FileNotFoundException;
-
-import org.apache.accumulo.cluster.AccumuloCluster;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.hamcrest.CoreMatchers;
-import org.junit.Assume;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class TableIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void test() throws Exception {
-    Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
-
-    AccumuloCluster cluster = getCluster();
-    MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
-    String rootPath = mac.getConfig().getDir().getAbsolutePath();
-
-    Connector c = getConnector();
-    TableOperations to = c.tableOperations();
-    String tableName = getUniqueNames(1)[0];
-    to.create(tableName);
-
-    TestIngest.Opts opts = new TestIngest.Opts();
-    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
-    ClientConfiguration clientConfig = getCluster().getClientConfig();
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      opts.updateKerberosCredentials(clientConfig);
-      vopts.updateKerberosCredentials(clientConfig);
-    } else {
-      opts.setPrincipal(getAdminPrincipal());
-      vopts.setPrincipal(getAdminPrincipal());
-    }
-
-    opts.setTableName(tableName);
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
-    to.flush(tableName, null, null, true);
-    vopts.setTableName(tableName);
-    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-    String id = to.tableIdMap().get(tableName);
-    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    s.setRange(new KeyExtent(new Text(id), null, null).toMetadataRange());
-    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-    assertTrue(Iterators.size(s.iterator()) > 0);
-
-    FileSystem fs = getCluster().getFileSystem();
-    assertTrue(fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length > 0);
-    to.delete(tableName);
-    assertEquals(0, Iterators.size(s.iterator()));
-    try {
-      assertEquals(0, fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length);
-    } catch (FileNotFoundException ex) {
-      // that's fine, too
-    }
-    assertNull(to.tableIdMap().get(tableName));
-    to.create(tableName);
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
-    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-    to.delete(tableName);
-  }
-
-}
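The metadata check in TableIT is a generally useful idiom: the row range covering a table's tablets in the metadata table is derived from a KeyExtent built over the table id. A minimal sketch of that lookup, assuming a Connector c and a tableId string already obtained from tableIdMap(); every call here appears verbatim in the test above:

import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;
import com.google.common.collect.Iterators;

// ... inside a method with a Connector c and String tableId:
Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
// null end/prev rows means the extent spans the whole table
s.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
int fileEntries = Iterators.size(s.iterator()); // number of file entries recorded for the table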
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
deleted file mode 100644
index d2b1416..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class TabletIT extends AccumuloClusterHarness {
-
-  private static final int N = 1000;
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    Map<String,String> siteConfig = cfg.getSiteConfig();
-    siteConfig.put(Property.TSERV_MAXMEM.getKey(), "128M");
-    cfg.setDefaultMemory(256, MemoryUnit.MEGABYTE);
-    cfg.setSiteConfig(siteConfig);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void createTableTest() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    createTableTest(tableName, false);
-    createTableTest(tableName, true);
-  }
-
-  public void createTableTest(String tableName, boolean readOnly) throws Exception {
-    // create the test table within accumulo
-    Connector connector = getConnector();
-
-    if (!readOnly) {
-      TreeSet<Text> keys = new TreeSet<Text>();
-      for (int i = N / 100; i < N; i += N / 100) {
-        keys.add(new Text(String.format("%05d", i)));
-      }
-
-      // presplit
-      connector.tableOperations().create(tableName);
-      connector.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "200");
-      connector.tableOperations().addSplits(tableName, keys);
-      BatchWriter b = connector.createBatchWriter(tableName, new BatchWriterConfig());
-
-      // populate
-      for (int i = 0; i < N; i++) {
-        Mutation m = new Mutation(new Text(String.format("%05d", i)));
-        m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes(UTF_8)));
-        b.addMutation(m);
-      }
-      b.close();
-    }
-
-    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
-    int count = 0;
-    for (Entry<Key,Value> elt : scanner) {
-      String expected = String.format("%05d", count);
-      assert (elt.getKey().getRow().toString().equals(expected));
-      count++;
-    }
-    assertEquals(N, count);
-  }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
deleted file mode 100644
index 0efb1aa..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchDeleter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.master.thrift.MasterState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.apache.accumulo.server.master.state.CurrentState;
-import org.apache.accumulo.server.master.state.MergeInfo;
-import org.apache.accumulo.server.master.state.MetaDataTableScanner;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.master.state.TabletStateChangeIterator;
-import org.apache.accumulo.server.zookeeper.ZooLock;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.Sets;
-
-/**
- * Test to ensure that the {@link TabletStateChangeIterator} properly skips over tablet information in the metadata table when there is no work to be done on
- * the tablet (see ACCUMULO-3580)
- */
-public class TabletStateChangeIteratorIT extends SharedMiniClusterBase {
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void test() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
-    String[] tables = getUniqueNames(4);
-    final String t1 = tables[0];
-    final String t2 = tables[1];
-    final String t3 = tables[2];
-    final String cloned = tables[3];
-
-    // create some metadata
-    createTable(t1, true);
-    createTable(t2, false);
-    createTable(t3, true);
-
-    // examine a clone of the metadata table, so we can manipulate it
-    cloneMetadataTable(cloned);
-
-    assertEquals("No tables should need attention", 0, findTabletsNeedingAttention(cloned));
-
-    // test the assigned case (no location)
-    removeLocation(cloned, t3);
-    assertEquals("Should have one tablet without a loc", 1, findTabletsNeedingAttention(cloned));
-
-    // TODO test the cases where the assignment is to a dead tserver
-    // TODO test the cases where there are ongoing merges
-    // TODO test the bad tablet location state case (active split, inconsistent metadata)
-
-    // clean up
-    dropTables(t1, t2, t3);
-  }
-
-  private void removeLocation(String table, String tableNameToModify) throws TableNotFoundException, MutationsRejectedException {
-    String tableIdToModify = getConnector().tableOperations().tableIdMap().get(tableNameToModify);
-    BatchDeleter deleter = getConnector().createBatchDeleter(table, Authorizations.EMPTY, 1, new BatchWriterConfig());
-    deleter.setRanges(Collections.singleton(new KeyExtent(new Text(tableIdToModify), null, null).toMetadataRange()));
-    deleter.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
-    deleter.delete();
-    deleter.close();
-  }
-
-  private int findTabletsNeedingAttention(String table) throws TableNotFoundException {
-    int results = 0;
-    Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
-    MetaDataTableScanner.configureScanner(scanner, new State());
-    scanner.updateScanIteratorOption("tabletChange", "debug", "1");
-    for (Entry<Key,Value> e : scanner) {
-      if (e != null)
-        results++;
-    }
-    return results;
-  }
-
-  private void createTable(String t, boolean online) throws AccumuloSecurityException, AccumuloException, TableNotFoundException, TableExistsException {
-    Connector conn = getConnector();
-    conn.tableOperations().create(t);
-    conn.tableOperations().online(t, true);
-    if (!online) {
-      conn.tableOperations().offline(t, true);
-    }
-  }
-
-  private void cloneMetadataTable(String cloned) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
-    getConnector().tableOperations().clone(MetadataTable.NAME, cloned, true, null, null);
-  }
-
-  private void dropTables(String... tables) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    for (String t : tables) {
-      getConnector().tableOperations().delete(t);
-    }
-  }
-
-  private final class State implements CurrentState {
-
-    @Override
-    public Set<TServerInstance> onlineTabletServers() {
-      HashSet<TServerInstance> tservers = new HashSet<TServerInstance>();
-      for (String tserver : getConnector().instanceOperations().getTabletServers()) {
-        try {
-          String zPath = ZooUtil.getRoot(getConnector().getInstance()) + Constants.ZTSERVERS + "/" + tserver;
-          long sessionId = ZooLock.getSessionId(new ZooCache(getCluster().getZooKeepers(), getConnector().getInstance().getZooKeepersSessionTimeOut()), zPath);
-          tservers.add(new TServerInstance(tserver, sessionId));
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-      }
-      return tservers;
-    }
-
-    @Override
-    public Set<String> onlineTables() {
-      HashSet<String> onlineTables = new HashSet<String>(getConnector().tableOperations().tableIdMap().values());
-      return Sets.filter(onlineTables, new Predicate<String>() {
-        @Override
-        public boolean apply(String tableId) {
-          return Tables.getTableState(getConnector().getInstance(), tableId) == TableState.ONLINE;
-        }
-      });
-    }
-
-    @Override
-    public Collection<MergeInfo> merges() {
-      return Collections.emptySet();
-    }
-
-    @Override
-    public Collection<KeyExtent> migrations() {
-      return Collections.emptyList();
-    }
-
-    @Override
-    public MasterState getMasterState() {
-      return MasterState.NORMAL;
-    }
-
-    @Override
-    public Set<TServerInstance> shutdownServers() {
-      return Collections.emptySet();
-    }
-
-  }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
deleted file mode 100644
index ffadd22..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.fail;
-
-import java.util.Collections;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TimedOutException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Test;
-
-/**
- *
- */
-public class TimeoutIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 75;
-  }
-
-  @Test
-  public void run() throws Exception {
-    Connector conn = getConnector();
-    String[] tableNames = getUniqueNames(2);
-    testBatchWriterTimeout(conn, tableNames[0]);
-    testBatchScannerTimeout(conn, tableNames[1]);
-  }
-
-  public void testBatchWriterTimeout(Connector conn, String tableName) throws Exception {
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().addConstraint(tableName, SlowConstraint.class.getName());
-
-    // give constraint time to propagate through zookeeper
-    UtilWaitThread.sleep(1000);
-
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
-
-    Mutation mut = new Mutation("r1");
-    mut.put("cf1", "cq1", "v1");
-
-    bw.addMutation(mut);
-    try {
-      bw.close();
-      fail("batch writer did not timeout");
-    } catch (MutationsRejectedException mre) {
-      if (mre.getCause() instanceof TimedOutException)
-        return;
-      throw mre;
-    }
-  }
-
-  public void testBatchScannerTimeout(Connector conn, String tableName) throws Exception {
-    getConnector().tableOperations().create(tableName);
-
-    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
-    Mutation m = new Mutation("r1");
-    m.put("cf1", "cq1", "v1");
-    m.put("cf1", "cq2", "v2");
-    m.put("cf1", "cq3", "v3");
-    m.put("cf1", "cq4", "v4");
-
-    bw.addMutation(m);
-    bw.close();
-
-    BatchScanner bs = getConnector().createBatchScanner(tableName, Authorizations.EMPTY, 2);
-    bs.setRanges(Collections.singletonList(new Range()));
-
-    // should not
timeout - for (Entry<Key,Value> entry : bs) { - entry.getKey(); - } - - bs.setTimeout(5, TimeUnit.SECONDS); - IteratorSetting iterSetting = new IteratorSetting(100, SlowIterator.class); - iterSetting.addOption("sleepTime", 2000 + ""); - bs.addScanIterator(iterSetting); - - try { - for (Entry<Key,Value> entry : bs) { - entry.getKey(); - } - fail("batch scanner did not time out"); - } catch (TimedOutException toe) { - // toe.printStackTrace(); - } - bs.close(); - } - -} http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java ---------------------------------------------------------------------- diff --git a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java b/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java deleted file mode 100644 index 3d6ad85..0000000 --- a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.accumulo.test.functional; - -import static java.nio.charset.StandardCharsets.UTF_8; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; - -import org.apache.accumulo.core.client.BatchScanner; -import org.apache.accumulo.core.client.BatchWriter; -import org.apache.accumulo.core.client.BatchWriterConfig; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.Scanner; -import org.apache.accumulo.core.conf.Property; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Mutation; -import org.apache.accumulo.core.data.Range; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.security.Authorizations; -import org.apache.accumulo.core.security.ColumnVisibility; -import org.apache.accumulo.core.util.ByteArraySet; -import org.apache.accumulo.harness.AccumuloClusterHarness; -import org.apache.hadoop.io.Text; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import com.google.common.collect.Iterators; - -public class VisibilityIT extends AccumuloClusterHarness { - - @Override - protected int defaultTimeoutSeconds() { - return 2 * 60; - } - - Authorizations origAuths = null; - - @Before - public void emptyAuths() throws Exception { - Connector c = getConnector(); - origAuths = c.securityOperations().getUserAuthorizations(getAdminPrincipal()); - } - - @After - public void resetAuths() throws Exception { - Connector c = getConnector(); - if (null != origAuths) { - c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths); - } - } - - @Test - public void run() throws Exception { - Connector c = getConnector(); - String[] tableNames = getUniqueNames(2); - String table = tableNames[0]; - c.tableOperations().create(table); - String table2 = tableNames[1]; - c.tableOperations().create(table2); - c.tableOperations().setProperty(table2, Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "DEFLABEL"); - - insertData(c, table); - queryData(c, table); - deleteData(c, table); - - insertDefaultData(c, table2); - queryDefaultData(c, table2); - - } - - private static SortedSet<String> nss(String... 
labels) { - TreeSet<String> ts = new TreeSet<String>(); - - for (String s : labels) { - ts.add(s); - } - - return ts; - } - - private void mput(Mutation m, String cf, String cq, String cv, String val) { - ColumnVisibility le = new ColumnVisibility(cv.getBytes(UTF_8)); - m.put(new Text(cf), new Text(cq), le, new Value(val.getBytes(UTF_8))); - } - - private void mputDelete(Mutation m, String cf, String cq, String cv) { - ColumnVisibility le = new ColumnVisibility(cv.getBytes(UTF_8)); - m.putDelete(new Text(cf), new Text(cq), le); - } - - private void insertData(Connector c, String tableName) throws Exception { - - BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig()); - Mutation m1 = new Mutation(new Text("row1")); - - mput(m1, "cf1", "cq1", "", "v1"); - mput(m1, "cf1", "cq1", "A", "v2"); - mput(m1, "cf1", "cq1", "B", "v3"); - mput(m1, "cf1", "cq1", "A&B", "v4"); - mput(m1, "cf1", "cq1", "A&(L|M)", "v5"); - mput(m1, "cf1", "cq1", "B&(L|M)", "v6"); - mput(m1, "cf1", "cq1", "A&B&(L|M)", "v7"); - mput(m1, "cf1", "cq1", "A&B&(L)", "v8"); - mput(m1, "cf1", "cq1", "A&FOO", "v9"); - mput(m1, "cf1", "cq1", "A&FOO&(L|M)", "v10"); - mput(m1, "cf1", "cq1", "FOO", "v11"); - mput(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)", "v12"); - mput(m1, "cf1", "cq1", "A&B&(L|M|FOO)", "v13"); - - bw.addMutation(m1); - bw.close(); - } - - private void deleteData(Connector c, String tableName) throws Exception { - - BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig()); - Mutation m1 = new Mutation(new Text("row1")); - - mputDelete(m1, "cf1", "cq1", ""); - mputDelete(m1, "cf1", "cq1", "A"); - mputDelete(m1, "cf1", "cq1", "A&B"); - mputDelete(m1, "cf1", "cq1", "B&(L|M)"); - mputDelete(m1, "cf1", "cq1", "A&B&(L)"); - mputDelete(m1, "cf1", "cq1", "A&FOO&(L|M)"); - mputDelete(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)"); - mputDelete(m1, "cf1", "cq1", "FOO&A"); // should not delete anything - - bw.addMutation(m1); - bw.close(); - - Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>(); - - expected.put(nss("A", "L"), nss("v5")); - expected.put(nss("A", "M"), nss("v5")); - expected.put(nss("B"), nss("v3")); - expected.put(nss("Z"), nss()); - expected.put(nss("A", "B", "L"), nss("v7", "v13")); - expected.put(nss("A", "B", "M"), nss("v7", "v13")); - expected.put(nss("A", "B", "FOO"), nss("v13")); - expected.put(nss("FOO"), nss("v11")); - expected.put(nss("A", "FOO"), nss("v9")); - - queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected); - } - - private void insertDefaultData(Connector c, String tableName) throws Exception { - BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig()); - Mutation m1 = new Mutation(new Text("row1")); - - mput(m1, "cf1", "cq1", "BASE", "v1"); - mput(m1, "cf1", "cq2", "DEFLABEL", "v2"); - mput(m1, "cf1", "cq3", "", "v3"); - - bw.addMutation(m1); - bw.close(); - } - - private static void uniqueCombos(List<Set<String>> all, Set<String> prefix, Set<String> suffix) { - - all.add(prefix); - - TreeSet<String> ss = new TreeSet<String>(suffix); - - for (String s : suffix) { - TreeSet<String> ps = new TreeSet<String>(prefix); - ps.add(s); - ss.remove(s); - - uniqueCombos(all, ps, ss); - } - } - - private void queryData(Connector c, String tableName) throws Exception { - Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>(); - expected.put(nss(), nss("v1")); - expected.put(nss("A"), nss("v2")); - expected.put(nss("A", "L"), nss("v5")); - expected.put(nss("A", "M"), 
nss("v5")); - expected.put(nss("B"), nss("v3")); - expected.put(nss("B", "L"), nss("v6")); - expected.put(nss("B", "M"), nss("v6")); - expected.put(nss("Z"), nss()); - expected.put(nss("A", "B"), nss("v4")); - expected.put(nss("A", "B", "L"), nss("v7", "v8", "v13")); - expected.put(nss("A", "B", "M"), nss("v7", "v13")); - expected.put(nss("A", "B", "FOO"), nss("v13")); - expected.put(nss("FOO"), nss("v11")); - expected.put(nss("A", "FOO"), nss("v9")); - expected.put(nss("A", "FOO", "L"), nss("v10", "v12")); - expected.put(nss("A", "FOO", "M"), nss("v10", "v12")); - expected.put(nss("B", "FOO", "L"), nss("v12")); - expected.put(nss("B", "FOO", "M"), nss("v12")); - - queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected); - queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "L", "M", "Z"), expected); - queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "Z"), expected); - queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("Z"), expected); - queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss(), expected); - } - - private void queryData(Connector c, String tableName, Set<String> allAuths, Set<String> userAuths, Map<Set<String>,Set<String>> expected) throws Exception { - - c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), new Authorizations(nbas(userAuths))); - - ArrayList<Set<String>> combos = new ArrayList<Set<String>>(); - uniqueCombos(combos, nss(), allAuths); - - for (Set<String> set1 : combos) { - Set<String> e = new TreeSet<String>(); - for (Set<String> set2 : combos) { - - set2 = new HashSet<String>(set2); - set2.retainAll(userAuths); - - if (set1.containsAll(set2) && expected.containsKey(set2)) { - e.addAll(expected.get(set2)); - } - } - - set1.retainAll(userAuths); - verify(c, tableName, set1, e); - } - - } - - private void queryDefaultData(Connector c, String tableName) throws Exception { - Scanner scanner; - - // should return no records - c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), new Authorizations("BASE", "DEFLABEL")); - scanner = getConnector().createScanner(tableName, new Authorizations()); - verifyDefault(scanner, 0); - - // should return one record - scanner = getConnector().createScanner(tableName, new Authorizations("BASE")); - verifyDefault(scanner, 1); - - // should return all three records - scanner = getConnector().createScanner(tableName, new Authorizations("BASE", "DEFLABEL")); - verifyDefault(scanner, 3); - } - - private void verifyDefault(Scanner scanner, int expectedCount) throws Exception { - int actual = Iterators.size(scanner.iterator()); - if (actual != expectedCount) - throw new Exception("actual count " + actual + " != expected count " + expectedCount); - } - - private void verify(Connector c, String tableName, Set<String> auths, Set<String> expectedValues) throws Exception { - ByteArraySet bas = nbas(auths); - - try { - verify(c, tableName, bas, expectedValues.toArray(new String[0])); - } catch (Exception e) { - throw new Exception("Verification failed auths=" + auths + " exp=" + expectedValues, e); - } - } - - private ByteArraySet nbas(Set<String> auths) { - ByteArraySet bas = new ByteArraySet(); - for (String auth : auths) { - bas.add(auth.getBytes(UTF_8)); - } - return bas; - } - - private void verify(Connector c, String tableName, ByteArraySet nss, String... 
expected) throws Exception { - Scanner scanner = c.createScanner(tableName, new Authorizations(nss)); - verify(scanner.iterator(), expected); - - BatchScanner bs = getConnector().createBatchScanner(tableName, new Authorizations(nss), 3); - bs.setRanges(Collections.singleton(new Range())); - verify(bs.iterator(), expected); - bs.close(); - } - - private void verify(Iterator<Entry<Key,Value>> iter, String... expected) throws Exception { - HashSet<String> valuesSeen = new HashSet<String>(); - - while (iter.hasNext()) { - Entry<Key,Value> entry = iter.next(); - if (valuesSeen.contains(entry.getValue().toString())) { - throw new Exception("Value seen twice"); - } - valuesSeen.add(entry.getValue().toString()); - } - - for (String ev : expected) { - if (!valuesSeen.remove(ev)) { - throw new Exception("Did not see expected value " + ev); - } - } - - if (valuesSeen.size() != 0) { - throw new Exception("Saw more values than expected " + valuesSeen); - } - } -} http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java ---------------------------------------------------------------------- diff --git a/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java b/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java deleted file mode 100644 index 34d1c6d..0000000 --- a/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.accumulo.test.functional; - -import static org.apache.accumulo.core.conf.Property.GC_CYCLE_DELAY; -import static org.apache.accumulo.core.conf.Property.GC_CYCLE_START; -import static org.apache.accumulo.core.conf.Property.INSTANCE_ZK_TIMEOUT; -import static org.apache.accumulo.core.conf.Property.TSERV_WALOG_MAX_SIZE; -import static org.apache.accumulo.core.conf.Property.TSERV_WAL_REPLICATION; -import static org.apache.accumulo.core.security.Authorizations.EMPTY; -import static org.apache.accumulo.minicluster.ServerType.GARBAGE_COLLECTOR; -import static org.apache.accumulo.minicluster.ServerType.TABLET_SERVER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Random; - -import org.apache.accumulo.core.client.BatchWriter; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.Instance; -import org.apache.accumulo.core.client.Scanner; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Mutation; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.data.impl.KeyExtent; -import org.apache.accumulo.core.metadata.MetadataTable; -import org.apache.accumulo.core.metadata.RootTable; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection; -import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily; -import org.apache.accumulo.core.util.UtilWaitThread; -import org.apache.accumulo.master.state.SetGoalState; -import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterControl; -import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl; -import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl; -import org.apache.accumulo.server.log.WalStateManager; -import org.apache.accumulo.server.log.WalStateManager.WalState; -import org.apache.accumulo.server.zookeeper.ZooReaderWriter; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RawLocalFileSystem; -import org.apache.hadoop.io.Text; -import org.junit.Assert; -import org.junit.Test; - -import com.google.common.collect.Iterators; - -public class WALSunnyDayIT extends ConfigurableMacBase { - - private static final Text CF = new Text(new byte[0]); - - @Override - protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { - cfg.setProperty(GC_CYCLE_DELAY, "1s"); - cfg.setProperty(GC_CYCLE_START, "0s"); - cfg.setProperty(TSERV_WALOG_MAX_SIZE, "1M"); - cfg.setProperty(TSERV_WAL_REPLICATION, "1"); - cfg.setProperty(INSTANCE_ZK_TIMEOUT, "3s"); - cfg.setNumTservers(1); - hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName()); - } - - int countTrue(Collection<Boolean> bools) { - int result = 0; - for (Boolean b : bools) { - if (b.booleanValue()) - result++; - } - return result; - } - - @Test - public void test() throws Exception { - MiniAccumuloClusterImpl mac = getCluster(); - MiniAccumuloClusterControl control = mac.getClusterControl(); - control.stop(GARBAGE_COLLECTOR); - Connector c = getConnector(); - String tableName = getUniqueNames(1)[0]; - c.tableOperations().create(tableName); - writeSomeData(c, tableName, 1, 1); - - // wal markers are added lazily - Map<String,Boolean> wals = getWals(c); - assertEquals(wals.toString(), 
2, wals.size()); - for (Boolean b : wals.values()) { - assertTrue("logs should be in use", b.booleanValue()); - } - - // roll log, get a new next - writeSomeData(c, tableName, 1000, 50); - Map<String,Boolean> walsAfterRoll = getWals(c); - assertEquals("should have 3 WALs after roll", 3, walsAfterRoll.size()); - assertTrue("new WALs should be a superset of the old WALs", walsAfterRoll.keySet().containsAll(wals.keySet())); - assertEquals("all WALs should be in use", 3, countTrue(walsAfterRoll.values())); - - // flush the tables - for (String table : new String[] {tableName, MetadataTable.NAME, RootTable.NAME}) { - c.tableOperations().flush(table, null, null, true); - } - UtilWaitThread.sleep(1000); - // rolled WAL is no longer in use, but needs to be GC'd - Map<String,Boolean> walsAfterflush = getWals(c); - assertEquals(walsAfterflush.toString(), 3, walsAfterflush.size()); - assertEquals("inUse should be 2", 2, countTrue(walsAfterflush.values())); - - // let the GC run for a little bit - control.start(GARBAGE_COLLECTOR); - UtilWaitThread.sleep(5 * 1000); - // make sure the unused WAL goes away - Map<String,Boolean> walsAfterGC = getWals(c); - assertEquals(walsAfterGC.toString(), 2, walsAfterGC.size()); - control.stop(GARBAGE_COLLECTOR); - // restart the tserver, but don't run recovery on all tablets - control.stop(TABLET_SERVER); - // this delays recovery on the normal tables - assertEquals(0, cluster.exec(SetGoalState.class, "SAFE_MODE").waitFor()); - control.start(TABLET_SERVER); - - // wait for the metadata table to go back online - getRecoveryMarkers(c); - // allow a little time for the master to notice ASSIGNED_TO_DEAD_SERVER tablets - UtilWaitThread.sleep(5 * 1000); - Map<KeyExtent,List<String>> markers = getRecoveryMarkers(c); - // log.debug("markers " + markers); - assertEquals("one tablet should have markers", 1, markers.keySet().size()); - assertEquals("tableId of the keyExtent should be 1", markers.keySet().iterator().next().getTableId(), new Text("1")); - - // put some data in the WAL - assertEquals(0, cluster.exec(SetGoalState.class, "NORMAL").waitFor()); - verifySomeData(c, tableName, 1000 * 50 + 1); - writeSomeData(c, tableName, 100, 100); - - Map<String,Boolean> walsAfterRestart = getWals(c); - // log.debug("wals after " + walsAfterRestart); - assertEquals("used WALs after restart should be 4", 4, countTrue(walsAfterRestart.values())); - control.start(GARBAGE_COLLECTOR); - UtilWaitThread.sleep(5 * 1000); - Map<String,Boolean> walsAfterRestartAndGC = getWals(c); - assertEquals("wals left should be 2", 2, walsAfterRestartAndGC.size()); - assertEquals("logs in use should be 2", 2, countTrue(walsAfterRestartAndGC.values())); - } - - private void verifySomeData(Connector c, String tableName, int expected) throws Exception { - Scanner scan = c.createScanner(tableName, EMPTY); - int result = Iterators.size(scan.iterator()); - scan.close(); - Assert.assertEquals(expected, result); - } - - private void writeSomeData(Connector conn, String tableName, int row, int col) throws Exception { - Random rand = new Random(); - BatchWriter bw = conn.createBatchWriter(tableName, null); - byte[] rowData = new byte[10]; - byte[] cq = new byte[10]; - byte[] value = new byte[10]; - - for (int r = 0; r < row; r++) { - rand.nextBytes(rowData); - Mutation m = new Mutation(rowData); - for (int c = 0; c < col; c++) { - rand.nextBytes(cq); - rand.nextBytes(value); - m.put(CF, new Text(cq), new Value(value)); - } - bw.addMutation(m); - if (r % 100 == 0) { - bw.flush(); - } - } - bw.close(); - } - - private 
Map<String,Boolean> getWals(Connector c) throws Exception { - Map<String,Boolean> result = new HashMap<>(); - Instance i = c.getInstance(); - ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), ""); - WalStateManager wals = new WalStateManager(c.getInstance(), zk); - for (Entry<Path,WalState> entry : wals.getAllState().entrySet()) { - // WALs are in use if they are not unreferenced - result.put(entry.getKey().toString(), entry.getValue() != WalState.UNREFERENCED); - } - return result; - } - - private Map<KeyExtent,List<String>> getRecoveryMarkers(Connector c) throws Exception { - Map<KeyExtent,List<String>> result = new HashMap<>(); - Scanner root = c.createScanner(RootTable.NAME, EMPTY); - root.setRange(TabletsSection.getRange()); - root.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME); - TabletColumnFamily.PREV_ROW_COLUMN.fetch(root); - - Scanner meta = c.createScanner(MetadataTable.NAME, EMPTY); - meta.setRange(TabletsSection.getRange()); - meta.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME); - TabletColumnFamily.PREV_ROW_COLUMN.fetch(meta); - - List<String> logs = new ArrayList<>(); - Iterator<Entry<Key,Value>> both = Iterators.concat(root.iterator(), meta.iterator()); - while (both.hasNext()) { - Entry<Key,Value> entry = both.next(); - Key key = entry.getKey(); - if (key.getColumnFamily().equals(TabletsSection.LogColumnFamily.NAME)) { - logs.add(key.getColumnQualifier().toString()); - } - if (TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key) && !logs.isEmpty()) { - KeyExtent extent = new KeyExtent(key.getRow(), entry.getValue()); - result.put(extent, logs); - logs = new ArrayList<String>(); - } - } - return result; - } - -}
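
The removals above take several public-API behaviors out of the functional test suite. For reference, the client-side timeout behavior that TimeoutIT exercised reduces to a small pattern against the public client API. The following is a minimal sketch, not the removed test itself: the class and method names are illustrative, it assumes a reachable Connector and an existing table, and something server-side (such as the SlowConstraint the removed test installed) must stall the write for the timeout to actually fire.

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TimedOutException;
import org.apache.accumulo.core.data.Mutation;

public class BatchWriterTimeoutSketch {
  // Attempts one write with a 3 second client-side timeout and reports
  // whether the writer timed out. close() flushes pending mutations and
  // throws MutationsRejectedException if they could not be applied; when
  // the failure is the configured timeout, its cause is TimedOutException.
  public static boolean writeTimesOut(Connector conn, String tableName) throws Exception {
    BatchWriterConfig cfg = new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS);
    BatchWriter bw = conn.createBatchWriter(tableName, cfg);
    Mutation mut = new Mutation("r1");
    mut.put("cf1", "cq1", "v1");
    bw.addMutation(mut);
    try {
      bw.close();
    } catch (MutationsRejectedException mre) {
      return mre.getCause() instanceof TimedOutException;
    }
    return false; // the write completed within the timeout
  }
}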
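
Likewise, the core cell-level security behavior covered by VisibilityIT comes down to writing a cell with a ColumnVisibility expression and scanning it under different Authorizations. A minimal sketch under the same caveats (illustrative class and method names; it assumes the connected user has already been granted the labels A and B, e.g. via securityOperations().changeUserAuthorizations, and that the table exists):

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;

import com.google.common.collect.Iterators;

public class VisibilitySketch {
  // Writes one cell guarded by the label expression "A&B", then shows that
  // a scan sees it only when both labels are present in the scan's
  // Authorizations: the expression must be satisfied by the auths passed
  // to createScanner, which must themselves be a subset of the user's auths.
  public static void demo(Connector c, String tableName) throws Exception {
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation(new Text("row1"));
    m.put(new Text("cf1"), new Text("cq1"), new ColumnVisibility("A&B"), new Value("v1".getBytes(UTF_8)));
    bw.addMutation(m);
    bw.close();

    Scanner both = c.createScanner(tableName, new Authorizations("A", "B"));
    System.out.println(Iterators.size(both.iterator())); // 1: A&B is satisfied

    Scanner justA = c.createScanner(tableName, new Authorizations("A"));
    System.out.println(Iterators.size(justA.iterator())); // 0: B is missing
  }
}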