http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
index 42f8000,0000000..d9de5d1
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@@ -1,268 -1,0 +1,268 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import static org.junit.Assert.assertEquals;
 +
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.SortedMap;
 +import java.util.TreeMap;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.impl.ScannerImpl;
 +import org.apache.accumulo.core.client.impl.Writer;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.file.rfile.RFile;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.DataFileValue;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.ColumnFQ;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 +import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
 +import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 +import org.apache.accumulo.server.ServerConstants;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.fs.FileRef;
 +import org.apache.accumulo.server.master.state.Assignment;
 +import org.apache.accumulo.server.master.state.TServerInstance;
 +import org.apache.accumulo.server.security.SystemCredentials;
 +import org.apache.accumulo.server.tablets.TabletTime;
 +import org.apache.accumulo.server.util.FileUtil;
 +import org.apache.accumulo.server.util.MasterMetadataUtil;
 +import org.apache.accumulo.server.util.MetadataTableUtil;
 +import org.apache.accumulo.server.zookeeper.TransactionWatcher;
 +import org.apache.accumulo.server.zookeeper.ZooLock;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.accumulo.tserver.TabletServer;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class SplitRecoveryIT extends ConfigurableMacIT {
 +  
 +  
 +  private KeyExtent nke(String table, String endRow, String prevEndRow) {
 +    return new KeyExtent(new Text(table), endRow == null ? null : new Text(endRow), prevEndRow == null ? null : new Text(prevEndRow));
 +  }
 +  
 +  private void run() throws Exception {
 +    String zPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/testLock";
 +    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-     zoo.putPersistentData(zPath, "".getBytes(), NodeExistsPolicy.OVERWRITE);
++    zoo.putPersistentData(zPath, new byte[0], NodeExistsPolicy.OVERWRITE);
 +    ZooLock zl = new ZooLock(zPath);
 +    boolean gotLock = zl.tryLock(new LockWatcher() {
 +      
 +      @Override
 +      public void lostLock(LockLossReason reason) {
 +        System.exit(-1);
 +        
 +      }
 +      
 +      @Override
 +      public void unableToMonitorLockNode(Throwable e) {
 +        System.exit(-1);
 +      }
-     }, "foo".getBytes());
++    }, "foo".getBytes(Constants.UTF8));
 +    
 +    if (!gotLock) {
 +      System.err.println("Failed to get lock " + zPath);
 +    }
 +    
 +    // run test for a table with one tablet
 +    runSplitRecoveryTest(0, "sp", 0, zl, nke("foo0", null, null));
 +    runSplitRecoveryTest(1, "sp", 0, zl, nke("foo1", null, null));
 +    
 +    // run test for tables with two tablets, run test on first and last tablet
 +    runSplitRecoveryTest(0, "k", 0, zl, nke("foo2", "m", null), nke("foo2", 
null, "m"));
 +    runSplitRecoveryTest(1, "k", 0, zl, nke("foo3", "m", null), nke("foo3", 
null, "m"));
 +    runSplitRecoveryTest(0, "o", 1, zl, nke("foo4", "m", null), nke("foo4", 
null, "m"));
 +    runSplitRecoveryTest(1, "o", 1, zl, nke("foo5", "m", null), nke("foo5", 
null, "m"));
 +    
 +    // run test for table w/ three tablets, run test on middle tablet
 +    runSplitRecoveryTest(0, "o", 1, zl, nke("foo6", "m", null), nke("foo6", 
"r", "m"), nke("foo6", null, "r"));
 +    runSplitRecoveryTest(1, "o", 1, zl, nke("foo7", "m", null), nke("foo7", 
"r", "m"), nke("foo7", null, "r"));
 +    
 +    // run test for table w/ three tablets, run test on first
 +    runSplitRecoveryTest(0, "g", 0, zl, nke("foo8", "m", null), nke("foo8", 
"r", "m"), nke("foo8", null, "r"));
 +    runSplitRecoveryTest(1, "g", 0, zl, nke("foo9", "m", null), nke("foo9", 
"r", "m"), nke("foo9", null, "r"));
 +    
 +    // run test for table w/ three tablets, run test on last tablet
 +    runSplitRecoveryTest(0, "w", 2, zl, nke("fooa", "m", null), nke("fooa", 
"r", "m"), nke("fooa", null, "r"));
 +    runSplitRecoveryTest(1, "w", 2, zl, nke("foob", "m", null), nke("foob", 
"r", "m"), nke("foob", null, "r"));
 +  }
 +  
 +  private void runSplitRecoveryTest(int failPoint, String mr, int extentToSplit, ZooLock zl, KeyExtent... extents) throws Exception {
 +    
 +    Text midRow = new Text(mr);
 +    
 +    SortedMap<FileRef,DataFileValue> splitMapFiles = null;
 +    
 +    for (int i = 0; i < extents.length; i++) {
 +      KeyExtent extent = extents[i];
 +      
 +      String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
 +      MetadataTableUtil.addTablet(extent, tdir, SystemCredentials.get(), TabletTime.LOGICAL_TIME_ID, zl);
 +      SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
 +      mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
 +      
 +      if (i == extentToSplit) {
 +        splitMapFiles = mapFiles;
 +      }
 +      int tid = 0;
 +      TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
 +      MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", SystemCredentials.get(), zl);
 +    }
 +    
 +    KeyExtent extent = extents[extentToSplit];
 +    
 +    KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
 +    KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
 +    
 +    splitPartiallyAndRecover(extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
 +  }
 +  
 +  private void splitPartiallyAndRecover(KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio, SortedMap<FileRef,DataFileValue> mapFiles,
 +      Text midRow, String location, int steps, ZooLock zl) throws Exception {
 +    
 +    SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
 +    SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
 +    List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
 +    
 +    MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes,
 +        highDatafileSizes, highDatafilesToRemove);
 +    
 +    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get(), zl);
 +    TServerInstance instance = new TServerInstance(location, zl.getSessionId());
 +    Writer writer = new Writer(HdfsZooInstance.getInstance(), SystemCredentials.get(), MetadataTable.ID);
 +    Assignment assignment = new Assignment(high, instance);
 +    Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
 +    m.put(TabletsSection.FutureLocationColumnFamily.NAME, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
 +    writer.update(m);
 +    
 +    if (steps >= 1) {
 +      Map<FileRef,Long> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), extent);
 +      MasterMetadataUtil.addNewTablet(low, "/lowDir", instance, lowDatafileSizes, bulkFiles, SystemCredentials.get(), TabletTime.LOGICAL_TIME_ID + "0", -1l,
 +          -1l, zl);
 +    }
 +    if (steps >= 2)
 +      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get(), zl);
 +    
 +    TabletServer.verifyTabletInformation(high, instance, null, "127.0.0.1:0", zl);
 +    
 +    if (steps >= 1) {
 +      ensureTabletHasNoUnexpectedMetadataEntries(low, lowDatafileSizes);
 +      ensureTabletHasNoUnexpectedMetadataEntries(high, highDatafileSizes);
 +      
 +      Map<FileRef,Long> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), low);
 +      Map<FileRef,Long> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), high);
 +      
 +      if (!lowBulkFiles.equals(highBulkFiles)) {
 +        throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " 
+ low + " " + high);
 +      }
 +      
 +      if (lowBulkFiles.size() == 0) {
 +        throw new Exception(" no bulk files " + low);
 +      }
 +    } else {
 +      ensureTabletHasNoUnexpectedMetadataEntries(extent, mapFiles);
 +    }
 +  }
 +  
 +  private void ensureTabletHasNoUnexpectedMetadataEntries(KeyExtent extent, SortedMap<FileRef,DataFileValue> expectedMapFiles) throws Exception {
 +    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get(), MetadataTable.ID, Authorizations.EMPTY);
 +    scanner.setRange(extent.toMetadataRange());
 +    
 +    HashSet<ColumnFQ> expectedColumns = new HashSet<ColumnFQ>();
 +    expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
 +    expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
 +    expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
 +    expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
 +    
 +    HashSet<Text> expectedColumnFamilies = new HashSet<Text>();
 +    expectedColumnFamilies.add(DataFileColumnFamily.NAME);
 +    expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
 +    expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
 +    expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
 +    expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
 +    
 +    Iterator<Entry<Key,Value>> iter = scanner.iterator();
 +    while (iter.hasNext()) {
 +      Key key = iter.next().getKey();
 +      
 +      if (!key.getRow().equals(extent.getMetadataEntry())) {
 +        throw new Exception("Tablet " + extent + " contained unexpected " + 
MetadataTable.NAME + " entry " + key);
 +      }
 +      
 +      if (expectedColumnFamilies.contains(key.getColumnFamily())) {
 +        continue;
 +      }
 +      
 +      if (expectedColumns.remove(new ColumnFQ(key))) {
 +        continue;
 +      }
 +      
 +      throw new Exception("Tablet " + extent + " contained unexpected " + 
MetadataTable.NAME + " entry " + key);
 +    }
 +    System.out.println("expectedColumns " + expectedColumns);
 +    if (expectedColumns.size() > 0) {
 +      throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
 +    }
 +    
 +    SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, SystemCredentials.get());
 +    verifySame(expectedMapFiles, fixedMapFiles);
 +  }
 +  
 +  private void verifySame(SortedMap<FileRef,DataFileValue> datafileSizes, SortedMap<FileRef,DataFileValue> fixedDatafileSizes) throws Exception {
 +    
 +    if (!datafileSizes.keySet().containsAll(fixedDatafileSizes.keySet()) || !fixedDatafileSizes.keySet().containsAll(datafileSizes.keySet())) {
 +      throw new Exception("Key sets not the same " + datafileSizes.keySet() + " !=  " + fixedDatafileSizes.keySet());
 +    }
 +    
 +    for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
 +      DataFileValue dfv = entry.getValue();
 +      DataFileValue otherDfv = fixedDatafileSizes.get(entry.getKey());
 +      
 +      if (!dfv.equals(otherDfv)) {
 +        throw new Exception(entry.getKey() + " dfv not equal  " + dfv + "  " + otherDfv);
 +      }
 +    }
 +  }
 +
 +  
 +  public static void main(String[] args) throws Exception {
 +    new SplitRecoveryIT().run();
 +  }
 +  
 +  @Test(timeout = 30 * 1000)
 +  public void test() throws Exception {
 +    assertEquals(0, exec(SplitRecoveryIT.class).waitFor());
 +  }
 +
 +}
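
The recurring change in these hunks replaces platform-default String.getBytes() with a charset-explicit overload (and, for the lock node's empty payload, a plain new byte[0] that sidesteps encoding entirely). A minimal standalone sketch of the difference, assuming Constants.UTF8 is a java.nio.charset.Charset constant for UTF-8, as its usage above suggests:

    import java.nio.charset.Charset;

    public class CharsetDemo {
      // Assumed stand-in for org.apache.accumulo.core.Constants.UTF8.
      static final Charset UTF8 = Charset.forName("UTF-8");

      public static void main(String[] args) {
        String s = "foo";
        byte[] platform = s.getBytes(); // uses the JVM default charset (file.encoding), varies by platform
        byte[] utf8 = s.getBytes(UTF8); // deterministic UTF-8 bytes everywhere
        System.out.println(platform.length + " vs " + utf8.length);
      }
    }

Without the explicit charset, bytes written to ZooKeeper or into table values would depend on each JVM's default encoding, which can differ between the servers and clients sharing that data.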

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
index 5cec48f,0000000..cc183cd
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
@@@ -1,94 -1,0 +1,97 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import static org.junit.Assert.assertEquals;
 +
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.TreeSet;
 +
++import org.apache.accumulo.core.Constants;
++import org.apache.accumulo.core.cli.BatchWriterOpts;
++import org.apache.accumulo.core.cli.ScannerOpts;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.minicluster.MemoryUnit;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class TabletIT extends ConfigurableMacIT {
 +
 +  private static final int N = 1000;
 +
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg) {
 +    Map<String,String> siteConfig = new HashMap<String,String>();
 +    siteConfig.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "200");
 +    siteConfig.put(Property.TSERV_MAXMEM.getKey(), "128M");
 +    cfg.setDefaultMemory(256, MemoryUnit.MEGABYTE);
 +    cfg.setSiteConfig(siteConfig);
 +  }
 +
 +  @Test(timeout = 2 * 60 * 1000)
 +  public void createTableTest() throws Exception {
 +    String tableName = getTableNames(1)[0];
 +    createTableTest(tableName, false);
 +    createTableTest(tableName, true);
 +  }
 +
 +  public void createTableTest(String tableName, boolean readOnly) throws Exception {
 +    // create the test table within accumulo
 +    Connector connector = getConnector();
 +
 +    if (!readOnly) {
 +      TreeSet<Text> keys = new TreeSet<Text>();
 +      for (int i = N / 100; i < N; i += N / 100) {
 +        keys.add(new Text(String.format("%05d", i)));
 +      }
 +
 +      // presplit
 +      connector.tableOperations().create(tableName);
 +      connector.tableOperations().addSplits(tableName, keys);
 +      BatchWriter b = connector.createBatchWriter(tableName, new BatchWriterConfig());
 +
 +      // populate
 +      for (int i = 0; i < N; i++) {
 +        Mutation m = new Mutation(new Text(String.format("%05d", i)));
-         m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes()));
++        m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes(Constants.UTF8)));
 +        b.addMutation(m);
 +      }
 +      b.close();
 +    }
 +
 +    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
 +    int count = 0;
 +    for (Entry<Key,Value> elt : scanner) {
 +      String expected = String.format("%05d", count);
 +      assertEquals(expected, elt.getKey().getRow().toString());
 +      count++;
 +    }
 +    assertEquals(N, count);
 +  }
 +
 +}
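
For reference, the presplit loop in createTableTest above generates evenly spaced five-digit row keys before the table is populated. A standalone sketch of the same arithmetic (pure Java; the class name is illustrative only):

    import java.util.TreeSet;

    public class SplitPointDemo {
      public static void main(String[] args) {
        final int N = 1000; // matches TabletIT.N
        TreeSet<String> keys = new TreeSet<String>();
        // Step is N / 100 = 10, so the keys are 00010, 00020, ..., 00990:
        // 99 split points, giving the table 100 initial tablets.
        for (int i = N / 100; i < N; i += N / 100) {
          keys.add(String.format("%05d", i));
        }
        System.out.println(keys.size() + " splits: " + keys.first() + " .. " + keys.last());
      }
    }

With TABLE_SPLIT_THRESHOLD lowered to 200 bytes in configure(), the tablets are also expected to keep splitting on their own as the 1000 rows are written.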

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
index 3252dec,0000000..c98e080
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
@@@ -1,297 -1,0 +1,298 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Set;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +
++import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.accumulo.core.util.ByteArraySet;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class VisibilityIT extends SimpleMacIT {
 +
 +  @Test(timeout = 2 * 60 * 1000)
 +  public void run() throws Exception {
 +    Connector c = getConnector();
 +    String[] tableNames = getTableNames(2);
 +    String table = tableNames[0];
 +    c.tableOperations().create(table);
 +    String table2 = tableNames[1];
 +    c.tableOperations().create(table2);
 +    c.tableOperations().setProperty(table2, Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "DEFLABEL");
 +
 +    insertData(c, table);
 +    queryData(c, table);
 +    deleteData(c, table);
 +
 +    insertDefaultData(c, table2);
 +    queryDefaultData(c, table2);
 +
 +  }
 +
 +  private static SortedSet<String> nss(String... labels) {
 +    TreeSet<String> ts = new TreeSet<String>();
 +
 +    for (String s : labels) {
 +      ts.add(s);
 +    }
 +
 +    return ts;
 +  }
 +
 +  private void mput(Mutation m, String cf, String cq, String cv, String val) {
-     ColumnVisibility le = new ColumnVisibility(cv.getBytes());
-     m.put(new Text(cf), new Text(cq), le, new Value(val.getBytes()));
++    ColumnVisibility le = new ColumnVisibility(cv.getBytes(Constants.UTF8));
++    m.put(new Text(cf), new Text(cq), le, new Value(val.getBytes(Constants.UTF8)));
 +  }
 +
 +  private void mputDelete(Mutation m, String cf, String cq, String cv) {
-     ColumnVisibility le = new ColumnVisibility(cv.getBytes());
++    ColumnVisibility le = new ColumnVisibility(cv.getBytes(Constants.UTF8));
 +    m.putDelete(new Text(cf), new Text(cq), le);
 +  }
 +
 +  private void insertData(Connector c, String tableName) throws Exception {
 +
 +    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
 +    Mutation m1 = new Mutation(new Text("row1"));
 +
 +    mput(m1, "cf1", "cq1", "", "v1");
 +    mput(m1, "cf1", "cq1", "A", "v2");
 +    mput(m1, "cf1", "cq1", "B", "v3");
 +    mput(m1, "cf1", "cq1", "A&B", "v4");
 +    mput(m1, "cf1", "cq1", "A&(L|M)", "v5");
 +    mput(m1, "cf1", "cq1", "B&(L|M)", "v6");
 +    mput(m1, "cf1", "cq1", "A&B&(L|M)", "v7");
 +    mput(m1, "cf1", "cq1", "A&B&(L)", "v8");
 +    mput(m1, "cf1", "cq1", "A&FOO", "v9");
 +    mput(m1, "cf1", "cq1", "A&FOO&(L|M)", "v10");
 +    mput(m1, "cf1", "cq1", "FOO", "v11");
 +    mput(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)", "v12");
 +    mput(m1, "cf1", "cq1", "A&B&(L|M|FOO)", "v13");
 +
 +    bw.addMutation(m1);
 +    bw.close();
 +  }
 +
 +  private void deleteData(Connector c, String tableName) throws Exception {
 +
 +    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
 +    Mutation m1 = new Mutation(new Text("row1"));
 +
 +    mputDelete(m1, "cf1", "cq1", "");
 +    mputDelete(m1, "cf1", "cq1", "A");
 +    mputDelete(m1, "cf1", "cq1", "A&B");
 +    mputDelete(m1, "cf1", "cq1", "B&(L|M)");
 +    mputDelete(m1, "cf1", "cq1", "A&B&(L)");
 +    mputDelete(m1, "cf1", "cq1", "A&FOO&(L|M)");
 +    mputDelete(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)");
 +    mputDelete(m1, "cf1", "cq1", "FOO&A"); // should not delete anything
 +
 +    bw.addMutation(m1);
 +    bw.close();
 +
 +    Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
 +
 +    expected.put(nss("A", "L"), nss("v5"));
 +    expected.put(nss("A", "M"), nss("v5"));
 +    expected.put(nss("B"), nss("v3"));
 +    expected.put(nss("Z"), nss());
 +    expected.put(nss("A", "B", "L"), nss("v7", "v13"));
 +    expected.put(nss("A", "B", "M"), nss("v7", "v13"));
 +    expected.put(nss("A", "B", "FOO"), nss("v13"));
 +    expected.put(nss("FOO"), nss("v11"));
 +    expected.put(nss("A", "FOO"), nss("v9"));
 +
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
 +  }
 +
 +  private void insertDefaultData(Connector c, String tableName) throws Exception {
 +    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
 +    Mutation m1 = new Mutation(new Text("row1"));
 +
 +    mput(m1, "cf1", "cq1", "BASE", "v1");
 +    mput(m1, "cf1", "cq2", "DEFLABEL", "v2");
 +    mput(m1, "cf1", "cq3", "", "v3");
 +
 +    bw.addMutation(m1);
 +    bw.close();
 +  }
 +
 +  private static void uniqueCombos(List<Set<String>> all, Set<String> prefix, Set<String> suffix) {
 +
 +    all.add(prefix);
 +
 +    TreeSet<String> ss = new TreeSet<String>(suffix);
 +
 +    for (String s : suffix) {
 +      TreeSet<String> ps = new TreeSet<String>(prefix);
 +      ps.add(s);
 +      ss.remove(s);
 +
 +      uniqueCombos(all, ps, ss);
 +    }
 +  }
 +
 +  private void queryData(Connector c, String tableName) throws Exception {
 +    Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
 +    expected.put(nss(), nss("v1"));
 +    expected.put(nss("A"), nss("v2"));
 +    expected.put(nss("A", "L"), nss("v5"));
 +    expected.put(nss("A", "M"), nss("v5"));
 +    expected.put(nss("B"), nss("v3"));
 +    expected.put(nss("B", "L"), nss("v6"));
 +    expected.put(nss("B", "M"), nss("v6"));
 +    expected.put(nss("Z"), nss());
 +    expected.put(nss("A", "B"), nss("v4"));
 +    expected.put(nss("A", "B", "L"), nss("v7", "v8", "v13"));
 +    expected.put(nss("A", "B", "M"), nss("v7", "v13"));
 +    expected.put(nss("A", "B", "FOO"), nss("v13"));
 +    expected.put(nss("FOO"), nss("v11"));
 +    expected.put(nss("A", "FOO"), nss("v9"));
 +    expected.put(nss("A", "FOO", "L"), nss("v10", "v12"));
 +    expected.put(nss("A", "FOO", "M"), nss("v10", "v12"));
 +    expected.put(nss("B", "FOO", "L"), nss("v12"));
 +    expected.put(nss("B", "FOO", "M"), nss("v12"));
 +
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "L", "M", "Z"), expected);
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "Z"), expected);
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("Z"), expected);
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss(), expected);
 +  }
 +
 +  private void queryData(Connector c, String tableName, Set<String> allAuths, Set<String> userAuths, Map<Set<String>,Set<String>> expected) throws Exception {
 +
 +    c.securityOperations().changeUserAuthorizations("root", new Authorizations(nbas(userAuths)));
 +
 +    ArrayList<Set<String>> combos = new ArrayList<Set<String>>();
 +    uniqueCombos(combos, nss(), allAuths);
 +
 +    for (Set<String> set1 : combos) {
 +      Set<String> e = new TreeSet<String>();
 +      for (Set<String> set2 : combos) {
 +
 +        set2 = new HashSet<String>(set2);
 +        set2.retainAll(userAuths);
 +
 +        if (set1.containsAll(set2) && expected.containsKey(set2)) {
 +          e.addAll(expected.get(set2));
 +        }
 +      }
 +
 +      set1.retainAll(userAuths);
 +      verify(c, tableName, set1, e);
 +    }
 +
 +  }
 +
 +  private void queryDefaultData(Connector c, String tableName) throws Exception {
 +    Scanner scanner;
 +
 +    // should return no records
 +    c.securityOperations().changeUserAuthorizations("root", new Authorizations("BASE", "DEFLABEL"));
 +    scanner = getConnector().createScanner(tableName, new Authorizations());
 +    verifyDefault(scanner, 0);
 +
 +    // should return one record
 +    scanner = getConnector().createScanner(tableName, new Authorizations("BASE"));
 +    verifyDefault(scanner, 1);
 +
 +    // should return all three records
 +    scanner = getConnector().createScanner(tableName, new Authorizations("BASE", "DEFLABEL"));
 +    verifyDefault(scanner, 3);
 +  }
 +
 +  private void verifyDefault(Scanner scanner, int expectedCount) throws Exception {
 +    for (@SuppressWarnings("unused")
 +    Entry<Key,Value> entry : scanner)
 +      --expectedCount;
 +    if (expectedCount != 0)
 +      throw new Exception("expected count != 0: " + expectedCount);
 +  }
 +
 +  private void verify(Connector c, String tableName, Set<String> auths, Set<String> expectedValues) throws Exception {
 +    ByteArraySet bas = nbas(auths);
 +
 +    try {
 +      verify(c, tableName, bas, expectedValues.toArray(new String[0]));
 +    } catch (Exception e) {
 +      throw new Exception("Verification failed auths=" + auths + " exp=" + 
expectedValues, e);
 +    }
 +  }
 +
 +  private ByteArraySet nbas(Set<String> auths) {
 +    ByteArraySet bas = new ByteArraySet();
 +    for (String auth : auths) {
-       bas.add(auth.getBytes());
++      bas.add(auth.getBytes(Constants.UTF8));
 +    }
 +    return bas;
 +  }
 +
 +  private void verify(Connector c, String tableName, ByteArraySet nss, String... expected) throws Exception {
 +    Scanner scanner = c.createScanner(tableName, new Authorizations(nss));
 +    verify(scanner.iterator(), expected);
 +
 +    BatchScanner bs = getConnector().createBatchScanner(tableName, new Authorizations(nss), 3);
 +    bs.setRanges(Collections.singleton(new Range()));
 +    verify(bs.iterator(), expected);
 +    bs.close();
 +  }
 +
 +  private void verify(Iterator<Entry<Key,Value>> iter, String... expected) throws Exception {
 +    HashSet<String> valuesSeen = new HashSet<String>();
 +
 +    while (iter.hasNext()) {
 +      Entry<Key,Value> entry = iter.next();
 +      if (valuesSeen.contains(entry.getValue().toString())) {
 +        throw new Exception("Value seen twice");
 +      }
 +      valuesSeen.add(entry.getValue().toString());
 +    }
 +
 +    for (String ev : expected) {
 +      if (!valuesSeen.remove(ev)) {
 +        throw new Exception("Did not see expected value " + ev);
 +      }
 +    }
 +
 +    if (valuesSeen.size() != 0) {
 +      throw new Exception("Saw more values than expected " + valuesSeen);
 +    }
 +  }
 +}
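
The expected-results maps above encode the usual column-visibility semantics: & is AND, | is OR, and parentheses group subexpressions, so with authorizations {A, L} the expression A&(L|M) passes (v5 is returned) while A&B does not (v4 is filtered). A minimal sketch of checking an expression directly; it assumes a VisibilityEvaluator class with this constructor and evaluate method exists alongside ColumnVisibility in the core security package, which is an assumption since only ColumnVisibility and Authorizations appear in the diff:

    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.core.security.ColumnVisibility;
    import org.apache.accumulo.core.security.VisibilityEvaluator;

    public class VisibilityDemo {
      public static void main(String[] args) throws Exception {
        // Scan-time authorizations {A, L}, one of the combinations queryData exercises.
        VisibilityEvaluator ve = new VisibilityEvaluator(new Authorizations("A", "L"));
        System.out.println(ve.evaluate(new ColumnVisibility("A&(L|M)"))); // true: v5 visible
        System.out.println(ve.evaluate(new ColumnVisibility("A&B")));     // false: v4 filtered
      }
    }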

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/trace/src/main/java/org/apache/accumulo/trace/instrument/receivers/ZooSpanClient.java
----------------------------------------------------------------------
