Merge branch '1.6.1-SNAPSHOT'

Conflicts:
        server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
        shell/src/main/java/org/apache/accumulo/shell/commands/SetIterCommand.java
        test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/541a77ff
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/541a77ff
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/541a77ff

Branch: refs/heads/master
Commit: 541a77ff7d42906da3aa84dfd4767f6b5953f28f
Parents: 117d5c5 d3e17f4
Author: Christopher Tubbs <ctubb...@apache.org>
Authored: Fri Sep 12 18:32:53 2014 -0400
Committer: Christopher Tubbs <ctubb...@apache.org>
Committed: Fri Sep 12 18:32:53 2014 -0400

----------------------------------------------------------------------
 .../apache/accumulo/core/client/BatchScanner.java  |  1 -
 .../accumulo/core/client/ConditionalWriter.java    |  3 ---
 .../core/client/ConditionalWriterConfig.java       |  1 -
 .../org/apache/accumulo/core/client/Instance.java  |  4 ++--
 .../core/client/admin/TableOperations.java         |  6 ------
 .../accumulo/core/client/impl/OfflineScanner.java  | 13 -------------
 .../accumulo/core/file/BloomFilterLayer.java       |  1 -
 .../accumulo/core/file/rfile/bcfile/BCFile.java    |  3 ---
 .../iterators/system/LocalityGroupIterator.java    |  4 ----
 .../core/security/VisibilityEvaluator.java         |  2 +-
 .../core/security/crypto/CryptoModule.java         |  2 --
 .../core/client/mock/MockConnectorTest.java        |  2 --
 .../examples/simple/client/RandomBatchScanner.java |  1 -
 .../examples/simple/client/RowOperations.java      | 11 -----------
 .../examples/simple/mapreduce/TeraSortIngest.java  |  2 --
 .../main/java/org/apache/accumulo/fate/TStore.java |  2 --
 .../apache/accumulo/fate/zookeeper/ZooLock.java    |  2 --
 .../apache/accumulo/server/init/Initialize.java    |  1 -
 .../server/tabletserver/MemoryManager.java         |  2 --
 .../org/apache/accumulo/server/util/ZooZap.java    |  3 ---
 .../accumulo/gc/GarbageCollectionEnvironment.java  | 17 -----------------
 .../apache/accumulo/gc/SimpleGarbageCollector.java |  1 -
 .../gc/GarbageCollectWriteAheadLogsTest.java       |  6 ------
 .../org/apache/accumulo/master/TestMergeState.java |  6 ------
 .../java/org/apache/accumulo/monitor/Monitor.java  |  3 ---
 .../accumulo/monitor/servlets/ScanServlet.java     |  4 ++--
 .../accumulo/tserver/log/TabletServerLogger.java   |  1 -
 .../start/classloader/AccumuloClassLoader.java     |  5 -----
 .../apache/accumulo/test/randomwalk/Module.java    |  1 -
 .../accumulo/test/randomwalk/bulk/Verify.java      |  4 ----
 .../apache/accumulo/test/ConditionalWriterIT.java  |  6 +++++-
 .../test/functional/AccumuloInputFormatIT.java     |  3 ++-
 .../test/functional/SparseColumnFamilyIT.java      |  6 ------
 33 files changed, 12 insertions(+), 117 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriterConfig.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/ConditionalWriterConfig.java
index 1280abd,a220e62..52c6a76
--- a/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriterConfig.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriterConfig.java
@@@ -104,20 -101,7 +104,19 @@@ public class ConditionalWriterConfig 
      this.maxWriteThreads = maxWriteThreads;
      return this;
    }
 -  
 +
 +  /**
 +   * Sets the Durability for the mutation, if applied.
 +   * <p>
 +   * <b>Default:</b> Durability.DEFAULT: use the table's durability configuration.
-    * @param durability
 +   * @return {@code this} to allow chaining of set methods
 +   * @since 1.7.0
 +   */
 +  public ConditionalWriterConfig setDurability(Durability durability) {
 +    this.durability = durability;
 +    return this;
 +  }
 +
    public Authorizations getAuthorizations() {
      return auths;
    }

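For reference, a minimal usage sketch of the new setDurability() option; the Connector and table name below are illustrative assumptions, not part of this commit (imports from org.apache.accumulo.core.client omitted):

    // assumes an existing Connector "connector" and a table named "mytable" (both hypothetical)
    ConditionalWriterConfig cfg = new ConditionalWriterConfig()
        .setMaxWriteThreads(4)
        .setDurability(Durability.SYNC); // Durability.DEFAULT would keep the table's configured durability
    ConditionalWriter cw = connector.createConditionalWriter("mytable", cfg);
    // ... submit ConditionalMutations via cw.write(...) ...
    cw.close();
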
http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/core/src/main/java/org/apache/accumulo/core/client/Instance.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/Instance.java
index bcdd0e5,a55312e..8a70d4c
--- a/core/src/main/java/org/apache/accumulo/core/client/Instance.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/Instance.java
@@@ -124,10 -124,10 +124,10 @@@ public interface Instance 
  
    /**
     * Returns the AccumuloConfiguration to use when interacting with this instance.
 -   * 
 +   *
     * @return the AccumuloConfiguration that specifies properties related to interacting with this instance
     * @deprecated since 1.6.0. This method makes very little sense in the context of the client API and never should have been exposed.
-    * @see {@link InstanceOperations#getSystemConfiguration()} for client-side reading of the server-side configuration.
+    * @see InstanceOperations#getSystemConfiguration() for client-side reading of the server-side configuration.
     */
    @Deprecated
    AccumuloConfiguration getConfiguration();

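As the corrected @see tag indicates, client code should read server-side configuration through InstanceOperations rather than the deprecated Instance.getConfiguration(). A short sketch, assuming an existing Connector and an illustrative property name (exception handling omitted):

    // assumes an existing Connector "connector" (hypothetical)
    Map<String,String> sysConf = connector.instanceOperations().getSystemConfiguration();
    String clientPort = sysConf.get("tserver.port.client"); // example property lookup
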
http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/core/src/main/java/org/apache/accumulo/core/file/BloomFilterLayer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 5f1e287,5cbffc3..4305e71
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@@ -392,61 -418,6 +392,60 @@@ public class Initialize 
        }
      }
    }
 +  /**
 +   * Create an rfile in the default tablet's directory for a new table
 +   * @param volmanager The VolumeManager
 +   * @param tableId TableID that is being "created"
 +   * @param targetTabletDir Directory where the rfile should be created
 +   * @param tableTabletDir The table_info directory for the new table
 +   * @param defaultTabletDir The default_tablet directory for the new table
-    * @throws IOException
 +   */
 +  private static void initializeTableData(VolumeManager volmanager, String tableId, String targetTabletDir, String tableTabletDir, String defaultTabletDir) throws IOException {
 +    // populate the root tablet with info about the default tablet
 +    // the root tablet contains the key extent and locations of all the
 +    // metadata tablets
 +    String initTabFile = targetTabletDir + "/00000_00000." + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
 +    FileSystem fs = volmanager.getVolumeByPath(new Path(initTabFile)).getFileSystem();
 +    FileSKVWriter tabletWriter = FileOperations.getInstance().openWriter(initTabFile, fs, fs.getConf(), AccumuloConfiguration.getDefaultConfiguration());
 +    tabletWriter.startDefaultLocalityGroup();
 +
 +    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(tableId), MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
 +
 +    // table tablet's directory
 +    Key tableDirKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
 +        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
 +    tabletWriter.append(tableDirKey, new Value(defaultTabletDir.getBytes(StandardCharsets.UTF_8)));
 +
 +    // table tablet time
 +    Key tableTimeKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
 +        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
 +    tabletWriter.append(tableTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(StandardCharsets.UTF_8)));
 +
 +    // table tablet's prevrow
 +    Key tablePrevRowKey = new Key(tableExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
 +        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
 +    tabletWriter.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(null));
 +
 +    // ----------] default tablet info
 +    Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(tableId), null));
 +
 +    // default's directory
 +    Key defaultDirKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
 +        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
 +    tabletWriter.append(defaultDirKey, new Value(defaultTabletDir.getBytes(StandardCharsets.UTF_8)));
 +
 +    // default's time
 +    Key defaultTimeKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
 +        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
 +    tabletWriter.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(StandardCharsets.UTF_8)));
 +
 +    // default's prevrow
 +    Key defaultPrevRowKey = new Key(defaultExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
 +        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
 +    tabletWriter.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
 +
 +    tabletWriter.close();
 +  }
  
    private static void initZooKeeper(Opts opts, String uuid, String instanceNamePath, Path rootTablet) throws KeeperException, InterruptedException {
      // setup basic data in zookeeper

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/server/base/src/main/java/org/apache/accumulo/server/util/ZooZap.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionEnvironment.java
----------------------------------------------------------------------
diff --cc server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionEnvironment.java
index b137d21,0294ce1..7f208d8
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionEnvironment.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionEnvironment.java
@@@ -116,13 -101,4 +102,10 @@@ public interface GarbageCollectionEnvir
     *          Value to increment the still-in-use count by.
     */
    void incrementInUseStat(long i);
 +
 +  /**
 +   * Determine which files are still pending replication
-    * @param absolutePath Absolute path to a file
 +   * @return An iterator over the absolute paths of files that still need to be replicated, with their replication Status
-    * @throws AccumuloException
-    * @throws AccumuloSecurityException
 +   */
 +  Iterator<Entry<String,Status>> getReplicationNeededIterator() throws AccumuloException, AccumuloSecurityException;
  }

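A rough sketch of how a collector might consume the new iterator to avoid deleting write-ahead logs that replication still needs; the gcEnv/candidates variables and the StatusUtil.isSafeForRemoval check are assumptions for illustration, not taken from this commit:

    // assumes a GarbageCollectionEnvironment "gcEnv" and a Set<String> "candidates" of WAL paths eligible for deletion
    Iterator<Entry<String,Status>> needed = gcEnv.getReplicationNeededIterator();
    while (needed.hasNext()) {
      Entry<String,Status> entry = needed.next();
      if (!StatusUtil.isSafeForRemoval(entry.getValue())) {
        candidates.remove(entry.getKey()); // file is still pending replication; keep it
      }
    }
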
http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
----------------------------------------------------------------------
diff --cc server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
index 946b653,ce1f026..74b0919
--- a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
@@@ -302,208 -268,4 +302,202 @@@ public class GarbageCollectWriteAheadLo
     assertFalse(GarbageCollectWriteAheadLogs.isUUID("0" + UUID.randomUUID().toString()));
      assertFalse(GarbageCollectWriteAheadLogs.isUUID(null));
    }
 +
 +  // It was easier to do this than get the mocking working for me
 +  private static class ReplicationGCWAL extends GarbageCollectWriteAheadLogs {
 +
 +    private List<Entry<Key,Value>> replData;
 +
-     /**
-      * @param instance
-      * @param fs
-      * @param useTrash
-      * @throws IOException
-      */
 +    ReplicationGCWAL(Instance instance, VolumeManager fs, boolean useTrash, List<Entry<Key,Value>> replData) throws IOException {
 +      super(instance, fs, useTrash);
 +      this.replData = replData;
 +    }
 +
 +    @Override
 +    protected Iterable<Entry<Key,Value>> getReplicationStatusForFile(Connector conn, String wal) {
 +      return this.replData;
 +    }
 +  }
 +
 +  @Test
 +  public void replicationEntriesAffectGC() throws Exception {
 +    String file1 = UUID.randomUUID().toString(), file2 = UUID.randomUUID().toString();
 +    Connector conn = createMock(Connector.class);
 +
 +    // Write a Status record which should prevent file1 from being deleted
 +    LinkedList<Entry<Key,Value>> replData = new LinkedList<>();
 +    replData.add(Maps.immutableEntry(new Key("/wals/" + file1, StatusSection.NAME.toString(), "1"), StatusUtil.fileCreatedValue(System.currentTimeMillis())));
 +
 +    ReplicationGCWAL replGC = new ReplicationGCWAL(instance, volMgr, false, replData);
 +
 +    replay(conn);
 +
 +    // Open (not-closed) file must be retained
 +    assertTrue(replGC.neededByReplication(conn, "/wals/" + file1));
 +
 +    // No replication data, not needed
 +    replData.clear();
 +    assertFalse(replGC.neededByReplication(conn, "/wals/" + file2));
 +
 +    // The file is closed but not replicated, must be retained
 +    replData.add(Maps.immutableEntry(new Key("/wals/" + file1, StatusSection.NAME.toString(), "1"), StatusUtil.fileClosedValue()));
 +    assertTrue(replGC.neededByReplication(conn, "/wals/" + file1));
 +
 +    // File is closed and fully replicated, can be deleted
 +    replData.clear();
 +    replData.add(Maps.immutableEntry(new Key("/wals/" + file1, StatusSection.NAME.toString(), "1"),
 +        ProtobufUtil.toValue(Status.newBuilder().setInfiniteEnd(true).setBegin(Long.MAX_VALUE).setClosed(true).build())));
 +    assertFalse(replGC.neededByReplication(conn, "/wals/" + file1));
 +  }
 +
 +  @Test
 +  public void removeReplicationEntries() throws Exception {
 +    String file1 = UUID.randomUUID().toString(), file2 = UUID.randomUUID().toString();
 +
 +    Instance inst = new MockInstance(testName.getMethodName());
 +    Credentials creds = new Credentials("root", new PasswordToken(""));
 +    Connector conn = inst.getConnector(creds.getPrincipal(), creds.getToken());
 +
 +    GarbageCollectWriteAheadLogs gcWALs = new GarbageCollectWriteAheadLogs(inst, volMgr, false);
 +
 +    ReplicationTable.create(conn);
 +
 +    long file1CreateTime = System.currentTimeMillis();
 +    long file2CreateTime = file1CreateTime + 50;
 +    BatchWriter bw = conn.createBatchWriter(ReplicationTable.NAME, new BatchWriterConfig());
 +    Mutation m = new Mutation("/wals/" + file1);
 +    StatusSection.add(m, new Text("1"), StatusUtil.fileCreatedValue(file1CreateTime));
 +    bw.addMutation(m);
 +    m = new Mutation("/wals/" + file2);
 +    StatusSection.add(m, new Text("1"), StatusUtil.fileCreatedValue(file2CreateTime));
 +    bw.addMutation(m);
 +
 +    // These WALs are potential candidates for deletion from fs
 +    Map<String,Path> nameToFileMap = new HashMap<>();
 +    nameToFileMap.put(file1, new Path("/wals/" + file1));
 +    nameToFileMap.put(file2, new Path("/wals/" + file2));
 +
 +    Map<String,Path> sortedWALogs = Collections.emptyMap();
 +
 +    // Make the GCStatus and GcCycleStats
 +    GCStatus status = new GCStatus();
 +    GcCycleStats cycleStats = new GcCycleStats();
 +    status.currentLog = cycleStats;
 +
 +    // We should iterate over two entries
 +    Assert.assertEquals(2, gcWALs.removeReplicationEntries(nameToFileMap, sortedWALogs, status, creds));
 +
 +    // We should have noted that two files were still in use
 +    Assert.assertEquals(2l, cycleStats.inUse);
 +
 +    // Both should have been deleted
 +    Assert.assertEquals(0, nameToFileMap.size());
 +  }
 +
 +  @Test
 +  public void replicationEntriesOnlyInMetaPreventGC() throws Exception {
 +    String file1 = UUID.randomUUID().toString(), file2 = UUID.randomUUID().toString();
 +
 +    Instance inst = new MockInstance(testName.getMethodName());
 +    Credentials creds = new Credentials("root", new PasswordToken(""));
 +    Connector conn = inst.getConnector(creds.getPrincipal(), creds.getToken());
 +
 +    GarbageCollectWriteAheadLogs gcWALs = new GarbageCollectWriteAheadLogs(inst, volMgr, false);
 +
 +    ReplicationTable.create(conn);
 +
 +    long file1CreateTime = System.currentTimeMillis();
 +    long file2CreateTime = file1CreateTime + 50;
 +    // Write some records to the metadata table, we haven't yet written status records to the replication table
 +    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
 +    Mutation m = new Mutation(ReplicationSection.getRowPrefix() + "/wals/" + file1);
 +    m.put(ReplicationSection.COLF, new Text("1"), StatusUtil.fileCreatedValue(file1CreateTime));
 +    bw.addMutation(m);
 +
 +    m = new Mutation(ReplicationSection.getRowPrefix() + "/wals/" + file2);
 +    m.put(ReplicationSection.COLF, new Text("1"), StatusUtil.fileCreatedValue(file2CreateTime));
 +    bw.addMutation(m);
 +
 +    // These WALs are potential candidates for deletion from fs
 +    Map<String,Path> nameToFileMap = new HashMap<>();
 +    nameToFileMap.put(file1, new Path("/wals/" + file1));
 +    nameToFileMap.put(file2, new Path("/wals/" + file2));
 +
 +    Map<String,Path> sortedWALogs = Collections.emptyMap();
 +
 +    // Make the GCStatus and GcCycleStats objects
 +    GCStatus status = new GCStatus();
 +    GcCycleStats cycleStats = new GcCycleStats();
 +    status.currentLog = cycleStats;
 +
 +    // We should iterate over two entries
 +    Assert.assertEquals(2, gcWALs.removeReplicationEntries(nameToFileMap, sortedWALogs, status, creds));
 +
 +    // We should have noted that two files were still in use
 +    Assert.assertEquals(2l, cycleStats.inUse);
 +
 +    // Both should have been deleted
 +    Assert.assertEquals(0, nameToFileMap.size());
 +  }
 +
 +  @Test
 +  public void noReplicationTableDoesntLimitMetatdataResults() throws Exception {
 +    Instance inst = new MockInstance(testName.getMethodName());
 +    Connector conn = inst.getConnector("root", new PasswordToken(""));
 +
 +    String wal = "hdfs://localhost:8020/accumulo/wal/tserver+port/123456-1234-1234-12345678";
 +    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
 +    Mutation m = new Mutation(ReplicationSection.getRowPrefix() + wal);
 +    m.put(ReplicationSection.COLF, new Text("1"), StatusUtil.fileCreatedValue(System.currentTimeMillis()));
 +    bw.addMutation(m);
 +    bw.close();
 +
 +    GarbageCollectWriteAheadLogs gcWALs = new GarbageCollectWriteAheadLogs(inst, volMgr, false);
 +
 +    Iterable<Entry<Key,Value>> data = gcWALs.getReplicationStatusForFile(conn, wal);
 +    Entry<Key,Value> entry = Iterables.getOnlyElement(data);
 +
 +    Assert.assertEquals(ReplicationSection.getRowPrefix() + wal, entry.getKey().getRow().toString());
 +  }
 +
 +  @Test
 +  public void fetchesReplicationEntriesFromMetadataAndReplicationTables() throws Exception {
 +    Instance inst = new MockInstance(testName.getMethodName());
 +    Connector conn = inst.getConnector("root", new PasswordToken(""));
 +    ReplicationTable.create(conn);
 +
 +    long walCreateTime = System.currentTimeMillis();
 +    String wal = "hdfs://localhost:8020/accumulo/wal/tserver+port/123456-1234-1234-12345678";
 +    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
 +    Mutation m = new Mutation(ReplicationSection.getRowPrefix() + wal);
 +    m.put(ReplicationSection.COLF, new Text("1"), StatusUtil.fileCreatedValue(walCreateTime));
 +    bw.addMutation(m);
 +    bw.close();
 +
 +    bw = ReplicationTable.getBatchWriter(conn);
 +    m = new Mutation(wal);
 +    StatusSection.add(m, new Text("1"), StatusUtil.fileCreatedValue(walCreateTime));
 +    bw.addMutation(m);
 +    bw.close();
 +
 +    GarbageCollectWriteAheadLogs gcWALs = new GarbageCollectWriteAheadLogs(inst, volMgr, false);
 +
 +    Iterable<Entry<Key,Value>> iter = gcWALs.getReplicationStatusForFile(conn, wal);
 +    Map<Key,Value> data = new HashMap<>();
 +    for (Entry<Key,Value> e : iter) {
 +      data.put(e.getKey(), e.getValue());
 +    }
 +
 +    Assert.assertEquals(2, data.size());
 +
 +    // Should get one element from each table (metadata and replication)
 +    for (Key k : data.keySet()) {
 +      String row = k.getRow().toString();
 +      if (row.startsWith(ReplicationSection.getRowPrefix())) {
 +        Assert.assertTrue(row.endsWith(wal));
 +      } else {
 +        Assert.assertEquals(wal, row);
 +      }
 +    }
 +  }
  }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/test/src/main/java/org/apache/accumulo/test/randomwalk/Module.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/test/src/main/java/org/apache/accumulo/test/randomwalk/bulk/Verify.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/randomwalk/bulk/Verify.java
index 852fe37,aed790c..3c2be07
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/bulk/Verify.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/bulk/Verify.java
@@@ -139,12 -138,8 +139,8 @@@ public class Verify extends Test 
      }
    }
    
-   /**
-    * @param startBadEntry
-    * @param lastBadEntry
-    */
    private static void report(Text startBadRow, Text lastBadRow, Value value) {
 -    System.out.println("Bad value " + new String(value.get(), 
Constants.UTF8));
 +    System.out.println("Bad value " + new String(value.get(), 
StandardCharsets.UTF_8));
      System.out.println(" Range [" + startBadRow + " -> " + lastBadRow + "]");
    }
    

http://git-wip-us.apache.org/repos/asf/accumulo/blob/541a77ff/test/src/test/java/org/apache/accumulo/test/ConditionalWriterIT.java
----------------------------------------------------------------------
