ACCUMULO-1641 Removed use of non public Accumulo command line parsing code.

In the process of doing this, I copied and simplified code from Accumulo.
Also started using a properties file in all examples for Accumulo connection
info.  This should make it easier to run the examples: there is no longer a
need to change instance, zookeepers, username, and password when running them.
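
For illustration, the commands below are taken verbatim from the updated
README.md and docs/client.md in this commit and show the new workflow (the
`trace` table is just the table used in that doc):

    $ cp examples.conf.template examples.conf
    $ ./bin/runex client.Flush -c ./examples.conf -t trace
    $ ./bin/runex client.RowOperations -c ./examples.conf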


Project: http://git-wip-us.apache.org/repos/asf/accumulo-examples/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo-examples/commit/e07cdcde
Tree: http://git-wip-us.apache.org/repos/asf/accumulo-examples/tree/e07cdcde
Diff: http://git-wip-us.apache.org/repos/asf/accumulo-examples/diff/e07cdcde

Branch: refs/heads/master
Commit: e07cdcde118e5544d7a909b13bb2e7436bd475d6
Parents: d96c6d9
Author: Keith Turner <ktur...@apache.org>
Authored: Thu Dec 8 18:53:51 2016 -0500
Committer: Keith Turner <ktur...@apache.org>
Committed: Thu Dec 15 16:29:37 2016 -0500

----------------------------------------------------------------------
 .gitignore                                      |   1 +
 README.md                                       |  16 +-
 bin/runex                                       |   6 +-
 docs/batch.md                                   |   4 +-
 docs/bloom.md                                   |  14 +-
 docs/client.md                                  |   7 +-
 docs/compactionStrategy.md                      |   8 +-
 docs/dirlist.md                                 |  18 +-
 docs/filedata.md                                |   4 +-
 docs/helloworld.md                              |   4 +-
 docs/isolation.md                               |   4 +-
 docs/release-testing.md                         |  50 ++++
 docs/sample.md                                  |   8 +-
 docs/shard.md                                   |   8 +-
 docs/tabletofile.md                             |   2 +-
 docs/terasort.md                                |   2 +-
 examples.conf.template                          |  27 +++
 pom.xml                                         |  15 +-
 .../accumulo/examples/cli/BatchScannerOpts.java |  30 +++
 .../accumulo/examples/cli/BatchWriterOpts.java  |  51 ++++
 .../examples/cli/ClientOnDefaultTable.java      |  36 +++
 .../examples/cli/ClientOnRequiredTable.java     |  28 +++
 .../accumulo/examples/cli/ClientOpts.java       | 126 ++++++++++
 .../org/apache/accumulo/examples/cli/Help.java  |  53 +++++
 .../cli/MapReduceClientOnDefaultTable.java      |  53 +++++
 .../cli/MapReduceClientOnRequiredTable.java     |  58 +++++
 .../examples/cli/MapReduceClientOpts.java       |  81 +++++++
 .../accumulo/examples/cli/ScannerOpts.java      |  24 ++
 .../apache/accumulo/examples/client/Flush.java  |   2 +-
 .../examples/client/RandomBatchScanner.java     |   4 +-
 .../examples/client/RandomBatchWriter.java      |   4 +-
 .../examples/client/ReadWriteExample.java       |  14 +-
 .../accumulo/examples/client/RowOperations.java |   6 +-
 .../examples/client/SequentialBatchWriter.java  |   4 +-
 .../examples/client/TraceDumpExample.java       |   4 +-
 .../examples/client/TracingExample.java         |  12 +-
 .../accumulo/examples/dirlist/FileCount.java    |  30 ++-
 .../accumulo/examples/dirlist/Ingest.java       |   4 +-
 .../accumulo/examples/dirlist/QueryUtil.java    |   2 +-
 .../accumulo/examples/dirlist/Viewer.java       |   2 +-
 .../examples/filedata/CharacterHistogram.java   |   2 +-
 .../examples/filedata/FileDataIngest.java       |   7 +-
 .../examples/filedata/FileDataQuery.java        |   8 +-
 .../helloworld/InsertWithBatchWriter.java       |   4 +-
 .../accumulo/examples/helloworld/ReadData.java  |   4 +-
 .../examples/isolation/InterferenceTest.java    |   4 +-
 .../examples/mapreduce/NGramIngest.java         |   2 +-
 .../examples/mapreduce/RegexExample.java        |   2 +-
 .../accumulo/examples/mapreduce/RowHash.java    |   2 +-
 .../examples/mapreduce/TableToFile.java         |   2 +-
 .../examples/mapreduce/TeraSortIngest.java      |   2 +-
 .../examples/mapreduce/UniqueColumns.java       |   2 +-
 .../accumulo/examples/mapreduce/WordCount.java  |   2 +-
 .../mapreduce/bulk/BulkIngestExample.java       |   2 +-
 .../examples/mapreduce/bulk/SetupTable.java     |   2 +-
 .../examples/mapreduce/bulk/VerifyIngest.java   |   2 +-
 .../accumulo/examples/sample/SampleExample.java |   4 +-
 .../examples/shard/ContinuousQuery.java         |   4 +-
 .../apache/accumulo/examples/shard/Index.java   |   4 +-
 .../apache/accumulo/examples/shard/Query.java   |   4 +-
 .../apache/accumulo/examples/shard/Reverse.java |   6 +-
 .../apache/accumulo/examples/ExamplesIT.java    | 237 +++++--------------
 .../accumulo/examples/dirlist/CountIT.java      |  16 +-
 .../examples/filedata/ChunkCombinerTest.java    |   4 +-
 .../examples/filedata/ChunkInputFormatIT.java   |   1 -
 .../accumulo/examples/filedata/KeyUtilTest.java |   4 +-
 .../examples/mapreduce/MapReduceIT.java         |  12 +-
 67 files changed, 846 insertions(+), 325 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index f534230..ce8bb12 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,4 @@
 /target/
 /*.iml
 /.idea
+/examples.conf

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index 757e4d0..63a02d8 100644
--- a/README.md
+++ b/README.md
@@ -34,15 +34,20 @@ Before running any of the examples, the following steps 
must be performed.
         git clone https://github.com/apache/accumulo-examples.git
         mvn clean package
 
-4. Each Accumulo example has its own documentation and instructions for 
running the example which
+4. Specify Accumulo connection information.  All examples read connection 
information from a 
+   properties file. Copy the template and edit it.
+
+        cd accumulo-examples
+        cp examples.conf.template examples.conf
+        nano examples.conf
+
+5. Each Accumulo example has its own documentation and instructions for 
running the example which
    are linked to below.
 
 When running the examples, remember the tips below:
 
 * Examples are run using the `runex` command which is located in the `bin/` 
directory of this repo.
   The `runex` command is a simple wrapper around the Maven Exec plugin.
-* Any command that references Accumulo settings such as `instance`, 
`zookeepers`, `username`, or 
-`password` should be updated for your instance.
 * Commands intended to be run in bash are prefixed by '$' and should be run 
from the root of this
   repository.
 * Several examples use the `accumulo` and `tool.sh` commands which are 
expected to be on your 
@@ -82,6 +87,11 @@ Each example below highlights a feature of Apache Accumulo.
 | [terasort] | Generating random data and sorting it using Accumulo. |
 | [visibility] | Using visibilities (or combinations of authorizations). Also 
shows user permissions. |
 
+## Release Testing
+
+This repository can be used to test Accumulo release candidates.  See
+[docs/release-testing.md](docs/release-testing.md).
+
 [manual]: https://accumulo.apache.org/latest/accumulo_user_manual/
 [INSTALL.md]: https://github.com/apache/accumulo/blob/master/INSTALL.md
 [batch]: docs/batch.md

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/bin/runex
----------------------------------------------------------------------
diff --git a/bin/runex b/bin/runex
index 391ffbf..feabbe9 100755
--- a/bin/runex
+++ b/bin/runex
@@ -18,4 +18,8 @@
 main_class="$1"
 main_args="${*:2}"
 
-mvn -q exec:java -Dexec.mainClass="org.apache.accumulo.examples.$main_class" 
-Dexec.args="$main_args"
+if command -v accumulo > /dev/null 2>&1 ; then
+  av_arg="-Daccumulo.version=`accumulo version`"
+fi
+
+mvn -q exec:java -Dexec.mainClass="org.apache.accumulo.examples.$main_class" 
$av_arg -Dexec.args="$main_args"

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/batch.md
----------------------------------------------------------------------
diff --git a/docs/batch.md b/docs/batch.md
index 0bbf531..c52f9f6 100644
--- a/docs/batch.md
+++ b/docs/batch.md
@@ -40,8 +40,8 @@ Before you run this, you must ensure that the user you are 
running has the
 You must also create the table, batchtest1, ahead of time. (In the shell, use 
"createtable batchtest1")
 
     $ accumulo shell -u username -e "createtable batchtest1"
-    $ ./bin/runex client.SequentialBatchWriter -i instance -z zookeepers -u 
username -p password -t batchtest1 --start 0 --num 10000 --size 50 
--batchMemory 20M --batchLatency 500 --batchThreads 20 --vis exampleVis
-    $ ./bin/runex client.RandomBatchScanner -i instance -z zookeepers -u 
username -p password -t batchtest1 --num 100 --min 0 --max 10000 --size 50 
--scanThreads 20 --auths exampleVis
+    $ ./bin/runex client.SequentialBatchWriter -c ./examples.conf -t 
batchtest1 --start 0 --num 10000 --size 50 --batchMemory 20M --batchLatency 500 
--batchThreads 20 --vis exampleVis
+    $ ./bin/runex client.RandomBatchScanner -c ./examples.conf -t batchtest1 
--num 100 --min 0 --max 10000 --size 50 --scanThreads 20 --auths exampleVis
     07 11:33:11,103 [client.CountingVerifyingReceiver] INFO : Generating 100 
random queries...
     07 11:33:11,112 [client.CountingVerifyingReceiver] INFO : finished
     07 11:33:11,260 [client.CountingVerifyingReceiver] INFO : 694.44 
lookups/sec   0.14 secs

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/bloom.md
----------------------------------------------------------------------
diff --git a/docs/bloom.md b/docs/bloom.md
index 39f928e..c5549b0 100644
--- a/docs/bloom.md
+++ b/docs/bloom.md
@@ -39,7 +39,7 @@ Below 1 million random values are inserted into accumulo. The 
randomly
 generated rows range between 0 and 1 billion. The random number generator is
 initialized with the seed 7.
 
-    $ ./bin/runex client.RandomBatchWriter --seed 7 -i instance -z zookeepers 
-u username -p password -t bloom_test --num 1000000 --min 0 --max 1000000000 
--size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis
+    $ ./bin/runex client.RandomBatchWriter --seed 7 -c ./examples.conf -t 
bloom_test --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M 
--batchLatency 60 --batchThreads 3 --vis exampleVis
 
 Below the table is flushed:
 
@@ -50,7 +50,7 @@ After the flush completes, 500 random queries are done 
against the table. The
 same seed is used to generate the queries, therefore everything is found in the
 table.
 
-    $ ./bin/runex client.RandomBatchScanner --seed 7 -i instance -z zookeepers 
-u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 
50 --scanThreads 20 --auths exampleVis
+    $ ./bin/runex client.RandomBatchScanner --seed 7 -c ./examples.conf -t 
bloom_test --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 
--auths exampleVis
     Generating 500 random queries...finished
     96.19 lookups/sec   5.20 secs
     num results : 500
@@ -62,7 +62,7 @@ Below another 500 queries are performed, using a different 
seed which results
 in nothing being found. In this case the lookups are much faster because of
 the bloom filters.
 
-    $ ./bin/runex client.RandomBatchScanner --seed 8 -i instance -z zookeepers 
-u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 
50 -batchThreads 20 -auths exampleVis
+    $ ./bin/runex client.RandomBatchScanner --seed 8 -c ./examples.conf -t 
bloom_test --num 500 --min 0 --max 1000000000 --size 50 -batchThreads 20 -auths 
exampleVis
     Generating 500 random queries...finished
     2212.39 lookups/sec   0.23 secs
     num results : 0
@@ -113,7 +113,7 @@ The commands for creating the first table without bloom 
filters are below.
     username@instance bloom_test1> config -t bloom_test1 -s 
table.compaction.major.ratio=7
     username@instance bloom_test1> exit
 
-    $ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test1 
--num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M 
--batchLatency 60s --batchThreads 3 --vis exampleVis"
+    $ ARGS="-c ./examples.conf -t bloom_test1 --num 1000000 --min 0 --max 
1000000000 --size 50 --batchMemory 2M --batchLatency 60 --batchThreads 3 --vis 
exampleVis"
     $ ./bin/runex client.RandomBatchWriter --seed 7 $ARGS
     $ accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
     $ ./bin/runex client.RandomBatchWriter --seed 8 $ARGS
@@ -137,7 +137,7 @@ The commands for creating the second table with bloom 
filers are below.
     username@instance bloom_test2> config -t bloom_test2 -s 
table.bloom.enabled=true
     username@instance bloom_test2> exit
 
-    $ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test2 
--num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M 
--batchLatency 60s --batchThreads 3 --vis exampleVis"
+    $ ARGS="-c ./examples.conf -t bloom_test2 --num 1000000 --min 0 --max 
1000000000 --size 50 --batchMemory 2M --batchLatency 60 --batchThreads 3 --vis 
exampleVis"
     $ ./bin/runex client.RandomBatchWriter --seed 7 $ARGS
     $ accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
     $ ./bin/runex client.RandomBatchWriter --seed 8 $ARGS
@@ -149,7 +149,7 @@ Below 500 lookups are done against the table without bloom 
filters using random
 NG seed 7. Even though only one map file will likely contain entries for this
 seed, all map files will be interrogated.
 
-    $ ./bin/runex client.RandomBatchScanner --seed 7 -i instance -z zookeepers 
-u username -p password -t bloom_test1 --num 500 --min 0 --max 1000000000 
--size 50 --scanThreads 20 --auths exampleVis
+    $ ./bin/runex client.RandomBatchScanner --seed 7 -c ./examples.conf -t 
bloom_test1 --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 
--auths exampleVis
     Generating 500 random queries...finished
     35.09 lookups/sec  14.25 secs
     num results : 500
@@ -161,7 +161,7 @@ Below the same lookups are done against the table with 
bloom filters. The
 lookups were 2.86 times faster because only one map file was used, even though 
three
 map files existed.
 
-    $ ./bin/runex client.RandomBatchScanner --seed 7 -i instance -z zookeepers 
-u username -p password -t bloom_test2 --num 500 --min 0 --max 1000000000 
--size 50 -scanThreads 20 --auths exampleVis
+    $ ./bin/runex client.RandomBatchScanner --seed 7 -c ./examples.conf -t 
bloom_test2 --num 500 --min 0 --max 1000000000 --size 50 -scanThreads 20 
--auths exampleVis
     Generating 500 random queries...finished
     99.03 lookups/sec   5.05 secs
     num results : 500

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/client.md
----------------------------------------------------------------------
diff --git a/docs/client.md b/docs/client.md
index 363fa7d..b3872a8 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -28,13 +28,12 @@ Using the accumulo command, you can run the simple client 
examples by providing
 class name, and enough arguments to find your accumulo instance. For example,
 the Flush class will flush a table:
 
-    $ PACKAGE=org.apache.accumulo.examples.client
-    $ bin/accumulo $PACKAGE.Flush -u root -p mypassword -i instance -z 
zookeeper -t trace
+    $ ./bin/runex client.Flush -c ./examples.conf -t trace
 
 The very simple RowOperations class demonstrates how to read and write rows 
using the BatchWriter
 and Scanner:
 
-    $ bin/accumulo $PACKAGE.RowOperations -u root -p mypassword -i instance -z 
zookeeper
+    $ ./bin/runex client.RowOperations -c ./examples.conf
     2013-01-14 14:45:24,738 [client.RowOperations] INFO : This is everything
     2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:1 
[] 1358192724640 false Value: This is the value for this key
     2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:2 
[] 1358192724642 false Value: This is the value for this key
@@ -65,7 +64,7 @@ and Scanner:
 
 To create a table, write to it and read from it:
 
-    $ bin/accumulo $PACKAGE.ReadWriteExample -u root -p mypassword -i instance 
-z zookeeper --createtable --create --read
+    $ ./bin/runex client.ReadWriteExample -c ./examples.conf --createtable 
--create --read
     hello%00; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
     hello%01; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
     hello%02; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/compactionStrategy.md
----------------------------------------------------------------------
diff --git a/docs/compactionStrategy.md b/docs/compactionStrategy.md
index e9e02ec..a7c96d5 100644
--- a/docs/compactionStrategy.md
+++ b/docs/compactionStrategy.md
@@ -44,13 +44,13 @@ The commands below will configure the 
TwoTierCompactionStrategy to use gz compre
 
 Generate some data and files in order to test the strategy:
 
-    $ ./bin/runex client.SequentialBatchWriter -i instance17 -z localhost:2181 
-u root -p secret -t test1 --start 0 --num 10000 --size 50 --batchMemory 20M 
--batchLatency 500 --batchThreads 20
+    $ ./bin/runex client.SequentialBatchWriter -c ./examples.conf -t test1 
--start 0 --num 10000 --size 50 --batchMemory 20M --batchLatency 500 
--batchThreads 20
     $ accumulo shell -u root -p secret -e "flush -t test1"
-    $ ./bin/runex client.SequentialBatchWriter -i instance17 -z localhost:2181 
-u root -p secret -t test1 --start 0 --num 11000 --size 50 --batchMemory 20M 
--batchLatency 500 --batchThreads 20
+    $ ./bin/runex client.SequentialBatchWriter -c ./examples.conf -t test1 
--start 0 --num 11000 --size 50 --batchMemory 20M --batchLatency 500 
--batchThreads 20
     $ accumulo shell -u root -p secret -e "flush -t test1"
-    $ ./bin/runex client.SequentialBatchWriter -i instance17 -z localhost:2181 
-u root -p secret -t test1 --start 0 --num 12000 --size 50 --batchMemory 20M 
--batchLatency 500 --batchThreads 20
+    $ ./bin/runex client.SequentialBatchWriter -c ./examples.conf -t test1 
--start 0 --num 12000 --size 50 --batchMemory 20M --batchLatency 500 
--batchThreads 20
     $ accumulo shell -u root -p secret -e "flush -t test1"
-    $ ./bin/runex client.SequentialBatchWriter -i instance17 -z localhost:2181 
-u root -p secret -t test1 --start 0 --num 13000 --size 50 --batchMemory 20M 
--batchLatency 500 --batchThreads 20
+    $ ./bin/runex client.SequentialBatchWriter -c ./examples.conf -t test1 
--start 0 --num 13000 --size 50 --batchMemory 20M --batchLatency 500 
--batchThreads 20
     $ accumulo shell -u root -p secret -e "flush -t test1"
 
 View the tserver log in <accumulo_home>/logs for the compaction and find the 
name of the <rfile> that was compacted for your table. Print info about this 
file using the PrintInfo tool:

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/dirlist.md
----------------------------------------------------------------------
diff --git a/docs/dirlist.md b/docs/dirlist.md
index f5f3ddb..40e4e5d 100644
--- a/docs/dirlist.md
+++ b/docs/dirlist.md
@@ -31,7 +31,7 @@ This example shows how to use Accumulo to store a file system 
history. It has th
 
 To begin, ingest some data with Ingest.java.
 
-    $ ./bin/runex dirlist.Ingest -i instance -z zookeepers -u username -p 
password --vis exampleVis --chunkSize 100000 /local/username/workspace
+    $ ./bin/runex dirlist.Ingest -c ./examples.conf --vis exampleVis 
--chunkSize 100000 /local/username/workspace
 
 This may take some time if there are large files in the 
/local/username/workspace directory. If you use 0 instead of 100000 on the 
command line, the ingest will run much faster, but it will not put any file 
data into Accumulo (the dataTable will be empty).
 Note that running this example will create tables dirTable, indexTable, and 
dataTable in Accumulo that you should delete when you have completed the 
example.
@@ -43,26 +43,26 @@ To browse the data ingested, use Viewer.java. Be sure to 
give the "username" use
 
 then run the Viewer:
 
-    $ ./bin/runex dirlist.Viewer -i instance -z zookeepers -u username -p 
password -t dirTable --dataTable dataTable --auths exampleVis --path 
/local/username/workspace
+    $ ./bin/runex dirlist.Viewer -c ./examples.conf -t dirTable --dataTable 
dataTable --auths exampleVis --path /local/username/workspace
 
 To list the contents of specific directories, use QueryUtil.java.
 
-    $ ./bin/runex dirlist.QueryUtil -i instance -z zookeepers -u username -p 
password -t dirTable --auths exampleVis --path /local/username
-    $ ./bin/runex dirlist.QueryUtil -i instance -z zookeepers -u username -p 
password -t dirTable --auths exampleVis --path /local/username/workspace
+    $ ./bin/runex dirlist.QueryUtil -c ./examples.conf -t dirTable --auths 
exampleVis --path /local/username
+    $ ./bin/runex dirlist.QueryUtil -c ./examples.conf -t dirTable --auths 
exampleVis --path /local/username/workspace
 
 To perform searches on file or directory names, also use QueryUtil.java. 
Search terms must contain no more than one wild card and cannot contain "/".
 *Note* these queries run on the _indexTable_ table instead of the dirTable 
table.
 
-    $ ./bin/runex dirlist.QueryUtil -i instance -z zookeepers -u username -p 
password -t indexTable --auths exampleVis --path filename --search
-    $ ./bin/runex dirlist.QueryUtil -i instance -z zookeepers -u username -p 
password -t indexTable --auths exampleVis --path 'filename*' --search
-    $ ./bin/runex dirlist.QueryUtil -i instance -z zookeepers -u username -p 
password -t indexTable --auths exampleVis --path '*jar' --search
-    $ ./bin/runex dirlist.QueryUtil -i instance -z zookeepers -u username -p 
password -t indexTable --auths exampleVis --path 'filename*jar' --search
+    $ ./bin/runex dirlist.QueryUtil -c ./examples.conf -t indexTable --auths 
exampleVis --path filename --search
+    $ ./bin/runex dirlist.QueryUtil -c ./examples.conf -t indexTable --auths 
exampleVis --path 'filename*' --search
+    $ ./bin/runex dirlist.QueryUtil -c ./examples.conf -t indexTable --auths 
exampleVis --path '*jar' --search
+    $ ./bin/runex dirlist.QueryUtil -c ./examples.conf -t indexTable --auths 
exampleVis --path 'filename*jar' --search
 
 To count the number of direct children (directories and files) and descendants 
(children and children's descendants, directories and files), run the FileCount 
over the dirTable table.
 The results are written back to the same table. FileCount reads from and 
writes to Accumulo. This requires scan authorizations for the read and a 
visibility for the data written.
 In this example, the authorizations and visibility are set to the same value, 
exampleVis. See the [visibility example][vis] for more information on 
visibility and authorizations.
 
-    $ ./bin/runex dirlist.FileCount -i instance -z zookeepers -u username -p 
password -t dirTable --auths exampleVis
+    $ ./bin/runex dirlist.FileCount -c ./examples.conf -t dirTable --auths 
exampleVis
 
 ## Directory Table
 

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/filedata.md
----------------------------------------------------------------------
diff --git a/docs/filedata.md b/docs/filedata.md
index 6dd9ab9..84311d2 100644
--- a/docs/filedata.md
+++ b/docs/filedata.md
@@ -32,7 +32,7 @@ This example is coupled with the [dirlist example][dirlist].
 
 If you haven't already run the [dirlist example][dirlist], ingest a file with 
FileDataIngest.
 
-    $ ./bin/runex filedata.FileDataIngest -i instance -z zookeepers -u 
username -p password -t dataTable --auths exampleVis --chunk 1000 
/path/to/accumulo/README.md
+    $ ./bin/runex filedata.FileDataIngest -c ./examples.conf -t dataTable 
--auths exampleVis --chunk 1000 /path/to/accumulo/README.md
 
 Open the accumulo shell and look at the data. The row is the MD5 hash of the 
file, which you can verify by running a command such as 'md5sum' on the file.
 
@@ -40,7 +40,7 @@ Open the accumulo shell and look at the data. The row is the 
MD5 hash of the fil
 
 Run the CharacterHistogram MapReduce to add some information about the file.
 
-    $ tool.sh target/accumulo-examples.jar 
org.apache.accumulo.examples.filedata.CharacterHistogram -i instance -z 
zookeepers -u username -p password -t dataTable --auths exampleVis --vis 
exampleVis
+    $ tool.sh target/accumulo-examples.jar 
org.apache.accumulo.examples.filedata.CharacterHistogram -c ./examples.conf -t 
dataTable --auths exampleVis --vis exampleVis
 
 Scan again to see the histogram stored in the 'info' column family.
 

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/helloworld.md
----------------------------------------------------------------------
diff --git a/docs/helloworld.md b/docs/helloworld.md
index 003a3bc..1a87e27 100644
--- a/docs/helloworld.md
+++ b/docs/helloworld.md
@@ -31,7 +31,7 @@ Create a table called 'hellotable':
 
 Launch a Java program that inserts data with a BatchWriter:
 
-    $ ./bin/runex helloworld.InsertWithBatchWriter -i instance -z zookeepers 
-u username -p password -t hellotable
+    $ ./bin/runex helloworld.InsertWithBatchWriter -c ./examples.conf -t 
hellotable
 
 On the accumulo status page at the URL below (where 'master' is replaced with 
the name or IP of your accumulo master), you should see 50K entries
 
@@ -44,4 +44,4 @@ To view the entries, use the shell to scan the table:
 
 You can also use a Java class to scan the table:
 
-    $ ./bin/runex helloworld.ReadData -i instance -z zookeepers -u username -p 
password -t hellotable --startKey row_0 --endKey row_1001
+    $ ./bin/runex helloworld.ReadData -c ./examples.conf -t hellotable 
--startKey row_0 --endKey row_1001

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/isolation.md
----------------------------------------------------------------------
diff --git a/docs/isolation.md b/docs/isolation.md
index 58f363d..d6dc5ac 100644
--- a/docs/isolation.md
+++ b/docs/isolation.md
@@ -30,7 +30,7 @@ reading the row at the same time a mutation is changing the 
row.
 Below, Interference Test is run without isolation enabled for 5000 iterations
 and it reports problems.
 
-    $ ./bin/runex isolation.InterferenceTest -i instance -z zookeepers -u 
username -p password -t isotest --iterations 5000
+    $ ./bin/runex isolation.InterferenceTest -c ./examples.conf -t isotest 
--iterations 5000
     ERROR Columns in row 053 had multiple values [53, 4553]
     ERROR Columns in row 061 had multiple values [561, 61]
     ERROR Columns in row 070 had multiple values [570, 1070]
@@ -43,7 +43,7 @@ and it reports problems.
 Below, Interference Test is run with isolation enabled for 5000 iterations and
 it reports no problems.
 
-    $ ./bin/runex isolation.InterferenceTest -i instance -z zookeepers -u 
username -p password -t isotest --iterations 5000 --isolated
+    $ ./bin/runex isolation.InterferenceTest -c ./examples.conf -t isotest 
--iterations 5000 --isolated
     finished
 
 

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/release-testing.md
----------------------------------------------------------------------
diff --git a/docs/release-testing.md b/docs/release-testing.md
new file mode 100644
index 0000000..227f32d
--- /dev/null
+++ b/docs/release-testing.md
@@ -0,0 +1,50 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+# Apache Accumulo Release Testing
+
+This repository contains an integration test (IT) that runs all of the
+examples.  This can be used for testing Accumulo release candidates (RC). To
+run the IT against an RC, add the following to `~/.m2/settings.xml`, changing
+`XXXX` to the proper id for a given RC.
+
+```xml
+ <profiles>
+   <profile>
+     <id>rcAccumulo</id>
+     <repositories>
+       <repository>
+         <id>accrc</id>
+         <name>accrcp</name>
+         
<url>https://repository.apache.org/content/repositories/orgapacheaccumulo-XXXX</url>
+       </repository>
+     </repositories>
+     <pluginRepositories>
+       <pluginRepository>
+         <id>accrcp</id>
+         <name>accrcp</name>
+         
<url>https://repository.apache.org/content/repositories/orgapacheaccumulo-XXXX</url>
+       </pluginRepository>
+     </pluginRepositories>
+   </profile>
+ </profiles>
+```
+
+After adding that, you can run the following command in this repository to run 
the IT.
+
+```
+mvn clean verify -PrcAccumulo -Daccumulo.version=$ACCUMULO_RC_VERSION
+```

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/sample.md
----------------------------------------------------------------------
diff --git a/docs/sample.md b/docs/sample.md
index 9e5d429..1f6cae5 100644
--- a/docs/sample.md
+++ b/docs/sample.md
@@ -88,7 +88,7 @@ failure and fixiing the problem with a compaction.
 The example above is replicated in a java program using the Accumulo API.
 Below is the program name and the command to run it.
 
-    ./bin/runex sample.SampleExample -i instance -z localhost -u root -p secret
+    ./bin/runex sample.SampleExample -c ./examples.conf
 
 The commands below look under the hood to give some insight into how this
 feature works.  The commands determine what files the sampex table is using.
@@ -166,13 +166,13 @@ shard table based on the column qualifier.
 After enabling sampling, the command below counts the number of documents in
 the sample containing the words `import` and `int`.     
 
-    $ ./bin/runex shard.Query --sample -i instance16 -z localhost -t shard -u 
root -p secret import int | fgrep '.java' | wc
+    $ ./bin/runex shard.Query --sample -c ./examples.conf -t shard import int 
| fgrep '.java' | wc
          11      11    1246
 
 The command below counts the total number of documents containing the words
 `import` and `int`.
 
-    $ ./bin/runex shard.Query -i instance16 -z localhost -t shard -u root -p 
secret import int | fgrep '.java' | wc
+    $ ./bin/runex shard.Query -c ./examples.conf -t shard import int | fgrep 
'.java' | wc
        1085    1085  118175
 
 The counts 11 out of 1085 total are around what would be expected for a modulus
@@ -188,4 +188,4 @@ To experiment with this iterator, use the following 
command.  The
 `--sampleCutoff` option below will cause the query to return nothing if based
 on the sample it appears a query would return more than 1000 documents.
 
-    $ ./bin/runex shard.Query --sampleCutoff 1000 -i instance16 -z localhost 
-t shard -u root -p secret import int | fgrep '.java' | wc
+    $ ./bin/runex shard.Query --sampleCutoff 1000 -c ./examples.conf -t shard 
import int | fgrep '.java' | wc

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/shard.md
----------------------------------------------------------------------
diff --git a/docs/shard.md b/docs/shard.md
index b9460c7..9e3d63e 100644
--- a/docs/shard.md
+++ b/docs/shard.md
@@ -32,11 +32,11 @@ To run these example programs, create two tables like below.
 After creating the tables, index some files. The following command indexes all 
of the java files in the Accumulo source code.
 
     $ cd /local/username/workspace/accumulo/
-    $ find core/src server/src -name "*.java" | xargs ./bin/runex shard.Index 
-i instance -z zookeepers -t shard -u username -p password --partitions 30
+    $ find core/src server/src -name "*.java" | xargs ./bin/runex shard.Index 
-c ./examples.conf -t shard --partitions 30
 
 The following command queries the index to find all files containing 'foo' and 
'bar'.
 
-    $ ./bin/runex shard.Query -i instance -z zookeepers -t shard -u username 
-p password foo bar
+    $ ./bin/runex shard.Query -c ./examples.conf -t shard foo bar
     
/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/ColumnVisibilityTest.java
     
/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/client/mock/MockConnectorTest.java
     
/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/VisibilityEvaluatorTest.java
@@ -51,12 +51,12 @@ The following command queries the index to find all files 
containing 'foo' and '
 
 In order to run ContinuousQuery, we need to run Reverse.java to populate 
doc2term.
 
-    $ ./bin/runex shard.Reverse -i instance -z zookeepers --shardTable shard 
--doc2Term doc2term -u username -p password
+    $ ./bin/runex shard.Reverse -c ./examples.conf --shardTable shard 
--doc2Term doc2term
 
 Below ContinuousQuery is run using 5 terms. So it selects 5 random terms from 
each document, then it continually
 randomly selects one set of 5 terms and queries. It prints the number of 
matching documents and the time in seconds.
 
-    $ ./bin/runex shard.ContinuousQuery -i instance -z zookeepers --shardTable 
shard --doc2Term doc2term -u username -p password --terms 5
+    $ ./bin/runex shard.ContinuousQuery -c ./examples.conf --shardTable shard 
--doc2Term doc2term --terms 5
     [public, core, class, binarycomparable, b] 2  0.081
     [wordtodelete, unindexdocument, doctablename, putdelete, insert] 1  0.041
     [import, columnvisibilityinterpreterfactory, illegalstateexception, cv, 
columnvisibility] 1  0.049

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/tabletofile.md
----------------------------------------------------------------------
diff --git a/docs/tabletofile.md b/docs/tabletofile.md
index af69114..20ba930 100644
--- a/docs/tabletofile.md
+++ b/docs/tabletofile.md
@@ -40,7 +40,7 @@ write the key/value pairs to a file in HDFS.
 
 The following will extract the rows containing the column "cf:cq":
 
-    $ tool.sh target/accumulo-examples.jar 
org.apache.accumulo.examples.mapreduce.TableToFile -u user -p passwd -i 
instance -t input --columns cf:cq --output /tmp/output
+    $ tool.sh target/accumulo-examples.jar 
org.apache.accumulo.examples.mapreduce.TableToFile -c ./examples.conf -t input 
--columns cf:cq --output /tmp/output
 
     $ hadoop fs -ls /tmp/output
     -rw-r--r--   1 username supergroup          0 2013-01-10 14:44 
/tmp/output/_SUCCESS

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/docs/terasort.md
----------------------------------------------------------------------
diff --git a/docs/terasort.md b/docs/terasort.md
index 6038b97..65a4170 100644
--- a/docs/terasort.md
+++ b/docs/terasort.md
@@ -23,7 +23,7 @@ hadoop terasort benchmark.
 To run this example you run it with arguments describing the amount of data:
 
     $ tool.sh target/accumulo-examples.jar 
org.apache.accumulo.examples.mapreduce.TeraSortIngest \
-    -i instance -z zookeepers -u user -p password \
+    -c ./examples.conf \
     --count 10 \
     --minKeySize 10 \
     --maxKeySize 10 \

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/examples.conf.template
----------------------------------------------------------------------
diff --git a/examples.conf.template b/examples.conf.template
new file mode 100644
index 0000000..8563189
--- /dev/null
+++ b/examples.conf.template
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Properties prefixed with accumulo.examples are not general Accumulo 
properties
+# and are only used by example code in this repository. All other properties 
are
+# general Accumulo properties parsed by Accumulo's ClientConfiguration
+
+instance.zookeeper.host=localhost:2181
+instance.name=your-instance-name
+accumulo.examples.principal=root
+accumulo.examples.password=secret
+
+# Currently the examples only support authentication via username and password.
+# Kerberos authentication is currently not supported in the utility code used
+# by all of the examples.

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 133b740..b378a9d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -98,7 +98,20 @@
         <configuration>
           <cleanupDaemonThreads>false</cleanupDaemonThreads>
         </configuration>
-      </plugin>  
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <version>0.12</version>
+        <executions>
+          <execution>
+            <phase>verify</phase>
+            <goals>
+              <goal>check</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
   </build>
 

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/cli/BatchScannerOpts.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/accumulo/examples/cli/BatchScannerOpts.java 
b/src/main/java/org/apache/accumulo/examples/cli/BatchScannerOpts.java
new file mode 100644
index 0000000..052b642
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/cli/BatchScannerOpts.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.cli;
+
+import org.apache.accumulo.examples.cli.ClientOpts.TimeConverter;
+
+import com.beust.jcommander.Parameter;
+
+public class BatchScannerOpts {
+  @Parameter(names = "--scanThreads", description = "Number of threads to use 
when batch scanning")
+  public Integer scanThreads = 10;
+
+  @Parameter(names = "--scanTimeout", converter = TimeConverter.class, 
description = "timeout used to fail a batch scan")
+  public Long scanTimeout = Long.MAX_VALUE;
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/cli/BatchWriterOpts.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/accumulo/examples/cli/BatchWriterOpts.java 
b/src/main/java/org/apache/accumulo/examples/cli/BatchWriterOpts.java
new file mode 100644
index 0000000..19b2395
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/cli/BatchWriterOpts.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.cli;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.examples.cli.ClientOpts.MemoryConverter;
+import org.apache.accumulo.examples.cli.ClientOpts.TimeConverter;
+
+import com.beust.jcommander.Parameter;
+
+public class BatchWriterOpts {
+  private static final BatchWriterConfig BWDEFAULTS = new BatchWriterConfig();
+
+  @Parameter(names = "--batchThreads", description = "Number of threads to use 
when writing large batches")
+  public Integer batchThreads = BWDEFAULTS.getMaxWriteThreads();
+
+  @Parameter(names = "--batchLatency", converter = TimeConverter.class, 
description = "The maximum time to wait before flushing data to servers when 
writing")
+  public Long batchLatency = BWDEFAULTS.getMaxLatency(TimeUnit.MILLISECONDS);
+
+  @Parameter(names = "--batchMemory", converter = MemoryConverter.class, 
description = "memory used to batch data when writing")
+  public Long batchMemory = BWDEFAULTS.getMaxMemory();
+
+  @Parameter(names = "--batchTimeout", converter = TimeConverter.class, 
description = "timeout used to fail a batch write")
+  public Long batchTimeout = BWDEFAULTS.getTimeout(TimeUnit.MILLISECONDS);
+
+  public BatchWriterConfig getBatchWriterConfig() {
+    BatchWriterConfig config = new BatchWriterConfig();
+    config.setMaxWriteThreads(this.batchThreads);
+    config.setMaxLatency(this.batchLatency, TimeUnit.MILLISECONDS);
+    config.setMaxMemory(this.batchMemory);
+    config.setTimeout(this.batchTimeout, TimeUnit.MILLISECONDS);
+    return config;
+  }
+
+}
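
As a rough sketch of how these options are meant to be consumed (this helper is
hypothetical and not part of the commit; it assumes a Connector and table name
obtained elsewhere, e.g. via the ClientOpts class added below):

```java
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.cli.BatchWriterOpts;

public class BatchWriterOptsSketch {
  // Writes a single placeholder cell using the BatchWriterConfig built from
  // the --batchThreads/--batchLatency/--batchMemory/--batchTimeout options.
  public static void writeOne(Connector conn, String table, BatchWriterOpts bwOpts) throws Exception {
    BatchWriter bw = conn.createBatchWriter(table, bwOpts.getBatchWriterConfig());
    Mutation m = new Mutation("row1");                 // placeholder row id
    m.put("cf", "cq", new Value("value".getBytes()));  // placeholder column and value
    bw.addMutation(m);
    bw.close();                                        // flushes buffered mutations
  }
}
```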

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/cli/ClientOnDefaultTable.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/accumulo/examples/cli/ClientOnDefaultTable.java 
b/src/main/java/org/apache/accumulo/examples/cli/ClientOnDefaultTable.java
new file mode 100644
index 0000000..174b38b
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/cli/ClientOnDefaultTable.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.cli;
+
+import com.beust.jcommander.Parameter;
+
+public class ClientOnDefaultTable extends ClientOpts {
+  @Parameter(names = "--table", description = "table to use")
+  private String tableName;
+
+  public ClientOnDefaultTable(String table) {
+    this.tableName = table;
+  }
+
+  public String getTableName() {
+    return tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/cli/ClientOnRequiredTable.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/accumulo/examples/cli/ClientOnRequiredTable.java 
b/src/main/java/org/apache/accumulo/examples/cli/ClientOnRequiredTable.java
new file mode 100644
index 0000000..72bd812
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/cli/ClientOnRequiredTable.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.cli;
+
+import com.beust.jcommander.Parameter;
+
+public class ClientOnRequiredTable extends ClientOpts {
+  @Parameter(names = {"-t", "--table"}, required = true, description = "table 
to use")
+  private String tableName;
+
+  public String getTableName() {
+    return tableName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java 
b/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
new file mode 100644
index 0000000..e08dfb8
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.cli;
+
+import java.time.Duration;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.commons.configuration.PropertiesConfiguration;
+
+import com.beust.jcommander.IStringConverter;
+import com.beust.jcommander.Parameter;
+
+public class ClientOpts extends Help {
+
+  public static class AuthConverter implements 
IStringConverter<Authorizations> {
+    @Override
+    public Authorizations convert(String value) {
+      return new Authorizations(value.split(","));
+    }
+  }
+
+  public static class VisibilityConverter implements 
IStringConverter<ColumnVisibility> {
+    @Override
+    public ColumnVisibility convert(String value) {
+      return new ColumnVisibility(value);
+    }
+  }
+
+  public static class TimeConverter implements IStringConverter<Long> {
+    @Override
+    public Long convert(String value) {
+      if(value.matches("[0-9]+"))
+        value = "PT"+value+"S"; //if only numbers then assume seconds
+      return Duration.parse(value).toMillis();
+    }
+  }
+
+  public static class MemoryConverter implements IStringConverter<Long> {
+    @Override
+    public Long convert(String str) {
+      try {
+        char lastChar = str.charAt(str.length() - 1);
+        int multiplier = 0;
+        switch (Character.toUpperCase(lastChar)) {
+          case 'G':
+            multiplier += 10;
+          case 'M':
+            multiplier += 10;
+          case 'K':
+            multiplier += 10;
+          case 'B':
+            break;
+          default:
+            return Long.parseLong(str);
+        }
+        return Long.parseLong(str.substring(0, str.length() - 1)) << 
multiplier;
+      } catch (Exception ex) {
+        throw new IllegalArgumentException("The value '" + str + "' is not a valid memory setting. A valid value would be a number "
+            + "possibly followed by an optional 'G', 'M', 'K', or 'B'.");
+      }
+    }
+  }
+
+  public static class PropertiesConverter implements 
IStringConverter<Configuration> {
+    @Override
+    public Configuration convert(String filename) {
+      try {
+        return new PropertiesConfiguration(filename);
+      } catch (ConfigurationException e) {
+        throw new RuntimeException(e);
+      }
+    }
+  }
+
+  @Parameter(names = {"-c", "--conf"}, required = true, converter = 
PropertiesConverter.class,
+      description = "Config file for connecting to Accumulo.  See README.md 
for details.")
+  private Configuration config = null;
+
+  @Parameter(names = {"-auths", "--auths"}, converter = AuthConverter.class, 
description = "the authorizations to use when reading or writing")
+  public Authorizations auths = Authorizations.EMPTY;
+
+  public Connector getConnector() {
+    try {
+      ZooKeeperInstance zki = new ZooKeeperInstance(config);
+      return zki.getConnector(getPrincipal(), getToken());
+    } catch (AccumuloException | AccumuloSecurityException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  public ClientConfiguration getClientConfiguration() {
+    return new ClientConfiguration(config);
+  }
+
+  public String getPrincipal() {
+    return config.getString("accumulo.examples.principal", "root");
+  }
+
+  public AuthenticationToken getToken() {
+    return new PasswordToken(config.getString("accumulo.examples.password", 
"secret"));
+  }
+}
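
A minimal sketch of how an example could put these CLI classes together
(hypothetical code, not part of the commit; the class name is a placeholder and
the table comes from the required -t option):

```java
import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.cli.ClientOnRequiredTable;

public class ScanSketch {
  public static void main(String[] args) throws Exception {
    ClientOnRequiredTable opts = new ClientOnRequiredTable();
    opts.parseArgs(ScanSketch.class.getName(), args);  // expects -c ./examples.conf -t <table>
    Connector conn = opts.getConnector();              // connection info read from the properties file
    Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
    for (Entry<Key,Value> entry : scanner) {           // Scanner is Iterable<Entry<Key,Value>>
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
  }
}
```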

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/cli/Help.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/cli/Help.java 
b/src/main/java/org/apache/accumulo/examples/cli/Help.java
new file mode 100644
index 0000000..9ae6bdd
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/cli/Help.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.cli;
+
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+import com.beust.jcommander.ParameterException;
+
+public class Help {
+  @Parameter(names = {"-h", "-?", "--help", "-help"}, help = true)
+  public boolean help = false;
+
+  public void parseArgs(String programName, String[] args, Object... others) {
+    JCommander commander = new JCommander();
+    commander.addObject(this);
+    for (Object other : others)
+      commander.addObject(other);
+    commander.setProgramName(programName);
+    try {
+      commander.parse(args);
+    } catch (ParameterException ex) {
+      commander.usage();
+      exitWithError(ex.getMessage(), 1);
+    }
+    if (help) {
+      commander.usage();
+      exit(0);
+    }
+  }
+
+  public void exit(int status) {
+    System.exit(status);
+  }
+
+  public void exitWithError(String message, int status) {
+    System.err.println(message);
+    exit(status);
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnDefaultTable.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnDefaultTable.java
 
b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnDefaultTable.java
new file mode 100644
index 0000000..d8161ec
--- /dev/null
+++ 
b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnDefaultTable.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.cli;
+
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
+import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.hadoop.mapreduce.Job;
+
+import com.beust.jcommander.Parameter;
+
+public class MapReduceClientOnDefaultTable extends MapReduceClientOpts {
+  @Parameter(names = "--table", description = "table to use")
+  public String tableName;
+
+  public MapReduceClientOnDefaultTable(String table) {
+    this.tableName = table;
+  }
+
+  public String getTableName() {
+    return tableName;
+  }
+
+  @Override
+  public void setAccumuloConfigs(Job job) throws AccumuloSecurityException {
+    super.setAccumuloConfigs(job);
+    final String tableName = getTableName();
+    final String principal = getPrincipal();
+    final AuthenticationToken token = getToken();
+    AccumuloInputFormat.setConnectorInfo(job, principal, token);
+    AccumuloInputFormat.setInputTableName(job, tableName);
+    AccumuloInputFormat.setScanAuthorizations(job, auths);
+    AccumuloOutputFormat.setConnectorInfo(job, principal, token);
+    AccumuloOutputFormat.setCreateTables(job, true);
+    AccumuloOutputFormat.setDefaultTableName(job, tableName);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnRequiredTable.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnRequiredTable.java b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnRequiredTable.java
new file mode 100644
index 0000000..67ca57a
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnRequiredTable.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.cli;
+
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
+import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.hadoop.mapreduce.Job;
+
+import com.beust.jcommander.Parameter;
+
+public class MapReduceClientOnRequiredTable extends MapReduceClientOpts {
+
+  @Parameter(names = {"-t", "--table"}, required = true, description = "table 
to use")
+  private String tableName;
+
+  @Parameter(names = {"-tf", "--tokenFile"}, description = "File in hdfs 
containing the user's authentication token create with \"bin/accumulo 
create-token\"")
+  private String tokenFile = "";
+
+  @Override
+  public void setAccumuloConfigs(Job job) throws AccumuloSecurityException {
+    super.setAccumuloConfigs(job);
+
+    final String principal = getPrincipal(), tableName = getTableName();
+
+    if (tokenFile.isEmpty()) {
+      AuthenticationToken token = getToken();
+      AccumuloInputFormat.setConnectorInfo(job, principal, token);
+      AccumuloOutputFormat.setConnectorInfo(job, principal, token);
+    } else {
+      AccumuloInputFormat.setConnectorInfo(job, principal, tokenFile);
+      AccumuloOutputFormat.setConnectorInfo(job, principal, tokenFile);
+    }
+    AccumuloInputFormat.setInputTableName(job, tableName);
+    AccumuloInputFormat.setScanAuthorizations(job, auths);
+    AccumuloOutputFormat.setCreateTables(job, true);
+    AccumuloOutputFormat.setDefaultTableName(job, tableName);
+  }
+
+  public String getTableName() {
+    return tableName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java
new file mode 100644
index 0000000..b73ddd4
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.cli;
+
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
+import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
+import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.security.SystemPermission;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Adds some MR awareness to the ClientOpts
+ */
+public class MapReduceClientOpts extends ClientOpts {
+  private static final Logger log = LoggerFactory.getLogger(MapReduceClientOpts.class);
+
+  public void setAccumuloConfigs(Job job) throws AccumuloSecurityException {
+    AccumuloInputFormat.setZooKeeperInstance(job, this.getClientConfiguration());
+    AccumuloOutputFormat.setZooKeeperInstance(job, this.getClientConfiguration());
+  }
+
+  @Override
+  public AuthenticationToken getToken() {
+    AuthenticationToken authToken = super.getToken();
+    // For MapReduce, Kerberos credentials don't make it to the Mappers and Reducers,
+    // so we need to request a delegation token and use that instead.
+    if (authToken instanceof KerberosToken) {
+      log.info("Received KerberosToken, fetching DelegationToken for 
MapReduce");
+
+      try {
+        UserGroupInformation user = UserGroupInformation.getCurrentUser();
+        if (!user.hasKerberosCredentials()) {
+          throw new IllegalStateException("Expected current user to have 
Kerberos credentials");
+        }
+
+        String newPrincipal = user.getUserName();
+        log.info("Obtaining delegation token for {}", newPrincipal);
+
+        Connector conn = getConnector();
+
+        // Do the explicit check to see if the user has the permission to get a delegation token
+        if (!conn.securityOperations().hasSystemPermission(conn.whoami(), SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
+          log.error(
+              "{} doesn't have the {} SystemPermission necessary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
+                  + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.",
+              user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
+          throw new IllegalStateException(conn.whoami() + " does not have permission to obtain a delegation token");
+        }
+
+        // Get the delegation token from Accumulo
+        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+      } catch (Exception e) {
+        final String msg = "Failed to acquire DelegationToken for use with 
MapReduce";
+        log.error(msg, e);
+        throw new RuntimeException(msg, e);
+      }
+    }
+    return authToken;
+  }
+}
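
setAccumuloConfigs() centralizes the MapReduce wiring: it points both AccumuloInputFormat and AccumuloOutputFormat at the ZooKeeper instance from the client configuration, and getToken() transparently swaps a KerberosToken for a delegation token so tasks can authenticate on remote tablet servers. A hedged sketch of the kind of driver that would use it (the driver class and mapper wiring are illustrative placeholders, not part of this commit):

    import org.apache.accumulo.examples.cli.MapReduceClientOnRequiredTable;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class ExampleMapReduceDriver extends Configured implements Tool {
      @Override
      public int run(String[] args) throws Exception {
        MapReduceClientOnRequiredTable opts = new MapReduceClientOnRequiredTable();
        opts.parseArgs(ExampleMapReduceDriver.class.getName(), args);

        Job job = Job.getInstance(getConf(), ExampleMapReduceDriver.class.getSimpleName());
        job.setJarByClass(ExampleMapReduceDriver.class);
        // Configures AccumuloInputFormat/AccumuloOutputFormat with the instance,
        // the -t/--table value, and either the client credentials or a
        // delegation token when running under Kerberos.
        opts.setAccumuloConfigs(job);
        // job.setMapperClass(...), input/output key-value classes, etc. go here.
        return job.waitForCompletion(true) ? 0 : 1;
      }

      public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new ExampleMapReduceDriver(), args));
      }
    }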

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/cli/ScannerOpts.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/cli/ScannerOpts.java b/src/main/java/org/apache/accumulo/examples/cli/ScannerOpts.java
new file mode 100644
index 0000000..00bce98
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/cli/ScannerOpts.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.cli;
+
+import com.beust.jcommander.Parameter;
+
+public class ScannerOpts {
+  @Parameter(names = "--scanBatchSize", description = "the number of 
key-values to pull during a scan")
+  public int scanBatchSize = 1000;
+}
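
ScannerOpts and its BatchWriterOpts counterpart are plain JCommander option holders: examples pass them to parseArgs() alongside the client options, then apply them to the scanners and writers they create. A small hedged sketch (the class name is illustrative; the option members used are the ones appearing elsewhere in this commit):

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.examples.cli.BatchWriterOpts;
    import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
    import org.apache.accumulo.examples.cli.ScannerOpts;

    public class ScanAndWriteExample {
      public static void main(String[] args) throws Exception {
        ClientOnRequiredTable opts = new ClientOnRequiredTable();
        ScannerOpts scanOpts = new ScannerOpts();
        BatchWriterOpts bwOpts = new BatchWriterOpts();
        // All three objects contribute flags to one command line, e.g. --scanBatchSize.
        opts.parseArgs(ScanAndWriteExample.class.getName(), args, scanOpts, bwOpts);

        Connector conn = opts.getConnector();
        Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
        scanner.setBatchSize(scanOpts.scanBatchSize);

        BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
        // ... read entries from scanner, add mutations to bw ...
        bw.close();
      }
    }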

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/client/Flush.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/client/Flush.java b/src/main/java/org/apache/accumulo/examples/client/Flush.java
index 1227b36..ea183d2 100644
--- a/src/main/java/org/apache/accumulo/examples/client/Flush.java
+++ b/src/main/java/org/apache/accumulo/examples/client/Flush.java
@@ -16,8 +16,8 @@
  */
 package org.apache.accumulo.examples.client;
 
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
 
 /**
  * Simple example for using tableOperations() (like create, delete, flush, etc).

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java b/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
index 9b0c519..ac32827 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
@@ -24,8 +24,6 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.core.cli.BatchScannerOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchScanner;
@@ -34,6 +32,8 @@ import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.examples.cli.BatchScannerOpts;
+import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
 import org.apache.hadoop.io.Text;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java b/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
index b1f0d74..20f2f94 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
@@ -22,8 +22,6 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
 
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -35,6 +33,8 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.TabletId;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.cli.BatchWriterOpts;
+import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
 import org.apache.hadoop.io.Text;
 
 import com.beust.jcommander.Parameter;

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java b/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
index 0e63370..d4cfe19 100644
--- a/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
@@ -20,8 +20,6 @@ import java.util.Map.Entry;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.cli.ClientOnDefaultTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
@@ -34,6 +32,8 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.util.ByteArraySet;
+import org.apache.accumulo.examples.cli.ClientOnDefaultTable;
+import org.apache.accumulo.examples.cli.ScannerOpts;
 import org.apache.hadoop.io.Text;
 
 import com.beust.jcommander.IStringConverter;
@@ -54,15 +54,15 @@ public class ReadWriteExample {
   }
 
   static class Opts extends ClientOnDefaultTable {
-    @Parameter(names = {"-C", "--createtable"}, description = "create table 
before doing anything")
+    @Parameter(names = {"--createtable"}, description = "create table before 
doing anything")
     boolean createtable = false;
-    @Parameter(names = {"-D", "--deletetable"}, description = "delete table 
when finished")
+    @Parameter(names = {"--deletetable"}, description = "delete table when 
finished")
     boolean deletetable = false;
-    @Parameter(names = {"-c", "--create"}, description = "create entries 
before any deletes")
+    @Parameter(names = {"--create"}, description = "create entries before any 
deletes")
     boolean createEntries = false;
-    @Parameter(names = {"-r", "--read"}, description = "read entries after any 
creates/deletes")
+    @Parameter(names = {"--read"}, description = "read entries after any 
creates/deletes")
     boolean readEntries = false;
-    @Parameter(names = {"-d", "--delete"}, description = "delete entries after 
any creates")
+    @Parameter(names = {"--delete"}, description = "delete entries after any 
creates")
     boolean deleteEntries = false;
     @Parameter(names = {"--durability"}, description = "durability used for 
writes (none, log, flush or sync)", converter = DurabilityConverter.class)
     Durability durability = Durability.DEFAULT;

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/client/RowOperations.java b/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
index 4081971..b535d4c 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
@@ -20,9 +20,6 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -36,6 +33,9 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.examples.cli.BatchWriterOpts;
+import org.apache.accumulo.examples.cli.ClientOpts;
+import org.apache.accumulo.examples.cli.ScannerOpts;
 import org.apache.hadoop.io.Text;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java b/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
index 56eaa84..9b57739 100644
--- a/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
+++ b/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
@@ -16,8 +16,6 @@
  */
 package org.apache.accumulo.examples.client;
 
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -26,6 +24,8 @@ import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.cli.BatchWriterOpts;
+import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
 
 import com.beust.jcommander.Parameter;
 

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java b/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java
index 7261637..08146bb 100644
--- a/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java
@@ -16,8 +16,6 @@
  */
 package org.apache.accumulo.examples.client;
 
-import org.apache.accumulo.core.cli.ClientOnDefaultTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
@@ -25,6 +23,8 @@ import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.examples.cli.ClientOnDefaultTable;
+import org.apache.accumulo.examples.cli.ScannerOpts;
 import org.apache.accumulo.tracer.TraceDump;
 import org.apache.accumulo.tracer.TraceDump.Printer;
 import org.apache.hadoop.io.Text;

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
index 9b05a14..68d4404 100644
--- a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
@@ -21,8 +21,6 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.cli.ClientOnDefaultTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -35,6 +33,8 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.trace.DistributedTrace;
+import org.apache.accumulo.examples.cli.ClientOnDefaultTable;
+import org.apache.accumulo.examples.cli.ScannerOpts;
 import org.apache.htrace.Sampler;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
@@ -52,13 +52,13 @@ public class TracingExample {
   private static final String DEFAULT_TABLE_NAME = "test";
 
   static class Opts extends ClientOnDefaultTable {
-    @Parameter(names = {"-C", "--createtable"}, description = "create table 
before doing anything")
+    @Parameter(names = {"--createtable"}, description = "create table before 
doing anything")
     boolean createtable = false;
-    @Parameter(names = {"-D", "--deletetable"}, description = "delete table 
when finished")
+    @Parameter(names = {"--deletetable"}, description = "delete table when 
finished")
     boolean deletetable = false;
-    @Parameter(names = {"-c", "--create"}, description = "create entries 
before any deletes")
+    @Parameter(names = {"--create"}, description = "create entries before any 
deletes")
     boolean createEntries = false;
-    @Parameter(names = {"-r", "--read"}, description = "read entries after any 
creates/deletes")
+    @Parameter(names = {"--read"}, description = "read entries after any 
creates/deletes")
     boolean readEntries = false;
 
     public Opts() {

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java b/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
index 0b9e7a3..4ac1f04 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
@@ -19,9 +19,6 @@ package org.apache.accumulo.examples.dirlist;
 import java.util.Iterator;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
@@ -29,7 +26,11 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.cli.BatchWriterOpts;
+import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
+import org.apache.accumulo.examples.cli.ScannerOpts;
 import org.apache.hadoop.io.Text;
 
 import com.beust.jcommander.Parameter;
@@ -42,9 +43,12 @@ public class FileCount {
   private int entriesScanned;
   private int inserts;
 
-  private Opts opts;
   private ScannerOpts scanOpts;
   private BatchWriterOpts bwOpts;
+  private Connector conn;
+  private String tableName;
+  private Authorizations auths;
+  private ColumnVisibility visibility;
 
   private static class CountValue {
     int dirCount = 0;
@@ -171,7 +175,7 @@ public class FileCount {
 
   private Mutation createMutation(int depth, String dir, CountValue countVal) {
     Mutation m = new Mutation(String.format("%03d%s", depth, dir));
-    m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, opts.visibility, countVal.toValue());
+    m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, visibility, countVal.toValue());
     return m;
   }
 
@@ -214,7 +218,7 @@ public class FileCount {
           // in this case the higher depth will not insert anything if the
           // dir has no children, so insert something here
           Mutation m = new Mutation(key.getRow());
-          m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, opts.visibility, tmpCount.toValue());
+          m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, visibility, tmpCount.toValue());
           batchWriter.addMutation(m);
           inserts++;
         }
@@ -233,8 +237,11 @@ public class FileCount {
     }
   }
 
-  public FileCount(Opts opts, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
-    this.opts = opts;
+  public FileCount(Connector conn, String tableName, Authorizations auths, ColumnVisibility cv, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
+    this.conn = conn;
+    this.tableName = tableName;
+    this.auths = auths;
+    this.visibility = cv;
     this.scanOpts = scanOpts;
     this.bwOpts = bwOpts;
   }
@@ -244,10 +251,9 @@ public class FileCount {
     entriesScanned = 0;
     inserts = 0;
 
-    Connector conn = opts.getConnector();
-    Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
+    Scanner scanner = conn.createScanner(tableName, auths);
     scanner.setBatchSize(scanOpts.scanBatchSize);
-    BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
+    BatchWriter bw = conn.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());
 
     long t1 = System.currentTimeMillis();
 
@@ -284,7 +290,7 @@ public class FileCount {
     String programName = FileCount.class.getName();
     opts.parseArgs(programName, args, scanOpts, bwOpts);
 
-    FileCount fileCount = new FileCount(opts, scanOpts, bwOpts);
+    FileCount fileCount = new FileCount(opts.getConnector(), opts.getTableName(), opts.auths, opts.visibility, scanOpts, bwOpts);
     fileCount.run();
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java b/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
index b55ba71..421ca1e 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
@@ -21,8 +21,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
@@ -31,6 +29,8 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.LongCombiner;
 import org.apache.accumulo.core.iterators.TypedValueCombiner.Encoder;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.cli.BatchWriterOpts;
+import org.apache.accumulo.examples.cli.ClientOpts;
 import org.apache.accumulo.examples.filedata.ChunkCombiner;
 import org.apache.accumulo.examples.filedata.FileDataIngest;
 import org.apache.hadoop.io.Text;

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java b/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
index d54d39a..d979106 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
@@ -20,7 +20,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.TreeMap;
 
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
@@ -32,6 +31,7 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
 import org.apache.hadoop.io.Text;
 
 import com.beust.jcommander.Parameter;

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java b/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
index 2cc2298..b40fe24 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
@@ -104,7 +104,7 @@ public class Viewer extends JFrame implements TreeSelectionListener, TreeExpansi
     setSize(1000, 800);
     setDefaultCloseOperation(EXIT_ON_CLOSE);
     q = new QueryUtil(opts);
-    fdq = new FileDataQuery(opts.instance, opts.zookeepers, opts.getPrincipal(), opts.getToken(), opts.dataTable, opts.auths);
+    fdq = new FileDataQuery(opts.getConnector(), opts.dataTable, opts.auths);
     this.topPath = opts.path;
   }
 

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java b/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java
index 5058f6d..0f25df5 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java
@@ -22,13 +22,13 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.SummingArrayCombiner;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.cli.MapReduceClientOnRequiredTable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.Text;

http://git-wip-us.apache.org/repos/asf/accumulo-examples/blob/e07cdcde/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java b/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
index 6ee2d11..25b78bb 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
@@ -24,8 +24,6 @@ import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
@@ -35,6 +33,8 @@ import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.cli.BatchWriterOpts;
+import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
 import org.apache.hadoop.io.Text;
 
 import com.beust.jcommander.Parameter;
@@ -197,6 +197,7 @@ public class FileDataIngest {
       fdi.insertFileData(filename, bw);
     }
     bw.close();
-    opts.stopTracing();
+    //TODO
+    //opts.stopTracing();
   }
 }
