http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java index 37caf15..77081bf 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormat.java @@ -36,16 +36,16 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; /** * This class allows MapReduce jobs to use Accumulo as the source of data. This {@link InputFormat} provides row names as {@link Text} as keys, and a * corresponding {@link PeekingIterator} as a value, which in turn makes the {@link Key}/{@link Value} pairs for that row available to the Map function. - * + * * The user must specify the following via static configurator methods: - * + * * <ul> * <li>{@link AccumuloRowInputFormat#setConnectorInfo(Job, String, AuthenticationToken)} * <li>{@link AccumuloRowInputFormat#setInputTableName(Job, String)} * <li>{@link AccumuloRowInputFormat#setScanAuthorizations(Job, Authorizations)} * <li>{@link AccumuloRowInputFormat#setZooKeeperInstance(Job, ClientConfiguration)} OR {@link AccumuloRowInputFormat#setMockInstance(Job, String)} * </ul> - * + * * Other static methods are optional. */ public class AccumuloRowInputFormat extends InputFormatBase<Text,PeekingIterator<Entry<Key,Value>>> {
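For reference, a minimal sketch of wiring this InputFormat into a Job per the javadoc above; the instance name, ZooKeeper hosts, principal, token, table, and authorizations are placeholder values, not taken from this commit.

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.mapreduce.AccumuloRowInputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.mapreduce.Job;

public class RowInputSetup {
  public static void configure(Job job) throws Exception {
    // The four required configurator calls listed in the class javadoc
    AccumuloRowInputFormat.setConnectorInfo(job, "reader", new PasswordToken("secret"));
    AccumuloRowInputFormat.setInputTableName(job, "mytable");
    AccumuloRowInputFormat.setScanAuthorizations(job, new Authorizations("public"));
    AccumuloRowInputFormat.setZooKeeperInstance(job,
        new ClientConfiguration().withInstance("myinstance").withZkHosts("zkhost:2181"));
    job.setInputFormatClass(AccumuloRowInputFormat.class);
  }
}

The Mapper then declares Text as its input key and PeekingIterator<Entry<Key,Value>> as its input value, one pair per row.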
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java index e58e350..a60cb80 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java @@ -54,7 +54,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Gets the table name from the configuration. - * + * * @param context * the Hadoop context for the configured job * @return the table name @@ -67,7 +67,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Sets the name of the input table, over which this job will scan. - * + * * @param job * the Hadoop job instance to be configured * @param tableName @@ -80,7 +80,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Sets the input ranges to scan for the single input table associated with this job. - * + * * @param job * the Hadoop job instance to be configured * @param ranges @@ -93,7 +93,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Gets the ranges to scan over from a job. - * + * * @param context * the Hadoop context for the configured job * @return the ranges @@ -106,7 +106,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Restricts the columns that will be mapped over for this job for the default input table. - * + * * @param job * the Hadoop job instance to be configured * @param columnFamilyColumnQualifierPairs @@ -120,7 +120,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Gets the columns to be mapped over from this job. - * + * * @param context * the Hadoop context for the configured job * @return a set of columns @@ -133,7 +133,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Encode an iterator on the single input table for this job. - * + * * @param job * the Hadoop job instance to be configured * @param cfg @@ -146,7 +146,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Gets a list of the iterator settings (for iterators to apply to a scanner) from this configuration. - * + * * @param context * the Hadoop context for the configured job * @return a list of iterators @@ -160,10 +160,10 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Controls the automatic adjustment of ranges for this job. This feature merges overlapping ranges, then splits them to align with tablet boundaries. * Disabling this feature will cause exactly one Map task to be created for each specified range. The default setting is enabled. * - * + * * <p> * By default, this feature is <b>enabled</b>. - * + * * @param job * the Hadoop job instance to be configured * @param enableFeature @@ -177,7 +177,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Determines whether a configuration has auto-adjust ranges enabled. 
- * + * @param context * the Hadoop context for the configured job * @return false if the feature is disabled, true otherwise @@ -190,10 +190,10 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Controls the use of the {@link IsolatedScanner} in this job. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param job * the Hadoop job instance to be configured * @param enableFeature @@ -206,7 +206,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Determines whether a configuration has isolation enabled. - * + * @param context * the Hadoop context for the configured job * @return true if the feature is enabled, false otherwise @@ -220,10 +220,10 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Controls the use of the {@link ClientSideIteratorScanner} in this job. Enabling this feature will cause the iterator stack to be constructed within the Map * task, rather than within the Accumulo TServer. To use this feature, all classes needed for those iterators must be available on the classpath for the task. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param job * the Hadoop job instance to be configured * @param enableFeature @@ -236,7 +236,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Determines whether a configuration uses local iterators. - * + * @param context * the Hadoop context for the configured job * @return true if the feature is enabled, false otherwise @@ -252,26 +252,26 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { * Enable reading offline tables. By default, this feature is disabled and only online tables are scanned. This will make the map reduce job directly read the * table's files. If the table is not offline, then the job will fail. If the table comes online during the map reduce job, it is likely that the job will * fail. - * + * <p> * To use this option, the map reduce user will need access to read the Accumulo directory in HDFS. - * + * <p> * Reading the offline table will create the scan time iterator stack in the map process. So any iterators that are configured for the table will need to be * on the mapper's classpath. - * + * <p> * One way to use this feature is to clone a table, take the clone offline, and use the clone as the input table for a map reduce job. If you plan to map * reduce over the data many times, it may be better to compact the table, clone it, take it offline, and use the clone for all map reduce jobs. The * reason to do this is that compaction will reduce each tablet in the table to one file, and it is faster to read from one file. - * + * <p> * There are two possible advantages to reading a table's files directly out of HDFS. First, you may see better read performance. Second, it will support * speculative execution better. When reading an online table speculative execution can put more load on an already slow tablet server. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param job * the Hadoop job instance to be configured * @param enableFeature @@ -284,7 +284,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Determines whether a configuration has the offline table scan feature enabled.
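To ground the scan options above, a hedged sketch using the public setters on AccumuloInputFormat (a concrete subclass of InputFormatBase); the range bounds, column names, and regex are illustrative only.

import java.util.Collections;

import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.iterators.user.RegExFilter;
import org.apache.accumulo.core.util.Pair;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

public class ScanOptionsSetup {
  public static void configure(Job job) {
    // Scan a single row range and only one column family/qualifier
    AccumuloInputFormat.setRanges(job, Collections.singleton(new Range("a", "m")));
    AccumuloInputFormat.fetchColumns(job,
        Collections.singleton(new Pair<Text,Text>(new Text("cf"), new Text("cq"))));
    // Push a server-side filter down to the scan
    IteratorSetting regex = new IteratorSetting(50, "rowFilter", RegExFilter.class);
    RegExFilter.setRegexs(regex, "row.*", null, null, null, false);
    AccumuloInputFormat.addIterator(job, regex);
    // Exactly one map task per supplied range; read the table's files directly from HDFS
    AccumuloInputFormat.setAutoAdjustRanges(job, false);
    AccumuloInputFormat.setOfflineTableScan(job, true);
  }
}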
- * + * @param context * the Hadoop context for the configured job * @return true if the feature is enabled, false otherwise @@ -297,7 +297,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Initializes an Accumulo {@link org.apache.accumulo.core.client.impl.TabletLocator} based on the configuration. - * + * @param context * the Hadoop context for the configured job * @return an Accumulo tablet locator @@ -315,7 +315,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Apply the configured iterators from the configuration to the scanner for the specified table name. - * + * @param context * the Hadoop context for the configured job * @param scanner @@ -329,7 +329,7 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> { /** * Apply the configured iterators from the configuration to the scanner. - * + * @param context * the Hadoop context for the configured job * @param scanner http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java index fa3b7eb..03473f2 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java @@ -48,7 +48,7 @@ public class InputTableConfig implements Writable { /** * Creates a batch scan config object out of a previously serialized batch scan config object. - * + * @param input * the data input of the serialized batch scan config */ @@ -58,7 +58,7 @@ public class InputTableConfig implements Writable { /** * Sets the input ranges to scan for all tables associated with this job. This will be added to any per-table ranges that have been set using - * + * @param ranges * the ranges that will be mapped over * @since 1.6.0 @@ -77,7 +77,7 @@ public class InputTableConfig implements Writable { /** * Restricts the columns that will be mapped over for this job for the default input table. - * + * @param columns * a pair of {@link Text} objects corresponding to column family and column qualifier. If the column qualifier is null, the entire column family is * selected. An empty set is the default and is equivalent to scanning all the columns. @@ -97,7 +97,7 @@ public class InputTableConfig implements Writable { /** * Sets the iterators to be used in the query. - * + * @param iterators * the configurations for the iterators * @since 1.6.0 @@ -117,10 +117,10 @@ public class InputTableConfig implements Writable { /** * Controls the automatic adjustment of ranges for this job. This feature merges overlapping ranges, then splits them to align with tablet boundaries. * Disabling this feature will cause exactly one Map task to be created for each specified range. The default setting is enabled. * - * + * <p> * By default, this feature is <b>enabled</b>. - * + * @param autoAdjustRanges * the feature is enabled if true, disabled otherwise * @see #setRanges(java.util.List) @@ -133,7 +133,7 @@ public class InputTableConfig implements Writable { /** * Determines whether a configuration has auto-adjust ranges enabled.
- * + * @return false if the feature is disabled, true otherwise * @since 1.6.0 * @see #setAutoAdjustRanges(boolean) @@ -146,10 +146,10 @@ * Controls the use of the {@link org.apache.accumulo.core.client.ClientSideIteratorScanner} in this job. Enabling this feature will cause the iterator stack * to be constructed within the Map task, rather than within the Accumulo TServer. To use this feature, all classes needed for those iterators must be * available on the classpath for the task. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param useLocalIterators * the feature is enabled if true, disabled otherwise * @since 1.6.0 @@ -161,7 +161,7 @@ * Determines whether a configuration uses local iterators. - * + * @return true if the feature is enabled, false otherwise * @since 1.6.0 * @see #setUseLocalIterators(boolean) @@ -175,26 +175,26 @@ * Enable reading offline tables. By default, this feature is disabled and only online tables are scanned. This will make the map reduce job directly read the * table's files. If the table is not offline, then the job will fail. If the table comes online during the map reduce job, it is likely that the job will * fail. - * + * <p> * To use this option, the map reduce user will need access to read the Accumulo directory in HDFS. - * + * <p> * Reading the offline table will create the scan time iterator stack in the map process. So any iterators that are configured for the table will need to be * on the mapper's classpath. The accumulo-site.xml may need to be on the mapper's classpath if HDFS or the Accumulo directory in HDFS are non-standard. - * + * <p> * One way to use this feature is to clone a table, take the clone offline, and use the clone as the input table for a map reduce job. If you plan to map * reduce over the data many times, it may be better to compact the table, clone it, take it offline, and use the clone for all map reduce jobs. The * reason to do this is that compaction will reduce each tablet in the table to one file, and it is faster to read from one file. - * + * <p> * There are two possible advantages to reading a table's files directly out of HDFS. First, you may see better read performance. Second, it will support * speculative execution better. When reading an online table speculative execution can put more load on an already slow tablet server. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param offlineScan * the feature is enabled if true, disabled otherwise * @since 1.6.0 @@ -206,7 +206,7 @@ * Determines whether a configuration has the offline table scan feature enabled. - * + * @return true if the feature is enabled, false otherwise * @since 1.6.0 * @see #setOfflineScan(boolean) @@ -217,10 +217,10 @@ * Controls the use of the {@link org.apache.accumulo.core.client.IsolatedScanner} in this job. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param useIsolatedScanners * the feature is enabled if true, disabled otherwise * @since 1.6.0 @@ -232,7 +232,7 @@ * Determines whether a configuration has isolation enabled.
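A sketch of assembling the per-table options just described into an InputTableConfig; the range and iterator values are assumptions for illustration.

import java.util.Collections;

import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.iterators.user.VersioningIterator;

public class TableConfigExample {
  public static InputTableConfig newConfig() {
    InputTableConfig config = new InputTableConfig();
    config.setRanges(Collections.singletonList(new Range("2014", "2015")));
    config.setIterators(Collections.singletonList(new IteratorSetting(60, "vers", VersioningIterator.class)));
    config.setAutoAdjustRanges(true); // the default, shown for clarity
    config.setOfflineScan(false);     // scan the online table
    return config;
  }
}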
- * + * * @return true if the feature is enabled, false otherwise * @since 1.6.0 * @see #setUseIsolatedScanners(boolean) @@ -243,7 +243,7 @@ public class InputTableConfig implements Writable { /** * Writes the state for the current object out to the specified {@link DataOutput} - * + * * @param dataOutput * the output for which to write the object's state */ @@ -286,7 +286,7 @@ public class InputTableConfig implements Writable { /** * Reads the fields in the {@link DataInput} into the current object - * + * * @param dataInput * the input fields to read into the current object */ http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java index 29cf95d..fe27b01 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java @@ -33,8 +33,8 @@ import org.apache.accumulo.core.client.ClientConfiguration; import org.apache.accumulo.core.client.Instance; import org.apache.accumulo.core.client.IteratorSetting; import org.apache.accumulo.core.client.ZooKeeperInstance; -import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator; import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.TokenSource; +import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator; import org.apache.accumulo.core.client.mock.MockInstance; import org.apache.accumulo.core.client.security.tokens.AuthenticationToken; import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer; http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java index ae1d46f..b2b5150 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java @@ -22,6 +22,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; + import org.apache.accumulo.core.Constants; import org.apache.accumulo.core.client.AccumuloSecurityException; import org.apache.accumulo.core.client.ClientConfiguration; @@ -47,7 +48,7 @@ public class ConfiguratorBase { /** * Configuration keys for {@link Instance#getConnector(String, AuthenticationToken)}. - * + * * @since 1.6.0 */ public static enum ConnectorInfo { @@ -70,7 +71,7 @@ public class ConfiguratorBase { /** * Configuration keys for {@link Instance}, {@link ZooKeeperInstance}, and {@link MockInstance}. - * + * * @since 1.6.0 */ public static enum InstanceOpts { @@ -79,17 +80,16 @@ public class ConfiguratorBase { /** * Configuration keys for general configuration options. 
- * + * * @since 1.6.0 */ public static enum GeneralOpts { - LOG_LEVEL, - VISIBILITY_CACHE_SIZE + LOG_LEVEL, VISIBILITY_CACHE_SIZE } /** * Provides a configuration key for a given feature enum, prefixed by the implementingClass - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param e @@ -102,23 +102,23 @@ public class ConfiguratorBase { } /** - * Provides a configuration key for a given feature enum. - * - * @param e - * the enum used to provide the unique part of the configuration key - * @return the configuration key - */ + * Provides a configuration key for a given feature enum. + * + * @param e + * the enum used to provide the unique part of the configuration key + * @return the configuration key + */ protected static String enumToConfKey(Enum<?> e) { - return e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase()); + return e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase()); } /** * Sets the connector information needed to communicate with Accumulo in this job. - * + * * <p> * <b>WARNING:</b> The serialized token is stored in the configuration and shared with all MapReduce tasks. It is BASE64 encoded to provide a charset safe * conversion to a string, and is not intended to be secure. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -144,11 +144,11 @@ public class ConfiguratorBase { /** * Sets the connector information needed to communicate with Accumulo in this job. - * + * * <p> * Pulls a token file into the Distributed Cache that contains the authentication token in an attempt to be more secure than storing the password in the * Configuration. Token file created with "bin/accumulo create-token". - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -179,7 +179,7 @@ public class ConfiguratorBase { /** * Determines if the connector info has already been set for this instance. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -194,7 +194,7 @@ public class ConfiguratorBase { /** * Gets the user name from the configuration. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -209,7 +209,7 @@ public class ConfiguratorBase { /** * Gets the authenticated token from either the specified token file or directly from the configuration, whichever was used when the job was configured. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -237,7 +237,7 @@ public class ConfiguratorBase { /** * Reads from the token file in distributed cache. Currently, the token file stores data separated by colons e.g. principal:token_class:token - * + * * @param conf * the Hadoop context for the configured job * @return path to the token file as a String @@ -275,7 +275,7 @@ public class ConfiguratorBase { /** * Configures a {@link ZooKeeperInstance} for this job. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -296,7 +296,7 @@ public class ConfiguratorBase { /** * Configures a {@link MockInstance} for this job. 
- * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -317,7 +317,7 @@ public class ConfiguratorBase { /** * Initializes an Accumulo {@link Instance} based on the configuration. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -348,7 +348,7 @@ public class ConfiguratorBase { /** * Sets the log level for this job. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -365,7 +365,7 @@ public class ConfiguratorBase { /** * Gets the log level from this configuration. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -380,7 +380,7 @@ public class ConfiguratorBase { /** * Sets the valid visibility count for this job. - * + * * @param conf * the Hadoop configuration object to configure * @param visibilityCacheSize @@ -392,13 +392,13 @@ public class ConfiguratorBase { /** * Gets the valid visibility count for this job. - * + * * @param conf * the Hadoop configuration object to configure * @return the valid visibility count */ public static int getVisibilityCacheSize(Configuration conf) { - return conf.getInt(enumToConfKey(GeneralOpts.VISIBILITY_CACHE_SIZE),Constants.DEFAULT_VISIBILITY_CACHE_SIZE); + return conf.getInt(enumToConfKey(GeneralOpts.VISIBILITY_CACHE_SIZE), Constants.DEFAULT_VISIBILITY_CACHE_SIZE); } } http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/FileOutputConfigurator.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/FileOutputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/FileOutputConfigurator.java index ce84209..882c6d3 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/FileOutputConfigurator.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/FileOutputConfigurator.java @@ -31,7 +31,7 @@ public class FileOutputConfigurator extends ConfiguratorBase { /** * Configuration keys for {@link AccumuloConfiguration}. - * + * * @since 1.6.0 */ public static enum Opts { @@ -41,7 +41,7 @@ public class FileOutputConfigurator extends ConfiguratorBase { /** * The supported Accumulo properties we set in this OutputFormat, that change the behavior of the RecordWriter.<br /> * These properties correspond to the supported public static setter methods available to this class. - * + * * @param property * the Accumulo property to check * @since 1.6.0 @@ -61,7 +61,7 @@ public class FileOutputConfigurator extends ConfiguratorBase { /** * Helper for transforming Accumulo configuration properties into something that can be stored safely inside the Hadoop Job configuration. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -86,7 +86,7 @@ public class FileOutputConfigurator extends ConfiguratorBase { /** * This helper method provides an AccumuloConfiguration object constructed from the Accumulo defaults, and overridden with Accumulo properties that have been * stored in the Job's configuration. 
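The ConfiguratorBase methods above can also be called directly against a raw Hadoop Configuration; a hedged sketch follows, where the class token supplies the key prefix and all values are placeholders.

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Level;

public class BaseConfigExample {
  public static Instance configure(Configuration conf) throws Exception {
    // Keys are stored under the AccumuloInputFormat prefix
    ConfiguratorBase.setConnectorInfo(AccumuloInputFormat.class, conf, "reader", new PasswordToken("secret"));
    ConfiguratorBase.setZooKeeperInstance(AccumuloInputFormat.class, conf,
        new ClientConfiguration().withInstance("myinstance").withZkHosts("zkhost:2181"));
    ConfiguratorBase.setLogLevel(AccumuloInputFormat.class, conf, Level.INFO);
    // Rehydrate an Instance from what was stored
    return ConfiguratorBase.getInstance(AccumuloInputFormat.class, conf);
  }
}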
- * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -104,7 +104,7 @@ public class FileOutputConfigurator extends ConfiguratorBase { /** * Sets the compression type to use for data blocks. Specifying a compression may require additional libraries to be available to your Job. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -122,10 +122,10 @@ public class FileOutputConfigurator extends ConfiguratorBase { /** * Sets the size for data blocks within each file.<br /> * Data blocks are a span of key/value pairs stored in the file that are compressed and indexed as a group. - * + * * <p> * Making this value smaller may increase seek performance, but at the cost of increasing the size of the indexes (which can also affect seek performance). - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -140,7 +140,7 @@ public class FileOutputConfigurator extends ConfiguratorBase { /** * Sets the size for file blocks in the file system; file blocks are managed, and replicated, by the underlying file system. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -156,7 +156,7 @@ public class FileOutputConfigurator extends ConfiguratorBase { /** * Sets the size for index blocks within each file; smaller blocks means a deeper index hierarchy within the file, while larger blocks mean a more shallow * index hierarchy within the file. This can affect the performance of queries. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -171,7 +171,7 @@ public class FileOutputConfigurator extends ConfiguratorBase { /** * Sets the file system replication factor for the resulting file, overriding the file system default. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java index af84bb4..5405ac0 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java @@ -81,7 +81,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Configuration keys for {@link Scanner}. - * + * * @since 1.6.0 */ public static enum ScanOpts { @@ -90,7 +90,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Configuration keys for various features. - * + * * @since 1.6.0 */ public static enum Features { @@ -99,7 +99,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Sets the name of the input table, over which this job will scan. 
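To ground the FileOutputConfigurator options just covered (compression, block sizes, replication), a sketch via the public AccumuloFileOutputFormat setters; the output path and size values are arbitrary examples.

import org.apache.accumulo.core.client.mapreduce.AccumuloFileOutputFormat;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;

public class FileOutputSetup {
  public static void configure(Job job) {
    AccumuloFileOutputFormat.setOutputPath(job, new Path("/tmp/rfiles"));
    AccumuloFileOutputFormat.setCompressionType(job, "gz");      // data-block compression codec
    AccumuloFileOutputFormat.setDataBlockSize(job, 64 * 1024);   // smaller blocks: faster seeks, bigger index
    AccumuloFileOutputFormat.setIndexBlockSize(job, 128 * 1024); // larger blocks: shallower index hierarchy
    AccumuloFileOutputFormat.setReplication(job, 3);             // override the file system default
    job.setOutputFormatClass(AccumuloFileOutputFormat.class);
  }
}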
- * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -115,7 +115,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Sets the name of the input table, over which this job will scan. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -128,7 +128,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Sets the {@link Authorizations} used to scan. Must be a subset of the user's authorization. Defaults to the empty set. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -144,7 +144,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Gets the authorizations to set for the scans from the configuration. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -160,7 +160,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Sets the input ranges to scan on all input tables for this job. If not set, the entire table will be scanned. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -189,7 +189,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Gets the ranges to scan over from a job. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -215,7 +215,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Gets a list of the iterator settings (for iterators to apply to a scanner) from this configuration. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -249,7 +249,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Restricts the columns that will be mapped over for the single input table on this job. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -286,7 +286,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Gets the columns to be mapped over from this job. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -326,7 +326,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Encode an iterator on the input for the single input table associated with this job. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -364,10 +364,10 @@ public class InputConfigurator extends ConfiguratorBase { /** * Controls the automatic adjustment of ranges for this job. This feature merges overlapping ranges, then splits them to align with tablet boundaries. * Disabling this feature will cause exactly one Map task to be created for each specified range. The default setting is enabled. * - * + * * <p> * By default, this feature is <b>enabled</b>. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -383,7 +383,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Determines whether a configuration has auto-adjust ranges enabled. 
- * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -398,10 +398,10 @@ public class InputConfigurator extends ConfiguratorBase { /** * Controls the use of the {@link IsolatedScanner} in this job. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -416,7 +416,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Determines whether a configuration has isolation enabled. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -432,10 +432,10 @@ public class InputConfigurator extends ConfiguratorBase { /** * Controls the use of the {@link ClientSideIteratorScanner} in this job. Enabling this feature will cause the iterator stack to be constructed within the Map * task, rather than within the Accumulo TServer. To use this feature, all classes needed for those iterators must be available on the classpath for the task. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -450,7 +450,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Determines whether a configuration uses local iterators. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -468,26 +468,26 @@ public class InputConfigurator extends ConfiguratorBase { * Enable reading offline tables. By default, this feature is disabled and only online tables are scanned. This will make the map reduce job directly read the * table's files. If the table is not offline, then the job will fail. If the table comes online during the map reduce job, it is likely that the job will * fail. - * + * <p> * To use this option, the map reduce user will need access to read the Accumulo directory in HDFS. - * + * <p> * Reading the offline table will create the scan time iterator stack in the map process. So any iterators that are configured for the table will need to be * on the mapper's classpath. - * + * <p> * One way to use this feature is to clone a table, take the clone offline, and use the clone as the input table for a map reduce job. If you plan to map * reduce over the data many times, it may be better to compact the table, clone it, take it offline, and use the clone for all map reduce jobs. The * reason to do this is that compaction will reduce each tablet in the table to one file, and it is faster to read from one file. - * + * <p> * There are two possible advantages to reading a table's files directly out of HDFS. First, you may see better read performance. Second, it will support * speculative execution better. When reading an online table speculative execution can put more load on an already slow tablet server. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -502,7 +502,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Determines whether a configuration has the offline table scan feature enabled.
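The same scan features are reachable at the InputConfigurator level against a bare Configuration; a brief sketch, with the table name and authorizations as assumed values.

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.conf.Configuration;

public class LowLevelInputSetup {
  public static void configure(Configuration conf) {
    InputConfigurator.setInputTableName(AccumuloInputFormat.class, conf, "mytable");
    InputConfigurator.setScanAuthorizations(AccumuloInputFormat.class, conf, new Authorizations("public"));
    InputConfigurator.setScanIsolation(AccumuloInputFormat.class, conf, true); // use an IsolatedScanner
    InputConfigurator.setOfflineTableScan(AccumuloInputFormat.class, conf, false);
  }
}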
- * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -517,7 +517,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Sets configurations for multiple tables at a time. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -544,7 +544,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Returns all {@link InputTableConfig} objects associated with this job. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -577,7 +577,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Returns the {@link InputTableConfig} for the given table - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -594,7 +594,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Initializes an Accumulo {@link TabletLocator} based on the configuration. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -619,7 +619,7 @@ public class InputConfigurator extends ConfiguratorBase { // InputFormat doesn't have the equivalent of OutputFormat's checkOutputSpecs(JobContext job) /** * Check whether a configuration is fully configured to be used with an Accumulo {@link org.apache.hadoop.mapreduce.InputFormat}. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -674,7 +674,7 @@ public class InputConfigurator extends ConfiguratorBase { /** * Returns the {@link org.apache.accumulo.core.client.mapreduce.InputTableConfig} for the configuration based on the properties set using the single-table * input methods. - * + * * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/OutputConfigurator.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/OutputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/OutputConfigurator.java index 13b67d5..55e980c 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/OutputConfigurator.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/OutputConfigurator.java @@ -23,6 +23,7 @@ import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; + import org.apache.accumulo.core.client.BatchWriter; import org.apache.accumulo.core.client.BatchWriterConfig; import org.apache.hadoop.conf.Configuration; @@ -34,7 +35,7 @@ public class OutputConfigurator extends ConfiguratorBase { /** * Configuration keys for {@link BatchWriter}. - * + * * @since 1.6.0 */ public static enum WriteOpts { @@ -43,7 +44,7 @@ public class OutputConfigurator extends ConfiguratorBase { /** * Configuration keys for various features. 
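A sketch of the multi-table path described above, pairing table names with InputTableConfig objects through the public AccumuloMultiTableInputFormat facade; the table names are hypothetical, and connector/instance setup is the same as for the single-table formats.

import java.util.HashMap;
import java.util.Map;

import org.apache.accumulo.core.client.mapreduce.AccumuloMultiTableInputFormat;
import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
import org.apache.hadoop.mapreduce.Job;

public class MultiTableSetup {
  public static void configure(Job job) {
    Map<String,InputTableConfig> configs = new HashMap<String,InputTableConfig>();
    configs.put("events", new InputTableConfig()); // whole-table scan with defaults
    InputTableConfig users = new InputTableConfig();
    users.setUseLocalIterators(true); // build this table's iterator stack in the mapper
    configs.put("users", users);
    AccumuloMultiTableInputFormat.setInputTableConfigs(job, configs);
  }
}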
- * + * @since 1.6.0 */ public static enum Features { @@ -53,7 +54,7 @@ public class OutputConfigurator extends ConfiguratorBase { /** * Sets the default table name to use if one emits a null in place of a table name for a given mutation. Table names can only be alpha-numeric and * underscores. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -69,7 +70,7 @@ public class OutputConfigurator extends ConfiguratorBase { /** * Gets the default table name from the configuration. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -85,7 +86,7 @@ public class OutputConfigurator extends ConfiguratorBase { /** * Sets the configuration for the job's {@link BatchWriter} instances. If not set, a new {@link BatchWriterConfig}, with sensible built-in defaults, is * used. Setting the configuration multiple times overwrites any previous configuration. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -109,7 +110,7 @@ public class OutputConfigurator extends ConfiguratorBase { /** * Gets the {@link BatchWriterConfig} settings. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -137,10 +138,10 @@ public class OutputConfigurator extends ConfiguratorBase { /** * Sets the directive to create new tables, as necessary. Table names can only be alpha-numeric and underscores. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -155,7 +156,7 @@ public class OutputConfigurator extends ConfiguratorBase { /** * Determines whether tables are permitted to be created as needed. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -170,10 +171,10 @@ public class OutputConfigurator extends ConfiguratorBase { /** * Sets the directive to use simulation mode for this job. In simulation mode, no output is produced. This is useful for testing. - * + * <p> * By default, this feature is <b>disabled</b>. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf @@ -188,7 +189,7 @@ public class OutputConfigurator extends ConfiguratorBase { /** * Determines whether this feature is enabled. - * + * @param implementingClass * the class whose name will be used as a prefix for the property configuration key * @param conf http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/package-info.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/package-info.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/package-info.java index 243160d..34ea7d2 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/package-info.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/package-info.java @@ -21,13 +21,13 @@ * InputFormats/OutputFormats, so as not to clutter their API with methods that don't match the conventions for that framework.
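On the writer side, a hedged sketch of the options OutputConfigurator stores, via the public AccumuloOutputFormat setters; the table name and BatchWriter limits are example values.

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
import org.apache.hadoop.mapreduce.Job;

public class OutputSetup {
  public static void configure(Job job) {
    AccumuloOutputFormat.setDefaultTableName(job, "results"); // used when a mutation names no table
    AccumuloOutputFormat.setCreateTables(job, true);          // create missing tables as needed
    BatchWriterConfig bwConfig = new BatchWriterConfig()
        .setMaxMemory(50 * 1024 * 1024)
        .setMaxLatency(2, TimeUnit.MINUTES)
        .setMaxWriteThreads(4);
    AccumuloOutputFormat.setBatchWriterOptions(job, bwConfig);
    job.setOutputFormatClass(AccumuloOutputFormat.class);
  }
}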
These classes may be useful to * input/output plugins for other frameworks, so they can reuse the same configuration options and/or serialize them into a * {@link org.apache.hadoop.conf.Configuration} instance in a standard way. - * + * * <p> * It is not expected these will change much (except when new features are added), but end users should not use these classes. They should use the static * configurators on the {@link org.apache.hadoop.mapreduce.InputFormat} or {@link org.apache.hadoop.mapreduce.OutputFormat} they are configuring, which in turn * may use these classes to implement their own static configurators. Once again, these classes are intended for internal use, but may be useful to developers * of plugins for other frameworks that read/write to Accumulo. - * + * * @since 1.6.0 */ package org.apache.accumulo.core.client.mapreduce.lib.impl; http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/KeyRangePartitioner.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/KeyRangePartitioner.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/KeyRangePartitioner.java index c59841d..bd4857e 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/KeyRangePartitioner.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/KeyRangePartitioner.java @@ -28,29 +28,29 @@ import org.apache.hadoop.mapreduce.Partitioner; */ public class KeyRangePartitioner extends Partitioner<Key,Writable> implements Configurable { private RangePartitioner rp = new RangePartitioner(); - + @Override public int getPartition(Key key, Writable value, int numPartitions) { return rp.getPartition(key.getRow(), value, numPartitions); } - + @Override public Configuration getConf() { return rp.getConf(); } - + @Override public void setConf(Configuration conf) { rp.setConf(conf); } - + /** * Sets the hdfs file name to use, containing a newline separated list of Base64 encoded split points that represent ranges for partitioning */ public static void setSplitFile(Job job, String file) { RangePartitioner.setSplitFile(job, file); } - + /** * Sets the number of random sub-bins per range */ http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/IteratorAdapter.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/IteratorAdapter.java b/core/src/main/java/org/apache/accumulo/core/client/mock/IteratorAdapter.java index 840db41..d4d4004 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mock/IteratorAdapter.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mock/IteratorAdapter.java @@ -27,18 +27,18 @@ import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.iterators.SortedKeyValueIterator; public class IteratorAdapter implements Iterator<Entry<Key,Value>> { - + SortedKeyValueIterator<Key,Value> inner; - + public IteratorAdapter(SortedKeyValueIterator<Key,Value> inner) { this.inner = inner; } - + @Override public boolean hasNext() { return inner.hasTop(); } - + @Override public Entry<Key,Value> next() { try { @@ -49,7 +49,7 @@ public class IteratorAdapter implements Iterator<Entry<Key,Value>> { throw new NoSuchElementException(); } } - + @Override public void 
remove() { throw new UnsupportedOperationException(); http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java index c55c378..f171889 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java @@ -110,7 +110,7 @@ public class MockAccumulo { public void createTable(String username, String tableName, TimeType timeType, Map<String,String> properties) { String namespace = Tables.qualify(tableName).getFirst(); - HashMap<String, String> props = new HashMap<>(properties); + HashMap<String,String> props = new HashMap<>(properties); if (!namespaceExists(namespace)) { return; http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchDeleter.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchDeleter.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchDeleter.java index 6f321ff..bb9f2c8 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchDeleter.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchDeleter.java @@ -35,14 +35,14 @@ import org.apache.accumulo.core.security.ColumnVisibility; * <li>There is no waiting for memory to fill before flushing</li> * <li>Only one thread is used for writing</li> * </ol> - * + * * Otherwise, it behaves as expected. */ public class MockBatchDeleter extends MockBatchScanner implements BatchDeleter { - + private final MockAccumulo acc; private final String tableName; - + /** * Create a {@link BatchDeleter} for the specified instance on the specified table where the writer uses the specified {@link Authorizations}. 
*/ @@ -51,10 +51,10 @@ public class MockBatchDeleter extends MockBatchScanner implements BatchDeleter { this.acc = acc; this.tableName = tableName; } - + @Override public void delete() throws MutationsRejectedException, TableNotFoundException { - + BatchWriter writer = new MockBatchWriter(acc, tableName); try { Iterator<Entry<Key,Value>> iter = super.iterator(); @@ -69,5 +69,5 @@ public class MockBatchDeleter extends MockBatchScanner implements BatchDeleter { writer.close(); } } - + } http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchScanner.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchScanner.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchScanner.java index 4512006..4034271 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchScanner.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchScanner.java @@ -33,22 +33,22 @@ import org.apache.accumulo.core.security.Authorizations; import org.apache.commons.collections.iterators.IteratorChain; public class MockBatchScanner extends MockScannerBase implements BatchScanner { - + List<Range> ranges = null; - + public MockBatchScanner(MockTable mockTable, Authorizations authorizations) { super(mockTable, authorizations); } - + @Override public void setRanges(Collection<Range> ranges) { if (ranges == null || ranges.size() == 0) { throw new IllegalArgumentException("ranges must be non null and contain at least 1 range"); } - + this.ranges = new ArrayList<Range>(ranges); } - + @SuppressWarnings("unchecked") @Override public Iterator<Entry<Key,Value>> iterator() { @@ -69,7 +69,7 @@ public class MockBatchScanner extends MockScannerBase implements BatchScanner { } return chain; } - + @Override public void close() {} } http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java index f2c5c85..163587f 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java @@ -23,21 +23,21 @@ import org.apache.accumulo.core.client.MutationsRejectedException; import org.apache.accumulo.core.data.Mutation; public class MockBatchWriter implements BatchWriter { - + final String tablename; final MockAccumulo acu; - + MockBatchWriter(MockAccumulo acu, String tablename) { this.acu = acu; this.tablename = tablename; } - + @Override public void addMutation(Mutation m) throws MutationsRejectedException { checkArgument(m != null, "m is null"); acu.addMutation(tablename, m); } - + @Override public void addMutations(Iterable<Mutation> iterable) throws MutationsRejectedException { checkArgument(iterable != null, "iterable is null"); @@ -45,11 +45,11 @@ public class MockBatchWriter implements BatchWriter { acu.addMutation(tablename, m); } } - + @Override public void flush() throws MutationsRejectedException {} - + @Override public void close() throws MutationsRejectedException {} - + } 
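A small unit-test style round trip through the mock classes above; the instance, user, and table names are arbitrary.

import java.util.Map.Entry;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

public class MockRoundTrip {
  public static void main(String[] args) throws Exception {
    Connector conn = new MockInstance("test").getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("scratch");
    BatchWriter writer = conn.createBatchWriter("scratch", new BatchWriterConfig());
    Mutation m = new Mutation("row1");
    m.put("cf", "cq", "value1");
    writer.addMutation(m);
    writer.close();
    Scanner scanner = conn.createScanner("scratch", Authorizations.EMPTY);
    for (Entry<Key,Value> entry : scanner)
      System.out.println(entry.getKey() + " -> " + entry.getValue());
  }
}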
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockConfiguration.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockConfiguration.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockConfiguration.java index ce262a2..8c57c5e 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockConfiguration.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockConfiguration.java @@ -24,15 +24,15 @@ import org.apache.accumulo.core.conf.Property; class MockConfiguration extends AccumuloConfiguration { Map<String,String> map; - + MockConfiguration(Map<String,String> settings) { map = settings; } - + public void put(String k, String v) { map.put(k, v); } - + @Override public String get(Property property) { return map.get(property.getKey()); http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java index 8613602..4d32093 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java @@ -31,9 +31,9 @@ import org.apache.accumulo.core.client.MultiTableBatchWriter; import org.apache.accumulo.core.client.Scanner; import org.apache.accumulo.core.client.TableNotFoundException; import org.apache.accumulo.core.client.admin.InstanceOperations; +import org.apache.accumulo.core.client.admin.NamespaceOperations; import org.apache.accumulo.core.client.admin.ReplicationOperations; import org.apache.accumulo.core.client.admin.SecurityOperations; -import org.apache.accumulo.core.client.admin.NamespaceOperations; import org.apache.accumulo.core.client.admin.TableOperations; import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode; import org.apache.accumulo.core.client.security.tokens.NullToken; @@ -41,15 +41,15 @@ import org.apache.accumulo.core.security.Authorizations; import org.apache.accumulo.core.security.Credentials; public class MockConnector extends Connector { - + String username; private final MockAccumulo acu; private final Instance instance; - + MockConnector(String username, MockInstance instance) throws AccumuloSecurityException { this(new Credentials(username, new NullToken()), new MockAccumulo(MockInstance.getDefaultFileSystem()), instance); } - + MockConnector(Credentials credentials, MockAccumulo acu, MockInstance instance) throws AccumuloSecurityException { if (credentials.getToken().isDestroyed()) throw new AccumuloSecurityException(credentials.getPrincipal(), SecurityErrorCode.TOKEN_EXPIRED); @@ -57,14 +57,14 @@ public class MockConnector extends Connector { this.acu = acu; this.instance = instance; } - + @Override public BatchScanner createBatchScanner(String tableName, Authorizations authorizations, int numQueryThreads) throws TableNotFoundException { if (acu.tables.get(tableName) == null) throw new TableNotFoundException(tableName, tableName, "no such table"); return acu.createBatchScanner(tableName, authorizations); } - + @Deprecated @Override public BatchDeleter createBatchDeleter(String tableName, Authorizations authorizations, int numQueryThreads, long 
maxMemory, long maxLatency, @@ -73,14 +73,14 @@ public class MockConnector extends Connector { throw new TableNotFoundException(tableName, tableName, "no such table"); return new MockBatchDeleter(acu, tableName, authorizations); } - + @Override public BatchDeleter createBatchDeleter(String tableName, Authorizations authorizations, int numQueryThreads, BatchWriterConfig config) throws TableNotFoundException { return createBatchDeleter(tableName, authorizations, numQueryThreads, config.getMaxMemory(), config.getMaxLatency(TimeUnit.MILLISECONDS), config.getMaxWriteThreads()); } - + @Deprecated @Override public BatchWriter createBatchWriter(String tableName, long maxMemory, long maxLatency, int maxWriteThreads) throws TableNotFoundException { @@ -88,23 +88,23 @@ public class MockConnector extends Connector { throw new TableNotFoundException(tableName, tableName, "no such table"); return new MockBatchWriter(acu, tableName); } - + @Override public BatchWriter createBatchWriter(String tableName, BatchWriterConfig config) throws TableNotFoundException { return createBatchWriter(tableName, config.getMaxMemory(), config.getMaxLatency(TimeUnit.MILLISECONDS), config.getMaxWriteThreads()); } - + @Deprecated @Override public MultiTableBatchWriter createMultiTableBatchWriter(long maxMemory, long maxLatency, int maxWriteThreads) { return new MockMultiTableBatchWriter(acu); } - + @Override public MultiTableBatchWriter createMultiTableBatchWriter(BatchWriterConfig config) { return createMultiTableBatchWriter(config.getMaxMemory(), config.getMaxLatency(TimeUnit.MILLISECONDS), config.getMaxWriteThreads()); } - + @Override public Scanner createScanner(String tableName, Authorizations authorizations) throws TableNotFoundException { MockTable table = acu.tables.get(tableName); @@ -112,27 +112,27 @@ public class MockConnector extends Connector { throw new TableNotFoundException(tableName, tableName, "no such table"); return new MockScanner(table, authorizations); } - + @Override public Instance getInstance() { return instance; } - + @Override public String whoami() { return username; } - + @Override public TableOperations tableOperations() { return new MockTableOperations(acu, username); } - + @Override public SecurityOperations securityOperations() { return new MockSecurityOperations(acu); } - + @Override public InstanceOperations instanceOperations() { return new MockInstanceOperations(acu); @@ -142,7 +142,7 @@ public class MockConnector extends Connector { public NamespaceOperations namespaceOperations() { return new MockNamespaceOperations(acu, username); } - + @Override public ConditionalWriter createConditionalWriter(String tableName, ConditionalWriterConfig config) throws TableNotFoundException { // TODO add implementation @@ -154,5 +154,5 @@ public class MockConnector extends Connector { // TODO add implementation throw new UnsupportedOperationException(); } - + } http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java index 9b07d49..67435d2 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java @@ -42,11 +42,11 @@ import org.apache.hadoop.io.Text; * Mock Accumulo provides an in memory 
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
index 9b07d49..67435d2 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
@@ -42,11 +42,11 @@ import org.apache.hadoop.io.Text;
  * Mock Accumulo provides an in-memory implementation of the Accumulo client API. The behavior of this implementation may differ subtly from the behavior of
  * Accumulo, so unit tests that pass on Mock Accumulo may fail on Accumulo, or vice versa. Documenting the differences would be difficult and has not been done.
- * 
+ *
  * <p>
  * An alternative to Mock Accumulo called MiniAccumuloCluster was introduced in Accumulo 1.5. MiniAccumuloCluster spins up actual Accumulo server processes, can
  * be used for unit testing, and its behavior should match Accumulo. The drawback of MiniAccumuloCluster is that it starts more slowly than Mock Accumulo.
- * 
+ *
  */
 public class MockInstance implements Instance {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
index 87359bc..48122b7 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
@@ -71,7 +71,7 @@ class MockInstanceOperations implements InstanceOperations {
     try {
       AccumuloVFSClassLoader.loadClass(className, Class.forName(asTypeName));
     } catch (ClassNotFoundException e) {
-      log.warn("Could not find class named '"+className+"' in testClassLoad.", e);
+      log.warn("Could not find class named '" + className + "' in testClassLoad.", e);
       return false;
     }
     return true;
@@ -88,6 +88,5 @@ class MockInstanceOperations implements InstanceOperations {
   }
 
   @Override
-  public void waitForBalance() throws AccumuloException {
-  }
+  public void waitForBalance() throws AccumuloException {}
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockMultiTableBatchWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockMultiTableBatchWriter.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockMultiTableBatchWriter.java
index b4a7068..9cc3dfb 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockMultiTableBatchWriter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockMultiTableBatchWriter.java
@@ -29,12 +29,12 @@ import org.apache.accumulo.core.client.TableNotFoundException;
 public class MockMultiTableBatchWriter implements MultiTableBatchWriter {
   MockAccumulo acu = null;
   Map<String,MockBatchWriter> bws = null;
-  
+
   public MockMultiTableBatchWriter(MockAccumulo acu) {
     this.acu = acu;
     bws = new HashMap<String,MockBatchWriter>();
   }
-  
+
   @Override
   public BatchWriter getBatchWriter(String table) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     if (!bws.containsKey(table)) {
@@ -42,13 +42,13 @@ public class MockMultiTableBatchWriter implements MultiTableBatchWriter {
     }
     return bws.get(table);
   }
-  
+
   @Override
   public void flush() throws MutationsRejectedException {}
-  
+
   @Override
   public void close() throws MutationsRejectedException {}
-  
+
   @Override
   public boolean isClosed() {
     throw new UnsupportedOperationException();
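MockMultiTableBatchWriter caches one MockBatchWriter per table, mirroring the real contract: a MultiTableBatchWriter shares a single buffer and one set of limits across all tables it writes to. A usage sketch, reusing the hypothetical conn from the earlier example (table names are illustrative):

    // imports as before, plus org.apache.accumulo.core.client.MultiTableBatchWriter
    MultiTableBatchWriter mtbw = conn.createMultiTableBatchWriter(new BatchWriterConfig());

    Mutation m1 = new Mutation("row1");
    m1.put("cf", "cq", "v1");
    mtbw.getBatchWriter("table1").addMutation(m1);  // per-table writers share one buffer

    Mutation m2 = new Mutation("row2");
    m2.put("cf", "cq", "v2");
    mtbw.getBatchWriter("table2").addMutation(m2);

    mtbw.close();  // flushes and closes the writers for all tables at once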
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockNamespaceOperations.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockNamespaceOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockNamespaceOperations.java
index 7e7eecb..ac581ab 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockNamespaceOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockNamespaceOperations.java
@@ -36,7 +36,7 @@ import org.apache.log4j.Logger;
 class MockNamespaceOperations extends NamespaceOperationsHelper {
 
   private static final Logger log = Logger.getLogger(MockNamespaceOperations.class);
-  
+
   final private MockAccumulo acu;
   final private String username;
 
@@ -125,7 +125,7 @@ class MockNamespaceOperations extends NamespaceOperationsHelper {
     try {
       AccumuloVFSClassLoader.loadClass(className, Class.forName(asTypeName));
     } catch (ClassNotFoundException e) {
-      log.warn("Could not load class '"+className+"' with type name '"+asTypeName+"' in testClassLoad()", e);
+      log.warn("Could not load class '" + className + "' with type name '" + asTypeName + "' in testClassLoad()", e);
       return false;
     }
     return true;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockScanner.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockScanner.java
index e7c0ee0..a9b6fd5 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockScanner.java
@@ -31,14 +31,14 @@ import org.apache.accumulo.core.iterators.SortedMapIterator;
 import org.apache.accumulo.core.security.Authorizations;
 
 public class MockScanner extends MockScannerBase implements Scanner {
-  
+
   int batchSize = 0;
   Range range = new Range();
-  
+
   MockScanner(MockTable table, Authorizations auths) {
     super(table, auths);
   }
-  
+
   @Deprecated
   @Override
   public void setTimeOut(int timeOut) {
@@ -47,7 +47,7 @@ public class MockScanner extends MockScannerBase implements Scanner {
     else
       setTimeout(timeOut, TimeUnit.SECONDS);
   }
-  
+
   @Deprecated
   @Override
   public int getTimeOut() {
@@ -56,47 +56,47 @@ public class MockScanner extends MockScannerBase implements Scanner {
       return Integer.MAX_VALUE;
     return (int) timeout;
   }
-  
+
   @Override
   public void setRange(Range range) {
     this.range = range;
   }
-  
+
   @Override
   public Range getRange() {
     return this.range;
   }
-  
+
   @Override
   public void setBatchSize(int size) {
     this.batchSize = size;
   }
-  
+
   @Override
   public int getBatchSize() {
     return this.batchSize;
   }
-  
+
   @Override
   public void enableIsolation() {}
-  
+
   @Override
   public void disableIsolation() {}
-  
+
   static class RangeFilter extends Filter {
     Range range;
-    
+
     RangeFilter(SortedKeyValueIterator<Key,Value> i, Range range) {
       setSource(i);
      this.range = range;
    }
-    
+
     @Override
     public boolean accept(Key k, Value v) {
       return range.contains(k);
    }
  }
-  
+
   @Override
   public Iterator<Entry<Key,Value>> iterator() {
     SortedKeyValueIterator<Key,Value> i = new SortedMapIterator(table.table);
@@ -107,7 +107,7 @@ public class MockScanner extends MockScannerBase implements Scanner {
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
-    
+
   }
 
   @Override
@@ -117,7 +117,7 @@ public class MockScanner extends MockScannerBase implements Scanner {
 
   @Override
   public void setReadaheadThreshold(long batches) {
-    
+
   }
-  
+
 }
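The RangeFilter nested in MockScanner shows the smallest useful server-side Filter: set a source, implement accept(Key, Value). A standalone sketch in the same style; the class name and cutoff value are invented for illustration, not part of this commit:

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.Filter;

    /** Keeps only entries whose timestamp is at or after a fixed cutoff. */
    public class AfterTimestampFilter extends Filter {
      private static final long CUTOFF = 1400000000000L; // illustrative epoch millis

      @Override
      public boolean accept(Key k, Value v) {
        return k.getTimestamp() >= CUTOFF;
      }
    }

A production filter would normally take its cutoff from iterator options via init() rather than a constant, but the accept() hook is the whole contract.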
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockScannerBase.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockScannerBase.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockScannerBase.java
index 72cb863..d88c30a 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockScannerBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockScannerBase.java
@@ -44,15 +44,15 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.commons.lang.NotImplementedException;
 
 public class MockScannerBase extends ScannerOptions implements ScannerBase {
-  
+
   protected final MockTable table;
   protected final Authorizations auths;
-  
+
   MockScannerBase(MockTable mockTable, Authorizations authorizations) {
     this.table = mockTable;
     this.auths = authorizations;
   }
-  
+
   static HashSet<ByteSequence> createColumnBSS(Collection<Column> columns) {
     HashSet<ByteSequence> columnSet = new HashSet<ByteSequence>();
     for (Column c : columns) {
@@ -60,35 +60,35 @@ public class MockScannerBase extends ScannerOptions implements ScannerBase {
     }
     return columnSet;
   }
-  
+
   static class MockIteratorEnvironment implements IteratorEnvironment {
 
     @Override
     public SortedKeyValueIterator<Key,Value> reserveMapFileReader(String mapFileName) throws IOException {
       throw new NotImplementedException();
     }
-    
+
     @Override
     public AccumuloConfiguration getConfig() {
       return AccumuloConfiguration.getDefaultConfiguration();
     }
-    
+
     @Override
     public IteratorScope getIteratorScope() {
       return IteratorScope.scan;
     }
-    
+
     @Override
     public boolean isFullMajorCompaction() {
       return false;
     }
-    
+
     private ArrayList<SortedKeyValueIterator<Key,Value>> topLevelIterators = new ArrayList<SortedKeyValueIterator<Key,Value>>();
-    
+
     @Override
     public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {
       topLevelIterators.add(iter);
     }
-    
+
     SortedKeyValueIterator<Key,Value> getTopLevelIterator(SortedKeyValueIterator<Key,Value> iter) {
       if (topLevelIterators.isEmpty())
         return iter;
@@ -97,7 +97,7 @@ public class MockScannerBase extends ScannerOptions implements ScannerBase {
       return new MultiIterator(allIters, false);
     }
   }
-  
+
   public SortedKeyValueIterator<Key,Value> createFilter(SortedKeyValueIterator<Key,Value> inner) throws IOException {
     byte[] defaultLabels = {};
     inner = new ColumnFamilySkippingIterator(new DeletingIterator(inner, false));
@@ -109,7 +109,7 @@ public class MockScannerBase extends ScannerOptions implements ScannerBase {
         serverSideIteratorList, serverSideIteratorOptions, iterEnv, false));
     return result;
   }
-  
+
   @Override
   public Iterator<Entry<Key,Value>> iterator() {
     throw new UnsupportedOperationException();
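createFilter above rebuilds, in memory, the scan-time iterator stack a tablet server would apply: delete suppression, column-family skipping, visibility filtering, then whatever scan iterators the client configured. Clients attach those scan iterators roughly as follows; the priority, name, and regex are arbitrary, and conn and the table are the hypothetical ones from the earlier sketch:

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.iterators.user.RegExFilter;
    import org.apache.accumulo.core.security.Authorizations;

    Scanner scanner = conn.createScanner("demo", new Authorizations());
    IteratorSetting setting = new IteratorSetting(50, "rowRegex", RegExFilter.class);
    RegExFilter.setRegexs(setting, "row1.*", null, null, null, false); // rows starting with "row1"
    scanner.addScanIterator(setting);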
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
index ee9244b..2244d20 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
@@ -41,30 +41,30 @@ import org.apache.accumulo.core.security.TablePermission;
 import org.apache.hadoop.io.Text;
 
 public class MockTable {
-  
+
   static class MockMemKey extends Key {
     private int count;
-    
+
     MockMemKey(Key key, int count) {
       super(key);
       this.count = count;
    }
-    
+
     @Override
     public int hashCode() {
       return super.hashCode() + count;
    }
-    
+
     @Override
     public boolean equals(Object other) {
       return (other instanceof MockMemKey) && super.equals((MockMemKey) other) && count == ((MockMemKey) other).count;
    }
-    
+
     @Override
     public String toString() {
       return super.toString() + " count=" + count;
    }
-    
+
     @Override
     public int compareTo(Key o) {
       int compare = super.compareTo(o);
@@ -82,7 +82,7 @@ public class MockTable {
       return 0;
     }
   };
-  
+
   final SortedMap<Key,Value> table = new ConcurrentSkipListMap<Key,Value>();
   int mutationCount = 0;
   final Map<String,String> settings;
@@ -93,7 +93,7 @@ public class MockTable {
   private MockNamespace namespace;
   private String namespaceName;
   private String tableId;
-  
+
   MockTable(boolean limitVersion, TimeType timeType, String tableId) {
     this.timeType = timeType;
     this.tableId = tableId;
@@ -160,27 +160,27 @@ public class MockTable {
         key.setTimestamp(mutationCount);
       else
         key.setTimestamp(now);
-      
+
       table.put(new MockMemKey(key, mutationCount), new Value(u.getValue()));
     }
   }
-  
+
   public void addSplits(SortedSet<Text> partitionKeys) {
     splits.addAll(partitionKeys);
   }
-  
+
   public Collection<Text> getSplits() {
     return splits;
   }
-  
+
   public void setLocalityGroups(Map<String,Set<Text>> groups) {
     localityGroups = groups;
   }
-  
+
   public Map<String,Set<Text>> getLocalityGroups() {
     return localityGroups;
   }
-  
+
   public void merge(Text start, Text end) {
     boolean reAdd = false;
     if (splits.contains(start))
@@ -189,19 +189,19 @@ public class MockTable {
     if (reAdd)
       splits.add(start);
   }
-  
+
   public void setNamespaceName(String n) {
     this.namespaceName = n;
   }
-  
+
   public void setNamespace(MockNamespace n) {
     this.namespace = n;
   }
-  
+
   public String getNamespaceName() {
     return this.namespaceName;
   }
-  
+
   public MockNamespace getNamespace() {
     return this.namespace;
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/mock/MockUser.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockUser.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockUser.java
index b39791d..efc896e 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockUser.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockUser.java
@@ -27,7 +27,7 @@ public class MockUser {
   final String name;
   AuthenticationToken token;
   Authorizations authorizations;
-  
+
   MockUser(String principal, AuthenticationToken token, Authorizations auths) {
     this.name = principal;
     this.token = token.clone();
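MockTable's addSplits and merge are the in-memory stand-ins for split bookkeeping that clients normally drive through TableOperations; against either the mock or a real connector the calls look like this (table name hypothetical, continuing the earlier conn):

    import java.util.SortedSet;
    import java.util.TreeSet;
    import org.apache.hadoop.io.Text;

    SortedSet<Text> splits = new TreeSet<Text>();
    splits.add(new Text("g"));
    splits.add(new Text("n"));
    conn.tableOperations().addSplits("demo", splits);                    // pre-split the table
    conn.tableOperations().merge("demo", new Text("a"), new Text("z"));  // merge tablets between the given rows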
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/replication/ReplicaSystem.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/replication/ReplicaSystem.java b/core/src/main/java/org/apache/accumulo/core/client/replication/ReplicaSystem.java
index 900ae5a..bdcc652 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/replication/ReplicaSystem.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/replication/ReplicaSystem.java
@@ -28,10 +28,15 @@ public interface ReplicaSystem {
 
   /**
    * Replicate the given status to the target peer
-   * @param p Path to the resource we're reading from
-   * @param status Information to replicate
-   * @param target The peer
-   * @param helper Instance of ReplicaSystemHelper
+   *
+   * @param p
+   *          Path to the resource we're reading from
+   * @param status
+   *          Information to replicate
+   * @param target
+   *          The peer
+   * @param helper
+   *          Instance of ReplicaSystemHelper
    * @return A new Status for the progress that was made
    */
   public Status replicate(Path p, Status status, ReplicationTarget target, ReplicaSystemHelper helper);
@@ -39,8 +44,7 @@ public interface ReplicaSystem {
   /**
    * Configure the implementation with necessary information from the system configuration
    * <p>
-   * For example, we only need one implementation for Accumulo, but, for each peer,
-   * we have a ZK quorum and instance name
+   * For example, we only need one implementation for Accumulo, but, for each peer, we have a ZK quorum and instance name
    */
   public void configure(String configuration);
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/replication/ReplicaSystemFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/replication/ReplicaSystemFactory.java b/core/src/main/java/org/apache/accumulo/core/client/replication/ReplicaSystemFactory.java
index e721278..d76b3d8 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/replication/ReplicaSystemFactory.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/replication/ReplicaSystemFactory.java
@@ -22,7 +22,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 
 /**
- * 
+ *
 */
 public class ReplicaSystemFactory {
   private static final Logger log = LoggerFactory.getLogger(ReplicaSystemFactory.class);
@@ -64,7 +64,7 @@ public class ReplicaSystemFactory {
 
   /**
    * Generate the configuration value for a {@link ReplicaSystem} in the instance properties
-   * 
+   *
    * @param system
    *          The desired ReplicaSystem to use
    * @param configuration
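Since ReplicaSystem's javadoc only gestures at what configure receives (per the comment above, one ZK quorum and instance name per peer), a skeletal implementation may make the contract concrete. This is a sketch under that assumption, using only the types visible in the interface; the class name is invented and imports are omitted:

    // Hypothetical ReplicaSystem; a real one would ship data to the peer.
    public class LoggingReplicaSystem implements ReplicaSystem {
      private String peerQuorum;

      @Override
      public void configure(String configuration) {
        // e.g. "zkHost1:2181,zkHost2:2181/peerInstance" -- one quorum + instance per peer
        this.peerQuorum = configuration;
      }

      @Override
      public Status replicate(Path p, Status status, ReplicationTarget target, ReplicaSystemHelper helper) {
        // A real implementation reads the resource at p, replicates it to target,
        // and returns an updated Status; this sketch reports no progress.
        return status;
      }
    }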
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/security/SecurityErrorCode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/security/SecurityErrorCode.java b/core/src/main/java/org/apache/accumulo/core/client/security/SecurityErrorCode.java
index 30c60d5..b4027b2 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/security/SecurityErrorCode.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/security/SecurityErrorCode.java
@@ -17,7 +17,7 @@ package org.apache.accumulo.core.client.security;
 
 /**
- * 
+ *
 */
 public enum SecurityErrorCode {
   DEFAULT_SECURITY_ERROR,

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/client/security/tokens/AuthenticationToken.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/AuthenticationToken.java b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/AuthenticationToken.java
index 99cc721..7836ea5 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/AuthenticationToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/AuthenticationToken.java
@@ -33,15 +33,15 @@ import javax.security.auth.Destroyable;
 
 import org.apache.hadoop.io.Writable;
 
 /**
- * 
+ *
  * @since 1.5.0
 */
 public interface AuthenticationToken extends Writable, Destroyable, Cloneable {
-  
+
   /**
   * A utility class to serialize/deserialize {@link AuthenticationToken} objects.<br/>
   * Unfortunately, these methods are provided in an inner class to avoid breaking the interface API.
-   * 
+   *
   * @since 1.6.0
   */
  public static final class AuthenticationTokenSerializer {
@@ -49,7 +49,7 @@ public interface AuthenticationToken extends Writable, Destroyable, Cloneable {
     * A convenience method to create tokens from serialized bytes, created by {@link #serialize(AuthenticationToken)}
     * <p>
     * The specified tokenType will be instantiated and used to deserialize the decoded bytes. The resulting object will then be returned to the caller.
-     * 
+     *
     * @param tokenType
     *          the token class to use to deserialize the bytes
     * @param tokenBytes
@@ -78,10 +78,10 @@ public interface AuthenticationToken extends Writable, Destroyable, Cloneable {
       }
       return type;
     }
-    
+
     /**
     * An alternate version of {@link #deserialize(Class, byte[])} that accepts a token class name rather than a token class.
-     * 
+     *
     * @param tokenClassName
     *          the fully-qualified class name to be returned
     * @see #serialize(AuthenticationToken)
@@ -97,12 +97,12 @@ public interface AuthenticationToken extends Writable, Destroyable, Cloneable {
       }
       return deserialize(tokenType, tokenBytes);
     }
-    
+
     /**
     * A convenience method to serialize tokens.
     * <p>
     * The provided {@link AuthenticationToken} will be serialized to bytes by its own implementation and returned to the caller.
-     * 
+     *
     * @param token
     *          the token to serialize
     * @return a serialized representation of the provided {@link AuthenticationToken}
@@ -125,17 +125,17 @@ public interface AuthenticationToken extends Writable, Destroyable, Cloneable {
       return bytes;
     }
   }
-  
+
   class Properties implements Destroyable, Map<String,char[]> {
-    
+
     private boolean destroyed = false;
     private HashMap<String,char[]> map = new HashMap<String,char[]>();
-    
+
     private void checkDestroyed() {
       if (destroyed)
         throw new IllegalStateException();
     }
-    
+
     public char[] put(String key, CharSequence value) {
       checkDestroyed();
       char[] toPut = new char[value.length()];
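The serializer hunks above document a byte-level round trip; with a concrete token such as PasswordToken it looks like this (the token contents are illustrative, and the statements assume an enclosing method that may throw Exception):

    import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
    import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;

    PasswordToken original = new PasswordToken("secret");

    // serialize() delegates to the token's own Writable implementation.
    byte[] bytes = AuthenticationTokenSerializer.serialize(original);

    // Either overload restores the token; the class-name variant suits configuration-driven code.
    PasswordToken byClass = AuthenticationTokenSerializer.deserialize(PasswordToken.class, bytes);
    AuthenticationToken byName = AuthenticationTokenSerializer.deserialize(PasswordToken.class.getName(), bytes);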
@@ -143,14 +143,14 @@ public interface AuthenticationToken extends Writable, Destroyable, Cloneable {
         toPut[i] = value.charAt(i);
       return map.put(key, toPut);
     }
-    
+
     public void putAllStrings(Map<String,? extends CharSequence> map) {
       checkDestroyed();
       for (Map.Entry<String,? extends CharSequence> entry : map.entrySet()) {
         put(entry.getKey(), entry.getValue());
       }
     }
-    
+
     @Override
     public void destroy() throws DestroyFailedException {
       for (String key : this.keySet()) {
@@ -160,133 +160,133 @@ public interface AuthenticationToken extends Writable, Destroyable, Cloneable {
       this.clear();
       destroyed = true;
     }
-    
+
     @Override
     public boolean isDestroyed() {
       return destroyed;
     }
-    
+
     @Override
     public int size() {
       checkDestroyed();
       return map.size();
     }
-    
+
     @Override
     public boolean isEmpty() {
       checkDestroyed();
       return map.isEmpty();
     }
-    
+
     @Override
     public boolean containsKey(Object key) {
       checkDestroyed();
       return map.containsKey(key);
     }
-    
+
     @Override
     public boolean containsValue(Object value) {
       checkDestroyed();
       return map.containsValue(value);
     }
-    
+
     @Override
     public char[] get(Object key) {
       checkDestroyed();
       return map.get(key);
     }
-    
+
     @Override
     public char[] put(String key, char[] value) {
       checkDestroyed();
       return map.put(key, value);
     }
-    
+
     @Override
     public char[] remove(Object key) {
       checkDestroyed();
       return map.remove(key);
     }
-    
+
     @Override
     public void putAll(Map<? extends String,? extends char[]> m) {
       checkDestroyed();
       map.putAll(m);
     }
-    
+
     @Override
     public void clear() {
       checkDestroyed();
       map.clear();
     }
-    
+
     @Override
     public Set<String> keySet() {
       checkDestroyed();
       return map.keySet();
     }
-    
+
     @Override
     public Collection<char[]> values() {
       checkDestroyed();
       return map.values();
     }
-    
+
     @Override
     public Set<Map.Entry<String,char[]>> entrySet() {
       checkDestroyed();
       return map.entrySet();
     }
   }
-  
+
   static class TokenProperty implements Comparable<TokenProperty> {
     private String key, description;
     private boolean masked;
-    
+
     public TokenProperty(String name, String description, boolean mask) {
       this.key = name;
       this.description = description;
       this.masked = mask;
    }
-    
+
     @Override
     public String toString() {
       return this.key + " - " + description;
    }
-    
+
     public String getKey() {
       return this.key;
    }
-    
+
     public String getDescription() {
       return this.description;
    }
-    
+
     public boolean getMask() {
       return this.masked;
    }
-    
+
     @Override
     public int hashCode() {
       return key.hashCode();
    }
-    
+
     @Override
     public boolean equals(Object o) {
       if (o instanceof TokenProperty)
         return ((TokenProperty) o).key.equals(key);
       return false;
    }
-    
+
     @Override
     public int compareTo(TokenProperty o) {
       return key.compareTo(o.key);
    }
  }
-  
+
   public void init(Properties properties);
-  
+
   public Set<TokenProperty> getProperties();
-  
+
   public AuthenticationToken clone();
 }
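The Properties map above stores credential values as char[] precisely so destroy() can zero them, and a token is expected to copy what it needs in init(Properties). A closing sketch of that lifecycle; the "password" key is an assumption about PasswordToken's token properties:

    AuthenticationToken.Properties props = new AuthenticationToken.Properties();
    props.put("password", "secret");   // copied into a char[], not retained as a String

    AuthenticationToken token = new PasswordToken();
    token.init(props);                 // hypothetical: PasswordToken reads its "password" property

    props.destroy();                   // zeroes every char[] and clears the map
    // props.isDestroyed() is now true; any further map access throws IllegalStateException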