Component docs

Project: http://git-wip-us.apache.org/repos/asf/camel/repo
Commit: http://git-wip-us.apache.org/repos/asf/camel/commit/e370c690
Tree: http://git-wip-us.apache.org/repos/asf/camel/tree/e370c690
Diff: http://git-wip-us.apache.org/repos/asf/camel/diff/e370c690

Branch: refs/heads/master
Commit: e370c6902949318cb5a86acf365a83d4c2143ba7
Parents: 35adbaa
Author: Claus Ibsen <davscl...@apache.org>
Authored: Sat May 14 09:37:10 2016 +0200
Committer: Claus Ibsen <davscl...@apache.org>
Committed: Sat May 14 09:42:52 2016 +0200

----------------------------------------------------------------------
 components/camel-gora/src/main/docs/gora.adoc   |  36 ++--
 .../camel/component/gora/GoraConfiguration.java | 193 +++----------------
 .../camel/component/gora/GoraConsumer.java      |   6 -
 .../camel/component/gora/GoraProducer.java      |   3 -
 components/camel-hdfs/src/main/docs/hdfs.adoc   |  22 ++-
 .../camel/component/hdfs/HdfsConfiguration.java |  20 +-
 components/camel-hdfs2/src/main/docs/hdfs2.adoc |  22 ++-
 .../component/hdfs2/HdfsConfiguration.java      |  20 +-
 8 files changed, 95 insertions(+), 227 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/camel/blob/e370c690/components/camel-gora/src/main/docs/gora.adoc
----------------------------------------------------------------------
diff --git a/components/camel-gora/src/main/docs/gora.adoc b/components/camel-gora/src/main/docs/gora.adoc
index 45befea..834dacc 100644
--- a/components/camel-gora/src/main/docs/gora.adoc
+++ b/components/camel-gora/src/main/docs/gora.adoc
@@ -91,6 +91,7 @@ The Gora component has no options.
 
 
 
+
 // endpoint options: START
 The Gora component supports 22 endpoint options which are listed below:
 
@@ -99,26 +100,26 @@ The Gora component supports 22 endpoint options which are listed below:
 |=======================================================================
 | Name | Group | Default | Java Type | Description
 | name | common |  | String | *Required* Instance name
-| concurrentConsumers | common | 1 | int | Concurrent Consumers NOTE: used only by consumer
-| dataStoreClass | common |  | String | dataStore type
-| endKey | common |  | Object | Gora Query End Key attribute
-| endTime | common |  | long | Gora Query End Time attribute
-| fields | common |  | Strings | Gora Query Fields attribute
-| flushOnEveryOperation | common | true | boolean | Flush on every operation NOTE: used only by producer
-| hadoopConfiguration | common |  | Configuration | configuration
-| keyClass | common |  | String | key type
-| keyRangeFrom | common |  | Object | Gora Query Key Range From attribute
-| keyRangeTo | common |  | Object | Gora Query Key Range To attribute
-| limit | common |  | long | Gora Query Limit attribute
-| startKey | common |  | Object | Gora Query Start Key attribute
-| startTime | common |  | long | Gora Query Start Time attribute
-| timeRangeFrom | common |  | long | Gora Query Time Range From attribute
-| timeRangeTo | common |  | long | Gora Query Key Range To attribute
-| timestamp | common |  | long | Gora Query Timestamp attribute
-| valueClass | common |  | String | value type
+| dataStoreClass | common |  | String | The type of the dataStore
+| keyClass | common |  | String | The type class of the key
+| valueClass | common |  | String | The type of the value
 | bridgeErrorHandler | consumer | false | boolean | Allows for bridging the consumer to the Camel routing Error Handler which mean any exceptions occurred while the consumer is trying to pickup incoming messages or the likes will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions that will be logged at WARN/ERROR level and ignored.
+| concurrentConsumers | consumer | 1 | int | Number of concurrent consumers
+| endKey | consumer |  | Object | The End Key
+| endTime | consumer |  | long | The End Time
+| fields | consumer |  | Strings | The Fields
+| keyRangeFrom | consumer |  | Object | The Key Range From
+| keyRangeTo | consumer |  | Object | The Key Range To
+| limit | consumer |  | long | The Limit
+| startKey | consumer |  | Object | The Start Key
+| startTime | consumer |  | long | The Start Time
+| timeRangeFrom | consumer |  | long | The Time Range From
+| timeRangeTo | consumer |  | long | The Time Range To
+| timestamp | consumer |  | long | The Timestamp
 | exceptionHandler | consumer (advanced) |  | ExceptionHandler | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this options is not in use. By default the consumer will deal with exceptions that will be logged at WARN/ERROR level and ignored.
+| flushOnEveryOperation | producer | true | boolean | Flush on every operation
 | exchangePattern | advanced | InOnly | ExchangePattern | Sets the default exchange pattern when creating an exchange
+| hadoopConfiguration | advanced |  | Configuration | Hadoop Configuration
 | synchronous | advanced | false | boolean | Sets whether synchronous processing should be strictly used or Camel is allowed to use asynchronous processing (if supported).
 |=======================================================================
 {% endraw %}
@@ -126,6 +127,7 @@ The Gora component supports 22 endpoint options which are listed below:
 
 
 
+
 [[Gora-SupportedGoraOperations]]
 Supported Gora Operations
 ~~~~~~~~~~~~~~~~~~~~~~~~~

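For context, these endpoint options are plain URI parameters on the gora endpoint. A minimal route sketch follows; the key/value/datastore classes and the "put" operation are illustrative placeholders, not values taken from this commit:

import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.gora.GoraAttribute;

public class GoraExampleRoutes extends RouteBuilder {
    @Override
    public void configure() {
        // Consumer: the "consumer"-labelled options (limit, concurrentConsumers, ...)
        // shape the Gora query and the polling behaviour.
        from("gora:events?keyClass=java.lang.Long"
                + "&valueClass=com.example.generated.Event"   // hypothetical Avro-generated type
                + "&dataStoreClass=org.apache.gora.hbase.store.HBaseStore"
                + "&concurrentConsumers=4&limit=100")
            .to("log:gora-consumed");

        // Producer: flushOnEveryOperation is the only "producer"-labelled option;
        // the operation to execute travels in the header that GoraProducer reads
        // (GoraAttribute.GORA_OPERATION, as referenced in the producer diff below).
        from("direct:store")
            .setHeader(GoraAttribute.GORA_OPERATION.value, constant("put"))
            .to("gora:events?keyClass=java.lang.Long"
                + "&valueClass=com.example.generated.Event"
                + "&dataStoreClass=org.apache.gora.hbase.store.HBaseStore"
                + "&flushOnEveryOperation=false");
    }
}
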
http://git-wip-us.apache.org/repos/asf/camel/blob/e370c690/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConfiguration.java
----------------------------------------------------------------------
diff --git a/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConfiguration.java b/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConfiguration.java
index 4be4836..0cb2928 100644
--- a/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConfiguration.java
+++ b/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConfiguration.java
@@ -34,133 +34,52 @@ public class GoraConfiguration {
 
     @UriPath @Metadata(required = "true")
     private String name;
-
-    /**
-     * key type
-     */
     @UriParam
     private String keyClass;
-
-    /**
-     * configuration
-     */
-    @UriParam
-    private Configuration hadoopConfiguration;
-
-    /**
-     * value type
-     */
     @UriParam
     private String valueClass;
-
-    /**
-     *  dataStore type
-     */
     @UriParam
     private String dataStoreClass;
+    @UriParam(label = "advanced")
+    private Configuration hadoopConfiguration;
 
-    /** Consumer only properties! */
-
-    /**
-     *  Gora Query Start Time attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private long startTime;
-
-    /**
-     * Gora Query End Time attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private long endTime;
-
-    /**
-     * Gora Query Time Range From attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private long timeRangeFrom;
-
-    /**
-     * Gora Query Key Range To attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private long timeRangeTo;
-
-    /**
-     * Gora Query Limit attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private long limit;
-
-    /**
-     * Gora Query Timestamp attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private long timestamp;
-
-    /**
-     * Gora Query Start Key attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private Object startKey;
-
-    /**
-     * Gora Query End Key attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private Object endKey;
-
-    /**
-     * Gora Query Key Range From attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private Object keyRangeFrom;
-
-    /**
-     * Gora Query Key Range To attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private Object keyRangeTo;
-
-    /**
-     * Gora Query Fields attribute
-     */
-    @UriParam
+    @UriParam(label = "consumer")
     private Strings fields;
-
-    /**
-     * Concurrent Consumers
-     *
-     * <b>NOTE:<b/> used only by consumer
-     */
-    @UriParam(defaultValue = "1")
+    @UriParam(label = "consumer", defaultValue = "1")
     private int concurrentConsumers = 1;
-
-    /**
-     * Flush on every operation
-     *
-     * <b>NOTE:<b/> used only by producer
-     */
-    @UriParam(defaultValue = "true")
+    @UriParam(label = "producer", defaultValue = "true")
     private boolean flushOnEveryOperation = true;
 
-    /**
-     * Default Constructor
-     */
     public GoraConfiguration() {
         this.hadoopConfiguration = new Configuration();
     }
 
-    /**
-     * Get type of the key (i.e clients)
-     *
-     * @return key class
-     */
     public String getKeyClass() {
         return keyClass;
     }
 
     /**
-     * Set type class of the key
+     * The type class of the key
      */
     public void setKeyClass(final String keyClass) {
         if (isNullOrEmpty(keyClass)) {
@@ -170,15 +89,12 @@ public class GoraConfiguration {
         this.keyClass = keyClass;
     }
 
-    /**
-     * Get type of the value
-     */
     public String getValueClass() {
         return valueClass;
     }
 
     /**
-     * Set type of the value
+     * The type of the value
      */
     public void setValueClass(final String valueClass) {
         if (isNullOrEmpty(valueClass)) {
@@ -187,15 +103,12 @@ public class GoraConfiguration {
         this.valueClass = valueClass;
     }
 
-    /**
-     * Get type of the dataStore
-     */
     public String getDataStoreClass() {
         return dataStoreClass;
     }
 
     /**
-     * Set type of the dataStore
+     * The type of the dataStore
      */
     public void setDataStoreClass(String dataStoreClass) {
         if (isNullOrEmpty(dataStoreClass)) {
@@ -204,197 +117,155 @@ public class GoraConfiguration {
         this.dataStoreClass = dataStoreClass;
     }
 
-    /**
-     * Get Hadoop Configuration
-     */
     public Configuration getHadoopConfiguration() {
         return hadoopConfiguration;
     }
 
-    /**
-     * Get Start Time
-     */
     public long getStartTime() {
         return startTime;
     }
 
     /**
-     * Set Start Time
+     * The Start Time
      */
     public void setStartTime(long startTime) {
         this.startTime = startTime;
     }
 
-    /**
-     * Get End Time
-     */
     public long getEndTime() {
         return endTime;
     }
 
     /**
-     * Set End Time
+     * The End Time
      */
     public void setEndTime(long endTime) {
         this.endTime = endTime;
     }
 
-    /**
-     * Get Time Range From
-     */
     public long getTimeRangeFrom() {
         return timeRangeFrom;
     }
 
     /**
-     * Set Time Range From
+     * The Time Range From
      */
     public void setTimeRangeFrom(long timeRangeFrom) {
         this.timeRangeFrom = timeRangeFrom;
     }
 
-    /**
-     * Get Time Range To
-     */
     public long getTimeRangeTo() {
         return timeRangeTo;
     }
 
     /**
-     * Set Time Range To
+     * The Time Range To
      */
     public void setTimeRangeTo(long timeRangeTo) {
         this.timeRangeTo = timeRangeTo;
     }
 
-    /**
-     * Get Limit
-     */
     public long getLimit() {
         return limit;
     }
 
     /**
-     * Set Limit
+     * The Limit
      */
     public void setLimit(long limit) {
         this.limit = limit;
     }
 
-    /**
-     * Get Timestamp
-     */
     public long getTimestamp() {
         return timestamp;
     }
 
     /**
-     * Set Timestamp
+     * The Timestamp
      */
     public void setTimestamp(long timestamp) {
         this.timestamp = timestamp;
     }
 
-    /**
-     * Get Start Key
-     */
     public Object getStartKey() {
         return startKey;
     }
 
     /**
-     * Set Start Key
+     * The Start Key
      */
     public void setStartKey(Object startKey) {
         this.startKey = startKey;
     }
 
-    /**
-     * Get End Key
-     */
     public Object getEndKey() {
         return endKey;
     }
 
     /**
-     * Set End Key
+     * The End Key
      */
     public void setEndKey(Object endKey) {
         this.endKey = endKey;
     }
 
-    /**
-     * Get Key Range From
-     */
     public Object getKeyRangeFrom() {
         return keyRangeFrom;
     }
 
     /**
-     * Set Key Range From
+     * The Key Range From
      */
     public void setKeyRangeFrom(Object keyRangeFrom) {
         this.keyRangeFrom = keyRangeFrom;
     }
 
-    /**
-     * Get Key Range To
-     */
     public Object getKeyRangeTo() {
         return keyRangeTo;
     }
 
     /**
-     * Set Key Range To
+     * The Key Range To
      */
     public void setKeyRangeTo(Object keyRangeTo) {
         this.keyRangeTo = keyRangeTo;
     }
 
-    /**
-     * Get Fields
-     */
     public Strings getFields() {
         return fields;
     }
 
     /**
-     * Set Fields
+     * The Fields
      */
     public void setFields(Strings fields) {
         this.fields = fields;
     }
 
-    /**
-     * Get Concurrent Consumers
-     */
     public int getConcurrentConsumers() {
         return concurrentConsumers;
     }
 
     /**
-     * Set Concurrent Consumers
+     * Number of concurrent consumers
      */
     public void setConcurrentConsumers(int concurrentConsumers) {
         this.concurrentConsumers = concurrentConsumers;
     }
 
-    /**
-     * Get flush on every operation
-     */
     public boolean isFlushOnEveryOperation() {
         return flushOnEveryOperation;
     }
 
     /**
-     * Set flush on every operation
+     * Flush on every operation
      */
     public void setFlushOnEveryOperation(boolean flushOnEveryOperation) {
         this.flushOnEveryOperation = flushOnEveryOperation;
     }
 
     /**
-     * Set Hadoop Configuration
+     * Hadoop Configuration
      */
     public void setHadoopConfiguration(Configuration hadoopConfiguration) {
         checkNotNull(hadoopConfiguration, "Hadoop Configuration could not be null!");

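The change above follows the Camel configuration convention: metadata (label, default value) sits on the field's @UriParam annotation, and the option description is the setter javadoc. A condensed sketch of that convention on a hypothetical configuration class (the class and option names here are invented for illustration):

import org.apache.camel.spi.Metadata;
import org.apache.camel.spi.UriParam;
import org.apache.camel.spi.UriParams;
import org.apache.camel.spi.UriPath;

@UriParams
public class ExampleConfiguration {

    // The path part of the endpoint URI; required pieces are flagged via @Metadata.
    @UriPath @Metadata(required = "true")
    private String name;
    // The label groups the option in the generated docs table
    // (common/consumer/producer/advanced, as in the tables above).
    @UriParam(label = "consumer", defaultValue = "1")
    private int concurrentConsumers = 1;

    public int getConcurrentConsumers() {
        return concurrentConsumers;
    }

    // The setter javadoc becomes the option description in the endpoint options table.
    /**
     * Number of concurrent consumers
     */
    public void setConcurrentConsumers(int concurrentConsumers) {
        this.concurrentConsumers = concurrentConsumers;
    }
}
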
http://git-wip-us.apache.org/repos/asf/camel/blob/e370c690/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConsumer.java
----------------------------------------------------------------------
diff --git a/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConsumer.java b/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConsumer.java
index 2ec9176..e1495bd 100644
--- a/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConsumer.java
+++ b/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConsumer.java
@@ -34,7 +34,6 @@ import org.slf4j.LoggerFactory;
 
 /**
  * Implementation of Camel-Gora {@link Consumer}.
- *
  */
 public class GoraConsumer extends ScheduledPollConsumer {
 
@@ -59,11 +58,6 @@ public class GoraConsumer extends ScheduledPollConsumer {
     private Query query;
 
     /**
-     * executor service
-     */
-    private ExecutorService executor;
-
-    /**
      * Poll run
      */
     private boolean firstRun;

http://git-wip-us.apache.org/repos/asf/camel/blob/e370c690/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraProducer.java
----------------------------------------------------------------------
diff --git a/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraProducer.java b/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraProducer.java
index f68daad..b05d048 100644
--- a/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraProducer.java
+++ b/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraProducer.java
@@ -67,9 +67,6 @@ public class GoraProducer extends DefaultProducer implements ServicePoolAware {
         this.configuration = configuration;
     }
 
-    /**
-     * {@inheritDoc}
-     */
     @Override
     public void process(final Exchange exchange) throws Exception {
         final String operation = (String) exchange.getIn().getHeader(GoraAttribute.GORA_OPERATION.value);

http://git-wip-us.apache.org/repos/asf/camel/blob/e370c690/components/camel-hdfs/src/main/docs/hdfs.adoc
----------------------------------------------------------------------
diff --git a/components/camel-hdfs/src/main/docs/hdfs.adoc b/components/camel-hdfs/src/main/docs/hdfs.adoc
index ac17630..41821a2 100644
--- a/components/camel-hdfs/src/main/docs/hdfs.adoc
+++ b/components/camel-hdfs/src/main/docs/hdfs.adoc
@@ -77,6 +77,7 @@ The HDFS component supports 1 options which are listed below.
 
 
 
+
 // endpoint options: START
 The HDFS component supports 41 endpoint options which are listed below:
 
@@ -87,21 +88,11 @@ The HDFS component supports 41 endpoint options which are listed below:
 | hostName | common |  | String | *Required* HDFS host to use
 | port | common | 8020 | int | HDFS port to use
 | path | common |  | String | *Required* The directory path to use
-| blockSize | common | 67108864 | long | The size of the HDFS blocks
-| bufferSize | common | 4096 | int | The buffer size used by HDFS
-| checkIdleInterval | common | 500 | int | How often (time in millis) in to run the idle checker background task. This option is only in use if the splitter strategy is IDLE.
-| chunkSize | common | 4096 | int | When reading a normal file this is split into chunks producing a message per chunk.
-| compressionCodec | common | DEFAULT | HdfsCompressionCodec | The compression codec to use
-| compressionType | common | NONE | CompressionType | The compression type to use (is default not in use)
 | connectOnStartup | common | true | boolean | Whether to connect to the HDFS file system on starting the producer/consumer. If false then the connection is created on-demand. Notice that HDFS may take up till 15 minutes to establish a connection as it has hardcoded 45 x 20 sec redelivery. By setting this option to false allows your application to startup and not block for up till 15 minutes.
 | fileSystemType | common | HDFS | HdfsFileSystemType | Set to LOCAL to not use HDFS but local java.io.File instead.
 | fileType | common | NORMAL_FILE | HdfsFileType | The file type to use. For more details see Hadoop HDFS documentation about the various files types.
 | keyType | common | NULL | WritableType | The type for the key in case of sequence or map files.
-| openedSuffix | common | opened | String | When a file is opened for reading/writing the file is renamed with this suffix to avoid to read it during the writing phase.
 | owner | common |  | String | The file owner must match this owner for the consumer to pickup the file. Otherwise the file is skipped.
-| readSuffix | common | read | String | Once the file has been read is renamed with this suffix to avoid to read it again.
-| replication | common | 3 | short | The HDFS replication factor
-| splitStrategy | common |  | String | In the current version of Hadoop opening a file in append mode is disabled since it's not very reliable. So for the moment it's only possible to create new files. The Camel HDFS endpoint tries to solve this problem in this way: If the split strategy option has been defined the hdfs path will be used as a directory and files will be created using the configured UuidGenerator. Every time a splitting condition is met a new file is created. The splitStrategy option is defined as a string with the following syntax: splitStrategy=ST:valueST:value... where ST can be: BYTES a new file is created and the old is closed when the number of written bytes is more than value MESSAGES a new file is created and the old is closed when the number of written messages is more than value IDLE a new file is created and the old is closed when no writing happened in the last value milliseconds
 | valueType | common | BYTES | WritableType | The type for the key in case of sequence or map files
 | bridgeErrorHandler | consumer | false | boolean | Allows for bridging the consumer to the Camel routing Error Handler which mean any exceptions occurred while the consumer is trying to pickup incoming messages or the likes will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions that will be logged at WARN/ERROR level and ignored.
 | delay | consumer | 1000 | long | The interval (milliseconds) between the directory scans.
@@ -112,7 +103,17 @@ The HDFS component supports 41 endpoint options which are listed below:
 | pollStrategy | consumer (advanced) |  | PollingConsumerPollStrategy | A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling usually occurred during the poll operation before an Exchange have been created and being routed in Camel.
 | append | producer | false | boolean | Append to existing file. Notice that not all HDFS file systems support the append option.
 | overwrite | producer | true | boolean | Whether to overwrite existing files with the same name
+| blockSize | advanced | 67108864 | long | The size of the HDFS blocks
+| bufferSize | advanced | 4096 | int | The buffer size used by HDFS
+| checkIdleInterval | advanced | 500 | int | How often (time in millis) in to run the idle checker background task. This option is only in use if the splitter strategy is IDLE.
+| chunkSize | advanced | 4096 | int | When reading a normal file this is split into chunks producing a message per chunk.
+| compressionCodec | advanced | DEFAULT | HdfsCompressionCodec | The compression codec to use
+| compressionType | advanced | NONE | CompressionType | The compression type to use (is default not in use)
 | exchangePattern | advanced | InOnly | ExchangePattern | Sets the default exchange pattern when creating an exchange
+| openedSuffix | advanced | opened | String | When a file is opened for reading/writing the file is renamed with this suffix to avoid to read it during the writing phase.
+| readSuffix | advanced | read | String | Once the file has been read is renamed with this suffix to avoid to read it again.
+| replication | advanced | 3 | short | The HDFS replication factor
+| splitStrategy | advanced |  | String | In the current version of Hadoop opening a file in append mode is disabled since it's not very reliable. So for the moment it's only possible to create new files. The Camel HDFS endpoint tries to solve this problem in this way: If the split strategy option has been defined the hdfs path will be used as a directory and files will be created using the configured UuidGenerator. Every time a splitting condition is met a new file is created. The splitStrategy option is defined as a string with the following syntax: splitStrategy=ST:valueST:value... where ST can be: BYTES a new file is created and the old is closed when the number of written bytes is more than value MESSAGES a new file is created and the old is closed when the number of written messages is more than value IDLE a new file is created and the old is closed when no writing happened in the last value milliseconds
 | synchronous | advanced | false | boolean | Sets whether synchronous processing should be strictly used or Camel is allowed to use asynchronous processing (if supported).
 | backoffErrorThreshold | scheduler |  | int | The number of subsequent error polls (failed due some error) that should happen before the backoffMultipler should kick-in.
 | backoffIdleThreshold | scheduler |  | int | The number of subsequent idle polls that should happen before the backoffMultipler should kick-in.
@@ -132,6 +133,7 @@ The HDFS component supports 41 endpoint options which are listed below:
 
 
 
+
 [[HDFS-KeyTypeandValueType]]
 KeyType and ValueType
 +++++++++++++++++++++

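For reference, a short producer-side sketch exercising a few of these options; the host, port, path and split thresholds are placeholders, not values from this commit:

import org.apache.camel.builder.RouteBuilder;

public class HdfsExampleRoute extends RouteBuilder {
    @Override
    public void configure() {
        // splitStrategy (now labelled "advanced") rolls over to a new file when a
        // condition fires: here, no write for 1000 ms or about 5 MB written.
        from("direct:in")
            .to("hdfs://localhost:8020/tmp/camel-out"
                + "?splitStrategy=IDLE:1000,BYTES:5000000"
                + "&overwrite=false&connectOnStartup=false");
    }
}
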
http://git-wip-us.apache.org/repos/asf/camel/blob/e370c690/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConfiguration.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConfiguration.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConfiguration.java
index e663d74..e625bf0 100644
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConfiguration.java
+++ b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConfiguration.java
@@ -46,17 +46,17 @@ public class HdfsConfiguration {
     private boolean overwrite = true;
     @UriParam(label = "producer")
     private boolean append;
-    @UriParam
+    @UriParam(label = "advanced")
     private String splitStrategy;
-    @UriParam(defaultValue = "" + HdfsConstants.DEFAULT_BUFFERSIZE)
+    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_BUFFERSIZE)
     private int bufferSize = HdfsConstants.DEFAULT_BUFFERSIZE;
-    @UriParam(defaultValue = "" + HdfsConstants.DEFAULT_REPLICATION)
+    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_REPLICATION)
     private short replication = HdfsConstants.DEFAULT_REPLICATION;
-    @UriParam(defaultValue = "" + HdfsConstants.DEFAULT_BLOCKSIZE)
+    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_BLOCKSIZE)
     private long blockSize = HdfsConstants.DEFAULT_BLOCKSIZE;
-    @UriParam(defaultValue = "NONE")
+    @UriParam(label = "advanced", defaultValue = "NONE")
     private SequenceFile.CompressionType compressionType = HdfsConstants.DEFAULT_COMPRESSIONTYPE;
-    @UriParam(defaultValue = "DEFAULT")
+    @UriParam(label = "advanced", defaultValue = "DEFAULT")
     private HdfsCompressionCodec compressionCodec = HdfsConstants.DEFAULT_CODEC;
     @UriParam(defaultValue = "NORMAL_FILE")
     private HdfsFileType fileType = HdfsFileType.NORMAL_FILE;
@@ -66,9 +66,9 @@ public class HdfsConfiguration {
     private WritableType keyType = WritableType.NULL;
     @UriParam(defaultValue = "BYTES")
     private WritableType valueType = WritableType.BYTES;
-    @UriParam(defaultValue = HdfsConstants.DEFAULT_OPENED_SUFFIX)
+    @UriParam(label = "advanced", defaultValue = HdfsConstants.DEFAULT_OPENED_SUFFIX)
     private String openedSuffix = HdfsConstants.DEFAULT_OPENED_SUFFIX;
-    @UriParam(defaultValue = HdfsConstants.DEFAULT_READ_SUFFIX)
+    @UriParam(label = "advanced", defaultValue = HdfsConstants.DEFAULT_READ_SUFFIX)
     private String readSuffix = HdfsConstants.DEFAULT_READ_SUFFIX;
     @UriParam(label = "consumer")
     private long initialDelay;
@@ -76,9 +76,9 @@ public class HdfsConfiguration {
     private long delay = HdfsConstants.DEFAULT_DELAY;
     @UriParam(label = "consumer", defaultValue = HdfsConstants.DEFAULT_PATTERN)
     private String pattern = HdfsConstants.DEFAULT_PATTERN;
-    @UriParam(defaultValue = "" + HdfsConstants.DEFAULT_BUFFERSIZE)
+    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_BUFFERSIZE)
     private int chunkSize = HdfsConstants.DEFAULT_BUFFERSIZE;
-    @UriParam(defaultValue = "" + HdfsConstants.DEFAULT_CHECK_IDLE_INTERVAL)
+    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_CHECK_IDLE_INTERVAL)
     private int checkIdleInterval = HdfsConstants.DEFAULT_CHECK_IDLE_INTERVAL;
     @UriParam(defaultValue = "true")
     private boolean connectOnStartup = true;

http://git-wip-us.apache.org/repos/asf/camel/blob/e370c690/components/camel-hdfs2/src/main/docs/hdfs2.adoc
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/main/docs/hdfs2.adoc b/components/camel-hdfs2/src/main/docs/hdfs2.adoc
index de6f15d..5d0ee27 100644
--- a/components/camel-hdfs2/src/main/docs/hdfs2.adoc
+++ b/components/camel-hdfs2/src/main/docs/hdfs2.adoc
@@ -74,6 +74,7 @@ The HDFS2 component supports 1 options which are listed below.
 
 
 
+
 // endpoint options: START
 The HDFS2 component supports 41 endpoint options which are listed below:
 
@@ -84,21 +85,11 @@ The HDFS2 component supports 41 endpoint options which are listed below:
 | hostName | common |  | String | *Required* HDFS host to use
 | port | common | 8020 | int | HDFS port to use
 | path | common |  | String | *Required* The directory path to use
-| blockSize | common | 67108864 | long | The size of the HDFS blocks
-| bufferSize | common | 4096 | int | The buffer size used by HDFS
-| checkIdleInterval | common | 500 | int | How often (time in millis) in to run the idle checker background task. This option is only in use if the splitter strategy is IDLE.
-| chunkSize | common | 4096 | int | When reading a normal file this is split into chunks producing a message per chunk.
-| compressionCodec | common | DEFAULT | HdfsCompressionCodec | The compression codec to use
-| compressionType | common | NONE | CompressionType | The compression type to use (is default not in use)
 | connectOnStartup | common | true | boolean | Whether to connect to the HDFS file system on starting the producer/consumer. If false then the connection is created on-demand. Notice that HDFS may take up till 15 minutes to establish a connection as it has hardcoded 45 x 20 sec redelivery. By setting this option to false allows your application to startup and not block for up till 15 minutes.
 | fileSystemType | common | HDFS | HdfsFileSystemType | Set to LOCAL to not use HDFS but local java.io.File instead.
 | fileType | common | NORMAL_FILE | HdfsFileType | The file type to use. For more details see Hadoop HDFS documentation about the various files types.
 | keyType | common | NULL | WritableType | The type for the key in case of sequence or map files.
-| openedSuffix | common | opened | String | When a file is opened for reading/writing the file is renamed with this suffix to avoid to read it during the writing phase.
 | owner | common |  | String | The file owner must match this owner for the consumer to pickup the file. Otherwise the file is skipped.
-| readSuffix | common | read | String | Once the file has been read is renamed with this suffix to avoid to read it again.
-| replication | common | 3 | short | The HDFS replication factor
-| splitStrategy | common |  | String | In the current version of Hadoop opening a file in append mode is disabled since it's not very reliable. So for the moment it's only possible to create new files. The Camel HDFS endpoint tries to solve this problem in this way: If the split strategy option has been defined the hdfs path will be used as a directory and files will be created using the configured UuidGenerator. Every time a splitting condition is met a new file is created. The splitStrategy option is defined as a string with the following syntax: splitStrategy=ST:valueST:value... where ST can be: BYTES a new file is created and the old is closed when the number of written bytes is more than value MESSAGES a new file is created and the old is closed when the number of written messages is more than value IDLE a new file is created and the old is closed when no writing happened in the last value milliseconds
 | valueType | common | BYTES | WritableType | The type for the key in case of sequence or map files
 | bridgeErrorHandler | consumer | false | boolean | Allows for bridging the consumer to the Camel routing Error Handler which mean any exceptions occurred while the consumer is trying to pickup incoming messages or the likes will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions that will be logged at WARN/ERROR level and ignored.
 | delay | consumer | 1000 | long | The interval (milliseconds) between the directory scans.
@@ -109,7 +100,17 @@ The HDFS2 component supports 41 endpoint options which are listed below:
 | pollStrategy | consumer (advanced) |  | PollingConsumerPollStrategy | A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling usually occurred during the poll operation before an Exchange have been created and being routed in Camel.
 | append | producer | false | boolean | Append to existing file. Notice that not all HDFS file systems support the append option.
 | overwrite | producer | true | boolean | Whether to overwrite existing files with the same name
+| blockSize | advanced | 67108864 | long | The size of the HDFS blocks
+| bufferSize | advanced | 4096 | int | The buffer size used by HDFS
+| checkIdleInterval | advanced | 500 | int | How often (time in millis) in to run the idle checker background task. This option is only in use if the splitter strategy is IDLE.
+| chunkSize | advanced | 4096 | int | When reading a normal file this is split into chunks producing a message per chunk.
+| compressionCodec | advanced | DEFAULT | HdfsCompressionCodec | The compression codec to use
+| compressionType | advanced | NONE | CompressionType | The compression type to use (is default not in use)
 | exchangePattern | advanced | InOnly | ExchangePattern | Sets the default exchange pattern when creating an exchange
+| openedSuffix | advanced | opened | String | When a file is opened for reading/writing the file is renamed with this suffix to avoid to read it during the writing phase.
+| readSuffix | advanced | read | String | Once the file has been read is renamed with this suffix to avoid to read it again.
+| replication | advanced | 3 | short | The HDFS replication factor
+| splitStrategy | advanced |  | String | In the current version of Hadoop opening a file in append mode is disabled since it's not very reliable. So for the moment it's only possible to create new files. The Camel HDFS endpoint tries to solve this problem in this way: If the split strategy option has been defined the hdfs path will be used as a directory and files will be created using the configured UuidGenerator. Every time a splitting condition is met a new file is created. The splitStrategy option is defined as a string with the following syntax: splitStrategy=ST:valueST:value... where ST can be: BYTES a new file is created and the old is closed when the number of written bytes is more than value MESSAGES a new file is created and the old is closed when the number of written messages is more than value IDLE a new file is created and the old is closed when no writing happened in the last value milliseconds
 | synchronous | advanced | false | boolean | Sets whether synchronous processing should be strictly used or Camel is allowed to use asynchronous processing (if supported).
 | backoffErrorThreshold | scheduler |  | int | The number of subsequent error polls (failed due some error) that should happen before the backoffMultipler should kick-in.
 | backoffIdleThreshold | scheduler |  | int | The number of subsequent idle polls that should happen before the backoffMultipler should kick-in.
@@ -128,6 +129,7 @@ The HDFS2 component supports 41 endpoint options which are listed below:
 
 
 
+
 [[HDFS2-KeyTypeandValueType]]
 KeyType and ValueType
 +++++++++++++++++++++

http://git-wip-us.apache.org/repos/asf/camel/blob/e370c690/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsConfiguration.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsConfiguration.java b/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsConfiguration.java
index 01b5680..c1f5d50 100644
--- a/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsConfiguration.java
+++ b/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsConfiguration.java
@@ -46,17 +46,17 @@ public class HdfsConfiguration {
     private boolean overwrite = true;
     @UriParam(label = "producer")
     private boolean append;
-    @UriParam
+    @UriParam(label = "advanced")
     private String splitStrategy;
-    @UriParam(defaultValue = "" + HdfsConstants.DEFAULT_BUFFERSIZE)
+    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_BUFFERSIZE)
     private int bufferSize = HdfsConstants.DEFAULT_BUFFERSIZE;
-    @UriParam(defaultValue = "" + HdfsConstants.DEFAULT_REPLICATION)
+    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_REPLICATION)
     private short replication = HdfsConstants.DEFAULT_REPLICATION;
-    @UriParam(defaultValue = "" + HdfsConstants.DEFAULT_BLOCKSIZE)
+    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_BLOCKSIZE)
     private long blockSize = HdfsConstants.DEFAULT_BLOCKSIZE;
-    @UriParam(defaultValue = "NONE")
+    @UriParam(label = "advanced", defaultValue = "NONE")
     private SequenceFile.CompressionType compressionType = HdfsConstants.DEFAULT_COMPRESSIONTYPE;
-    @UriParam(defaultValue = "DEFAULT")
+    @UriParam(label = "advanced", defaultValue = "DEFAULT")
     private HdfsCompressionCodec compressionCodec = HdfsConstants.DEFAULT_CODEC;
     @UriParam(defaultValue = "NORMAL_FILE")
     private HdfsFileType fileType = HdfsFileType.NORMAL_FILE;
@@ -66,9 +66,9 @@ public class HdfsConfiguration {
     private WritableType keyType = WritableType.NULL;
     @UriParam(defaultValue = "BYTES")
     private WritableType valueType = WritableType.BYTES;
-    @UriParam(defaultValue = HdfsConstants.DEFAULT_OPENED_SUFFIX)
+    @UriParam(label = "advanced", defaultValue = HdfsConstants.DEFAULT_OPENED_SUFFIX)
     private String openedSuffix = HdfsConstants.DEFAULT_OPENED_SUFFIX;
-    @UriParam(defaultValue = HdfsConstants.DEFAULT_READ_SUFFIX)
+    @UriParam(label = "advanced", defaultValue = HdfsConstants.DEFAULT_READ_SUFFIX)
     private String readSuffix = HdfsConstants.DEFAULT_READ_SUFFIX;
     @UriParam(label = "consumer")
     private long initialDelay;
@@ -76,9 +76,9 @@ public class HdfsConfiguration {
     private long delay = HdfsConstants.DEFAULT_DELAY;
     @UriParam(label = "consumer", defaultValue = HdfsConstants.DEFAULT_PATTERN)
     private String pattern = HdfsConstants.DEFAULT_PATTERN;
-    @UriParam(defaultValue = "" + HdfsConstants.DEFAULT_BUFFERSIZE)
+    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_BUFFERSIZE)
     private int chunkSize = HdfsConstants.DEFAULT_BUFFERSIZE;
-    @UriParam(defaultValue = "" + HdfsConstants.DEFAULT_CHECK_IDLE_INTERVAL)
+    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_CHECK_IDLE_INTERVAL)
     private int checkIdleInterval = HdfsConstants.DEFAULT_CHECK_IDLE_INTERVAL;
     @UriParam(defaultValue = "true")
     private boolean connectOnStartup = true;
