This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel-kafka-connector.git


The following commit(s) were added to refs/heads/main by this push:
     new 2c668e0  [create-pull-request] automated change
2c668e0 is described below

commit 2c668e0c049d26ba466049511762428041498dcc
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Thu Apr 29 03:29:52 2021 +0000

    [create-pull-request] automated change
---
 .../resources/connectors/camel-atlasmap-sink.json          | 12 ++++++------
 .../resources/connectors/camel-aws2-ddb-sink.json          | 14 ++++++++++++++
 .../generated/resources/connectors/camel-jt400-source.json |  7 +++++++
 .../src/generated/resources/camel-atlasmap-sink.json       | 12 ++++++------
 .../src/main/docs/camel-atlasmap-kafka-sink-connector.adoc |  2 +-
 .../atlasmap/CamelAtlasmapSinkConnectorConfig.java         |  8 ++++----
 .../src/generated/resources/camel-aws2-ddb-sink.json       | 14 ++++++++++++++
 .../src/main/docs/camel-aws2-ddb-kafka-sink-connector.adoc |  4 +++-
 .../aws2ddb/CamelAws2ddbSinkConnectorConfig.java           |  8 ++++++++
 .../src/generated/resources/camel-jt400-source.json        |  7 +++++++
 .../src/main/docs/camel-jt400-kafka-source-connector.adoc  |  3 ++-
 .../jt400/CamelJt400SourceConnectorConfig.java             |  4 ++++
 .../connectors/camel-atlasmap-kafka-sink-connector.adoc    |  2 +-
 .../connectors/camel-aws2-ddb-kafka-sink-connector.adoc    |  4 +++-
 .../connectors/camel-jt400-kafka-source-connector.adoc     |  3 ++-
 15 files changed, 82 insertions(+), 22 deletions(-)

diff --git 
a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-atlasmap-sink.json
 
b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-atlasmap-sink.json
index 293ae3d..ec1e6bc 100644
--- 
a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-atlasmap-sink.json
+++ 
b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-atlasmap-sink.json
@@ -60,12 +60,6 @@
                        "priority": "MEDIUM",
                        "required": "false"
                },
-               "camel.sink.endpoint.propertiesFile": {
-                       "name": "camel.sink.endpoint.propertiesFile",
-                       "description": "The URI of the properties file which is 
used for AtlasContextFactory initialization.",
-                       "priority": "MEDIUM",
-                       "required": "false"
-               },
                "camel.component.atlasmap.lazyStartProducer": {
                        "name": "camel.component.atlasmap.lazyStartProducer",
                        "description": "Whether the producer should be started 
lazy (on the first message). By starting lazy you can use this to allow 
CamelContext and routes to startup in situations where a producer may otherwise 
fail during starting and cause the route to fail being started. By deferring 
this startup to be lazy then the startup failure can be handled during routing 
messages via Camel's routing error handlers. Beware that when the first message 
is processed then creating and starting the pr [...]
@@ -85,6 +79,12 @@
                        "defaultValue": "true",
                        "priority": "MEDIUM",
                        "required": "false"
+               },
+               "camel.component.atlasmap.propertiesFile": {
+                       "name": "camel.component.atlasmap.propertiesFile",
+                       "description": "The URI of the properties file which is 
used for AtlasContextFactory initialization.",
+                       "priority": "MEDIUM",
+                       "required": "false"
                }
        }
 }
\ No newline at end of file
diff --git 
a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-aws2-ddb-sink.json
 
b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-aws2-ddb-sink.json
index f89a8ae..b64ecb0 100644
--- 
a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-aws2-ddb-sink.json
+++ 
b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-aws2-ddb-sink.json
@@ -127,6 +127,13 @@
                        "priority": "MEDIUM",
                        "required": "false"
                },
+               "camel.sink.endpoint.useDefaultCredentialsProvider": {
+                       "name": 
"camel.sink.endpoint.useDefaultCredentialsProvider",
+                       "description": "Set whether the S3 client should expect 
to load credentials through a default credentials provider or to expect static 
credentials to be passed in.",
+                       "defaultValue": "false",
+                       "priority": "MEDIUM",
+                       "required": "false"
+               },
                "camel.sink.endpoint.writeCapacity": {
                        "name": "camel.sink.endpoint.writeCapacity",
                        "description": "The provisioned throughput to reserved 
for writing resources to your table",
@@ -263,6 +270,13 @@
                        "priority": "MEDIUM",
                        "required": "false"
                },
+               "camel.component.aws2-ddb.useDefaultCredentialsProvider": {
+                       "name": 
"camel.component.aws2-ddb.useDefaultCredentialsProvider",
+                       "description": "Set whether the S3 client should expect 
to load credentials through a default credentials provider or to expect static 
credentials to be passed in.",
+                       "defaultValue": "false",
+                       "priority": "MEDIUM",
+                       "required": "false"
+               },
                "camel.component.aws2-ddb.writeCapacity": {
                        "name": "camel.component.aws2-ddb.writeCapacity",
                        "description": "The provisioned throughput to reserved 
for writing resources to your table",
diff --git 
a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-jt400-source.json
 
b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-jt400-source.json
index 56a31a2..21d384c 100644
--- 
a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-jt400-source.json
+++ 
b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-jt400-source.json
@@ -130,6 +130,13 @@
                        "priority": "MEDIUM",
                        "required": "false"
                },
+               "camel.source.endpoint.sendingReply": {
+                       "name": "camel.source.endpoint.sendingReply",
+                       "description": "If true, the consumer endpoint will set 
the Jt400Constants.MESSAGE_REPLYTO_KEY header of the camel message for any IBM 
i inquiry messages received. If that message is then routed to a producer 
endpoint, the action will not be processed as sending a message to the queue, 
but rather a reply to the specific inquiry message.",
+                       "defaultValue": "true",
+                       "priority": "MEDIUM",
+                       "required": "false"
+               },
                "camel.source.endpoint.exceptionHandler": {
                        "name": "camel.source.endpoint.exceptionHandler",
                        "description": "To let the consumer use a custom 
ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this 
option is not in use. By default the consumer will deal with exceptions, that 
will be logged at WARN or ERROR level and ignored.",
diff --git 
a/connectors/camel-atlasmap-kafka-connector/src/generated/resources/camel-atlasmap-sink.json
 
b/connectors/camel-atlasmap-kafka-connector/src/generated/resources/camel-atlasmap-sink.json
index 293ae3d..ec1e6bc 100644
--- 
a/connectors/camel-atlasmap-kafka-connector/src/generated/resources/camel-atlasmap-sink.json
+++ 
b/connectors/camel-atlasmap-kafka-connector/src/generated/resources/camel-atlasmap-sink.json
@@ -60,12 +60,6 @@
                        "priority": "MEDIUM",
                        "required": "false"
                },
-               "camel.sink.endpoint.propertiesFile": {
-                       "name": "camel.sink.endpoint.propertiesFile",
-                       "description": "The URI of the properties file which is 
used for AtlasContextFactory initialization.",
-                       "priority": "MEDIUM",
-                       "required": "false"
-               },
                "camel.component.atlasmap.lazyStartProducer": {
                        "name": "camel.component.atlasmap.lazyStartProducer",
                        "description": "Whether the producer should be started 
lazy (on the first message). By starting lazy you can use this to allow 
CamelContext and routes to startup in situations where a producer may otherwise 
fail during starting and cause the route to fail being started. By deferring 
this startup to be lazy then the startup failure can be handled during routing 
messages via Camel's routing error handlers. Beware that when the first message 
is processed then creating and starting the pr [...]
@@ -85,6 +79,12 @@
                        "defaultValue": "true",
                        "priority": "MEDIUM",
                        "required": "false"
+               },
+               "camel.component.atlasmap.propertiesFile": {
+                       "name": "camel.component.atlasmap.propertiesFile",
+                       "description": "The URI of the properties file which is 
used for AtlasContextFactory initialization.",
+                       "priority": "MEDIUM",
+                       "required": "false"
                }
        }
 }
\ No newline at end of file
diff --git 
a/connectors/camel-atlasmap-kafka-connector/src/main/docs/camel-atlasmap-kafka-sink-connector.adoc
 
b/connectors/camel-atlasmap-kafka-connector/src/main/docs/camel-atlasmap-kafka-sink-connector.adoc
index 0220636..45c966b 100644
--- 
a/connectors/camel-atlasmap-kafka-connector/src/main/docs/camel-atlasmap-kafka-sink-connector.adoc
+++ 
b/connectors/camel-atlasmap-kafka-connector/src/main/docs/camel-atlasmap-kafka-sink-connector.adoc
@@ -38,10 +38,10 @@ The camel-atlasmap sink connector supports 11 options, 
which are listed below.
 | *camel.sink.endpoint.sourceMapName* | The Exchange property name for a 
source message map which hold java.util.Map&lt;String, Message&gt; where the 
key is AtlasMap Document ID. AtlasMap consumes Message bodies as source 
documents, as well as message headers as source properties where the scope 
equals to Document ID. | null | false | MEDIUM
 | *camel.sink.endpoint.targetMapMode* | TargetMapMode enum value to specify 
how multiple target documents are delivered if exist. 'MAP': Stores them into a 
java.util.Map, and the java.util.Map is set to an exchange property if 
'targetMapName' is specified, otherwise message body. 'MESSAGE_HEADER': Stores 
them into message headers. 'EXCHANGE_PROPERTY': Stores them into exchange 
properties. ) One of: [MAP] [MESSAGE_HEADER] [EXCHANGE_PROPERTY] | "MAP" | 
false | MEDIUM
 | *camel.sink.endpoint.targetMapName* | The Exchange property name for a 
target document map which hold java.util.Map&lt;String, Object&gt; where the 
key is AtlasMap Document ID. AtlasMap populates multiple target documents into 
this map. | null | false | MEDIUM
-| *camel.sink.endpoint.propertiesFile* | The URI of the properties file which 
is used for AtlasContextFactory initialization. | null | false | MEDIUM
 | *camel.component.atlasmap.lazyStartProducer* | Whether the producer should 
be started lazy (on the first message). By starting lazy you can use this to 
allow CamelContext and routes to startup in situations where a producer may 
otherwise fail during starting and cause the route to fail being started. By 
deferring this startup to be lazy then the startup failure can be handled 
during routing messages via Camel's routing error handlers. Beware that when 
the first message is processed the [...]
 | *camel.component.atlasmap.atlasContextFactory* | To use the 
AtlasContextFactory otherwise a new engine is created. | null | false | MEDIUM
 | *camel.component.atlasmap.autowiredEnabled* | Whether autowiring is enabled. 
This is used for automatic autowiring options (the option must be marked as 
autowired) by looking up in the registry to find if there is a single instance 
of matching type, which then gets configured on the component. This can be used 
for automatic configuring JDBC data sources, JMS connection factories, AWS 
Clients, etc. | true | false | MEDIUM
+| *camel.component.atlasmap.propertiesFile* | The URI of the properties file 
which is used for AtlasContextFactory initialization. | null | false | MEDIUM
 |===
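As the hunks above show, the propertiesFile option moved from the sink endpoint to the atlasmap component. A minimal sketch of a connector configuration using the relocated option might look like the following; the connector name, topic, mapping resource and file path are placeholders, and the connector class is inferred from the CamelAtlasmapSinkConnectorConfig class in this commit rather than quoted from it.

----
# Illustrative sketch only -- name, topic and paths are placeholders.
name=atlasmap-sink-example
connector.class=org.apache.camel.kafkaconnector.atlasmap.CamelAtlasmapSinkConnector
topics=my-topic
tasks.max=1
# Path option assumed from the atlasmap endpoint URI (atlasmap:resourceUri).
camel.sink.path.resourceUri=my-mapping.adm
# After this change the option is component-scoped, not camel.sink.endpoint.*:
camel.component.atlasmap.propertiesFile=file:/etc/atlasmap/context.properties
----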
 
 
diff --git 
a/connectors/camel-atlasmap-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/atlasmap/CamelAtlasmapSinkConnectorConfig.java
 
b/connectors/camel-atlasmap-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/atlasmap/CamelAtlasmapSinkConnectorConfig.java
index 81504d1..c76b50d 100644
--- 
a/connectors/camel-atlasmap-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/atlasmap/CamelAtlasmapSinkConnectorConfig.java
+++ 
b/connectors/camel-atlasmap-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/atlasmap/CamelAtlasmapSinkConnectorConfig.java
@@ -47,9 +47,6 @@ public class CamelAtlasmapSinkConnectorConfig
     public static final String 
CAMEL_SINK_ATLASMAP_ENDPOINT_TARGET_MAP_NAME_CONF = 
"camel.sink.endpoint.targetMapName";
     public static final String 
CAMEL_SINK_ATLASMAP_ENDPOINT_TARGET_MAP_NAME_DOC = "The Exchange property name 
for a target document map which hold java.util.Map&lt;String, Object&gt; where 
the key is AtlasMap Document ID. AtlasMap populates multiple target documents 
into this map.";
     public static final String 
CAMEL_SINK_ATLASMAP_ENDPOINT_TARGET_MAP_NAME_DEFAULT = null;
-    public static final String 
CAMEL_SINK_ATLASMAP_ENDPOINT_PROPERTIES_FILE_CONF = 
"camel.sink.endpoint.propertiesFile";
-    public static final String 
CAMEL_SINK_ATLASMAP_ENDPOINT_PROPERTIES_FILE_DOC = "The URI of the properties 
file which is used for AtlasContextFactory initialization.";
-    public static final String 
CAMEL_SINK_ATLASMAP_ENDPOINT_PROPERTIES_FILE_DEFAULT = null;
     public static final String 
CAMEL_SINK_ATLASMAP_COMPONENT_LAZY_START_PRODUCER_CONF = 
"camel.component.atlasmap.lazyStartProducer";
     public static final String 
CAMEL_SINK_ATLASMAP_COMPONENT_LAZY_START_PRODUCER_DOC = "Whether the producer 
should be started lazy (on the first message). By starting lazy you can use 
this to allow CamelContext and routes to startup in situations where a producer 
may otherwise fail during starting and cause the route to fail being started. 
By deferring this startup to be lazy then the startup failure can be handled 
during routing messages via Camel's routing error handlers. Beware that  [...]
     public static final Boolean 
CAMEL_SINK_ATLASMAP_COMPONENT_LAZY_START_PRODUCER_DEFAULT = false;
@@ -59,6 +56,9 @@ public class CamelAtlasmapSinkConnectorConfig
     public static final String 
CAMEL_SINK_ATLASMAP_COMPONENT_AUTOWIRED_ENABLED_CONF = 
"camel.component.atlasmap.autowiredEnabled";
     public static final String 
CAMEL_SINK_ATLASMAP_COMPONENT_AUTOWIRED_ENABLED_DOC = "Whether autowiring is 
enabled. This is used for automatic autowiring options (the option must be 
marked as autowired) by looking up in the registry to find if there is a single 
instance of matching type, which then gets configured on the component. This 
can be used for automatic configuring JDBC data sources, JMS connection 
factories, AWS Clients, etc.";
     public static final Boolean 
CAMEL_SINK_ATLASMAP_COMPONENT_AUTOWIRED_ENABLED_DEFAULT = true;
+    public static final String 
CAMEL_SINK_ATLASMAP_COMPONENT_PROPERTIES_FILE_CONF = 
"camel.component.atlasmap.propertiesFile";
+    public static final String 
CAMEL_SINK_ATLASMAP_COMPONENT_PROPERTIES_FILE_DOC = "The URI of the properties 
file which is used for AtlasContextFactory initialization.";
+    public static final String 
CAMEL_SINK_ATLASMAP_COMPONENT_PROPERTIES_FILE_DEFAULT = null;
 
     public CamelAtlasmapSinkConnectorConfig(
             ConfigDef config,
@@ -79,10 +79,10 @@ public class CamelAtlasmapSinkConnectorConfig
         conf.define(CAMEL_SINK_ATLASMAP_ENDPOINT_SOURCE_MAP_NAME_CONF, 
ConfigDef.Type.STRING, CAMEL_SINK_ATLASMAP_ENDPOINT_SOURCE_MAP_NAME_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_ATLASMAP_ENDPOINT_SOURCE_MAP_NAME_DOC);
         conf.define(CAMEL_SINK_ATLASMAP_ENDPOINT_TARGET_MAP_MODE_CONF, 
ConfigDef.Type.STRING, CAMEL_SINK_ATLASMAP_ENDPOINT_TARGET_MAP_MODE_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_ATLASMAP_ENDPOINT_TARGET_MAP_MODE_DOC);
         conf.define(CAMEL_SINK_ATLASMAP_ENDPOINT_TARGET_MAP_NAME_CONF, 
ConfigDef.Type.STRING, CAMEL_SINK_ATLASMAP_ENDPOINT_TARGET_MAP_NAME_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_ATLASMAP_ENDPOINT_TARGET_MAP_NAME_DOC);
-        conf.define(CAMEL_SINK_ATLASMAP_ENDPOINT_PROPERTIES_FILE_CONF, 
ConfigDef.Type.STRING, CAMEL_SINK_ATLASMAP_ENDPOINT_PROPERTIES_FILE_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_ATLASMAP_ENDPOINT_PROPERTIES_FILE_DOC);
         conf.define(CAMEL_SINK_ATLASMAP_COMPONENT_LAZY_START_PRODUCER_CONF, 
ConfigDef.Type.BOOLEAN, 
CAMEL_SINK_ATLASMAP_COMPONENT_LAZY_START_PRODUCER_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SINK_ATLASMAP_COMPONENT_LAZY_START_PRODUCER_DOC);
         conf.define(CAMEL_SINK_ATLASMAP_COMPONENT_ATLAS_CONTEXT_FACTORY_CONF, 
ConfigDef.Type.STRING, 
CAMEL_SINK_ATLASMAP_COMPONENT_ATLAS_CONTEXT_FACTORY_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SINK_ATLASMAP_COMPONENT_ATLAS_CONTEXT_FACTORY_DOC);
         conf.define(CAMEL_SINK_ATLASMAP_COMPONENT_AUTOWIRED_ENABLED_CONF, 
ConfigDef.Type.BOOLEAN, 
CAMEL_SINK_ATLASMAP_COMPONENT_AUTOWIRED_ENABLED_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SINK_ATLASMAP_COMPONENT_AUTOWIRED_ENABLED_DOC);
+        conf.define(CAMEL_SINK_ATLASMAP_COMPONENT_PROPERTIES_FILE_CONF, 
ConfigDef.Type.STRING, CAMEL_SINK_ATLASMAP_COMPONENT_PROPERTIES_FILE_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_ATLASMAP_COMPONENT_PROPERTIES_FILE_DOC);
         return conf;
     }
 }
\ No newline at end of file
diff --git 
a/connectors/camel-aws2-ddb-kafka-connector/src/generated/resources/camel-aws2-ddb-sink.json
 
b/connectors/camel-aws2-ddb-kafka-connector/src/generated/resources/camel-aws2-ddb-sink.json
index f89a8ae..b64ecb0 100644
--- 
a/connectors/camel-aws2-ddb-kafka-connector/src/generated/resources/camel-aws2-ddb-sink.json
+++ 
b/connectors/camel-aws2-ddb-kafka-connector/src/generated/resources/camel-aws2-ddb-sink.json
@@ -127,6 +127,13 @@
                        "priority": "MEDIUM",
                        "required": "false"
                },
+               "camel.sink.endpoint.useDefaultCredentialsProvider": {
+                       "name": 
"camel.sink.endpoint.useDefaultCredentialsProvider",
+                       "description": "Set whether the S3 client should expect 
to load credentials through a default credentials provider or to expect static 
credentials to be passed in.",
+                       "defaultValue": "false",
+                       "priority": "MEDIUM",
+                       "required": "false"
+               },
                "camel.sink.endpoint.writeCapacity": {
                        "name": "camel.sink.endpoint.writeCapacity",
                        "description": "The provisioned throughput to reserved 
for writing resources to your table",
@@ -263,6 +270,13 @@
                        "priority": "MEDIUM",
                        "required": "false"
                },
+               "camel.component.aws2-ddb.useDefaultCredentialsProvider": {
+                       "name": 
"camel.component.aws2-ddb.useDefaultCredentialsProvider",
+                       "description": "Set whether the S3 client should expect 
to load credentials through a default credentials provider or to expect static 
credentials to be passed in.",
+                       "defaultValue": "false",
+                       "priority": "MEDIUM",
+                       "required": "false"
+               },
                "camel.component.aws2-ddb.writeCapacity": {
                        "name": "camel.component.aws2-ddb.writeCapacity",
                        "description": "The provisioned throughput to reserved 
for writing resources to your table",
diff --git 
a/connectors/camel-aws2-ddb-kafka-connector/src/main/docs/camel-aws2-ddb-kafka-sink-connector.adoc
 
b/connectors/camel-aws2-ddb-kafka-connector/src/main/docs/camel-aws2-ddb-kafka-sink-connector.adoc
index 7f8a826..3e73ea1 100644
--- 
a/connectors/camel-aws2-ddb-kafka-connector/src/main/docs/camel-aws2-ddb-kafka-sink-connector.adoc
+++ 
b/connectors/camel-aws2-ddb-kafka-connector/src/main/docs/camel-aws2-ddb-kafka-sink-connector.adoc
@@ -24,7 +24,7 @@ 
connector.class=org.apache.camel.kafkaconnector.aws2ddb.CamelAws2ddbSinkConnecto
 ----
 
 
-The camel-aws2-ddb sink connector supports 39 options, which are listed below.
+The camel-aws2-ddb sink connector supports 41 options, which are listed below.
 
 
 
@@ -47,6 +47,7 @@ The camel-aws2-ddb sink connector supports 39 options, which 
are listed below.
 | *camel.sink.endpoint.region* | The region in which DDB client needs to work 
| null | false | MEDIUM
 | *camel.sink.endpoint.trustAllCertificates* | If we want to trust all 
certificates in case of overriding the endpoint | false | false | MEDIUM
 | *camel.sink.endpoint.uriEndpointOverride* | Set the overriding uri endpoint. 
This option needs to be used in combination with overrideEndpoint option | null 
| false | MEDIUM
+| *camel.sink.endpoint.useDefaultCredentialsProvider* | Set whether the S3 
client should expect to load credentials through a default credentials provider 
or to expect static credentials to be passed in. | false | false | MEDIUM
 | *camel.sink.endpoint.writeCapacity* | The provisioned throughput to reserved 
for writing resources to your table | null | false | MEDIUM
 | *camel.sink.endpoint.accessKey* | Amazon AWS Access Key | null | false | 
MEDIUM
 | *camel.sink.endpoint.secretKey* | Amazon AWS Secret Key | null | false | 
MEDIUM
@@ -66,6 +67,7 @@ The camel-aws2-ddb sink connector supports 39 options, which 
are listed below.
 | *camel.component.aws2-ddb.region* | The region in which DDB client needs to 
work | null | false | MEDIUM
 | *camel.component.aws2-ddb.trustAllCertificates* | If we want to trust all 
certificates in case of overriding the endpoint | false | false | MEDIUM
 | *camel.component.aws2-ddb.uriEndpointOverride* | Set the overriding uri 
endpoint. This option needs to be used in combination with overrideEndpoint 
option | null | false | MEDIUM
+| *camel.component.aws2-ddb.useDefaultCredentialsProvider* | Set whether the 
S3 client should expect to load credentials through a default credentials 
provider or to expect static credentials to be passed in. | false | false | 
MEDIUM
 | *camel.component.aws2-ddb.writeCapacity* | The provisioned throughput to 
reserved for writing resources to your table | null | false | MEDIUM
 | *camel.component.aws2-ddb.autowiredEnabled* | Whether autowiring is enabled. 
This is used for automatic autowiring options (the option must be marked as 
autowired) by looking up in the registry to find if there is a single instance 
of matching type, which then gets configured on the component. This can be used 
for automatic configuring JDBC data sources, JMS connection factories, AWS 
Clients, etc. | true | false | MEDIUM
 | *camel.component.aws2-ddb.accessKey* | Amazon AWS Access Key | null | false 
| MEDIUM
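The new useDefaultCredentialsProvider option is added at both endpoint and component level; although the generated description mentions the S3 client, on this connector it governs how the DynamoDB client obtains credentials. A hedged sketch of a sink configuration that relies on the AWS default credentials provider chain instead of static keys could look like this; the name, topic, table and region are placeholders, and the connector class is completed from the line-wrapped connector.class sample above.

----
# Illustrative sketch only -- name, topic, table and region are placeholders.
name=aws2-ddb-sink-example
connector.class=org.apache.camel.kafkaconnector.aws2ddb.CamelAws2ddbSinkConnector
topics=orders
tasks.max=1
# Path option assumed from the aws2-ddb endpoint URI (aws2-ddb:tableName).
camel.sink.path.tableName=orders
camel.sink.endpoint.region=eu-west-1
# New option from this change: use the default AWS credentials provider chain
# rather than camel.sink.endpoint.accessKey / camel.sink.endpoint.secretKey.
camel.sink.endpoint.useDefaultCredentialsProvider=true
----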
diff --git 
a/connectors/camel-aws2-ddb-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/aws2ddb/CamelAws2ddbSinkConnectorConfig.java
 
b/connectors/camel-aws2-ddb-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/aws2ddb/CamelAws2ddbSinkConnectorConfig.java
index 46de65d..7c94eb6 100644
--- 
a/connectors/camel-aws2-ddb-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/aws2ddb/CamelAws2ddbSinkConnectorConfig.java
+++ 
b/connectors/camel-aws2-ddb-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/aws2ddb/CamelAws2ddbSinkConnectorConfig.java
@@ -72,6 +72,9 @@ public class CamelAws2ddbSinkConnectorConfig extends 
CamelSinkConnectorConfig {
     public static final String 
CAMEL_SINK_AWS2DDB_ENDPOINT_URI_ENDPOINT_OVERRIDE_CONF = 
"camel.sink.endpoint.uriEndpointOverride";
     public static final String 
CAMEL_SINK_AWS2DDB_ENDPOINT_URI_ENDPOINT_OVERRIDE_DOC = "Set the overriding uri 
endpoint. This option needs to be used in combination with overrideEndpoint 
option";
     public static final String 
CAMEL_SINK_AWS2DDB_ENDPOINT_URI_ENDPOINT_OVERRIDE_DEFAULT = null;
+    public static final String 
CAMEL_SINK_AWS2DDB_ENDPOINT_USE_DEFAULT_CREDENTIALS_PROVIDER_CONF = 
"camel.sink.endpoint.useDefaultCredentialsProvider";
+    public static final String 
CAMEL_SINK_AWS2DDB_ENDPOINT_USE_DEFAULT_CREDENTIALS_PROVIDER_DOC = "Set whether 
the S3 client should expect to load credentials through a default credentials 
provider or to expect static credentials to be passed in.";
+    public static final Boolean 
CAMEL_SINK_AWS2DDB_ENDPOINT_USE_DEFAULT_CREDENTIALS_PROVIDER_DEFAULT = false;
     public static final String CAMEL_SINK_AWS2DDB_ENDPOINT_WRITE_CAPACITY_CONF 
= "camel.sink.endpoint.writeCapacity";
     public static final String CAMEL_SINK_AWS2DDB_ENDPOINT_WRITE_CAPACITY_DOC 
= "The provisioned throughput to reserved for writing resources to your table";
     public static final String 
CAMEL_SINK_AWS2DDB_ENDPOINT_WRITE_CAPACITY_DEFAULT = null;
@@ -129,6 +132,9 @@ public class CamelAws2ddbSinkConnectorConfig extends 
CamelSinkConnectorConfig {
     public static final String 
CAMEL_SINK_AWS2DDB_COMPONENT_URI_ENDPOINT_OVERRIDE_CONF = 
"camel.component.aws2-ddb.uriEndpointOverride";
     public static final String 
CAMEL_SINK_AWS2DDB_COMPONENT_URI_ENDPOINT_OVERRIDE_DOC = "Set the overriding 
uri endpoint. This option needs to be used in combination with overrideEndpoint 
option";
     public static final String 
CAMEL_SINK_AWS2DDB_COMPONENT_URI_ENDPOINT_OVERRIDE_DEFAULT = null;
+    public static final String 
CAMEL_SINK_AWS2DDB_COMPONENT_USE_DEFAULT_CREDENTIALS_PROVIDER_CONF = 
"camel.component.aws2-ddb.useDefaultCredentialsProvider";
+    public static final String 
CAMEL_SINK_AWS2DDB_COMPONENT_USE_DEFAULT_CREDENTIALS_PROVIDER_DOC = "Set 
whether the S3 client should expect to load credentials through a default 
credentials provider or to expect static credentials to be passed in.";
+    public static final Boolean 
CAMEL_SINK_AWS2DDB_COMPONENT_USE_DEFAULT_CREDENTIALS_PROVIDER_DEFAULT = false;
     public static final String 
CAMEL_SINK_AWS2DDB_COMPONENT_WRITE_CAPACITY_CONF = 
"camel.component.aws2-ddb.writeCapacity";
     public static final String CAMEL_SINK_AWS2DDB_COMPONENT_WRITE_CAPACITY_DOC 
= "The provisioned throughput to reserved for writing resources to your table";
     public static final String 
CAMEL_SINK_AWS2DDB_COMPONENT_WRITE_CAPACITY_DEFAULT = null;
@@ -170,6 +176,7 @@ public class CamelAws2ddbSinkConnectorConfig extends 
CamelSinkConnectorConfig {
         conf.define(CAMEL_SINK_AWS2DDB_ENDPOINT_REGION_CONF, 
ConfigDef.Type.STRING, CAMEL_SINK_AWS2DDB_ENDPOINT_REGION_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_AWS2DDB_ENDPOINT_REGION_DOC);
         conf.define(CAMEL_SINK_AWS2DDB_ENDPOINT_TRUST_ALL_CERTIFICATES_CONF, 
ConfigDef.Type.BOOLEAN, 
CAMEL_SINK_AWS2DDB_ENDPOINT_TRUST_ALL_CERTIFICATES_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SINK_AWS2DDB_ENDPOINT_TRUST_ALL_CERTIFICATES_DOC);
         conf.define(CAMEL_SINK_AWS2DDB_ENDPOINT_URI_ENDPOINT_OVERRIDE_CONF, 
ConfigDef.Type.STRING, 
CAMEL_SINK_AWS2DDB_ENDPOINT_URI_ENDPOINT_OVERRIDE_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SINK_AWS2DDB_ENDPOINT_URI_ENDPOINT_OVERRIDE_DOC);
+        
conf.define(CAMEL_SINK_AWS2DDB_ENDPOINT_USE_DEFAULT_CREDENTIALS_PROVIDER_CONF, 
ConfigDef.Type.BOOLEAN, 
CAMEL_SINK_AWS2DDB_ENDPOINT_USE_DEFAULT_CREDENTIALS_PROVIDER_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SINK_AWS2DDB_ENDPOINT_USE_DEFAULT_CREDENTIALS_PROVIDER_DOC);
         conf.define(CAMEL_SINK_AWS2DDB_ENDPOINT_WRITE_CAPACITY_CONF, 
ConfigDef.Type.STRING, CAMEL_SINK_AWS2DDB_ENDPOINT_WRITE_CAPACITY_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_AWS2DDB_ENDPOINT_WRITE_CAPACITY_DOC);
         conf.define(CAMEL_SINK_AWS2DDB_ENDPOINT_ACCESS_KEY_CONF, 
ConfigDef.Type.PASSWORD, CAMEL_SINK_AWS2DDB_ENDPOINT_ACCESS_KEY_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_AWS2DDB_ENDPOINT_ACCESS_KEY_DOC);
         conf.define(CAMEL_SINK_AWS2DDB_ENDPOINT_SECRET_KEY_CONF, 
ConfigDef.Type.PASSWORD, CAMEL_SINK_AWS2DDB_ENDPOINT_SECRET_KEY_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_AWS2DDB_ENDPOINT_SECRET_KEY_DOC);
@@ -189,6 +196,7 @@ public class CamelAws2ddbSinkConnectorConfig extends 
CamelSinkConnectorConfig {
         conf.define(CAMEL_SINK_AWS2DDB_COMPONENT_REGION_CONF, 
ConfigDef.Type.STRING, CAMEL_SINK_AWS2DDB_COMPONENT_REGION_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_AWS2DDB_COMPONENT_REGION_DOC);
         conf.define(CAMEL_SINK_AWS2DDB_COMPONENT_TRUST_ALL_CERTIFICATES_CONF, 
ConfigDef.Type.BOOLEAN, 
CAMEL_SINK_AWS2DDB_COMPONENT_TRUST_ALL_CERTIFICATES_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SINK_AWS2DDB_COMPONENT_TRUST_ALL_CERTIFICATES_DOC);
         conf.define(CAMEL_SINK_AWS2DDB_COMPONENT_URI_ENDPOINT_OVERRIDE_CONF, 
ConfigDef.Type.STRING, 
CAMEL_SINK_AWS2DDB_COMPONENT_URI_ENDPOINT_OVERRIDE_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SINK_AWS2DDB_COMPONENT_URI_ENDPOINT_OVERRIDE_DOC);
+        
conf.define(CAMEL_SINK_AWS2DDB_COMPONENT_USE_DEFAULT_CREDENTIALS_PROVIDER_CONF, 
ConfigDef.Type.BOOLEAN, 
CAMEL_SINK_AWS2DDB_COMPONENT_USE_DEFAULT_CREDENTIALS_PROVIDER_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SINK_AWS2DDB_COMPONENT_USE_DEFAULT_CREDENTIALS_PROVIDER_DOC);
         conf.define(CAMEL_SINK_AWS2DDB_COMPONENT_WRITE_CAPACITY_CONF, 
ConfigDef.Type.STRING, CAMEL_SINK_AWS2DDB_COMPONENT_WRITE_CAPACITY_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_AWS2DDB_COMPONENT_WRITE_CAPACITY_DOC);
         conf.define(CAMEL_SINK_AWS2DDB_COMPONENT_AUTOWIRED_ENABLED_CONF, 
ConfigDef.Type.BOOLEAN, CAMEL_SINK_AWS2DDB_COMPONENT_AUTOWIRED_ENABLED_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SINK_AWS2DDB_COMPONENT_AUTOWIRED_ENABLED_DOC);
         conf.define(CAMEL_SINK_AWS2DDB_COMPONENT_ACCESS_KEY_CONF, 
ConfigDef.Type.PASSWORD, CAMEL_SINK_AWS2DDB_COMPONENT_ACCESS_KEY_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SINK_AWS2DDB_COMPONENT_ACCESS_KEY_DOC);
diff --git 
a/connectors/camel-jt400-kafka-connector/src/generated/resources/camel-jt400-source.json
 
b/connectors/camel-jt400-kafka-connector/src/generated/resources/camel-jt400-source.json
index 56a31a2..21d384c 100644
--- 
a/connectors/camel-jt400-kafka-connector/src/generated/resources/camel-jt400-source.json
+++ 
b/connectors/camel-jt400-kafka-connector/src/generated/resources/camel-jt400-source.json
@@ -130,6 +130,13 @@
                        "priority": "MEDIUM",
                        "required": "false"
                },
+               "camel.source.endpoint.sendingReply": {
+                       "name": "camel.source.endpoint.sendingReply",
+                       "description": "If true, the consumer endpoint will set 
the Jt400Constants.MESSAGE_REPLYTO_KEY header of the camel message for any IBM 
i inquiry messages received. If that message is then routed to a producer 
endpoint, the action will not be processed as sending a message to the queue, 
but rather a reply to the specific inquiry message.",
+                       "defaultValue": "true",
+                       "priority": "MEDIUM",
+                       "required": "false"
+               },
                "camel.source.endpoint.exceptionHandler": {
                        "name": "camel.source.endpoint.exceptionHandler",
                        "description": "To let the consumer use a custom 
ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this 
option is not in use. By default the consumer will deal with exceptions, that 
will be logged at WARN or ERROR level and ignored.",
diff --git 
a/connectors/camel-jt400-kafka-connector/src/main/docs/camel-jt400-kafka-source-connector.adoc
 
b/connectors/camel-jt400-kafka-connector/src/main/docs/camel-jt400-kafka-source-connector.adoc
index 9e4e737..0778765 100644
--- 
a/connectors/camel-jt400-kafka-connector/src/main/docs/camel-jt400-kafka-source-connector.adoc
+++ 
b/connectors/camel-jt400-kafka-connector/src/main/docs/camel-jt400-kafka-source-connector.adoc
@@ -24,7 +24,7 @@ 
connector.class=org.apache.camel.kafkaconnector.jt400.CamelJt400SourceConnector
 ----
 
 
-The camel-jt400 source connector supports 36 options, which are listed below.
+The camel-jt400 source connector supports 37 options, which are listed below.
 
 
 
@@ -46,6 +46,7 @@ The camel-jt400 source connector supports 36 options, which 
are listed below.
 | *camel.source.endpoint.readTimeout* | Timeout in millis the consumer will 
wait while trying to read a new message of the data queue. | 30000 | false | 
MEDIUM
 | *camel.source.endpoint.searchType* | Search type such as EQ for equal etc. 
One of: [EQ] [NE] [LT] [LE] [GT] [GE] | "EQ" | false | MEDIUM
 | *camel.source.endpoint.sendEmptyMessageWhenIdle* | If the polling consumer 
did not poll any files, you can enable this option to send an empty message (no 
body) instead. | false | false | MEDIUM
+| *camel.source.endpoint.sendingReply* | If true, the consumer endpoint will 
set the Jt400Constants.MESSAGE_REPLYTO_KEY header of the camel message for any 
IBM i inquiry messages received. If that message is then routed to a producer 
endpoint, the action will not be processed as sending a message to the queue, 
but rather a reply to the specific inquiry message. | true | false | MEDIUM
 | *camel.source.endpoint.exceptionHandler* | To let the consumer use a custom 
ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this 
option is not in use. By default the consumer will deal with exceptions, that 
will be logged at WARN or ERROR level and ignored. | null | false | MEDIUM
 | *camel.source.endpoint.exchangePattern* | Sets the exchange pattern when the 
consumer creates an exchange. One of: [InOnly] [InOut] [InOptionalOut] | null | 
false | MEDIUM
 | *camel.source.endpoint.pollStrategy* | A pluggable 
org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your 
custom implementation to control error handling usually occurred during the 
poll operation before an Exchange have been created and being routed in Camel. 
| null | false | MEDIUM
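The new sendingReply option (default true) makes the consumer set the Jt400Constants.MESSAGE_REPLYTO_KEY header on IBM i inquiry messages, so a later producer endpoint answers the inquiry instead of enqueueing a new message. A rough sketch of a source configuration with the option spelled out follows; the system, credentials, queue path and topic are placeholders, and the camel.source.path.* option names are assumed from the jt400 endpoint URI rather than quoted from this commit.

----
# Illustrative sketch only -- system, credentials, queue and topic are placeholders.
name=jt400-source-example
connector.class=org.apache.camel.kafkaconnector.jt400.CamelJt400SourceConnector
topics=ibmi-messages
tasks.max=1
camel.source.path.userID=MYUSER
camel.source.path.password=MYPASS
camel.source.path.systemName=MYSYSTEM
camel.source.path.objectPath=/QSYS.LIB/MYLIB.LIB/MYMSGQ.MSGQ
camel.source.path.type=MSGQ
# New option from this change: keep the reply-to header on inquiry messages
# (this is already the default, shown here only for visibility).
camel.source.endpoint.sendingReply=true
----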
diff --git 
a/connectors/camel-jt400-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/jt400/CamelJt400SourceConnectorConfig.java
 
b/connectors/camel-jt400-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/jt400/CamelJt400SourceConnectorConfig.java
index 6602768..c2dbaae 100644
--- 
a/connectors/camel-jt400-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/jt400/CamelJt400SourceConnectorConfig.java
+++ 
b/connectors/camel-jt400-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/jt400/CamelJt400SourceConnectorConfig.java
@@ -71,6 +71,9 @@ public class CamelJt400SourceConnectorConfig
     public static final String 
CAMEL_SOURCE_JT400_ENDPOINT_SEND_EMPTY_MESSAGE_WHEN_IDLE_CONF = 
"camel.source.endpoint.sendEmptyMessageWhenIdle";
     public static final String 
CAMEL_SOURCE_JT400_ENDPOINT_SEND_EMPTY_MESSAGE_WHEN_IDLE_DOC = "If the polling 
consumer did not poll any files, you can enable this option to send an empty 
message (no body) instead.";
     public static final Boolean 
CAMEL_SOURCE_JT400_ENDPOINT_SEND_EMPTY_MESSAGE_WHEN_IDLE_DEFAULT = false;
+    public static final String CAMEL_SOURCE_JT400_ENDPOINT_SENDING_REPLY_CONF 
= "camel.source.endpoint.sendingReply";
+    public static final String CAMEL_SOURCE_JT400_ENDPOINT_SENDING_REPLY_DOC = 
"If true, the consumer endpoint will set the Jt400Constants.MESSAGE_REPLYTO_KEY 
header of the camel message for any IBM i inquiry messages received. If that 
message is then routed to a producer endpoint, the action will not be processed 
as sending a message to the queue, but rather a reply to the specific inquiry 
message.";
+    public static final Boolean 
CAMEL_SOURCE_JT400_ENDPOINT_SENDING_REPLY_DEFAULT = true;
     public static final String 
CAMEL_SOURCE_JT400_ENDPOINT_EXCEPTION_HANDLER_CONF = 
"camel.source.endpoint.exceptionHandler";
     public static final String 
CAMEL_SOURCE_JT400_ENDPOINT_EXCEPTION_HANDLER_DOC = "To let the consumer use a 
custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled 
then this option is not in use. By default the consumer will deal with 
exceptions, that will be logged at WARN or ERROR level and ignored.";
     public static final String 
CAMEL_SOURCE_JT400_ENDPOINT_EXCEPTION_HANDLER_DEFAULT = null;
@@ -162,6 +165,7 @@ public class CamelJt400SourceConnectorConfig
         conf.define(CAMEL_SOURCE_JT400_ENDPOINT_READ_TIMEOUT_CONF, 
ConfigDef.Type.INT, CAMEL_SOURCE_JT400_ENDPOINT_READ_TIMEOUT_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_JT400_ENDPOINT_READ_TIMEOUT_DOC);
         conf.define(CAMEL_SOURCE_JT400_ENDPOINT_SEARCH_TYPE_CONF, 
ConfigDef.Type.STRING, CAMEL_SOURCE_JT400_ENDPOINT_SEARCH_TYPE_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_JT400_ENDPOINT_SEARCH_TYPE_DOC);
         
conf.define(CAMEL_SOURCE_JT400_ENDPOINT_SEND_EMPTY_MESSAGE_WHEN_IDLE_CONF, 
ConfigDef.Type.BOOLEAN, 
CAMEL_SOURCE_JT400_ENDPOINT_SEND_EMPTY_MESSAGE_WHEN_IDLE_DEFAULT, 
ConfigDef.Importance.MEDIUM, 
CAMEL_SOURCE_JT400_ENDPOINT_SEND_EMPTY_MESSAGE_WHEN_IDLE_DOC);
+        conf.define(CAMEL_SOURCE_JT400_ENDPOINT_SENDING_REPLY_CONF, 
ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_JT400_ENDPOINT_SENDING_REPLY_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_JT400_ENDPOINT_SENDING_REPLY_DOC);
         conf.define(CAMEL_SOURCE_JT400_ENDPOINT_EXCEPTION_HANDLER_CONF, 
ConfigDef.Type.STRING, CAMEL_SOURCE_JT400_ENDPOINT_EXCEPTION_HANDLER_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_JT400_ENDPOINT_EXCEPTION_HANDLER_DOC);
         conf.define(CAMEL_SOURCE_JT400_ENDPOINT_EXCHANGE_PATTERN_CONF, 
ConfigDef.Type.STRING, CAMEL_SOURCE_JT400_ENDPOINT_EXCHANGE_PATTERN_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_JT400_ENDPOINT_EXCHANGE_PATTERN_DOC);
         conf.define(CAMEL_SOURCE_JT400_ENDPOINT_POLL_STRATEGY_CONF, 
ConfigDef.Type.STRING, CAMEL_SOURCE_JT400_ENDPOINT_POLL_STRATEGY_DEFAULT, 
ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_JT400_ENDPOINT_POLL_STRATEGY_DOC);
diff --git 
a/docs/modules/ROOT/pages/connectors/camel-atlasmap-kafka-sink-connector.adoc 
b/docs/modules/ROOT/pages/connectors/camel-atlasmap-kafka-sink-connector.adoc
index 0220636..45c966b 100644
--- 
a/docs/modules/ROOT/pages/connectors/camel-atlasmap-kafka-sink-connector.adoc
+++ 
b/docs/modules/ROOT/pages/connectors/camel-atlasmap-kafka-sink-connector.adoc
@@ -38,10 +38,10 @@ The camel-atlasmap sink connector supports 11 options, 
which are listed below.
 | *camel.sink.endpoint.sourceMapName* | The Exchange property name for a 
source message map which hold java.util.Map&lt;String, Message&gt; where the 
key is AtlasMap Document ID. AtlasMap consumes Message bodies as source 
documents, as well as message headers as source properties where the scope 
equals to Document ID. | null | false | MEDIUM
 | *camel.sink.endpoint.targetMapMode* | TargetMapMode enum value to specify 
how multiple target documents are delivered if exist. 'MAP': Stores them into a 
java.util.Map, and the java.util.Map is set to an exchange property if 
'targetMapName' is specified, otherwise message body. 'MESSAGE_HEADER': Stores 
them into message headers. 'EXCHANGE_PROPERTY': Stores them into exchange 
properties. ) One of: [MAP] [MESSAGE_HEADER] [EXCHANGE_PROPERTY] | "MAP" | 
false | MEDIUM
 | *camel.sink.endpoint.targetMapName* | The Exchange property name for a 
target document map which hold java.util.Map&lt;String, Object&gt; where the 
key is AtlasMap Document ID. AtlasMap populates multiple target documents into 
this map. | null | false | MEDIUM
-| *camel.sink.endpoint.propertiesFile* | The URI of the properties file which 
is used for AtlasContextFactory initialization. | null | false | MEDIUM
 | *camel.component.atlasmap.lazyStartProducer* | Whether the producer should 
be started lazy (on the first message). By starting lazy you can use this to 
allow CamelContext and routes to startup in situations where a producer may 
otherwise fail during starting and cause the route to fail being started. By 
deferring this startup to be lazy then the startup failure can be handled 
during routing messages via Camel's routing error handlers. Beware that when 
the first message is processed the [...]
 | *camel.component.atlasmap.atlasContextFactory* | To use the 
AtlasContextFactory otherwise a new engine is created. | null | false | MEDIUM
 | *camel.component.atlasmap.autowiredEnabled* | Whether autowiring is enabled. 
This is used for automatic autowiring options (the option must be marked as 
autowired) by looking up in the registry to find if there is a single instance 
of matching type, which then gets configured on the component. This can be used 
for automatic configuring JDBC data sources, JMS connection factories, AWS 
Clients, etc. | true | false | MEDIUM
+| *camel.component.atlasmap.propertiesFile* | The URI of the properties file 
which is used for AtlasContextFactory initialization. | null | false | MEDIUM
 |===
 
 
diff --git 
a/docs/modules/ROOT/pages/connectors/camel-aws2-ddb-kafka-sink-connector.adoc 
b/docs/modules/ROOT/pages/connectors/camel-aws2-ddb-kafka-sink-connector.adoc
index 7f8a826..3e73ea1 100644
--- 
a/docs/modules/ROOT/pages/connectors/camel-aws2-ddb-kafka-sink-connector.adoc
+++ 
b/docs/modules/ROOT/pages/connectors/camel-aws2-ddb-kafka-sink-connector.adoc
@@ -24,7 +24,7 @@ 
connector.class=org.apache.camel.kafkaconnector.aws2ddb.CamelAws2ddbSinkConnecto
 ----
 
 
-The camel-aws2-ddb sink connector supports 39 options, which are listed below.
+The camel-aws2-ddb sink connector supports 41 options, which are listed below.
 
 
 
@@ -47,6 +47,7 @@ The camel-aws2-ddb sink connector supports 39 options, which 
are listed below.
 | *camel.sink.endpoint.region* | The region in which DDB client needs to work 
| null | false | MEDIUM
 | *camel.sink.endpoint.trustAllCertificates* | If we want to trust all 
certificates in case of overriding the endpoint | false | false | MEDIUM
 | *camel.sink.endpoint.uriEndpointOverride* | Set the overriding uri endpoint. 
This option needs to be used in combination with overrideEndpoint option | null 
| false | MEDIUM
+| *camel.sink.endpoint.useDefaultCredentialsProvider* | Set whether the S3 
client should expect to load credentials through a default credentials provider 
or to expect static credentials to be passed in. | false | false | MEDIUM
 | *camel.sink.endpoint.writeCapacity* | The provisioned throughput to reserved 
for writing resources to your table | null | false | MEDIUM
 | *camel.sink.endpoint.accessKey* | Amazon AWS Access Key | null | false | 
MEDIUM
 | *camel.sink.endpoint.secretKey* | Amazon AWS Secret Key | null | false | 
MEDIUM
@@ -66,6 +67,7 @@ The camel-aws2-ddb sink connector supports 39 options, which 
are listed below.
 | *camel.component.aws2-ddb.region* | The region in which DDB client needs to 
work | null | false | MEDIUM
 | *camel.component.aws2-ddb.trustAllCertificates* | If we want to trust all 
certificates in case of overriding the endpoint | false | false | MEDIUM
 | *camel.component.aws2-ddb.uriEndpointOverride* | Set the overriding uri 
endpoint. This option needs to be used in combination with overrideEndpoint 
option | null | false | MEDIUM
+| *camel.component.aws2-ddb.useDefaultCredentialsProvider* | Set whether the 
S3 client should expect to load credentials through a default credentials 
provider or to expect static credentials to be passed in. | false | false | 
MEDIUM
 | *camel.component.aws2-ddb.writeCapacity* | The provisioned throughput to 
reserved for writing resources to your table | null | false | MEDIUM
 | *camel.component.aws2-ddb.autowiredEnabled* | Whether autowiring is enabled. 
This is used for automatic autowiring options (the option must be marked as 
autowired) by looking up in the registry to find if there is a single instance 
of matching type, which then gets configured on the component. This can be used 
for automatic configuring JDBC data sources, JMS connection factories, AWS 
Clients, etc. | true | false | MEDIUM
 | *camel.component.aws2-ddb.accessKey* | Amazon AWS Access Key | null | false 
| MEDIUM
diff --git 
a/docs/modules/ROOT/pages/connectors/camel-jt400-kafka-source-connector.adoc 
b/docs/modules/ROOT/pages/connectors/camel-jt400-kafka-source-connector.adoc
index 9e4e737..0778765 100644
--- a/docs/modules/ROOT/pages/connectors/camel-jt400-kafka-source-connector.adoc
+++ b/docs/modules/ROOT/pages/connectors/camel-jt400-kafka-source-connector.adoc
@@ -24,7 +24,7 @@ 
connector.class=org.apache.camel.kafkaconnector.jt400.CamelJt400SourceConnector
 ----
 
 
-The camel-jt400 source connector supports 36 options, which are listed below.
+The camel-jt400 source connector supports 37 options, which are listed below.
 
 
 
@@ -46,6 +46,7 @@ The camel-jt400 source connector supports 36 options, which 
are listed below.
 | *camel.source.endpoint.readTimeout* | Timeout in millis the consumer will 
wait while trying to read a new message of the data queue. | 30000 | false | 
MEDIUM
 | *camel.source.endpoint.searchType* | Search type such as EQ for equal etc. 
One of: [EQ] [NE] [LT] [LE] [GT] [GE] | "EQ" | false | MEDIUM
 | *camel.source.endpoint.sendEmptyMessageWhenIdle* | If the polling consumer 
did not poll any files, you can enable this option to send an empty message (no 
body) instead. | false | false | MEDIUM
+| *camel.source.endpoint.sendingReply* | If true, the consumer endpoint will 
set the Jt400Constants.MESSAGE_REPLYTO_KEY header of the camel message for any 
IBM i inquiry messages received. If that message is then routed to a producer 
endpoint, the action will not be processed as sending a message to the queue, 
but rather a reply to the specific inquiry message. | true | false | MEDIUM
 | *camel.source.endpoint.exceptionHandler* | To let the consumer use a custom 
ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this 
option is not in use. By default the consumer will deal with exceptions, that 
will be logged at WARN or ERROR level and ignored. | null | false | MEDIUM
 | *camel.source.endpoint.exchangePattern* | Sets the exchange pattern when the 
consumer creates an exchange. One of: [InOnly] [InOut] [InOptionalOut] | null | 
false | MEDIUM
 | *camel.source.endpoint.pollStrategy* | A pluggable 
org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your 
custom implementation to control error handling usually occurred during the 
poll operation before an Exchange have been created and being routed in Camel. 
| null | false | MEDIUM
