This is an automated email from the ASF dual-hosted git repository.
dlmarion pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git
The following commit(s) were added to refs/heads/main by this push:
new 766cc6676c Modified Property to use multi-line strings (#5868)
766cc6676c is described below
commit 766cc6676c89843ccb8e9598b61cd7e1dfef5184
Author: Dave Marion <[email protected]>
AuthorDate: Wed Sep 17 08:53:34 2025 -0400
Modified Property to use multi-line strings (#5868)
Modified larger text blocks with newlines and JSON
default values to use multi-line strings
---
.../org/apache/accumulo/core/conf/Property.java | 954 +++++++++++----------
.../apache/accumulo/core/conf/PropertyTest.java | 4 +-
2 files changed, 498 insertions(+), 460 deletions(-)
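For readers unfamiliar with the feature, the pattern applied throughout this
diff is sketched below. This is a minimal standalone example (not code from
Property.java, which follows) showing how a concatenated string literal
becomes a Java 15+ text block, and how a trailing backslash joins wrapped
source lines back into one logical line:

    public class TextBlockDemo {
        public static void main(String[] args) {
            // Old style: adjacent string concatenation with explicit \n escapes.
            String oldStyle = "Line one.\n" + "Line two continues here.";

            // New style: a text block. The common indentation is stripped, and a
            // trailing backslash elides the line terminator, so the two wrapped
            // source lines below form a single logical line in the value.
            String newStyle = """
                    Line one.
                    Line two \
                    continues here.""";

            System.out.println(oldStyle.equals(newStyle)); // true
        }
    }

The backslash at the end of most description lines in the hunks below is
doing exactly this join.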
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index adcfb080f3..e97db01274 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -54,16 +54,18 @@ public enum Property {
COMPACTION_PREFIX("compaction.", null, PropertyType.PREFIX,
"Both major and minor compaction properties can be included under this
prefix.", "4.0.0"),
COMPACTION_SERVICE_PREFIX(COMPACTION_PREFIX + "service.", null,
PropertyType.PREFIX,
- "This prefix should be used to define all properties for the compaction
services."
- + "See {% jlink -f
org.apache.accumulo.core.spi.compaction.RatioBasedCompactionPlanner %}.\n"
- + "A new external compaction service would be defined like the
following:\n"
- + "`compaction.service.newService.planner="
- +
"\"org.apache.accumulo.core.spi.compaction.RatioBasedCompactionPlanner\".`\n"
- + "`compaction.service.newService.opts.groups=\""
- + "[{\"group\": \"small\", \"maxSize\":\"32M\"},"
- + "{ \"group\":\"medium\",
\"maxSize\":\"512M\"},{\"group\":\"large\"}]`\n"
- + "`compaction.service.newService.opts.maxOpen=50`.\n"
- + "Additional options can be defined using the
`compaction.service.<service>.opts.<option>` property.",
+ """
+ This prefix should be used to define all properties for the
compaction services.
+ See {% jlink -f
org.apache.accumulo.core.spi.compaction.RatioBasedCompactionPlanner %}.
+ A new external compaction service would be defined like the
following:
+
`compaction.service.newService.planner="org.apache.accumulo.core.spi.compaction.RatioBasedCompactionPlanner".`
+ `compaction.service.newService.opts.groups="
+ [{"group": "small", "maxSize": "32M"},
+ {"group": "medium", "maxSize": "512M"},
+ {"group": "large"}]`
+ `compaction.service.newService.opts.maxOpen=50`
+ Additional options can be defined using the
`compaction.service.<service>.opts.<option>` property.
+ """,
"4.0.0"),
COMPACTION_SERVICE_DEFAULT_PLANNER(
COMPACTION_SERVICE_PREFIX + DEFAULT_COMPACTION_SERVICE_NAME + ".planner",
@@ -71,8 +73,9 @@ public enum Property {
"Planner for default compaction service.", "4.0.0"),
COMPACTION_SERVICE_DEFAULT_MAX_OPEN(COMPACTION_SERVICE_DEFAULT_PLANNER +
".opts.maxOpen", "10",
PropertyType.COUNT, "The maximum number of files a compaction will
open.", "4.0.0"),
- COMPACTION_SERVICE_DEFAULT_GROUPS(COMPACTION_SERVICE_DEFAULT_PLANNER +
".opts.groups",
- ("[{'group':'default'}]").replaceAll("'", "\""), PropertyType.JSON,
+ COMPACTION_SERVICE_DEFAULT_GROUPS(COMPACTION_SERVICE_DEFAULT_PLANNER +
".opts.groups", """
+ [{"group": "default"}]
+ """, PropertyType.JSON,
"See {% jlink -f
org.apache.accumulo.core.spi.compaction.RatioBasedCompactionPlanner %}.",
"4.0.0"),
COMPACTION_WARN_TIME(COMPACTION_PREFIX + "warn.time", "10m",
PropertyType.TIMEDURATION,
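The hunk above also changes how the JSON default for
COMPACTION_SERVICE_DEFAULT_GROUPS is produced. Here is a standalone sketch
(not Accumulo code) contrasting the two approaches; note that closing the
text block delimiter on its own line appends a trailing newline:

    public class JsonDefaultDemo {
        public static void main(String[] args) {
            // Old style: write single quotes, then swap them for double quotes
            // to avoid a wall of \" escapes in the source.
            String viaReplace = "[{'group':'default'}]".replaceAll("'", "\"");

            // New style: the JSON is written verbatim, with no escaping. The
            // closing delimiter on its own line adds a trailing newline.
            String viaTextBlock = """
                    [{"group": "default"}]
                    """;

            System.out.println(viaReplace);           // [{"group":"default"}]
            System.out.println(viaTextBlock.strip()); // [{"group": "default"}]
        }
    }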
@@ -84,22 +87,23 @@ public enum Property {
"Properties in this category related to the configuration of SSL keys
for"
+ " RPC. See also `instance.ssl.enabled`.",
"1.6.0"),
- RPC_PROCESS_ADVERTISE_ADDRESS("rpc.advertise.addr", "", PropertyType.STRING,
- "The address to use when registering this server in ZooKeeper. This
could be an"
- + " IP address or hostname and defaults to rpc.bind.addr property
value. Port "
- + "numbers, if not specified, will default to the port property for
the specific server type.",
- "2.1.4"),
- RPC_PROCESS_BIND_ADDRESS("rpc.bind.addr", "", PropertyType.STRING,
- "The local IP address to which this server should bind for sending and
receiving network traffic. If not set then the process binds to all addresses.",
- "2.1.4"),
+ RPC_PROCESS_ADVERTISE_ADDRESS("rpc.advertise.addr", "", PropertyType.STRING,
"""
+ The address to use when registering this server in ZooKeeper. This could
be an \
+ IP address or hostname and defaults to rpc.bind.addr property value.
Port \
+ numbers, if not specified, will default to the port property for the
specific server type.
+ """, "2.1.4"),
+ RPC_PROCESS_BIND_ADDRESS("rpc.bind.addr", "", PropertyType.STRING, """
+ The local IP address to which this server should bind for sending \
+ and receiving network traffic. If not set then the process binds to all
addresses.
+ """, "2.1.4"),
RPC_MAX_MESSAGE_SIZE("rpc.message.size.max",
Integer.toString(Integer.MAX_VALUE),
PropertyType.BYTES, "The maximum size of a message that can be received
by a server.",
"2.1.3"),
- RPC_BACKLOG("rpc.backlog", "50", PropertyType.COUNT,
- "Configures the TCP backlog for the server side sockets created by
Thrift."
- + " This property is not used for SSL type server sockets. A value
of zero"
- + " will use the Thrift default value.",
- "2.1.3"),
+ RPC_BACKLOG("rpc.backlog", "50", PropertyType.COUNT, """
+ Configures the TCP backlog for the server side sockets created by
Thrift. \
+ This property is not used for SSL type server sockets. A value of zero \
+ will use the Thrift default value.
+ """, "2.1.3"),
RPC_SSL_KEYSTORE_PATH("rpc.javax.net.ssl.keyStore", "", PropertyType.PATH,
"Path of the keystore file for the server's private SSL key.", "1.6.0"),
@Sensitive
@@ -117,17 +121,19 @@ public enum Property {
RPC_SSL_TRUSTSTORE_TYPE("rpc.javax.net.ssl.trustStoreType", "jks",
PropertyType.STRING,
"Type of SSL truststore.", "1.6.0"),
RPC_USE_JSSE("rpc.useJsse", "false", PropertyType.BOOLEAN,
- "Use JSSE system properties to configure SSL rather than the " +
RPC_PREFIX.getKey()
- + "javax.net.ssl.* Accumulo properties.",
+ """
+ Use JSSE system properties to configure SSL rather than the
%sjavax.net.ssl.* Accumulo properties.
+ """
+ .formatted(RPC_PREFIX.getKey()),
"1.6.0"),
RPC_SSL_CIPHER_SUITES("rpc.ssl.cipher.suites", "", PropertyType.STRING,
"Comma separated list of cipher suites that can be used by accepted
connections.", "1.6.1"),
RPC_SSL_ENABLED_PROTOCOLS("rpc.ssl.server.enabled.protocols", "TLSv1.3",
PropertyType.STRING,
"Comma separated list of protocols that can be used to accept
connections.", "1.6.2"),
- RPC_SSL_CLIENT_PROTOCOL("rpc.ssl.client.protocol", "TLSv1.3",
PropertyType.STRING,
- "The protocol used to connect to a secure server. Must be in the list of
enabled protocols "
- + "on the server side `rpc.ssl.server.enabled.protocols`.",
- "1.6.2"),
+ RPC_SSL_CLIENT_PROTOCOL("rpc.ssl.client.protocol", "TLSv1.3",
PropertyType.STRING, """
+ The protocol used to connect to a secure server. Must be in the list of
enabled protocols \
+ on the server side `rpc.ssl.server.enabled.protocols`.
+ """, "1.6.2"),
RPC_SASL_QOP("rpc.sasl.qop", "auth", PropertyType.STRING,
"The quality of protection to be used with SASL. Valid values are
'auth', 'auth-int',"
+ " and 'auth-conf'.",
@@ -135,7 +141,7 @@ public enum Property {
// instance properties (must be the same for every node in an instance)
INSTANCE_PREFIX("instance.", null, PropertyType.PREFIX,
- "Properties in this category must be consistent throughout a cloud. "
+ "Properties in this category must be consistent throughout an instance. "
+ "This is enforced and servers won't be able to communicate if
these differ.",
"1.3.5"),
INSTANCE_ZK_HOST("instance.zookeeper.host", "localhost:2181",
PropertyType.HOSTLIST,
@@ -146,52 +152,54 @@ public enum Property {
+ Integer.MAX_VALUE + ".",
"1.3.5"),
@Sensitive
- INSTANCE_SECRET("instance.secret", "DEFAULT", PropertyType.STRING,
- "A secret unique to a given instance that all servers must know in order"
- + " to communicate with one another. It should be changed prior to
the"
- + " initialization of Accumulo. To change it after Accumulo has been"
- + " initialized, use the ChangeSecret tool and then update
accumulo.properties"
- + " everywhere. Before using the ChangeSecret tool, make sure
Accumulo is not"
- + " running and you are logged in as the user that controls Accumulo
files in"
- + " HDFS. To use the ChangeSecret tool, run the command:
`./bin/accumulo"
- + " admin changeSecret`.",
- "1.3.5"),
- INSTANCE_VOLUMES("instance.volumes", "", PropertyType.VOLUMES,
- "A comma separated list of dfs uris to use. Files will be stored across"
- + " these filesystems. In some situations, the first volume in this
list"
- + " may be treated differently, such as being preferred for writing
out"
- + " temporary files (for example, when creating a pre-split table)."
- + " After adding uris to this list, run 'accumulo init --add-volume'
and then"
- + " restart tservers. If entries are removed from this list then
tservers"
- + " will need to be restarted. After a uri is removed from the list
Accumulo"
- + " will not create new files in that location, however Accumulo can
still"
- + " reference files created at that location before the config
change. To use"
- + " a comma or other reserved characters in a URI use standard URI
hex"
- + " encoding. For example replace commas with %2C.",
- "1.6.0"),
+ INSTANCE_SECRET("instance.secret", "DEFAULT", PropertyType.STRING, """
+ A secret unique to a given instance that all servers must know in order \
+ to communicate with one another. It should be changed prior to the \
+ initialization of Accumulo. To change it after Accumulo has been \
+ initialized, use the ChangeSecret tool and then update
accumulo.properties \
+ everywhere. Before using the ChangeSecret tool, make sure Accumulo is
not \
+ running and you are logged in as the user that controls Accumulo files
in \
+ HDFS. To use the ChangeSecret tool, run the command: `./bin/accumulo \
+ admin changeSecret`.
+ """, "1.3.5"),
+ INSTANCE_VOLUMES("instance.volumes", "", PropertyType.VOLUMES, """
+ A comma separated list of dfs uris to use. Files will be stored across \
+ these filesystems. In some situations, the first volume in this list \
+ may be treated differently, such as being preferred for writing out \
+ temporary files (for example, when creating a pre-split table). \
+ After adding uris to this list, run 'accumulo init --add-volume' and
then \
+ restart tservers. If entries are removed from this list then tservers \
+ will need to be restarted. After a uri is removed from the list Accumulo
\
+ will not create new files in that location, however Accumulo can still \
+ reference files created at that location before the config change. To
use \
+ a comma or other reserved characters in a URI use standard URI hex \
+ encoding. For example replace commas with %2C.
+ """, "1.6.0"),
INSTANCE_VOLUME_CONFIG_PREFIX("instance.volume.config.", null,
PropertyType.PREFIX,
- "Properties in this category are used to provide volume specific
overrides to "
- + "the general filesystem client configuration. Properties using
this prefix "
- + "should be in the form "
- +
"'instance.volume.config.<volume-uri>.<property-name>=<property-value>. An "
- + "example: "
- +
"'instance.volume.config.hdfs://namespace-a:8020/accumulo.dfs.client.hedged.read.threadpool.size=10'.
"
- + "Note that when specifying property names that contain colons in
the properties "
- + "files that the colons need to be escaped with a backslash.",
+ """
+ Properties in this category are used to provide volume specific
overrides to \
+ the general filesystem client configuration. Properties using this
prefix \
+ should be in the form \
+
'instance.volume.config.<volume-uri>.<property-name>=<property-value>. An \
+ example: \
+
'instance.volume.config.hdfs://namespace-a:8020/accumulo.dfs.client.hedged.read.threadpool.size=10'.
\
+ Note that when specifying property names that contain colons in the
properties \
+ files that the colons need to be escaped with a backslash.
+ """,
"2.1.1"),
- INSTANCE_VOLUMES_REPLACEMENTS("instance.volumes.replacements", "",
PropertyType.STRING,
- "Since accumulo stores absolute URIs changing the location of a namenode
"
- + "could prevent Accumulo from starting. The property helps deal
with "
- + "that situation. Provide a comma separated list of uri replacement
"
- + "pairs here if a namenode location changes. Each pair should be
separated "
- + "with a space. For example, if hdfs://nn1 was replaced with "
- + "hdfs://nnA and hdfs://nn2 was replaced with hdfs://nnB, then set
this "
- + "property to 'hdfs://nn1 hdfs://nnA,hdfs://nn2 hdfs://nnB' "
- + "Replacements must be configured for use. To see which volumes are
"
- + "currently in use, run 'accumulo admin volumes -l'. To use a comma
or "
- + "other reserved characters in a URI use standard URI hex encoding.
For "
- + "example replace commas with %2C.",
- "1.6.0"),
+ INSTANCE_VOLUMES_REPLACEMENTS("instance.volumes.replacements", "",
PropertyType.STRING, """
+ Since accumulo stores absolute URIs changing the location of a namenode \
+ could prevent Accumulo from starting. The property helps deal with \
+ that situation. Provide a comma separated list of uri replacement \
+ pairs here if a namenode location changes. Each pair should be separated
\
+ with a space. For example, if hdfs://nn1 was replaced with \
+ hdfs://nnA and hdfs://nn2 was replaced with hdfs://nnB, then set this \
+ property to 'hdfs://nn1 hdfs://nnA,hdfs://nn2 hdfs://nnB' \
+ Replacements must be configured for use. To see which volumes are \
+ currently in use, run 'accumulo admin volumes -l'. To use a comma or \
+ other reserved characters in a URI use standard URI hex encoding. For \
+ "example replace commas with %2C.
+ """, "1.6.0"),
@Experimental // interface uses unstable internal types, use with caution
INSTANCE_SECURITY_AUTHENTICATOR("instance.security.authenticator",
"org.apache.accumulo.server.security.handler.ZKAuthenticator",
PropertyType.CLASSNAME,
@@ -351,25 +359,27 @@ public enum Property {
"Enables additional JVM metrics collection and reporting using
Micrometer. Requires "
+ "property 'general.micrometer.enabled' to be set to 'true' to take
effect.",
"2.1.0"),
- GENERAL_MICROMETER_LOG_METRICS("general.micrometer.log.metrics", "none",
PropertyType.STRING,
- "Enables additional log metrics collection and reporting using
Micrometer. Requires "
- + "property 'general.micrometer.enabled' to be set to 'true' to take
effect. Micrometer "
- + "natively instruments Log4j2 and Logback. Valid values for this
property are 'none',"
- + "'log4j2' or 'logback'.",
- "2.1.4"),
+ GENERAL_MICROMETER_LOG_METRICS("general.micrometer.log.metrics", "none",
PropertyType.STRING, """
+ Enables additional log metrics collection and reporting using
Micrometer. Requires \
+ property 'general.micrometer.enabled' to be set to 'true' to take
effect. Micrometer \
+ natively instruments Log4j2 and Logback. Valid values for this property
are 'none', \
+ 'log4j2' or 'logback'.
+ """, "2.1.4"),
GENERAL_MICROMETER_FACTORY("general.micrometer.factory",
"org.apache.accumulo.core.spi.metrics.LoggingMeterRegistryFactory",
PropertyType.CLASSNAMELIST,
- "A comma separated list of one or more class names that implements"
- + " org.apache.accumulo.core.spi.metrics.MeterRegistryFactory. Prior
to"
- + " 2.1.3 this was a single value and the default was an empty
string. In 2.1.3 the default"
- + " was changed and it now can accept multiple class names. The
metrics spi was introduced in 2.1.3,"
- + " the deprecated factory is
org.apache.accumulo.core.metrics.MeterRegistryFactory.",
+ """
+ A comma separated list of one or more class names that implements \
+ org.apache.accumulo.core.spi.metrics.MeterRegistryFactory. Prior to \
+ 2.1.3 this was a single value and the default was an empty string.
In 2.1.3 the default \
+ was changed and it now can accept multiple class names. The metrics
spi was introduced in 2.1.3, \
+ the deprecated factory is
org.apache.accumulo.core.metrics.MeterRegistryFactory.
+ """,
"2.1.0"),
- GENERAL_MICROMETER_USER_TAGS("general.micrometer.user.tags", "",
PropertyType.STRING,
- "A comma separated list of tags to emit with all metrics from the
process. Example:"
- + "\"tag1=value1,tag2=value2\".",
- "4.0.0"),
+ GENERAL_MICROMETER_USER_TAGS("general.micrometer.user.tags", "",
PropertyType.STRING, """
+ A comma separated list of tags to emit with all metrics from the
process. Example: \
+ "tag1=value1,tag2=value2".
+ """, "4.0.0"),
@Deprecated(since = "4.0.0")
@ReplacedBy(property = RPC_PROCESS_BIND_ADDRESS)
GENERAL_PROCESS_BIND_ADDRESS("general.process.bind.addr", "0.0.0.0",
PropertyType.STRING,
@@ -409,15 +419,19 @@ public enum Property {
"2.1.4"),
MANAGER_TABLET_REFRESH_MINTHREADS("manager.tablet.refresh.threads.minimum",
"10",
PropertyType.COUNT,
- "The Manager will notify TabletServers that a Tablet needs to be
refreshed after certain operations"
- + " are performed (e.g. Bulk Import). This property specifies the
number of core threads in a"
- + " ThreadPool in the Manager that will be used to request these
refresh operations.",
+ """
+ The Manager will notify TabletServers that a Tablet needs to be
refreshed after certain operations \
+ are performed (e.g. Bulk Import). This property specifies the number
of core threads in a \
+ ThreadPool in the Manager that will be used to request these refresh
operations.
+ """,
"4.0.0"),
MANAGER_TABLET_REFRESH_MAXTHREADS("manager.tablet.refresh.threads.maximum",
"10",
PropertyType.COUNT,
- "The Manager will notify TabletServers that a Tablet needs to be
refreshed after certain operations"
- + " are performed (e.g. Bulk Import). This property specifies the
maximum number of threads in a"
- + " ThreadPool in the Manager that will be used to request these
refresh operations.",
+ """
+ The Manager will notify TabletServers that a Tablet needs to be
refreshed after certain operations \
+ are performed (e.g. Bulk Import). This property specifies the
maximum number of threads in a \
+ ThreadPool in the Manager that will be used to request these refresh
operations.
+ """,
"4.0.0"),
MANAGER_TABLET_MERGEABILITY_INTERVAL("manager.tablet.mergeability.interval",
"24h",
PropertyType.TIMEDURATION,
@@ -457,48 +471,56 @@ public enum Property {
PropertyType.TIMEDURATION, "Limit calls from metric sinks to zookeeper
to update interval.",
"1.9.3"),
MANAGER_FATE_USER_CONFIG("manager.fate.user.config",
-
"{'general':{'TABLE_CREATE,TABLE_DELETE,TABLE_RENAME,TABLE_ONLINE,TABLE_OFFLINE,NAMESPACE_CREATE,"
- +
"NAMESPACE_DELETE,NAMESPACE_RENAME,TABLE_TABLET_AVAILABILITY,SHUTDOWN_TSERVER,"
- +
"TABLE_BULK_IMPORT2,TABLE_COMPACT,TABLE_CANCEL_COMPACT,TABLE_MERGE,TABLE_DELETE_RANGE,"
- + "TABLE_SPLIT,TABLE_CLONE,TABLE_IMPORT,TABLE_EXPORT,SYSTEM_MERGE':
4}, "
- + "'commit':{'COMMIT_COMPACTION': 4}, 'split':{'SYSTEM_SPLIT':
4}}".replace("'", "\""),
- PropertyType.FATE_USER_CONFIG,
- "The number of threads used to run fault-tolerant executions (FATE) on
user"
- + "tables. These are primarily table operations like merge. The
property value is JSON. "
- + "Each key is the name of the pool (can be assigned any string).
Each value is a JSON "
- + "object (with a single key/value) whose key is a comma-separated
string list of "
- + "operations and whose value is a pool size for those operations.",
- "4.0.0"),
+ """
+ {
+ 'general':
{'TABLE_CREATE,TABLE_DELETE,TABLE_RENAME,TABLE_ONLINE,TABLE_OFFLINE,NAMESPACE_CREATE,\
+
NAMESPACE_DELETE,NAMESPACE_RENAME,TABLE_TABLET_AVAILABILITY,SHUTDOWN_TSERVER,\
+
TABLE_BULK_IMPORT2,TABLE_COMPACT,TABLE_CANCEL_COMPACT,TABLE_MERGE,TABLE_DELETE_RANGE,\
+ TABLE_SPLIT,TABLE_CLONE,TABLE_IMPORT,TABLE_EXPORT,SYSTEM_MERGE': 4},
+ 'commit': {'COMMIT_COMPACTION': 4},
+ 'split': {'SYSTEM_SPLIT': 4}
+ }
+ """,
+ PropertyType.FATE_USER_CONFIG, """
+ The number of threads used to run fault-tolerant executions (FATE)
on user \
+ tables. These are primarily table operations like merge. The
property value is JSON. \
+ Each key is the name of the pool (can be assigned any string). Each
value is a JSON \
+ object (with a single key/value) whose key is a comma-separated
string list of \
+ operations and whose value is a pool size for those operations.
+ """, "4.0.0"),
MANAGER_FATE_META_CONFIG("manager.fate.meta.config",
-
"{'general':{'TABLE_CREATE,TABLE_DELETE,TABLE_RENAME,TABLE_ONLINE,TABLE_OFFLINE,NAMESPACE_CREATE,"
- +
"NAMESPACE_DELETE,NAMESPACE_RENAME,TABLE_TABLET_AVAILABILITY,SHUTDOWN_TSERVER,"
- +
"TABLE_BULK_IMPORT2,TABLE_COMPACT,TABLE_CANCEL_COMPACT,TABLE_MERGE,TABLE_DELETE_RANGE,"
- + "TABLE_SPLIT,TABLE_CLONE,TABLE_IMPORT,TABLE_EXPORT,SYSTEM_MERGE':
4}, "
- + "'commit':{'COMMIT_COMPACTION': 4}, 'split':{'SYSTEM_SPLIT':
4}}".replace("'", "\""),
- PropertyType.FATE_META_CONFIG,
- "The number of threads used to run fault-tolerant executions (FATE) on
Accumulo system"
- + "tables. These are primarily table operations like merge. The
property value is JSON. "
- + "Each key is the name of the pool (can be assigned any string).
Each value is a JSON "
- + "object (with a single key/value) whose key is a comma-separated
string list of "
- + "operations and whose value is a pool size for those operations.",
- "4.0.0"),
+ """
+ {
+ 'general':
{'TABLE_CREATE,TABLE_DELETE,TABLE_RENAME,TABLE_ONLINE,TABLE_OFFLINE,NAMESPACE_CREATE,\
+
NAMESPACE_DELETE,NAMESPACE_RENAME,TABLE_TABLET_AVAILABILITY,SHUTDOWN_TSERVER,\
+
TABLE_BULK_IMPORT2,TABLE_COMPACT,TABLE_CANCEL_COMPACT,TABLE_MERGE,TABLE_DELETE_RANGE,\
+ TABLE_SPLIT,TABLE_CLONE,TABLE_IMPORT,TABLE_EXPORT,SYSTEM_MERGE': 4},
+ 'commit': {'COMMIT_COMPACTION': 4},
+ 'split': {'SYSTEM_SPLIT': 4}
+ }
+ """,
+ PropertyType.FATE_META_CONFIG, """
+ The number of threads used to run fault-tolerant executions (FATE)
on Accumulo system \
+ tables. These are primarily table operations like merge. The
property value is JSON. \
+ Each key is the name of the pool (can be assigned any string). Each
value is a JSON \
+ object (with a single key/value) whose key is a comma-separated
string list of \
+ operations and whose value is a pool size for those operations.
+ """, "4.0.0"),
@Deprecated(since = "4.0.0")
MANAGER_FATE_THREADPOOL_SIZE("manager.fate.threadpool.size", "64",
- PropertyType.FATE_THREADPOOL_SIZE,
- String.format(
- "Previously, the number of threads used to run fault-tolerant
executions (FATE)."
- + " This is no longer used in 4.0+. %s and %s are the
replacement and must be"
- + " set instead.",
- MANAGER_FATE_USER_CONFIG.getKey(),
MANAGER_FATE_META_CONFIG.getKey()),
+ PropertyType.FATE_THREADPOOL_SIZE, """
+ Previously, the number of threads used to run fault-tolerant
executions (FATE). \
+ This is no longer used in 4.0+. %s and %s are the replacement and
must be \
+ set instead.
+ """.formatted(MANAGER_FATE_USER_CONFIG.getKey(),
MANAGER_FATE_META_CONFIG.getKey()),
"1.4.3"),
MANAGER_FATE_IDLE_CHECK_INTERVAL("manager.fate.idle.check.interval", "60m",
- PropertyType.TIMEDURATION,
- String.format(
- "The interval at which to check if the number of idle Fate threads
has consistently been"
- + " zero. The way this is checked is an approximation. Logs a
warning in the Manager"
- + " log to change %s or %s. A value less than a minute disables
this check and has a"
- + " maximum value of 60m.",
- MANAGER_FATE_USER_CONFIG.getKey(),
MANAGER_FATE_META_CONFIG.getKey()),
+ PropertyType.TIMEDURATION, """
+ The interval at which to check if the number of idle Fate threads
has consistently been \
+ zero. The way this is checked is an approximation. Logs a warning in
the Manager \
+ log to change %s or %s. A value less than a minute disables this
check and has a \
+ maximum value of 60m.
+ """.formatted(MANAGER_FATE_USER_CONFIG.getKey(),
MANAGER_FATE_META_CONFIG.getKey()),
"4.0.0"),
MANAGER_STATUS_THREAD_POOL_SIZE("manager.status.threadpool.size", "0",
PropertyType.COUNT,
"The number of threads to use when fetching the tablet server status for
balancing. Zero "
@@ -509,34 +531,34 @@ public enum Property {
+ " table to be suspended via table.suspend.duration.",
"1.8.0"),
MANAGER_STARTUP_TSERVER_AVAIL_MIN_COUNT("manager.startup.tserver.avail.min.count",
"0",
- PropertyType.COUNT,
- "Minimum number of tservers that need to be registered before manager
will "
- + "start tablet assignment - checked at manager initialization, when
manager gets lock. "
- + " When set to 0 or less, no blocking occurs. Default is 0
(disabled) to keep original "
- + " behaviour.",
- "1.10.0"),
+ PropertyType.COUNT, """
+ Minimum number of tservers that need to be registered before manager
will \
+ start tablet assignment - checked at manager initialization, when
manager gets lock. \
+ When set to 0 or less, no blocking occurs. Default is 0 (disabled)
to keep original \
+ behaviour.
+ """, "1.10.0"),
MANAGER_STARTUP_TSERVER_AVAIL_MAX_WAIT("manager.startup.tserver.avail.max.wait",
"0",
- PropertyType.TIMEDURATION,
- "Maximum time manager will wait for tserver available threshold "
- + "to be reached before continuing. When set to 0 or less, will
block "
- + "indefinitely. Default is 0 to block indefinitely. Only valid when
tserver available "
- + "threshold is set greater than 0.",
- "1.10.0"),
+ PropertyType.TIMEDURATION, """
+ Maximum time manager will wait for tserver available threshold \
+ to be reached before continuing. When set to 0 or less, will block \
+ indefinitely. Default is 0 to block indefinitely. Only valid when
tserver available \
+ threshold is set greater than 0.
+ """, "1.10.0"),
MANAGER_COMPACTION_SERVICE_PRIORITY_QUEUE_SIZE("manager.compaction.major.service.queue.size",
- "1M", PropertyType.MEMORY,
- "The data size of each resource groups compaction job priority queue.
The memory size of "
- + "each compaction job is estimated and the sum of these sizes per
resource group will not "
- + "exceed this setting. When the size is exceeded the lowest
priority jobs are dropped as "
- + "needed.",
- "4.0.0"),
+ "1M", PropertyType.MEMORY, """
+ The data size of each resource group's compaction job priority queue.
The memory size of \
+ each compaction job is estimated and the sum of these sizes per
resource group will not \
+ exceed this setting. When the size is exceeded the lowest priority
jobs are dropped as \
+ needed.
+ """, "4.0.0"),
SPLIT_PREFIX("split.", null, PropertyType.PREFIX,
"System wide properties related to splitting tablets.", "4.0.0"),
- SPLIT_MAXOPEN("split.files.max", "300", PropertyType.COUNT,
- "To find a tablets split points, all RFiles are opened and their indexes"
- + " are read. This setting determines how many RFiles can be opened
at once."
- + " When there are more RFiles than this setting the tablet will be
marked"
- + " as un-splittable.",
- "4.0.0"),
+ SPLIT_MAXOPEN("split.files.max", "300", PropertyType.COUNT, """
+ To find a tablet's split points, all RFiles are opened and their indexes \
+ are read. This setting determines how many RFiles can be opened at once.
\
+ When there are more RFiles than this setting the tablet will be marked \
+ as un-splittable.
+ """, "4.0.0"),
// properties that are specific to scan server behavior
SSERV_PREFIX("sserver.", null, PropertyType.PREFIX,
"Properties in this category affect the behavior of the scan servers.",
"2.1.0"),
@@ -549,23 +571,23 @@ public enum Property {
SSERV_DEFAULT_BLOCKSIZE("sserver.default.blocksize", "1M",
PropertyType.BYTES,
"Specifies a default blocksize for the scan server caches.", "2.1.0"),
SSERV_GROUP_NAME("sserver.group",
ScanServerSelector.DEFAULT_SCAN_SERVER_GROUP_NAME,
- PropertyType.STRING,
- "Resource group name for this ScanServer. Resource groups support at
least two use cases:"
- + " dedicating resources to scans and/or using different hardware
for scans. Clients can"
- + " configure the ConfigurableScanServerSelector to specify the
resource group to use for"
- + " eventual consistency scans.",
- "3.0.0"),
+ PropertyType.STRING, """
+ Resource group name for this ScanServer. Resource groups support at
least two use cases: \
+ dedicating resources to scans and/or using different hardware for
scans. Clients can \
+ configure the ConfigurableScanServerSelector to specify the resource
group to use for \
+ eventual consistency scans.
+ """, "3.0.0"),
SSERV_CACHED_TABLET_METADATA_EXPIRATION("sserver.cache.metadata.expiration",
"5m",
PropertyType.TIMEDURATION,
"The time after which cached tablet metadata will be expired if not
previously refreshed.",
"2.1.0"),
SSERV_CACHED_TABLET_METADATA_REFRESH_PERCENT("sserver.cache.metadata.refresh.percent",
".75",
- PropertyType.FRACTION,
- "The time after which cached tablet metadata will be refreshed,
expressed as a "
- + "percentage of the expiration time. Cache hits after this time,
but before the "
- + "expiration time, will trigger a background refresh for future
hits. "
- + "Value must be less than 100%. Set to 0 will disable refresh.",
- "2.1.3"),
+ PropertyType.FRACTION, """
+ The time after which cached tablet metadata will be refreshed,
expressed as a \
+ percentage of the expiration time. Cache hits after this time, but
before the \
+ expiration time, will trigger a background refresh for future hits. \
+ Value must be less than 100%. Setting this to 0 disables refresh.
+ """, "2.1.3"),
SSERV_PORTSEARCH("sserver.port.search", "true", PropertyType.BOOLEAN,
"if the sserver.port.client ports are in use, search higher ports until
one is available.",
"2.1.0"),
@@ -576,25 +598,25 @@ public enum Property {
SSERV_MINTHREADS_TIMEOUT("sserver.server.threads.timeout", "0s",
PropertyType.TIMEDURATION,
"The time after which incoming request threads terminate with no work
available. Zero (0) will keep the threads alive indefinitely.",
"2.1.0"),
- SSERV_SCAN_EXECUTORS_PREFIX("sserver.scan.executors.", null,
PropertyType.PREFIX,
- "Prefix for defining executors to service scans. See "
- + "[scan executors]({% durl administration/scan-executors %}) for an
overview of why and"
- + " how to use this property. For each executor the number of
threads, thread priority, "
- + "and an optional prioritizer can be configured. To configure a new
executor, set "
- + "`sserver.scan.executors.<name>.threads=<number>`. Optionally,
can also set "
- + "`sserver.scan.executors.<name>.priority=<number 1 to 10>`, "
- + "`sserver.scan.executors.<name>.prioritizer=<class name>`, and "
- + "`sserver.scan.executors.<name>.prioritizer.opts.<key>=<value>`.",
- "2.1.0"),
+ SSERV_SCAN_EXECUTORS_PREFIX("sserver.scan.executors.", null,
PropertyType.PREFIX, """
+ Prefix for defining executors to service scans. See \
+ [scan executors]({% durl administration/scan-executors %}) for an
overview of why and \
+ how to use this property. For each executor the number of threads,
thread priority, \
+ and an optional prioritizer can be configured. To configure a new
executor, set \
+ `sserver.scan.executors.<name>.threads=<number>`. Optionally, can also
set \
+ `sserver.scan.executors.<name>.priority=<number 1 to 10>`, \
+ `sserver.scan.executors.<name>.prioritizer=<class name>`, and \
+ `sserver.scan.executors.<name>.prioritizer.opts.<key>=<value>`.
+ """, "2.1.0"),
SSERV_SCAN_EXECUTORS_DEFAULT_THREADS("sserver.scan.executors.default.threads",
"16",
PropertyType.COUNT, "The number of threads for the scan executor that
tables use by default.",
"2.1.0"),
SSERV_SCAN_EXECUTORS_DEFAULT_PRIORITIZER("sserver.scan.executors.default.prioritizer",
"",
- PropertyType.STRING,
- "Prioritizer for the default scan executor. Defaults to none which "
- + "results in FIFO priority. Set to a class that implements "
- + ScanPrioritizer.class.getName() + " to configure one.",
- "2.1.0"),
+ PropertyType.STRING, """
+ Prioritizer for the default scan executor. Defaults to none which \
+ results in FIFO priority. Set to a class that implements \
+ %s + " to configure one.
+ """.formatted(ScanPrioritizer.class.getName()), "2.1.0"),
SSERV_SCAN_EXECUTORS_META_THREADS("sserver.scan.executors.meta.threads",
"8", PropertyType.COUNT,
"The number of threads for the metadata table scan executor.", "2.1.0"),
SSERV_SCAN_REFERENCE_EXPIRATION_TIME("sserver.scan.reference.expiration",
"5m",
@@ -638,12 +660,12 @@ public enum Property {
TSERV_WAL_MAX_AGE("tserver.wal.max.age", "24h", PropertyType.TIMEDURATION,
"The maximum age for each write-ahead log.", "2.1.0"),
TSERV_WAL_TOLERATED_CREATION_FAILURES("tserver.wal.tolerated.creation.failures",
"50",
- PropertyType.COUNT,
- "The maximum number of failures tolerated when creating a new
write-ahead"
- + " log. Negative values will allow unlimited creation failures.
Exceeding this"
- + " number of failures consecutively trying to create a new
write-ahead log"
- + " causes the TabletServer to exit.",
- "2.1.0"),
+ PropertyType.COUNT, """
+ The maximum number of failures tolerated when creating a new
write-ahead \
+ log. Negative values will allow unlimited creation failures.
Exceeding this \
+ number of failures consecutively trying to create a new write-ahead
log \
+ causes the TabletServer to exit.
+ """, "2.1.0"),
TSERV_WAL_TOLERATED_WAIT_INCREMENT("tserver.wal.tolerated.wait.increment",
"1000ms",
PropertyType.TIMEDURATION,
"The amount of time to wait between failures to create or write a
write-ahead log.", "2.1.0"),
@@ -663,14 +685,14 @@ public enum Property {
"An off-heap in-memory data store for accumulo implemented in c++ that
increases"
+ " the amount of data accumulo can hold in memory and avoids Java
GC pauses.",
"1.3.5"),
- TSERV_MAXMEM("tserver.memory.maps.max", "33%", PropertyType.MEMORY,
- "Maximum amount of memory that can be used to buffer data written to a"
- + " tablet server. There are two other properties that can
effectively limit"
- + " memory usage `table.compaction.minor.logs.threshold` and"
- + " `tserver.wal.max.size`. Ensure that
`table.compaction.minor.logs.threshold`"
- + " * `tserver.wal.max.size` >= this property. This map is created
in off-heap"
- + " memory when " + TSERV_NATIVEMAP_ENABLED.name() + " is enabled.",
- "1.3.5"),
+ TSERV_MAXMEM("tserver.memory.maps.max", "33%", PropertyType.MEMORY, """
+ Maximum amount of memory that can be used to buffer data written to a \
+ tablet server. There are two other properties that can effectively limit
\
+ memory usage `table.compaction.minor.logs.threshold` and \
+ `tserver.wal.max.size`. Ensure that
`table.compaction.minor.logs.threshold` \
+ * `tserver.wal.max.size` >= this property. This map is created in
off-heap \
+ memory when %s is enabled.
+ """.formatted(TSERV_NATIVEMAP_ENABLED.name()), "1.3.5"),
TSERV_SESSION_MAXIDLE("tserver.session.idle.max", "1m",
PropertyType.TIMEDURATION,
"When a tablet server's SimpleTimer thread triggers to check idle"
+ " sessions, this configurable option will be used to evaluate scan
sessions"
@@ -681,25 +703,25 @@ public enum Property {
+ " sessions, this configurable option will be used to evaluate
update"
+ " sessions to determine if they can be closed due to inactivity.",
"1.6.5"),
- TSERV_SCAN_EXECUTORS_PREFIX("tserver.scan.executors.", null,
PropertyType.PREFIX,
- "Prefix for defining executors to service scans. See "
- + "[scan executors]({% durl administration/scan-executors %}) for an
overview of why and"
- + " how to use this property. For each executor the number of
threads, thread priority, "
- + "and an optional prioritizer can be configured. To configure a new
executor, set "
- + "`tserver.scan.executors.<name>.threads=<number>`. Optionally,
can also set "
- + "`tserver.scan.executors.<name>.priority=<number 1 to 10>`, "
- + "`tserver.scan.executors.<name>.prioritizer=<class name>`, and "
- + "`tserver.scan.executors.<name>.prioritizer.opts.<key>=<value>`.",
- "2.0.0"),
+ TSERV_SCAN_EXECUTORS_PREFIX("tserver.scan.executors.", null,
PropertyType.PREFIX, """
+ Prefix for defining executors to service scans. See \
+ [scan executors]({% durl administration/scan-executors %}) for an
overview of why and \
+ how to use this property. For each executor the number of threads,
thread priority, \
+ and an optional prioritizer can be configured. To configure a new
executor, set \
+ `tserver.scan.executors.<name>.threads=<number>`. Optionally, can also
set \
+ `tserver.scan.executors.<name>.priority=<number 1 to 10>`, \
+ `tserver.scan.executors.<name>.prioritizer=<class name>`, and \
+ `tserver.scan.executors.<name>.prioritizer.opts.<key>=<value>`.
+ """, "2.0.0"),
TSERV_SCAN_EXECUTORS_DEFAULT_THREADS("tserver.scan.executors.default.threads",
"16",
PropertyType.COUNT, "The number of threads for the scan executor that
tables use by default.",
"2.0.0"),
TSERV_SCAN_EXECUTORS_DEFAULT_PRIORITIZER("tserver.scan.executors.default.prioritizer",
"",
- PropertyType.STRING,
- "Prioritizer for the default scan executor. Defaults to none which "
- + "results in FIFO priority. Set to a class that implements "
- + ScanPrioritizer.class.getName() + " to configure one.",
- "2.0.0"),
+ PropertyType.STRING, """
+ Prioritizer for the default scan executor. Defaults to none which \
+ results in FIFO priority. Set to a class that implements \
+ %s to configure one.
+ """.formatted(ScanPrioritizer.class.getName()), "2.0.0"),
TSERV_SCAN_EXECUTORS_META_THREADS("tserver.scan.executors.meta.threads",
"8", PropertyType.COUNT,
"The number of threads for the metadata table scan executor.", "2.0.0"),
TSERV_SCAN_RESULTS_MAX_TIMEOUT("tserver.scan.results.max.timeout", "1s",
@@ -714,13 +736,13 @@ public enum Property {
"The number of concurrent threads that will load bloom filters in the
background. "
+ "Setting this to zero will make bloom filters load in the
foreground.",
"1.3.5"),
- TSERV_MEMDUMP_DIR("tserver.dir.memdump", "/tmp", PropertyType.PATH,
- "A long running scan could possibly hold memory that has been minor"
- + " compacted. To prevent this, the in memory map is dumped to a
local file"
- + " and the scan is switched to that local file. We can not switch
to the"
- + " minor compacted file because it may have been modified by
iterators. The"
- + " file dumped to the local dir is an exact copy of what was in
memory.",
- "1.3.5"),
+ TSERV_MEMDUMP_DIR("tserver.dir.memdump", "/tmp", PropertyType.PATH, """
+ A long running scan could possibly hold memory that has been minor \
+ compacted. To prevent this, the in memory map is dumped to a local file \
+ and the scan is switched to that local file. We can not switch to the \
+ minor compacted file because it may have been modified by iterators. The
\
+ file dumped to the local dir is an exact copy of what was in memory.
+ """, "1.3.5"),
TSERV_MINTHREADS("tserver.server.threads.minimum", "20", PropertyType.COUNT,
"The minimum number of threads to use to handle incoming requests.",
"1.4.0"),
TSERV_MINTHREADS_TIMEOUT("tserver.server.threads.timeout", "0s",
PropertyType.TIMEDURATION,
@@ -735,12 +757,12 @@ public enum Property {
TSERV_LOG_BUSY_TABLETS_INTERVAL("tserver.log.busy.tablets.interval", "1h",
PropertyType.TIMEDURATION, "Time interval between logging out busy
tablets information.",
"1.10.0"),
- TSERV_HOLD_TIME_SUICIDE("tserver.hold.time.max", "5m",
PropertyType.TIMEDURATION,
- "The maximum time for a tablet server to be in the \"memory full\"
state."
- + " If the tablet server cannot write out memory in this much time,
it will"
- + " assume there is some failure local to its node, and quit. A
value of zero"
- + " is equivalent to forever.",
- "1.4.0"),
+ TSERV_HOLD_TIME_SUICIDE("tserver.hold.time.max", "5m",
PropertyType.TIMEDURATION, """
+ The maximum time for a tablet server to be in the "memory full" state. \
+ If the tablet server cannot write out memory in this much time, it will \
+ assume there is some failure local to its node, and quit. A value of
zero \
+ is equivalent to forever.
+ """, "1.4.0"),
TSERV_WAL_BLOCKSIZE("tserver.wal.blocksize", "0", PropertyType.BYTES,
"The size of the HDFS blocks used to write to the Write-Ahead log. If"
+ " zero, it will be 110% of `tserver.wal.max.size` (that is, try to
use just"
@@ -780,18 +802,18 @@ public enum Property {
"If a thread blocks more than this period of time waiting to get file
permits,"
+ " debugging information will be written.",
"1.9.3"),
- TSERV_SUMMARY_PARTITION_THREADS("tserver.summary.partition.threads", "10",
PropertyType.COUNT,
- "Summary data must be retrieved from RFiles. For a large number of"
- + " RFiles, the files are broken into partitions of 100k files. This
setting"
- + " determines how many of these groups of 100k RFiles will be
processed"
- + " concurrently.",
- "2.0.0"),
- TSERV_SUMMARY_REMOTE_THREADS("tserver.summary.remote.threads", "128",
PropertyType.COUNT,
- "For a partitioned group of 100k RFiles, those files are grouped by"
- + " tablet server. Then a remote tablet server is asked to gather
summary"
- + " data. This setting determines how many concurrent request are
made per"
- + " partition.",
- "2.0.0"),
+ TSERV_SUMMARY_PARTITION_THREADS("tserver.summary.partition.threads", "10",
PropertyType.COUNT, """
+ Summary data must be retrieved from RFiles. For a large number of \
+ RFiles, the files are broken into partitions of 100k files. This setting
\
+ determines how many of these groups of 100k RFiles will be processed \
+ concurrently.
+ """, "2.0.0"),
+ TSERV_SUMMARY_REMOTE_THREADS("tserver.summary.remote.threads", "128",
PropertyType.COUNT, """
+ For a partitioned group of 100k RFiles, those files are grouped by \
+ tablet server. Then a remote tablet server is asked to gather summary \
+ data. This setting determines how many concurrent requests are made per \
+ partition.
+ """, "2.0.0"),
TSERV_SUMMARY_RETRIEVAL_THREADS("tserver.summary.retrieval.threads", "10",
PropertyType.COUNT,
"The number of threads on each tablet server available to retrieve"
+ " summary data, that is not currently in cache, from RFiles.",
@@ -835,12 +857,12 @@ public enum Property {
"The number of threads used to delete RFiles.", "1.3.5"),
GC_SAFEMODE("gc.safemode", "false", PropertyType.BOOLEAN,
"Provides listing of files to be deleted but does not delete any
files.", "2.1.0"),
- GC_USE_FULL_COMPACTION("gc.post.metadata.action", "flush",
PropertyType.GC_POST_ACTION,
- "When the gc runs it can make a lot of changes to the metadata, on
completion, "
- + " to force the changes to be written to disk, the metadata and
root tables can be flushed"
- + " and possibly compacted. Legal values are: compact - which both
flushes and compacts the"
- + " metadata; flush - which flushes only (compactions may be
triggered if required); or none.",
- "1.10.0"),
+ GC_USE_FULL_COMPACTION("gc.post.metadata.action", "flush",
PropertyType.GC_POST_ACTION, """
+ When the gc runs it can make a lot of changes to the metadata. On
completion, \
+ to force the changes to be written to disk, the metadata and root tables
can be flushed \
+ and possibly compacted. Legal values are: compact - which both flushes
and compacts the \
+ metadata; flush - which flushes only (compactions may be triggered if
required); or none.
+ """, "1.10.0"),
// properties that are specific to the monitor server behavior
MONITOR_PREFIX("monitor.", null, PropertyType.PREFIX,
@@ -878,57 +900,59 @@ public enum Property {
"A comma-separate list of allowed SSL protocols.", "1.5.3"),
MONITOR_LOCK_CHECK_INTERVAL("monitor.lock.check.interval", "5s",
PropertyType.TIMEDURATION,
"The amount of time to sleep between checking for the Monitor ZooKeeper
lock.", "1.5.1"),
- MONITOR_RESOURCES_EXTERNAL("monitor.resources.external", "",
PropertyType.JSON,
- "A JSON Map of Strings. Each String should be an HTML tag of an external"
- + " resource (JS or CSS) to be imported by the Monitor. Be sure to
wrap"
- + " with CDATA tags. If this value is set, all of the external
resources"
- + " in the `<head>` tag of the Monitor will be replaced with the
tags set here."
- + " Be sure the jquery tag is first since other scripts will depend
on it."
- + " The resources that are used by default can be seen in"
- + "
`accumulo/server/monitor/src/main/resources/templates/default.ftl`.",
- "2.0.0"),
- MONITOR_FETCH_TIMEOUT("monitor.fetch.timeout", "5m",
PropertyType.TIMEDURATION,
- "The Monitor fetches information for display in a set of background
threads. This property"
- + " controls the amount of time that process should wait before
cancelling any remaining"
- + " tasks to fetch information. These background threads could end
up waiting on servers"
- + " to respond or for scans to complete.",
- "4.0.0"),
- MONITOR_DEAD_LIST_RG_EXCLUSIONS("monitor.dead.server.rg.exclusions", "",
PropertyType.STRING,
- "The Monitor displays information about servers that it believes have
died recently."
- + " This property accepts a comma separated list of resource group
names. If"
- + " the dead servers resource group matches a resource group in this
list,"
- + " then it will be suppressed from the dead servers list in the
monitor.",
- "4.0.0"),
+ MONITOR_RESOURCES_EXTERNAL("monitor.resources.external", "",
PropertyType.JSON, """
+ A JSON Map of Strings. Each String should be an HTML tag of an external \
+ resource (JS or CSS) to be imported by the Monitor. Be sure to wrap \
+ with CDATA tags. If this value is set, all of the external resources \
+ in the `<head>` tag of the Monitor will be replaced with the tags set
here. \
+ Be sure the jquery tag is first since other scripts will depend on it. \
+ The resources that are used by default can be seen in \
+ `accumulo/server/monitor/src/main/resources/templates/default.ftl`.
+ """, "2.0.0"),
+ MONITOR_FETCH_TIMEOUT("monitor.fetch.timeout", "5m",
PropertyType.TIMEDURATION, """
+ The Monitor fetches information for display in a set of background
threads. This property \
+ controls the amount of time that process should wait before cancelling
any remaining \
+ tasks to fetch information. These background threads could end up
waiting on servers \
+ to respond or for scans to complete.
+ """, "4.0.0"),
+ MONITOR_DEAD_LIST_RG_EXCLUSIONS("monitor.dead.server.rg.exclusions", "",
PropertyType.STRING, """
+ The Monitor displays information about servers that it believes have
died recently. \
+ This property accepts a comma separated list of resource group names. If
\
+ the dead servers resource group matches a resource group in this list, \
+ then it will be suppressed from the dead servers list in the monitor.
+ """, "4.0.0"),
MONITOR_ROOT_CONTEXT("monitor.root.context", "/", PropertyType.STRING,
- "The root context path of the monitor application. If this value is set,
all paths for the"
- + " monitor application will be hosted using this context. As an
example, setting this to `/accumulo`"
- + " would cause all `/rest/` endpoints to be hosted at
`/accumulo/rest/*`.",
+ """
+ The root context path of the monitor application. If this value is
set, all paths for the \
+ monitor application will be hosted using this context. As an
example, setting this to `/accumulo` \
+ would cause all `/rest/` endpoints to be hosted at
`/accumulo/rest/*`.
+ """,
"2.1.4"),
// per table properties
- TABLE_PREFIX("table.", null, PropertyType.PREFIX,
- "Properties in this category affect tablet server treatment of tablets,"
- + " but can be configured on a per-table basis. Setting these
properties in"
- + " accumulo.properties will override the default globally for all
tables and not"
- + " any specific table. However, both the default and the global
setting can"
- + " be overridden per table using the table operations API or in the
shell,"
- + " which sets the overridden value in zookeeper. Restarting
accumulo tablet"
- + " servers after setting these properties in accumulo.properties
will cause the"
- + " global setting to take effect. However, you must use the API or
the shell"
- + " to change properties in zookeeper that are set on a table.",
- "1.3.5"),
+ TABLE_PREFIX("table.", null, PropertyType.PREFIX, """
+ Properties in this category affect tablet server treatment of tablets, \
+ but can be configured on a per-table basis. Setting these properties in \
+ accumulo.properties will override the default globally for all tables
and not \
+ any specific table. However, both the default and the global setting can
\
+ be overridden per table using the table operations API or in the shell, \
+ which sets the overridden value in zookeeper. Restarting accumulo tablet
\
+ servers after setting these properties in accumulo.properties will cause
the \
+ global setting to take effect. However, you must use the API or the
shell \
+ to change properties in zookeeper that are set on a table.
+ """, "1.3.5"),
TABLE_ARBITRARY_PROP_PREFIX("table.custom.", null, PropertyType.PREFIX,
"Prefix to be used for user defined arbitrary properties.", "1.7.0"),
TABLE_COMPACTION_INPUT_DROP_CACHE_BEHIND("table.compaction.input.drop.cache",
"ALL",
- PropertyType.DROP_CACHE_SELECTION,
- "FSDataInputStream.setDropBehind(true) is set on compaction input
streams"
- + " for the specified type of files. This tells the DataNode to
advise the OS"
- + " that it does not need to keep blocks for the associated file in
the page cache."
- + " 'ALL', the default, will call setDropBehind on all file types.
'NONE' will call"
- + " setDropBehind on none of the files, which can be useful when a
table is cloned."
- + " 'NON-IMPORT' will call setDropBehind on all file types except
those that are"
- + " bulk imported, which is useful when bulk import files are mapped
to many tablets"
- + " and will be compacted at different times.",
- "2.1.4"),
+ PropertyType.DROP_CACHE_SELECTION, """
+ FSDataInputStream.setDropBehind(true) is set on compaction input
streams \
+ for the specified type of files. This tells the DataNode to advise
the OS \
+ that it does not need to keep blocks for the associated file in the
page cache. \
+ 'ALL', the default, will call setDropBehind on all file types.
'NONE' will call \
+ setDropBehind on none of the files, which can be useful when a table
is cloned. \
+ 'NON-IMPORT' will call setDropBehind on all file types except those
that are \
+ bulk imported, which is useful when bulk import files are mapped to
many tablets \
+ and will be compacted at different times.
+ """, "2.1.4"),
TABLE_MINC_OUTPUT_DROP_CACHE("table.compaction.minor.output.drop.cache",
"false",
PropertyType.BOOLEAN,
"Setting this property to true will call"
@@ -948,14 +972,17 @@ public enum Property {
TABLE_MAX_END_ROW_SIZE("table.split.endrow.size.max", "10k",
PropertyType.BYTES,
"Maximum size of end row.", "1.7.0"),
TABLE_MINC_COMPACT_MAXAGE("table.compaction.minor.age", "10m",
PropertyType.TIMEDURATION,
- "Key values written to a tablet are temporarily stored in a per tablet
in memory map. When "
- + "the age of the oldest key value in a tablets in memory map
exceeds this configuration, then "
- + "a minor compaction may be initiated. This determines the maximum
amount of time new data can "
- + "be buffered in memory before being flushed to a file. This is
useful when using scan servers "
- + "in conjunction with the property " +
SSERV_CACHED_TABLET_METADATA_EXPIRATION.getKey()
- + ". These two properties together can be used to control that
amount of time it takes for a scan "
- + "server to see a write to a tablet server. The default value of
this property is set to such a "
- + "high value that is should never cause a minor compaction.",
+ """
+ Key values written to a tablet are temporarily stored in a per
tablet in memory map. When \
+ the age of the oldest key value in a tablets in memory map exceeds
this configuration, then \
+ a minor compaction may be initiated. This determines the maximum
amount of time new data can \
+ be buffered in memory before being flushed to a file. This is
useful when using scan servers \
+ in conjunction with the property %s. \
+ These two properties together can be used to control that amount of
time it takes for a scan \
+ server to see a write to a tablet server. The default value of this
property is set to such a \
+ high value that it should never cause a minor compaction.
+ """
+ .formatted(SSERV_CACHED_TABLET_METADATA_EXPIRATION.getKey()),
"4.0.0"),
TABLE_COMPACTION_DISPATCHER("table.compaction.dispatcher",
SimpleCompactionDispatcher.class.getName(), PropertyType.CLASSNAME,
@@ -965,11 +992,13 @@ public enum Property {
"Options for the table compaction dispatcher.", "2.1.0"),
TABLE_COMPACTION_SELECTION_EXPIRATION("table.compaction.selection.expiration.ms",
"2m",
PropertyType.TIMEDURATION,
- "User compactions select files and are then queued for compaction,
preventing these files "
- + "from being used in system compactions. This timeout allows
system compactions to cancel "
- + "the hold queued user compactions have on files, when its queued
for more than the "
- + "specified time. If a system compaction cancels a hold and runs,
then the user compaction"
- + " can reselect and hold files after the system compaction runs.",
+ """
+ User compactions select files and are then queued for compaction,
preventing these files \
+ from being used in system compactions. This timeout allows system
compactions to cancel \
+ the hold queued user compactions have on files, when it's queued for
more than the \
+ specified time. If a system compaction cancels a hold and runs,
then the user compaction \
+ can reselect and hold files after the system compaction runs.
+ """,
"2.1.0"),
TABLE_COMPACTION_CONFIGURER("table.compaction.configurer", "",
PropertyType.CLASSNAME,
"A plugin that can dynamically configure compaction output files based
on input files.",
@@ -993,11 +1022,15 @@ public enum Property {
"Sensitive properties related to on-disk file encryption.", "2.1.0"),
TABLE_SCAN_DISPATCHER("table.scan.dispatcher",
SimpleScanDispatcher.class.getName(),
PropertyType.CLASSNAME,
- "This class is used to dynamically dispatch scans to configured scan
executors. Configured "
- + "classes must implement {% jlink " +
ScanDispatcher.class.getName() + " %}. See "
- + "[scan executors]({% durl administration/scan-executors %}) for an
overview of why"
- + " and how to use this property. This property is ignored for the
root and metadata"
- + " table. The metadata table always dispatches to a scan executor
named `meta`.",
+ """
+ This class is used to dynamically dispatch scans to configured scan
executors. Configured \
+ classes must implement %s. See \
+ [scan executors](%s) for an overview of why \
+ and how to use this property. This property is ignored for the root
and metadata \
+ table. The metadata table always dispatches to a scan executor
named `meta`.
+ """
+ .formatted("{% jlink " + ScanDispatcher.class.getName() + " %}",
+ "{% durl administration/scan-executors %}"),
"2.0.0"),
TABLE_SCAN_DISPATCHER_OPTS("table.scan.dispatcher.opts.", null,
PropertyType.PREFIX,
"Options for the table scan dispatcher.", "2.0.0"),
@@ -1038,43 +1071,41 @@ public enum Property {
"The number of replicas for a table's RFiles in HDFS. When set to 0,
HDFS"
+ " defaults are used.",
"1.3.5"),
- TABLE_FILE_MAX("table.file.max", "15", PropertyType.COUNT,
- "This property is used to signal to the compaction planner that it
should be more "
- + "aggressive for compacting tablets that exceed this limit. The "
- + "RatioBasedCompactionPlanner will lower the compaction ratio and
increase the "
- + "priority for tablets that exceed this limit. When adjusting this
property you may "
- + "want to consider adjusting table.compaction.major.ratio also.
Setting this property "
- + "to 0 will make it default to tserver.scan.files.open.max-1, this
will prevent a tablet"
- + " from having more RFiles than can be opened by a scan.",
- "1.4.0"),
- TABLE_FILE_PAUSE("table.file.pause", "100", PropertyType.COUNT,
- "When a tablet has more than this number of files, bulk imports and
minor compactions "
- + "will wait until the tablet has less files before proceeding.
This will cause back "
- + "pressure on bulk imports and writes to tables when compactions
are not keeping up. "
- + "Only the number of files a tablet currently has is considered for
pausing, the "
- + "number of files a bulk import will add is not considered. This
means a bulk import "
- + "can surge above this limit once causing future bulk imports or
minor compactions to "
- + "pause until compactions can catch up. This property plus "
- + TABLE_BULK_MAX_TABLET_FILES.getKey()
- + " determines the total number of files a tablet could temporarily
surge to based on bulk "
- + "imports. Ideally this property would be set higher than " +
TABLE_FILE_MAX.getKey()
- + " so that compactions are more aggressive prior to reaching the
pause point. Value of 0 is "
- + "unlimited.",
- "4.0.0"),
- TABLE_MERGE_FILE_MAX("table.merge.file.max", "10000", PropertyType.COUNT,
- "The maximum number of files that a merge operation will process.
Before "
- + "merging a sum of the number of files in the merge range is
computed and if it "
- + "exceeds this configuration then the merge will error and fail.
For example if "
- + "there are 100 tablets each having 10 files in the merge range,
then the sum would "
- + "be 1000 and the merge will only proceed if this property is
greater than 1000.",
- "4.0.0"),
- TABLE_FILE_SUMMARY_MAX_SIZE("table.file.summary.maxSize", "256k",
PropertyType.BYTES,
- "The maximum size summary that will be stored. The number of RFiles that"
- + " had summary data exceeding this threshold is reported by"
- + " Summary.getFileStatistics().getLarge(). When adjusting this
consider the"
- + " expected number RFiles with summaries on each tablet server and
the"
- + " summary cache size.",
- "2.0.0"),
+ TABLE_FILE_MAX("table.file.max", "15", PropertyType.COUNT, """
+ This property is used to signal to the compaction planner that it should
be more \
+ aggressive for compacting tablets that exceed this limit. The \
+ RatioBasedCompactionPlanner will lower the compaction ratio and increase
the \
+ priority for tablets that exceed this limit. When adjusting this
property you may \
+ want to consider adjusting table.compaction.major.ratio also. Setting
this property \
+ to 0 will make it default to tserver.scan.files.open.max-1, this will
prevent a tablet \
+ from having more RFiles than can be opened by a scan.
+ """, "1.4.0"),
+ TABLE_FILE_PAUSE("table.file.pause", "100", PropertyType.COUNT, """
+ When a tablet has more than this number of files, bulk imports and minor
compactions \
+ will wait until the tablet has less files before proceeding. This will
cause back \
+ pressure on bulk imports and writes to tables when compactions are not
keeping up. \
+ Only the number of files a tablet currently has is considered for
pausing, the \
+ number of files a bulk import will add is not considered. This means a
bulk import \
+ can surge above this limit once causing future bulk imports or minor
compactions to \
+ pause until compactions can catch up. This property plus %s determines
the total \
+ number of files a tablet could temporarily surge to based on bulk
imports. Ideally \
+ this property would be set higher than %s so that compactions are more
aggressive \
+ prior to reaching the pause point. Value of 0 is unlimited.
+ """.formatted(TABLE_BULK_MAX_TABLET_FILES.getKey(),
TABLE_FILE_MAX.getKey()), "4.0.0"),
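
A quick worked example of the surge arithmetic described above (plain Java; both
values are hypothetical stand-ins for table.file.pause and the
TABLE_BULK_MAX_TABLET_FILES limit):

    // Worst case: a tablet sitting just under the pause limit accepts one more bulk import.
    int pauseLimit = 100;         // table.file.pause, the default shown above
    int bulkMaxTabletFiles = 100; // TABLE_BULK_MAX_TABLET_FILES value, assumed here
    int worstCase = pauseLimit + bulkMaxTabletFiles; // temporary per-tablet surge ceiling
    System.out.println("a tablet could temporarily reach " + worstCase + " files");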
+  TABLE_MERGE_FILE_MAX("table.merge.file.max", "10000", PropertyType.COUNT, """
+      The maximum number of files that a merge operation will process. Before \
+      merging, the sum of the number of files in the merge range is computed, and if it \
+      exceeds this configuration then the merge will error and fail. For example, if \
+      there are 100 tablets each having 10 files in the merge range, then the sum would \
+      be 1000 and the merge will only proceed if this property is greater than 1000.
+      """, "4.0.0"),
+  TABLE_FILE_SUMMARY_MAX_SIZE("table.file.summary.maxSize", "256k", PropertyType.BYTES, """
+      The maximum size summary that will be stored. The number of RFiles that \
+      had summary data exceeding this threshold is reported by \
+      Summary.getFileStatistics().getLarge(). When adjusting this, consider the \
+      expected number of RFiles with summaries on each tablet server and the \
+      summary cache size.
+      """, "2.0.0"),
TABLE_BLOOM_ENABLED("table.bloom.enabled", "false", PropertyType.BOOLEAN,
"Use bloom filters on this table.", "1.3.5"),
  TABLE_BLOOM_LOAD_THRESHOLD("table.bloom.load.threshold", "1", PropertyType.COUNT,
@@ -1087,82 +1118,87 @@ public enum Property {
  TABLE_BLOOM_ERRORRATE("table.bloom.error.rate", "0.5%", PropertyType.FRACTION,
      "Bloom filter error rate.", "1.3.5"),
  TABLE_BLOOM_KEY_FUNCTOR("table.bloom.key.functor",
-      "org.apache.accumulo.core.file.keyfunctor.RowFunctor", PropertyType.CLASSNAME,
-      "A function that can transform the key prior to insertion and check of"
-          + " bloom filter. org.apache.accumulo.core.file.keyfunctor.RowFunctor,"
-          + " org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor, and"
-          + " org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor are"
-          + " allowable values. One can extend any of the above mentioned classes to"
-          + " perform specialized parsing of the key.",
-      "1.3.5"),
+      "org.apache.accumulo.core.file.keyfunctor.RowFunctor", PropertyType.CLASSNAME, """
+      A function that can transform the key prior to insertion and check of \
+      bloom filter. org.apache.accumulo.core.file.keyfunctor.RowFunctor, \
+      org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor, and \
+      org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor are \
+      allowable values. One can extend any of the above mentioned classes to \
+      perform specialized parsing of the key.
+      """, "1.3.5"),
TABLE_BLOOM_HASHTYPE("table.bloom.hash.type", "murmur", PropertyType.STRING,
"The bloom filter hash type.", "1.3.5"),
  TABLE_BULK_SKIP_THRESHOLD("table.bulk.metadata.skip.distance", "0", PropertyType.COUNT,
- "When performing bulk v2 imports to a table, the Manager iterates over
the tables metadata"
- + " tablets sequentially. When importing files into a small table or
into all or a majority"
- + " of tablets of a large table then the tablet metadata information
for most tablets will be needed."
- + " However, when importing files into a small number of
non-contiguous tablets in a large table, then"
- + " the Manager will look at each tablets metadata when it could be
skipped. The value of this"
- + " property tells the Manager if, and when, it should set up a new
scanner over the metadata"
- + " table instead of just iterating over tablet metadata to find the
matching tablet. Setting up"
- + " a new scanner is analogous to performing a seek in an iterator,
but it has a cost. A value of zero (default) disables"
- + " this feature. A non-zero value enables this feature and the
Manager will setup a new scanner"
- + " when the tablet metadata distance is above the supplied value.",
+ """
+ When performing bulk v2 imports to a table, the Manager iterates
over the tables metadata \
+ tablets sequentially. When importing files into a small table or
into all or a majority \
+ of tablets of a large table then the tablet metadata information for
most tablets will be needed. \
+ However, when importing files into a small number of non-contiguous
tablets in a large table, then \
+ the Manager will look at each tablets metadata when it could be
skipped. The value of this \
+ property tells the Manager if, and when, it should set up a new
scanner over the metadata \
+ table instead of just iterating over tablet metadata to find the
matching tablet. Setting up \
+ a new scanner is analogous to performing a seek in an iterator, but
it has a cost. A value of zero (default) disables \
+ this feature. A non-zero value enables this feature and the Manager
will setup a new scanner \
+ when the tablet metadata distance is above the supplied value.
+ """,
"2.1.4"),
-  TABLE_DURABILITY("table.durability", "sync", PropertyType.DURABILITY,
-      "The durability used to write to the write-ahead log. Legal values are:"
-          + " none, which skips the write-ahead log; log, which sends the data to the"
-          + " write-ahead log, but does nothing to make it durable; flush, which pushes"
-          + " data to the file system; and sync, which ensures the data is written to disk.",
-      "1.7.0"),
+  TABLE_DURABILITY("table.durability", "sync", PropertyType.DURABILITY, """
+      The durability used to write to the write-ahead log. Legal values are: \
+      none, which skips the write-ahead log; log, which sends the data to the \
+      write-ahead log, but does nothing to make it durable; flush, which pushes \
+      data to the file system; and sync, which ensures the data is written to disk.
+      """, "1.7.0"),
-  TABLE_FAILURES_IGNORE("table.failures.ignore", "false", PropertyType.BOOLEAN,
-      "If you want queries for your table to hang or fail when data is missing"
-          + " from the system, then set this to false. When this set to true missing"
-          + " data will be reported but queries will still run possibly returning a"
-          + " subset of the data.",
-      "1.3.5"),
+  TABLE_FAILURES_IGNORE("table.failures.ignore", "false", PropertyType.BOOLEAN, """
+      If you want queries for your table to hang or fail when data is missing \
+      from the system, then set this to false. When this is set to true, missing \
+      data will be reported but queries will still run, possibly returning a \
+      subset of the data.
+      """, "1.3.5"),
  TABLE_DEFAULT_SCANTIME_VISIBILITY("table.security.scan.visibility.default", "",
-      PropertyType.STRING,
-      "The security label that will be assumed at scan time if an entry does"
-          + " not have a visibility expression.\n"
-          + "Note: An empty security label is displayed as []. The scan results"
-          + " will show an empty visibility even if the visibility from this"
-          + " setting is applied to the entry.\n"
-          + "CAUTION: If a particular key has an empty security label AND its"
-          + " table's default visibility is also empty, access will ALWAYS be"
-          + " granted for users with permission to that table. Additionally, if this"
-          + " field is changed, all existing data with an empty visibility label"
-          + " will be interpreted with the new label on the next scan.",
-      "1.3.5"),
+      PropertyType.STRING, """
+      The security label that will be assumed at scan time if an entry does \
+      not have a visibility expression.
+      Note: An empty security label is displayed as []. The scan results \
+      will show an empty visibility even if the visibility from this \
+      setting is applied to the entry.
+      CAUTION: If a particular key has an empty security label AND its \
+      table's default visibility is also empty, access will ALWAYS be \
+      granted for users with permission to that table. Additionally, if this \
+      field is changed, all existing data with an empty visibility label \
+      will be interpreted with the new label on the next scan.
+      """, "1.3.5"),
  TABLE_LOCALITY_GROUPS("table.groups.enabled", "", PropertyType.STRING,
      "A comma separated list of locality group names to enable for this table.", "1.3.5"),
-  TABLE_CONSTRAINT_PREFIX("table.constraint.", null, PropertyType.PREFIX,
-      "Properties in this category are per-table properties that add"
-          + " constraints to a table. These properties start with the category"
-          + " prefix, followed by a number, and their values correspond to a fully"
-          + " qualified Java class that implements the Constraint interface.\nFor example:\n"
-          + "table.constraint.1 = org.apache.accumulo.core.constraints.MyCustomConstraint\n"
-          + "and:\n table.constraint.2 = my.package.constraints.MySecondConstraint.",
-      "1.3.5"),
+  TABLE_CONSTRAINT_PREFIX("table.constraint.", null, PropertyType.PREFIX, """
+      Properties in this category are per-table properties that add \
+      constraints to a table. These properties start with the category \
+      prefix, followed by a number, and their values correspond to a fully \
+      qualified Java class that implements the Constraint interface.
+      For example:
+      table.constraint.1 = org.apache.accumulo.core.constraints.MyCustomConstraint
+      and:
+      table.constraint.2 = my.package.constraints.MySecondConstraint.
+      """, "1.3.5"),
  TABLE_INDEXCACHE_ENABLED("table.cache.index.enable", "true", PropertyType.BOOLEAN,
      "Determines whether index block cache is enabled for a table.", "1.3.5"),
  TABLE_BLOCKCACHE_ENABLED("table.cache.block.enable", "false", PropertyType.BOOLEAN,
      "Determines whether data block cache is enabled for a table.", "1.3.5"),
-  TABLE_ITERATOR_PREFIX("table.iterator.", null, PropertyType.PREFIX,
-      "Properties in this category specify iterators that are applied at"
-          + " various stages (scopes) of interaction with a table. These properties"
-          + " start with the category prefix, followed by a scope (minc, majc, scan,"
-          + " etc.), followed by a period, followed by a name, as in"
-          + " table.iterator.scan.vers, or table.iterator.scan.custom. The values for"
-          + " these properties are a number indicating the ordering in which it is"
-          + " applied, and a class name such as:\n"
-          + "table.iterator.scan.vers = 10,org.apache.accumulo.core.iterators.VersioningIterator\n"
-          + "These iterators can take options if additional properties are set that"
-          + " look like this property, but are suffixed with a period, followed by 'opt'"
-          + " followed by another period, and a property name.\n"
-          + "For example, table.iterator.minc.vers.opt.maxVersions = 3.",
-      "1.3.5"),
+  TABLE_ITERATOR_PREFIX("table.iterator.", null, PropertyType.PREFIX, """
+      Properties in this category specify iterators that are applied at \
+      various stages (scopes) of interaction with a table. These properties \
+      start with the category prefix, followed by a scope (minc, majc, scan, \
+      etc.), followed by a period, followed by a name, as in \
+      table.iterator.scan.vers, or table.iterator.scan.custom. The values for \
+      these properties are a number indicating the ordering in which it is \
+      applied, and a class name such as:
+      table.iterator.scan.vers = 10,org.apache.accumulo.core.iterators.VersioningIterator
+      These iterators can take options if additional properties are set that \
+      look like this property, but are suffixed with a period, followed by 'opt' \
+      followed by another period, and a property name.
+      For example:
+      table.iterator.minc.vers.opt.maxVersions = 3.
+      """, "1.3.5"),
  TABLE_ITERATOR_SCAN_PREFIX(TABLE_ITERATOR_PREFIX.getKey() + IteratorScope.scan.name() + ".", null,
      PropertyType.PREFIX, "Convenience prefix to find options for the scan iterator scope.",
      "1.5.2"),
@@ -1172,32 +1208,32 @@ public enum Property {
  TABLE_ITERATOR_MAJC_PREFIX(TABLE_ITERATOR_PREFIX.getKey() + IteratorScope.majc.name() + ".", null,
      PropertyType.PREFIX, "Convenience prefix to find options for the majc iterator scope.",
      "1.5.2"),
-  TABLE_LOCALITY_GROUP_PREFIX("table.group.", null, PropertyType.PREFIX,
-      "Properties in this category are per-table properties that define"
-          + " locality groups in a table. These properties start with the category"
-          + " prefix, followed by a name, followed by a period, and followed by a"
-          + " property for that group.\n"
-          + "For example table.group.group1=x,y,z sets the column families for a"
-          + " group called group1. Once configured, group1 can be enabled by adding"
-          + " it to the list of groups in the " + TABLE_LOCALITY_GROUPS.getKey() + " property.\n"
-          + "Additional group options may be specified for a named group by setting"
-          + " `table.group.<name>.opt.<key>=<value>`.",
-      "1.3.5"),
+  TABLE_LOCALITY_GROUP_PREFIX("table.group.", null, PropertyType.PREFIX, """
+      Properties in this category are per-table properties that define \
+      locality groups in a table. These properties start with the category \
+      prefix, followed by a name, followed by a period, and followed by a \
+      property for that group.
+      For example table.group.group1=x,y,z sets the column families for a \
+      group called group1. Once configured, group1 can be enabled by adding \
+      it to the list of groups in the %s property.
+      Additional group options may be specified for a named group by setting \
+      `table.group.<name>.opt.<key>=<value>`.
+      """.formatted(TABLE_LOCALITY_GROUPS.getKey()), "1.3.5"),
  TABLE_FORMATTER_CLASS("table.formatter", DefaultFormatter.class.getName(), PropertyType.STRING,
      "The Formatter class to apply on results in the shell.", "1.4.0"),
  TABLE_CLASSLOADER_CONTEXT("table.class.loader.context", "", PropertyType.STRING,
      "The context to use for loading per-table resources, such as iterators"
          + " from the configured factory in `general.context.class.loader.factory`.",
      "2.1.0"),
- TABLE_SAMPLER("table.sampler", "", PropertyType.CLASSNAME,
- "The name of a class that implements org.apache.accumulo.core.Sampler."
- + " Setting this option enables storing a sample of data which can
be"
- + " scanned. Always having a current sample can useful for query
optimization"
- + " and data comprehension. After enabling sampling for an existing
table,"
- + " a compaction is needed to compute the sample for existing data.
The"
- + " compact command in the shell has an option to only compact
RFiles without"
- + " sample data.",
- "1.8.0"),
+ TABLE_SAMPLER("table.sampler", "", PropertyType.CLASSNAME, """
+ The name of a class that implements org.apache.accumulo.core.Sampler. \
+ Setting this option enables storing a sample of data which can be \
+ scanned. Always having a current sample can useful for query
optimization \
+ and data comprehension. After enabling sampling for an existing table, \
+ a compaction is needed to compute the sample for existing data. The \
+ compact command in the shell has an option to only compact RFiles
without \
+ sample data.
+ """, "1.8.0"),
  TABLE_SAMPLER_OPTS("table.sampler.opt.", null, PropertyType.PREFIX,
      "The property is used to set options for a sampler. If a sample had two"
          + " options like hasher and modulous, then the two properties"
@@ -1210,32 +1246,32 @@ public enum Property {
+ " to other tablet servers.",
"1.8.0"),
  TABLE_SUMMARIZER_PREFIX("table.summarizer.", null, PropertyType.PREFIX,
-      "Prefix for configuring summarizers for a table. Using this prefix"
-          + " multiple summarizers can be configured with options for each one. Each"
-          + " summarizer configured should have a unique id, this id can be anything."
-          + " To add a summarizer set "
-          + "`table.summarizer.<unique id>=<summarizer class name>.` If the summarizer has options"
-          + ", then for each option set `table.summarizer.<unique id>.opt.<key>=<value>`.",
+      """
+          Prefix for configuring summarizers for a table. Using this prefix, \
+          multiple summarizers can be configured with options for each one. Each \
+          summarizer configured should have a unique id; this id can be anything. \
+          To add a summarizer, set `table.summarizer.<unique id>=<summarizer class name>.` \
+          If the summarizer has options, then for each option set `table.summarizer.<unique id>.opt.<key>=<value>`.
+          """,
      "2.0.0"),
@Experimental
  TABLE_DELETE_BEHAVIOR("table.delete.behavior",
-      DeletingIterator.Behavior.PROCESS.name().toLowerCase(), PropertyType.STRING,
-      "This determines what action to take when a delete marker is seen."
-          + " Valid values are `process` and `fail` with `process` being the default. When set to "
-          + "`process`, deletes will suppress data. When set to `fail`, any deletes seen will cause"
-          + " an exception. The purpose of `fail` is to support tables that never delete data and"
-          + " need fast seeks within the timestamp range of a column. When setting this to fail, "
-          + "also consider configuring the `" + NoDeleteConstraint.class.getName() + "` "
-          + "constraint.",
-      "2.0.0"),
-  TABLE_ENABLE_ERASURE_CODES("table.file.ec", "inherit", PropertyType.EC,
-      "This determines if Accumulo will manage erasure codes on a table."
-          + " When setting this to 'enable' must also set erasure.code.policy and that policy will "
-          + "always be used regardless of DFS directory settings. When set to 'disable', replication "
-          + "will always be used regardless of DFS directory settings. When set to 'inherit' "
-          + "the settings from the directory in dfs will be used. Enabling erasure coding on a volume "
-          + "that does not support it is a noop.",
-      "2.1.4"),
+      DeletingIterator.Behavior.PROCESS.name().toLowerCase(), PropertyType.STRING, """
+      This determines what action to take when a delete marker is seen. \
+      Valid values are `process` and `fail` with `process` being the default. When set to \
+      `process`, deletes will suppress data. When set to `fail`, any deletes seen will cause \
+      an exception. The purpose of `fail` is to support tables that never delete data and \
+      need fast seeks within the timestamp range of a column. When setting this to fail, \
+      also consider configuring the `%s` constraint.
+      """.formatted(NoDeleteConstraint.class.getName()), "2.0.0"),
+  TABLE_ENABLE_ERASURE_CODES("table.file.ec", "inherit", PropertyType.EC, """
+      This determines if Accumulo will manage erasure codes on a table. \
+      When setting this to 'enable', erasure.code.policy must also be set, and that policy will \
+      always be used regardless of DFS directory settings. When set to 'disable', replication \
+      will always be used regardless of DFS directory settings. When set to 'inherit', \
+      the settings from the directory in DFS will be used. Enabling erasure coding on a volume \
+      that does not support it is a noop.
+      """, "2.1.4"),
  TABLE_ERASURE_CODE_POLICY("table.file.ec.policy", "", PropertyType.STRING,
      "The name of the erasure code policy to be used. Policy must be available and enabled in hdfs. "
@@ -1271,12 +1307,14 @@ public enum Property {
"2.1.4"),
  COMPACTOR_FAILURE_BACKOFF_INTERVAL("compactor.failure.backoff.interval", "0",
      PropertyType.TIMEDURATION,
-      "The time basis for computing the wait time for compaction failure backoff. A value of zero disables"
-          + " the backoff feature. When a non-zero value is supplied, then after compactor.failure.backoff.threshold"
-          + " failures have occurred, the compactor will wait compactor.failure.backoff.interval * the number of"
-          + " failures seconds before executing the next compaction. For example, if this value is 10s, then after"
-          + " three failures the Compactor will wait 30s before starting the next compaction. If the compaction fails"
-          + " again, then it will wait 40s before starting the next compaction.",
+      """
+          The time basis for computing the wait time for compaction failure backoff. A value of zero disables \
+          the backoff feature. When a non-zero value is supplied, then after compactor.failure.backoff.threshold \
+          failures have occurred, the compactor will wait compactor.failure.backoff.interval * the number of \
+          failures seconds before executing the next compaction. For example, if this value is 10s, then after \
+          three failures the Compactor will wait 30s before starting the next compaction. If the compaction fails \
+          again, then it will wait 40s before starting the next compaction.
+          """,
      "2.1.4"),
COMPACTOR_FAILURE_BACKOFF_RESET("compactor.failure.backoff.reset", "10m",
PropertyType.TIMEDURATION,
diff --git a/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java b/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java
index 1a4b25d7b4..d9f7c35476 100644
--- a/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java
@@ -71,8 +71,8 @@ public class PropertyTest {
"Description not set for " + prop);
// make sure property description ends with a period
-      assertTrue(prop.getDescription().endsWith("."),
-          "Property: " + prop.getKey() + " description does not end with period.");
+      assertTrue(prop.getDescription().trim().endsWith("."), "Property: " + prop.getKey()
+          + " description does not end with period. Description = " + prop.getDescription());
// make sure property starts with valid prefix
boolean containsValidPrefix = false;
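
The switch to trim() above accounts for text blocks: when the closing delimiter sits
on its own line, the resulting string ends with a newline, so a bare endsWith(".")
would now fail for every converted description. A minimal demonstration (plain
Java 15+):

    String desc = """
        Ends with a period.
        """;
    System.out.println(desc.endsWith("."));        // false: trailing newline
    System.out.println(desc.trim().endsWith(".")); // true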