This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 39d7da3713 HDDS-12424. Allow config key to include config group prefix (#7979)
39d7da3713 is described below

commit 39d7da371341185beca69543d4cc91d6249d9734
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Fri Feb 28 13:30:52 2025 +0100

    HDDS-12424. Allow config key to include config group prefix (#7979)
---
 .../apache/hadoop/hdds/scm/OzoneClientConfig.java  | 63 ++++++++++------------
 .../java/org/apache/hadoop/hdds/scm/ScmConfig.java |  8 +--
 .../hadoop/hdds/conf/ConfigFileGenerator.java      | 14 ++---
 .../hdds/conf/ConfigurationReflectionUtil.java     | 57 +++++++++++++-------
 .../hadoop/hdds/conf/ConfigurationSource.java      | 14 +----
 .../hadoop/hdds/conf/ConfigurationTarget.java      |  5 +-
 .../hadoop/hdds/conf/ConfigurationExample.java     | 14 ++++-
 .../hadoop/hdds/conf/TestConfigFileGenerator.java  |  6 +--
 .../hdds/conf/TestConfigurationReflectionUtil.java |  8 +--
 .../hadoop/hdds/conf/TestConfigurationSource.java  | 29 ++++++++--
 .../hadoop/hdds/conf/TestReconfigurableConfig.java |  2 +-
 .../ozone/insight/ConfigurationSubCommand.java     |  6 +--
 12 files changed, 125 insertions(+), 101 deletions(-)
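
A minimal illustrative sketch of the new behavior (the class and property names below are invented for illustration and are not part of this patch): a @Config key may now be written either relative to its @ConfigGroup prefix or as the full property name that already includes the prefix. ConfigurationReflectionUtil.getFullKey() prepends the prefix only when the key does not already start with it, so neither form yields a doubled name, and the annotation processor no longer rejects prefixed keys.

    package org.apache.hadoop.hdds.conf.example;

    import org.apache.hadoop.hdds.conf.Config;
    import org.apache.hadoop.hdds.conf.ConfigGroup;
    import org.apache.hadoop.hdds.conf.ConfigTag;

    /** Hypothetical config class, for illustration only. */
    @ConfigGroup(prefix = "ozone.client")
    public class ExampleClientConfig {

      // Key written relative to the group prefix; resolved as
      // "ozone.client.example.mode".
      @Config(key = "example.mode", defaultValue = "auto",
          description = "Hypothetical property, for illustration only.",
          tags = ConfigTag.CLIENT)
      private String mode;

      // Key written with the group prefix already included; previously the
      // annotation processor rejected this, now it resolves to
      // "ozone.client.example.limit" without doubling the prefix.
      @Config(key = "ozone.client.example.limit", defaultValue = "3",
          description = "Hypothetical property whose key already carries the group prefix.",
          tags = ConfigTag.CLIENT)
      private String limit;
    }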

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
index 5a1415268c..c45a257b05 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.hdds.scm;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
@@ -37,7 +36,7 @@ public class OzoneClientConfig {
 
  private static final Logger LOG = LoggerFactory.getLogger(OzoneClientConfig.class);
 
-  @Config(key = "stream.buffer.flush.size",
+  @Config(key = "ozone.client.stream.buffer.flush.size",
       defaultValue = "16MB",
       type = ConfigType.SIZE,
       description = "Size which determines at what buffer position a partial "
@@ -46,21 +45,21 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private long streamBufferFlushSize = 16 * 1024 * 1024;
 
-  @Config(key = "stream.buffer.size",
+  @Config(key = "ozone.client.stream.buffer.size",
       defaultValue = "4MB",
       type = ConfigType.SIZE,
       description = "The size of chunks the client will send to the server",
       tags = ConfigTag.CLIENT)
   private int streamBufferSize = 4 * 1024 * 1024;
 
-  @Config(key = "datastream.buffer.flush.size",
+  @Config(key = "ozone.client.datastream.buffer.flush.size",
       defaultValue = "16MB",
       type = ConfigType.SIZE,
       description = "The boundary at which putBlock is executed",
       tags = ConfigTag.CLIENT)
   private long dataStreamBufferFlushSize = 16 * 1024 * 1024;
 
-  @Config(key = "datastream.min.packet.size",
+  @Config(key = "ozone.client.datastream.min.packet.size",
       defaultValue = "1MB",
       type = ConfigType.SIZE,
       description = "The maximum size of the ByteBuffer "
@@ -68,7 +67,7 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private int dataStreamMinPacketSize = 1024 * 1024;
 
-  @Config(key = "datastream.window.size",
+  @Config(key = "ozone.client.datastream.window.size",
       defaultValue = "64MB",
       type = ConfigType.SIZE,
       description = "Maximum size of BufferList(used for retry) size per " +
@@ -76,7 +75,7 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private long streamWindowSize = 64 * 1024 * 1024;
 
-  @Config(key = "datastream.pipeline.mode",
+  @Config(key = "ozone.client.datastream.pipeline.mode",
       defaultValue = "true",
       description = "Streaming write support both pipeline mode(datanode1->" +
           "datanode2->datanode3) and star mode(datanode1->datanode2, " +
@@ -84,7 +83,7 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private boolean datastreamPipelineMode = true;
 
-  @Config(key = "stream.buffer.increment",
+  @Config(key = "ozone.client.stream.buffer.increment",
       defaultValue = "0B",
       type = ConfigType.SIZE,
       description = "Buffer (defined by ozone.client.stream.buffer.size) "
@@ -96,7 +95,7 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private int bufferIncrement = 0;
 
-  @Config(key = "stream.buffer.flush.delay",
+  @Config(key = "ozone.client.stream.buffer.flush.delay",
       defaultValue = "true",
       description = "Default true, when call flush() and determine whether "
           + "the data in the current buffer is greater than ozone.client"
@@ -105,7 +104,7 @@ public class OzoneClientConfig {
           + "to false.", tags = ConfigTag.CLIENT)
   private boolean streamBufferFlushDelay = true;
 
-  @Config(key = "stream.buffer.max.size",
+  @Config(key = "ozone.client.stream.buffer.max.size",
       defaultValue = "32MB",
       type = ConfigType.SIZE,
       description = "Size which determines at what buffer position write call"
@@ -114,14 +113,14 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private long streamBufferMaxSize = 32 * 1024 * 1024;
 
-  @Config(key = "max.retries",
+  @Config(key = "ozone.client.max.retries",
       defaultValue = "5",
       description = "Maximum number of retries by Ozone Client on "
           + "encountering exception while writing a key",
       tags = ConfigTag.CLIENT)
   private int maxRetryCount = 5;
 
-  @Config(key = "retry.interval",
+  @Config(key = "ozone.client.retry.interval",
       defaultValue = "0",
       description =
           "Indicates the time duration a client will wait before retrying a "
@@ -130,14 +129,14 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private int retryInterval = 0;
 
-  @Config(key = "read.max.retries",
+  @Config(key = "ozone.client.read.max.retries",
       defaultValue = "3",
       description = "Maximum number of retries by Ozone Client on "
           + "encountering connectivity exception when reading a key.",
       tags = ConfigTag.CLIENT)
   private int maxReadRetryCount = 3;
 
-  @Config(key = "read.retry.interval",
+  @Config(key = "ozone.client.read.retry.interval",
       defaultValue = "1",
       description =
           "Indicates the time duration in seconds a client will wait "
@@ -147,7 +146,7 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private int readRetryInterval = 1;
 
-  @Config(key = "checksum.type",
+  @Config(key = "ozone.client.checksum.type",
       defaultValue = "CRC32",
       description = "The checksum type [NONE/ CRC32/ CRC32C/ SHA256/ MD5] "
           + "determines which algorithm would be used to compute checksum for "
@@ -155,7 +154,7 @@ public class OzoneClientConfig {
       tags = { ConfigTag.CLIENT, ConfigTag.CRYPTO_COMPLIANCE })
   private String checksumType = ChecksumType.CRC32.name();
 
-  @Config(key = "bytes.per.checksum",
+  @Config(key = "ozone.client.bytes.per.checksum",
       defaultValue = "16KB",
       type = ConfigType.SIZE,
       description = "Checksum will be computed for every bytes per checksum "
@@ -164,28 +163,28 @@ public class OzoneClientConfig {
       tags = { ConfigTag.CLIENT, ConfigTag.CRYPTO_COMPLIANCE })
   private int bytesPerChecksum = 16 * 1024;
 
-  @Config(key = "verify.checksum",
+  @Config(key = "ozone.client.verify.checksum",
       defaultValue = "true",
       description = "Ozone client to verify checksum of the checksum "
           + "blocksize data.",
       tags = ConfigTag.CLIENT)
   private boolean checksumVerify = true;
 
-  @Config(key = "max.ec.stripe.write.retries",
+  @Config(key = "ozone.client.max.ec.stripe.write.retries",
       defaultValue = "10",
       description = "Ozone EC client to retry stripe to new block group on" +
           " failures.",
       tags = ConfigTag.CLIENT)
   private int maxECStripeWriteRetries = 10;
 
-  @Config(key = "ec.stripe.queue.size",
+  @Config(key = "ozone.client.ec.stripe.queue.size",
       defaultValue = "2",
       description = "The max number of EC stripes can be buffered in client " +
           " before flushing into datanodes.",
       tags = ConfigTag.CLIENT)
   private int ecStripeQueueSize = 2;
 
-  @Config(key = "exclude.nodes.expiry.time",
+  @Config(key = "ozone.client.exclude.nodes.expiry.time",
       defaultValue = "600000",
       description = "Time after which an excluded node is reconsidered for" +
           " writes. If the value is zero, the node is excluded for the" +
@@ -193,7 +192,7 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private long excludeNodesExpiryTime = 10 * 60 * 1000;
 
-  @Config(key = "ec.reconstruct.stripe.read.pool.limit",
+  @Config(key = "ozone.client.ec.reconstruct.stripe.read.pool.limit",
       defaultValue = "30",
       description = "Thread pool max size for parallelly read" +
           " available ec chunks to reconstruct the whole stripe.",
@@ -204,14 +203,14 @@ public class OzoneClientConfig {
   // 3 concurrent stripe read should be enough.
   private int ecReconstructStripeReadPoolLimit = 10 * 3;
 
-  @Config(key = "ec.reconstruct.stripe.write.pool.limit",
+  @Config(key = "ozone.client.ec.reconstruct.stripe.write.pool.limit",
       defaultValue = "30",
       description = "Thread pool max size for parallelly write" +
           " available ec chunks to reconstruct the whole stripe.",
       tags = ConfigTag.CLIENT)
   private int ecReconstructStripeWritePoolLimit = 10 * 3;
 
-  @Config(key = "checksum.combine.mode",
+  @Config(key = "ozone.client.checksum.combine.mode",
       defaultValue = "COMPOSITE_CRC",
       description = "The combined checksum type [MD5MD5CRC / COMPOSITE_CRC] "
           + "determines which algorithm would be used to compute file 
checksum."
@@ -225,7 +224,7 @@ public class OzoneClientConfig {
   private String checksumCombineMode =
       ChecksumCombineMode.COMPOSITE_CRC.name();
 
-  @Config(key = "fs.default.bucket.layout",
+  @Config(key = "ozone.client.fs.default.bucket.layout",
       defaultValue = "FILE_SYSTEM_OPTIMIZED",
       type = ConfigType.STRING,
       description = "The bucket layout used by buckets created using OFS. " +
@@ -233,8 +232,7 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private String fsDefaultBucketLayout = "FILE_SYSTEM_OPTIMIZED";
 
-  // ozone.client.hbase.enhancements.allowed
-  @Config(key = "hbase.enhancements.allowed",
+  @Config(key = "ozone.client.hbase.enhancements.allowed",
       defaultValue = "false",
       description = "When set to false, client-side HBase enhancement-related 
Ozone (experimental) features " +
           "are disabled (not allowed to be enabled) regardless of whether 
those configs are set.\n" +
@@ -249,8 +247,7 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private boolean hbaseEnhancementsAllowed = false;
 
-  // ozone.client.incremental.chunk.list
-  @Config(key = "incremental.chunk.list",
+  @Config(key = "ozone.client.incremental.chunk.list",
       defaultValue = "false",
       type = ConfigType.BOOLEAN,
       description = "Client PutBlock request can choose incremental chunk " +
@@ -260,8 +257,7 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private boolean incrementalChunkList = false;
 
-  // ozone.client.stream.putblock.piggybacking
-  @Config(key = "stream.putblock.piggybacking",
+  @Config(key = "ozone.client.stream.putblock.piggybacking",
           defaultValue = "false",
           type = ConfigType.BOOLEAN,
           description = "Allow PutBlock to be piggybacked in WriteChunk 
requests if the chunk is small. " +
@@ -269,8 +265,7 @@ public class OzoneClientConfig {
           tags = ConfigTag.CLIENT)
   private boolean enablePutblockPiggybacking = false;
 
-  // ozone.client.key.write.concurrency
-  @Config(key = "key.write.concurrency",
+  @Config(key = "ozone.client.key.write.concurrency",
       defaultValue = "1",
       description = "Maximum concurrent writes allowed on each key. " +
           "Defaults to 1 which matches the behavior before HDDS-9844. " +
@@ -339,7 +334,6 @@ public long getStreamBufferFlushSize() {
     return streamBufferFlushSize;
   }
 
-  @VisibleForTesting
   public void setStreamBufferFlushSize(long streamBufferFlushSize) {
     this.streamBufferFlushSize = streamBufferFlushSize;
   }
@@ -348,7 +342,6 @@ public int getStreamBufferSize() {
     return streamBufferSize;
   }
 
-  @VisibleForTesting
   public void setStreamBufferSize(int streamBufferSize) {
     this.streamBufferSize = streamBufferSize;
   }
@@ -357,7 +350,6 @@ public boolean isStreamBufferFlushDelay() {
     return streamBufferFlushDelay;
   }
 
-  @VisibleForTesting
   public void setStreamBufferFlushDelay(boolean streamBufferFlushDelay) {
     this.streamBufferFlushDelay = streamBufferFlushDelay;
   }
@@ -366,7 +358,6 @@ public long getStreamBufferMaxSize() {
     return streamBufferMaxSize;
   }
 
-  @VisibleForTesting
   public void setStreamBufferMaxSize(long streamBufferMaxSize) {
     this.streamBufferMaxSize = streamBufferMaxSize;
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
index 0dd4d64f68..af0d448f9d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
@@ -74,7 +74,7 @@ public class ScmConfig extends ReconfigurableConfig {
       + " : chooses a pipeline in a round robin fashion. Intended for 
troubleshooting and testing purposes only.";
 
   // hdds.scm.pipeline.choose.policy.impl
-  @Config(key = "pipeline.choose.policy.impl",
+  @Config(key = "hdds.scm.pipeline.choose.policy.impl",
       type = ConfigType.STRING,
      defaultValue = "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy",
       tags = { ConfigTag.SCM, ConfigTag.PIPELINE },
@@ -88,7 +88,7 @@ public class ScmConfig extends ReconfigurableConfig {
   private String pipelineChoosePolicyName;
 
   // hdds.scm.ec.pipeline.choose.policy.impl
-  @Config(key = "ec.pipeline.choose.policy.impl",
+  @Config(key = "hdds.scm.ec.pipeline.choose.policy.impl",
       type = ConfigType.STRING,
      defaultValue = "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy",
       tags = { ConfigTag.SCM, ConfigTag.PIPELINE },
@@ -101,7 +101,7 @@ public class ScmConfig extends ReconfigurableConfig {
   )
   private String ecPipelineChoosePolicyName;
 
-  @Config(key = "block.deletion.per-interval.max",
+  @Config(key = "hdds.scm.block.deletion.per-interval.max",
       type = ConfigType.INT,
       defaultValue = "100000",
       reconfigurable = true,
@@ -115,7 +115,7 @@ public class ScmConfig extends ReconfigurableConfig {
   )
   private int blockDeletionLimit;
 
-  @Config(key = "block.deleting.service.interval",
+  @Config(key = "hdds.scm.block.deleting.service.interval",
       defaultValue = "60s",
       type = ConfigType.TIME,
       tags = { ConfigTag.SCM, ConfigTag.DELETION },
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
index e81646b043..91ff6029af 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
@@ -17,6 +17,8 @@
 
 package org.apache.hadoop.hdds.conf;
 
+import static org.apache.hadoop.hdds.conf.ConfigurationReflectionUtil.getFullKey;
+
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -133,17 +135,7 @@ private void writeConfigAnnotations(ConfigGroup configGroup,
 
           Config configAnnotation = element.getAnnotation(Config.class);
 
-          if (configAnnotation.key().startsWith(configGroup.prefix())) {
-            String msg = String.format(
-                "@%s(key = \"%s\") should not duplicate prefix from @%s(\"%s\")",
-                Config.class.getSimpleName(), configAnnotation.key(),
-                ConfigGroup.class.getSimpleName(), configGroup.prefix());
-            processingEnv.getMessager().printMessage(Kind.ERROR, msg, element);
-            continue;
-          }
-
-          String key = configGroup.prefix() + "."
-              + configAnnotation.key();
+          String key = getFullKey(configGroup, configAnnotation);
 
           appender.addConfig(key,
               configAnnotation.defaultValue(),
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
index a9d529228f..81d37e72cc 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
@@ -39,7 +39,7 @@ private ConfigurationReflectionUtil() {
 
   public static <T> Map<String, Field> mapReconfigurableProperties(
       Class<T> configurationClass) {
-    Optional<String> prefix = getPrefix(configurationClass);
+    String prefix = getPrefix(configurationClass);
     Map<String, Field> props =
         mapReconfigurableProperties(configurationClass, prefix);
     Class<? super T> superClass = configurationClass.getSuperclass();
@@ -51,7 +51,7 @@ public static <T> Map<String, Field> mapReconfigurableProperties(
   }
 
   private static <T> Map<String, Field> mapReconfigurableProperties(
-      Class<T> configurationClass, Optional<String> prefix) {
+      Class<T> configurationClass, String prefix) {
     Map<String, Field> props = new HashMap<>();
     for (Field field : configurationClass.getDeclaredFields()) {
       if (field.isAnnotationPresent(Config.class)) {
@@ -69,7 +69,8 @@ private static <T> Map<String, Field> mapReconfigurableProperties(
   public static <T> void injectConfiguration(
       ConfigurationSource configuration,
       Class<T> configurationClass,
-      T configObject, String prefix, boolean reconfiguration) {
+      T configObject, boolean reconfiguration) {
+    String prefix = getPrefix(configurationClass);
    injectConfigurationToObject(configuration, configurationClass, configObject,
         prefix, reconfiguration);
     Class<? super T> superClass = configurationClass.getSuperclass();
@@ -96,7 +97,7 @@ private static <T> void injectConfigurationToObject(ConfigurationSource from,
           continue;
         }
 
-        String key = prefix + "." + configAnnotation.key();
+        String key = getFullKey(prefix, configAnnotation);
         String defaultValue = configAnnotation.defaultValue();
         String value = from.get(key, defaultValue);
 
@@ -240,6 +241,11 @@ static <T> void callPostConstruct(T configObject) {
   }
 
   public static <T> void updateConfiguration(ConfigurationTarget config,
+      T object) {
+    updateConfiguration(config, object, getPrefix(object.getClass()));
+  }
+
+  private static <T> void updateConfiguration(ConfigurationTarget config,
       T object, String prefix) {
 
     Class<?> configClass = object.getClass();
@@ -264,7 +270,7 @@ private static <T> void updateConfigurationFromObject(
       if (field.isAnnotationPresent(Config.class)) {
         Config configAnnotation = field.getAnnotation(Config.class);
         String fieldLocation = configClass + "." + field.getName();
-        String key = prefix + "." + configAnnotation.key();
+        String key = getFullKey(prefix, configAnnotation);
         ConfigType type = configAnnotation.type();
 
         if (type == ConfigType.AUTO) {
@@ -295,10 +301,10 @@ public static Optional<String> getDefaultValue(Class<?> configClass,
 
   public static Optional<String> getKey(Class<?> configClass,
       String fieldName) {
-    Optional<String> prefix = getPrefix(configClass);
+    ConfigGroup configGroup = getConfigGroup(configClass);
 
     return findFieldConfigAnnotationByName(configClass, fieldName)
-        .map(config -> getFullKey(prefix, config));
+        .map(config -> getFullKey(configGroup, config));
   }
 
   public static Optional<ConfigType> getType(Class<?> configClass,
@@ -328,8 +334,8 @@ private static Optional<Config> findFieldConfigAnnotationByName(
     return Optional.empty();
   }
 
-  private static <T> void checkNotFinal(
-      Class<T> configurationClass, Field field) {
+  private static void checkNotFinal(
+      Class<?> configurationClass, Field field) {
 
     if ((field.getModifiers() & Modifier.FINAL) != 0) {
       throw new ConfigurationException(String.format(
@@ -339,20 +345,33 @@ private static <T> void checkNotFinal(
     }
   }
 
-  private static <T> Optional<String> getPrefix(Class<T> configurationClass) {
-    ConfigGroup configGroup =
-        configurationClass.getAnnotation(ConfigGroup.class);
-    return configGroup != null
-        ? Optional.of(configGroup.prefix())
-        : Optional.empty();
+  /** Compose the full config property name to be used for {@code configGroup} and {@code configAnnotation}. */
+  public static String getFullKey(
+      ConfigGroup configGroup, Config configAnnotation) {
+    return getFullKey(getPrefix(configGroup), configAnnotation);
+  }
+
+  private static String getPrefix(Class<?> configurationClass) {
+    return getPrefix(getConfigGroup(configurationClass));
+  }
+
+  private static ConfigGroup getConfigGroup(Class<?> configurationClass) {
+    return configurationClass.getAnnotation(ConfigGroup.class);
+  }
+
+  /** Get {@code configGroup}'s prefix with dot appended. */
+  private static String getPrefix(ConfigGroup configGroup) {
+    return configGroup != null && !configGroup.prefix().isEmpty()
+        ? configGroup.prefix() + "."
+        : "";
   }
 
   private static String getFullKey(
-      Optional<String> optionalPrefix, Config configAnnotation) {
+      String prefix, Config configAnnotation) {
     String key = configAnnotation.key();
-    return optionalPrefix
-        .map(prefix -> prefix + "." + key)
-        .orElse(key);
+    return prefix != null && !prefix.isEmpty() && !key.startsWith(prefix)
+        ? prefix + key
+        : key;
   }
 
 }
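
A small sketch of the resulting key resolution (a hypothetical JUnit 5 test, not part of this patch, reusing the ConfigurationExample fixture updated by this commit):

    package org.apache.hadoop.hdds.conf;

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import java.util.Optional;
    import org.junit.jupiter.api.Test;

    class ConfigKeyResolutionSketchTest {

      @Test
      void prefixedKeyIsNotDoubled() {
        // The "withPrefix" field's @Config key already contains the
        // "ozone.test.config" group prefix, so it is returned unchanged.
        assertEquals(Optional.of("ozone.test.config.with.prefix.included"),
            ConfigurationReflectionUtil.getKey(ConfigurationExample.class, "withPrefix"));

        // A key written without the prefix still gets the prefix prepended.
        assertEquals(Optional.of("ozone.test.config.wait"),
            ConfigurationReflectionUtil.getKey(ConfigurationExample.class, "waitTime"));
      }
    }
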
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
index 8d0a14e3c4..74347acefa 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
@@ -168,14 +168,7 @@ default <T> T getObject(Class<T> configurationClass) {
       throw new ConfigurationException(
           "Configuration class can't be created: " + configurationClass, e);
     }
-    ConfigGroup configGroup =
-        configurationClass.getAnnotation(ConfigGroup.class);
-
-    String prefix = configGroup.prefix();
-
-    ConfigurationReflectionUtil
-        .injectConfiguration(this, configurationClass, configObject,
-            prefix, false);
+    ConfigurationReflectionUtil.injectConfiguration(this, configurationClass, configObject, false);
 
     ConfigurationReflectionUtil.callPostConstruct(configObject);
 
@@ -187,10 +180,7 @@ default <T> T getObject(Class<T> configurationClass) {
    * Update {@code object}'s reconfigurable properties from this configuration.
    */
   default <T> void reconfigure(Class<T> configClass, T object) {
-    ConfigGroup configGroup = configClass.getAnnotation(ConfigGroup.class);
-    String prefix = configGroup.prefix();
-    ConfigurationReflectionUtil.injectConfiguration(
-        this, configClass, object, prefix, true);
+    ConfigurationReflectionUtil.injectConfiguration(this, configClass, object, true);
   }
 
   /**
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
index 318d08da7e..863d80f003 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
@@ -56,10 +56,7 @@ default void setStorageSize(String name, long value, StorageUnit unit) {
   }
 
   default <T> void setFromObject(T object) {
-    ConfigGroup configGroup =
-        object.getClass().getAnnotation(ConfigGroup.class);
-    String prefix = configGroup.prefix();
-    ConfigurationReflectionUtil.updateConfiguration(this, object, prefix);
+    ConfigurationReflectionUtil.updateConfiguration(this, object);
   }
 
 }
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java
index 37dbc82329..0304ef1b1b 100644
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java
+++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java
@@ -23,7 +23,7 @@
 /**
  * Example configuration to test the configuration injection.
  */
-@ConfigGroup(prefix = "ozone.scm.client")
+@ConfigGroup(prefix = "ozone.test.config")
 public class ConfigurationExample extends ConfigurationExampleParent {
 
   @Config(key = "address", defaultValue = "localhost", description = "Client "
@@ -72,6 +72,10 @@ public class ConfigurationExample extends ConfigurationExampleParent {
       description = "Test dynamic property", tags = {})
   private String dynamic;
 
+  @Config(key = "ozone.test.config.with.prefix.included", defaultValue = "any",
+      description = "Test property whose name includes the group prefix", tags 
= {})
+  private String withPrefix;
+
   public void setClientAddress(String clientAddress) {
     this.clientAddress = clientAddress;
   }
@@ -96,6 +100,10 @@ public void setThreshold(double threshold) {
     this.threshold = threshold;
   }
 
+  public void setWithPrefix(String newValue) {
+    withPrefix = newValue;
+  }
+
   public String getClientAddress() {
     return clientAddress;
   }
@@ -127,4 +135,8 @@ public double getThreshold() {
   public String getDynamic() {
     return dynamic;
   }
+
+  public String getWithPrefix() {
+    return withPrefix;
+  }
 }
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java
index 394efc58cb..8e2a109812 100644
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java
+++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java
@@ -44,15 +44,15 @@ public void testGeneratedXml() throws FileNotFoundException {
 
     assertThat(generatedXml)
         .as("annotation in ConfigurationExample")
-        .contains("<name>ozone.scm.client.bind.host</name>");
+        .contains("<name>ozone.test.config.bind.host</name>");
 
     assertThat(generatedXml)
         .as("annotation in ConfigurationExampleParent")
-        .contains("<name>ozone.scm.client.secure</name>");
+        .contains("<name>ozone.test.config.secure</name>");
 
     assertThat(generatedXml)
         .as("annotation in ConfigurationExampleGrandParent")
-        .contains("<name>ozone.scm.client.number</name>");
+        .contains("<name>ozone.test.config.number</name>");
 
     assertThat(generatedXml)
         .contains("<tag>MANAGEMENT</tag>");
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigurationReflectionUtil.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigurationReflectionUtil.java
index 8382ba902c..6117f8a536 100644
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigurationReflectionUtil.java
+++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigurationReflectionUtil.java
@@ -38,7 +38,7 @@ static Stream<Arguments> data() {
     return Stream.of(
         arguments(ConfigurationExample.class, "waitTime",
             Optional.of(ConfigType.TIME),
-            Optional.of("ozone.scm.client.wait"),
+            Optional.of("ozone.test.config.wait"),
             Optional.of("30m")),
         arguments(ConfigurationExampleGrandParent.class, "number",
             Optional.of(ConfigType.AUTO),
@@ -46,7 +46,7 @@ static Stream<Arguments> data() {
             Optional.of("2")),
         arguments(ConfigurationExample.class, "secure",
             Optional.of(ConfigType.AUTO),
-            Optional.of("ozone.scm.client.secure"),
+            Optional.of("ozone.test.config.secure"),
             Optional.of("true")),
         arguments(ConfigurationExample.class, "no-such-field",
             Optional.empty(),
@@ -58,7 +58,7 @@ static Stream<Arguments> data() {
             Optional.empty()),
         arguments(ConfigurationExample.class, "threshold",
             Optional.of(ConfigType.DOUBLE),
-            Optional.of("ozone.scm.client.threshold"),
+            Optional.of("ozone.test.config.threshold"),
             Optional.of("10"))
     );
   }
@@ -88,7 +88,7 @@ void listReconfigurableProperties() {
         ConfigurationReflectionUtil.mapReconfigurableProperties(
             ConfigurationExample.class).keySet();
 
-    String prefix = "ozone.scm.client";
+    String prefix = "ozone.test.config";
     assertEquals(ImmutableSet.of(
         prefix + ".dynamic",
         prefix + ".grandpa.dyna"
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigurationSource.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigurationSource.java
index 2e04d29946..de0718e9f6 100644
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigurationSource.java
+++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigurationSource.java
@@ -44,7 +44,7 @@ void getPropsMatchPrefix() {
   }
   @Test
   void reconfigurableProperties() {
-    String prefix = "ozone.scm.client";
+    String prefix = "ozone.test.config";
     ImmutableSet<String> expected = ImmutableSet.of(
         prefix + ".dynamic",
         prefix + ".grandpa.dyna"
@@ -62,11 +62,34 @@ void reconfiguration() {
     ConfigurationExample orig = subject.getObject(ConfigurationExample.class);
     ConfigurationExample obj = subject.getObject(ConfigurationExample.class);
 
-    subject.set("ozone.scm.client.dynamic", "updated");
-    subject.setLong("ozone.scm.client.wait", orig.getWaitTime() + 42);
+    subject.set("ozone.test.config.dynamic", "updated");
+    subject.setLong("ozone.test.config.wait", orig.getWaitTime() + 42);
     subject.reconfigure(ConfigurationExample.class, obj);
 
     assertEquals("updated", obj.getDynamic());
     assertEquals(orig.getWaitTime(), obj.getWaitTime());
   }
+
+  @Test
+  void getPropertyWithPrefixIncludedInName() {
+    MutableConfigurationSource conf = new InMemoryConfiguration();
+    String value = "newValue";
+    conf.set("ozone.test.config.with.prefix.included", value);
+
+    ConfigurationExample subject = conf.getObject(ConfigurationExample.class);
+
+    assertEquals(value, subject.getWithPrefix());
+  }
+
+  @Test
+  void setPropertyWithPrefixIncludedInName() {
+    MutableConfigurationSource conf = new InMemoryConfiguration();
+    ConfigurationExample subject = conf.getObject(ConfigurationExample.class);
+
+    String value = "newValue";
+    subject.setWithPrefix(value);
+    conf.setFromObject(subject);
+
+    assertEquals(value, conf.get("ozone.test.config.with.prefix.included"));
+  }
 }
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestReconfigurableConfig.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestReconfigurableConfig.java
index 797eff51a7..5441b68b6c 100644
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestReconfigurableConfig.java
+++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestReconfigurableConfig.java
@@ -31,7 +31,7 @@ void testReconfigureProperty() {
     ConfigurationExample subject = new InMemoryConfiguration()
         .getObject(ConfigurationExample.class);
 
-    subject.reconfigureProperty("ozone.scm.client.dynamic", "updated");
+    subject.reconfigureProperty("ozone.test.config.dynamic", "updated");
 
     assertEquals("updated", subject.getDynamic());
   }
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java
index 5dbe779217..660986ddfe 100644
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java
@@ -17,6 +17,8 @@
 
 package org.apache.hadoop.ozone.insight;
 
+import static org.apache.hadoop.hdds.conf.ConfigurationReflectionUtil.getFullKey;
+
 import java.lang.reflect.Field;
 import java.util.concurrent.Callable;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
@@ -78,12 +80,10 @@ protected void printConfig(Class clazz, OzoneConfiguration conf) {
 
   private void printConfig(ConfigGroup configGroup, Class clazz,
       OzoneConfiguration conf) {
-    String prefix = configGroup.prefix();
-
     for (Field field : clazz.getDeclaredFields()) {
       if (field.isAnnotationPresent(Config.class)) {
         Config config = field.getAnnotation(Config.class);
-        String key = prefix + "." + config.key();
+        String key = getFullKey(configGroup, config);
         System.out.println(">>> " + key);
         System.out.println("       default: " + config.defaultValue());
         System.out.println("       current: " + conf.get(key));

