This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 81fc4c4f85 HDDS-12550. Use DatanodeID instead of UUID in NodeManager CommandQueue. (#8560)
81fc4c4f85 is described below

commit 81fc4c4f859c8bb7723b84f2e80d3b4f902efc8c
Author: Nandakumar Vadivelu <[email protected]>
AuthorDate: Thu Jun 5 22:25:17 2025 +0530

    HDDS-12550. Use DatanodeID instead of UUID in NodeManager CommandQueue. (#8560)
---
 .../hadoop/hdds/protocol/MockDatanodeDetails.java  | 31 ++++++------
 .../hdds/scm/block/SCMBlockDeletingService.java    |  2 +-
 .../apache/hadoop/hdds/scm/node/CommandQueue.java  | 31 +++++-------
 .../hadoop/hdds/scm/node/DeadNodeHandler.java      |  2 +-
 .../apache/hadoop/hdds/scm/node/NodeManager.java   |  8 ++--
 .../hadoop/hdds/scm/node/SCMNodeManager.java       | 20 ++++----
 .../scm/server/SCMDatanodeHeartbeatDispatcher.java |  4 +-
 .../org/apache/hadoop/hdds/scm/HddsTestUtils.java  |  2 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java | 20 ++++----
 .../hdds/scm/container/SimpleMockNodeManager.java  |  5 +-
 .../hadoop/hdds/scm/node/TestCommandQueue.java     | 56 +++++++++++-----------
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  |  2 +-
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   | 37 +++++++-------
 .../ozoneimpl/TestOzoneContainerWithTLS.java       |  3 +-
 .../ozone/om/TestOmContainerLocationCache.java     | 12 ++---
 15 files changed, 110 insertions(+), 125 deletions(-)
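
Not part of the commit itself: below is a minimal Java usage sketch, assuming the post-commit NodeManager API shown in this patch (addDatanodeCommand and getCommandQueueCount keyed by DatanodeID, DatanodeDetails.getID(), and the existing ReregisterCommand). The helper class and method names are illustrative only.

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
    import org.apache.hadoop.hdds.scm.node.NodeManager;
    import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand;

    /** Illustrative helper, not from the patch. */
    final class CommandQueueUsageSketch {

      /** Queue a re-register command for a datanode and return its pending count. */
      static int queueAndCount(NodeManager nodeManager, DatanodeDetails dn) {
        // Before this commit the queue-count lookup took dn.getUuid();
        // after it, every CommandQueue lookup is keyed by the DatanodeID.
        nodeManager.addDatanodeCommand(dn.getID(), new ReregisterCommand());
        return nodeManager.getCommandQueueCount(
            dn.getID(), SCMCommandProto.Type.reregisterCommand);
      }

      private CommandQueueUsageSketch() { }
    }
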

diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java
index f3ea2fe808..8d1424248f 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java
@@ -20,7 +20,6 @@
 import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.ALL_PORTS;
 
 import java.util.Random;
-import java.util.UUID;
 import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.ozone.test.GenericTestUtils;
@@ -31,16 +30,16 @@
 public final class MockDatanodeDetails {
 
   /**
-   * Creates DatanodeDetails with random UUID and random IP address.
+   * Creates DatanodeDetails with random ID and random IP address.
    *
    * @return DatanodeDetails
    */
   public static DatanodeDetails randomDatanodeDetails() {
-    return createDatanodeDetails(UUID.randomUUID());
+    return createDatanodeDetails(DatanodeID.randomID());
   }
 
   /**
-   * Creates DatanodeDetails with random UUID, specific hostname and network
+   * Creates DatanodeDetails with random DatanodeID, specific hostname and network
    * location.
    *
    * @return DatanodeDetails
@@ -52,46 +51,46 @@ public static DatanodeDetails createDatanodeDetails(String hostname,
         + "." + random.nextInt(256)
         + "." + random.nextInt(256)
         + "." + random.nextInt(256);
-    return createDatanodeDetails(UUID.randomUUID().toString(), hostname,
+    return createDatanodeDetails(DatanodeID.randomID(), hostname,
         ipAddress, loc);
   }
 
   /**
-   * Creates DatanodeDetails using the given UUID.
+   * Creates DatanodeDetails using the given DatanodeID.
    *
-   * @param uuid Datanode's UUID
+   * @param id Datanode's ID
    *
    * @return DatanodeDetails
    */
-  public static DatanodeDetails createDatanodeDetails(UUID uuid) {
+  public static DatanodeDetails createDatanodeDetails(DatanodeID id) {
     Random random = ThreadLocalRandom.current();
     String ipAddress = random.nextInt(256)
         + "." + random.nextInt(256)
         + "." + random.nextInt(256)
         + "." + random.nextInt(256);
-    return createDatanodeDetails(uuid.toString(), "localhost" + "-" + ipAddress,
+    return createDatanodeDetails(id, "localhost" + "-" + ipAddress,
         ipAddress, null);
   }
 
   /**
    * Creates DatanodeDetails with the given information.
    *
-   * @param uuid      Datanode's UUID
+   * @param id      Datanode's ID
    * @param hostname  hostname of Datanode
    * @param ipAddress ip address of Datanode
    *
    * @return DatanodeDetails
    */
-  public static DatanodeDetails createDatanodeDetails(String uuid,
+  public static DatanodeDetails createDatanodeDetails(DatanodeID id,
       String hostname, String ipAddress, String networkLocation) {
-    return createDatanodeDetails(uuid, hostname, ipAddress, networkLocation, 0);
+    return createDatanodeDetails(id, hostname, ipAddress, networkLocation, 0);
   }
 
-  public static DatanodeDetails createDatanodeDetails(String uuid,
+  public static DatanodeDetails createDatanodeDetails(DatanodeID id,
       String hostname, String ipAddress, String networkLocation, int port) {
 
     DatanodeDetails.Builder dn = DatanodeDetails.newBuilder()
-        .setUuid(UUID.fromString(uuid))
+        .setID(id)
         .setHostName(hostname)
         .setIpAddress(ipAddress)
         .setNetworkLocation(networkLocation)
@@ -106,12 +105,12 @@ public static DatanodeDetails createDatanodeDetails(String uuid,
   }
 
   /**
-   * Creates DatanodeDetails with random UUID and valid local address and port.
+   * Creates DatanodeDetails with random ID and valid local address and port.
    *
    * @return DatanodeDetails
    */
   public static DatanodeDetails randomLocalDatanodeDetails() {
-    return createDatanodeDetails(UUID.randomUUID().toString(),
+    return createDatanodeDetails(DatanodeID.randomID(),
         GenericTestUtils.PortAllocator.HOSTNAME,
         GenericTestUtils.PortAllocator.HOST_ADDRESS, null,
         GenericTestUtils.PortAllocator.getFreePort());
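
Not part of the patch: a short Java sketch, assuming the post-commit MockDatanodeDetails signatures shown above, of the new test-side construction pattern. The helper class, host name, IP, and rack value are illustrative.

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.DatanodeID;
    import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;

    /** Illustrative helper, not from the patch. */
    final class MockDatanodeSketch {
      /** Was createDatanodeDetails(UUID.randomUUID().toString(), host, ip, rack). */
      static DatanodeDetails onRack(String rack) {
        return MockDatanodeDetails.createDatanodeDetails(
            DatanodeID.randomID(), "localhost", "10.0.0.1", rack);
      }

      private MockDatanodeSketch() { }
    }
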
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
index 81b40729c4..dfcaca333c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
@@ -313,7 +313,7 @@ protected Set<DatanodeDetails> getDatanodesWithinCommandLimit(
     final Set<DatanodeDetails> included = new HashSet<>();
     for (DatanodeDetails dn : datanodes) {
       if (nodeManager.getTotalDatanodeCommandCount(dn, Type.deleteBlocksCommand) < deleteBlocksPendingCommandLimit
-          && nodeManager.getCommandQueueCount(dn.getUuid(), Type.deleteBlocksCommand) < 2) {
+          && nodeManager.getCommandQueueCount(dn.getID(), Type.deleteBlocksCommand) < 2) {
         included.add(dn);
       }
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
index f122215105..87e458767f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
@@ -24,7 +24,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 
@@ -38,7 +38,7 @@
  * Note this class is not thread safe, and accesses must be protected by a lock.
  */
 public class CommandQueue {
-  private final Map<UUID, Commands> commandMap;
+  private final Map<DatanodeID, Commands> commandMap;
   private long commandsInQueue;
 
   /**
@@ -71,12 +71,11 @@ public void clear() {
    * commands returns a empty list otherwise the current set of
    * commands are returned and command map set to empty list again.
    *
-   * @param datanodeUuid Datanode UUID
    * @return List of SCM Commands.
    */
   @SuppressWarnings("unchecked")
-  List<SCMCommand<?>> getCommand(final UUID datanodeUuid) {
-    Commands cmds = commandMap.remove(datanodeUuid);
+  List<SCMCommand<?>> getCommand(final DatanodeID datanodeID) {
+    Commands cmds = commandMap.remove(datanodeID);
     List<SCMCommand<?>> cmdList = null;
     if (cmds != null) {
       cmdList = cmds.getCommands();
@@ -93,13 +92,13 @@ List<SCMCommand<?>> getCommand(final UUID datanodeUuid) {
    * Command.contributesToQueueSize() method will not be included in the count.
   * At the current time, only low priority ReplicateContainerCommands meet this
    * condition.
-   * @param datanodeUuid Datanode UUID.
+   * @param datanodeID Datanode ID.
    * @param commandType The type of command for which to get the count.
    * @return The currently queued command count, or zero if none are queued.
    */
   public int getDatanodeCommandCount(
-      final UUID datanodeUuid, SCMCommandProto.Type commandType) {
-    Commands commands = commandMap.get(datanodeUuid);
+      final DatanodeID datanodeID, SCMCommandProto.Type commandType) {
+    Commands commands = commandMap.get(datanodeID);
     if (commands == null) {
       return 0;
     }
@@ -112,27 +111,21 @@ public int getDatanodeCommandCount(
    * Command.contributesToQueueSize() method will not be included in the count.
   * At the current time, only low priority ReplicateContainerCommands meet this
    * condition.
-   * @param datanodeUuid Datanode UUID
    * @return A map containing the command summary. Note the returned map is a
    *         copy of the internal map and can be modified safely by the caller.
    */
   public Map<SCMCommandProto.Type, Integer> getDatanodeCommandSummary(
-      final UUID datanodeUuid) {
-    Commands commands = commandMap.get(datanodeUuid);
+      final DatanodeID datanodeID) {
+    Commands commands = commandMap.get(datanodeID);
     if (commands == null) {
       return Collections.emptyMap();
     }
     return commands.getAllCommandsSummary();
   }
 
-  /**
-   * Adds a Command to the SCM Queue to send the command to container.
-   *
-   * @param datanodeUuid DatanodeDetails.Uuid
-   * @param command    - Command
-   */
-  public void addCommand(final UUID datanodeUuid, final SCMCommand<?> command) {
-    commandMap.computeIfAbsent(datanodeUuid, s -> new Commands()).add(command);
+  /** Adds a Command to the SCM Queue to send the command to container. */
+  public void addCommand(final DatanodeID datanodeID, final SCMCommand<?> command) {
+    commandMap.computeIfAbsent(datanodeID, s -> new Commands()).add(command);
     commandsInQueue++;
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
index bd6ee9503b..773841575a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
@@ -109,7 +109,7 @@ public void onMessage(final DatanodeDetails datanodeDetails,
 
       // remove commands in command queue for the DN
       final List<SCMCommand<?>> cmdList = nodeManager.getCommandQueue(
-          datanodeDetails.getUuid());
+          datanodeDetails.getID());
       LOG.info("Clearing command queue of size {} for DN {}",
           cmdList.size(), datanodeDetails);
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 2893acb78e..cb8093ff1b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -26,7 +26,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.function.BiConsumer;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeID;
@@ -312,11 +311,11 @@ int getNodeQueuedCommandCount(DatanodeDetails datanodeDetails,
   /**
   * Get the number of commands of the given type queued in the SCM CommandQueue
    * for the given datanode.
-   * @param dnID The UUID of the datanode.
+   * @param dnID The ID of the datanode.
    * @param cmdType The Type of command to query the current count for.
    * @return The count of commands queued, or zero if none.
    */
-  int getCommandQueueCount(UUID dnID, SCMCommandProto.Type cmdType);
+  int getCommandQueueCount(DatanodeID dnID, SCMCommandProto.Type cmdType);
 
   /**
    * Get the total number of pending commands of the given type on the given
@@ -354,11 +353,10 @@ Map<SCMCommandProto.Type, Integer> getTotalDatanodeCommandCounts(
 
   /**
    * Get list of SCMCommands in the Command Queue for a particular Datanode.
-   * @param dnID - Datanode uuid.
    * @return list of commands
    */
   // TODO: We can give better name to this method!
-  List<SCMCommand<?>> getCommandQueue(UUID dnID);
+  List<SCMCommand<?>> getCommandQueue(DatanodeID dnID);
 
   /** @return the datanode of the given id if it exists; otherwise, return null. */
   @Nullable DatanodeDetails getNode(@Nullable DatanodeID id);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 658b9ffe82..1485f1ea29 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -40,7 +40,6 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
@@ -547,9 +546,9 @@ public List<SCMCommand<?>> processHeartbeat(DatanodeDetails datanodeDetails,
     writeLock().lock();
     try {
       Map<SCMCommandProto.Type, Integer> summary =
-          commandQueue.getDatanodeCommandSummary(datanodeDetails.getUuid());
+          commandQueue.getDatanodeCommandSummary(datanodeDetails.getID());
       List<SCMCommand<?>> commands =
-          commandQueue.getCommand(datanodeDetails.getUuid());
+          commandQueue.getCommand(datanodeDetails.getID());
 
       // Update the SCMCommand of deleteBlocksCommand Status
       for (SCMCommand<?> command : commands) {
@@ -853,7 +852,7 @@ public int getNodeQueuedCommandCount(DatanodeDetails datanodeDetails,
    * @return The count of commands queued, or zero if none.
    */
   @Override
-  public int getCommandQueueCount(UUID dnID, SCMCommandProto.Type cmdType) {
+  public int getCommandQueueCount(DatanodeID dnID, SCMCommandProto.Type cmdType) {
     readLock().lock();
     try {
       return commandQueue.getDatanodeCommandCount(dnID, cmdType);
@@ -885,7 +884,7 @@ public int getTotalDatanodeCommandCount(DatanodeDetails datanodeDetails,
             ". Assuming zero", datanodeDetails, cmdType);
         dnCount = 0;
       }
-      return getCommandQueueCount(datanodeDetails.getUuid(), cmdType) + dnCount;
+      return getCommandQueueCount(datanodeDetails.getID(), cmdType) + dnCount;
     } finally {
       readLock().unlock();
     }
@@ -1490,7 +1489,7 @@ public int minHealthyVolumeNum(List<DatanodeDetails> dnList) {
                 getHealthyVolumeCount());
       } catch (NodeNotFoundException e) {
         LOG.warn("Cannot generate NodeStat, datanode {} not found.",
-                dn.getUuid());
+                dn.getID());
       }
     }
     Preconditions.checkArgument(!volumeCountList.isEmpty());
@@ -1525,7 +1524,7 @@ public int pipelineLimit(DatanodeDetails dn) {
       }
     } catch (NodeNotFoundException e) {
       LOG.warn("Cannot generate NodeStat, datanode {} not found.",
-          dn.getUuid());
+          dn.getID());
     }
     return 0;
   }
@@ -1647,10 +1646,9 @@ public int getPipeLineCount(DatanodeDetails datanodeDetails)
 
   @Override
   public void addDatanodeCommand(DatanodeID datanodeID, SCMCommand<?> command) {
-    final UUID dnId = datanodeID.getUuid();
     writeLock().lock();
     try {
-      this.commandQueue.addCommand(dnId, command);
+      this.commandQueue.addCommand(datanodeID, command);
     } finally {
       writeLock().unlock();
     }
@@ -1690,7 +1688,7 @@ public void onMessage(CommandForDatanode commandForDatanode,
   }
 
   @Override
-  public List<SCMCommand<?>> getCommandQueue(UUID dnID) {
+  public List<SCMCommand<?>> getCommandQueue(DatanodeID dnID) {
     // Getting the queue actually clears it and returns the commands, so this
     // is a write operation and not a read as the method name suggests.
     writeLock().lock();
@@ -1836,7 +1834,7 @@ public void removeNode(DatanodeDetails datanodeDetails) throws NodeNotFoundExcep
         }
         nodeStateManager.removeNode(datanodeDetails.getID());
         removeFromDnsToDnIdMap(datanodeDetails.getID(), datanodeDetails.getIpAddress());
-        final List<SCMCommand<?>> cmdList = getCommandQueue(datanodeDetails.getUuid());
+        final List<SCMCommand<?>> cmdList = getCommandQueue(datanodeDetails.getID());
         LOG.info("Clearing command queue of size {} for DN {}", cmdList.size(), datanodeDetails);
       } else {
         LOG.warn("Node not decommissioned or dead, cannot remove: {}", 
datanodeDetails);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index 92d2556f14..9617d12e25 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -31,8 +31,8 @@
 import com.google.protobuf.Message;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
@@ -90,7 +90,7 @@ public List<SCMCommand<?>> dispatch(SCMHeartbeatRequestProto heartbeat) {
     if (!nodeManager.isNodeRegistered(datanodeDetails)) {
       LOG.info("SCM received heartbeat from an unregistered datanode {}. " +
           "Asking datanode to re-register.", datanodeDetails);
-      UUID dnID = datanodeDetails.getUuid();
+      DatanodeID dnID = datanodeDetails.getID();
       nodeManager.addDatanodeCommand(datanodeDetails.getID(), new ReregisterCommand());
 
       commands = nodeManager.getCommandQueue(dnID);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
index a61562c361..25c3ba3506 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
@@ -102,7 +102,7 @@ private HddsTestUtils() {
   public static DatanodeDetails getDatanodeDetails(
       RegisteredCommand registeredCommand) {
     return MockDatanodeDetails.createDatanodeDetails(
-        registeredCommand.getDatanode().getUuidString(),
+        registeredCommand.getDatanode().getID(),
         registeredCommand.getDatanode().getHostName(),
         registeredCommand.getDatanode().getIpAddress(),
         null);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index d5112b428c..5c2dafb271 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -30,7 +30,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.stream.Collectors;
@@ -103,7 +102,7 @@ public class MockNodeManager implements NodeManager {
   private final List<DatanodeDetails> deadNodes;
   private final Map<DatanodeDetails, SCMNodeStat> nodeMetricMap;
   private final SCMNodeStat aggregateStat;
-  private final Map<UUID, List<SCMCommand<?>>> commandMap;
+  private final Map<DatanodeID, List<SCMCommand<?>>> commandMap;
   private Node2PipelineMap node2PipelineMap;
   private final NodeStateMap node2ContainerMap;
   private NetworkTopology clusterMap;
@@ -520,15 +519,14 @@ public void removeContainer(DatanodeDetails dd,
 
   @Override
   public void addDatanodeCommand(DatanodeID datanodeID, SCMCommand<?> command) {
-    final UUID dnId = datanodeID.getUuid();
-    if (commandMap.containsKey(dnId)) {
-      List<SCMCommand<?>> commandList = commandMap.get(dnId);
+    if (commandMap.containsKey(datanodeID)) {
+      List<SCMCommand<?>> commandList = commandMap.get(datanodeID);
       Preconditions.checkNotNull(commandList);
       commandList.add(command);
     } else {
       List<SCMCommand<?>> commandList = new LinkedList<>();
       commandList.add(command);
-      commandMap.put(dnId, commandList);
+      commandMap.put(datanodeID, commandList);
     }
   }
 
@@ -581,12 +579,12 @@ public int getNodeQueuedCommandCount(DatanodeDetails datanodeDetails,
   /**
   * Get the number of commands of the given type queued in the SCM CommandQueue
    * for the given datanode.
-   * @param dnID The UUID of the datanode.
+   * @param dnID The ID of the datanode.
    * @param cmdType The Type of command to query the current count for.
    * @return The count of commands queued, or zero if none.
    */
   @Override
-  public int getCommandQueueCount(UUID dnID, SCMCommandProto.Type cmdType) {
+  public int getCommandQueueCount(DatanodeID dnID, SCMCommandProto.Type cmdType) {
     return 0;
   }
 
@@ -634,11 +632,11 @@ public Set<ContainerID> getContainers(DatanodeDetails uuid) throws NodeNotFoundE
 
   // Returns the number of commands that is queued to this node manager.
   public int getCommandCount(DatanodeDetails dd) {
-    List<SCMCommand<?>> list = commandMap.get(dd.getUuid());
+    List<SCMCommand<?>> list = commandMap.get(dd.getID());
     return (list == null) ? 0 : list.size();
   }
 
-  public void clearCommandQueue(UUID dnId) {
+  public void clearCommandQueue(DatanodeID dnId) {
     if (commandMap.containsKey(dnId)) {
       commandMap.put(dnId, new LinkedList<>());
     }
@@ -825,7 +823,7 @@ public void onMessage(CommandForDatanode commandForDatanode,
   }
 
   @Override
-  public List<SCMCommand<?>> getCommandQueue(UUID dnID) {
+  public List<SCMCommand<?>> getCommandQueue(DatanodeID dnID) {
     return null;
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
index 4157f3053f..592c94f250 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
@@ -23,7 +23,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeID;
@@ -310,7 +309,7 @@ public int getNodeQueuedCommandCount(DatanodeDetails datanodeDetails,
    * @return The count of commands queued, or zero if none.
    */
   @Override
-  public int getCommandQueueCount(UUID dnID, SCMCommandProto.Type cmdType) {
+  public int getCommandQueueCount(DatanodeID dnID, SCMCommandProto.Type cmdType) {
     return 0;
   }
 
@@ -339,7 +338,7 @@ public Map<SCMCommandProto.Type, Integer> getTotalDatanodeCommandCounts(
   }
 
   @Override
-  public List<SCMCommand<?>> getCommandQueue(UUID dnID) {
+  public List<SCMCommand<?>> getCommandQueue(DatanodeID dnID) {
     return null;
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestCommandQueue.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestCommandQueue.java
index d47a37f263..d0cfa0be69 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestCommandQueue.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestCommandQueue.java
@@ -22,7 +22,7 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
@@ -58,35 +58,35 @@ public void testSummaryUpdated() {
         .ReplicationCommandPriority.LOW);
 
 
-    UUID datanode1UUID = UUID.randomUUID();
-    UUID datanode2UUID = UUID.randomUUID();
+    DatanodeID datanode1ID = DatanodeID.randomID();
+    DatanodeID datanode2ID = DatanodeID.randomID();
 
-    commandQueue.addCommand(datanode1UUID, closeContainerCommand);
-    commandQueue.addCommand(datanode1UUID, closeContainerCommand);
-    commandQueue.addCommand(datanode1UUID, createPipelineCommand);
-    commandQueue.addCommand(datanode1UUID, replicationCommand);
-    commandQueue.addCommand(datanode1UUID, lowReplicationCommand);
+    commandQueue.addCommand(datanode1ID, closeContainerCommand);
+    commandQueue.addCommand(datanode1ID, closeContainerCommand);
+    commandQueue.addCommand(datanode1ID, createPipelineCommand);
+    commandQueue.addCommand(datanode1ID, replicationCommand);
+    commandQueue.addCommand(datanode1ID, lowReplicationCommand);
 
 
-    commandQueue.addCommand(datanode2UUID, closeContainerCommand);
-    commandQueue.addCommand(datanode2UUID, createPipelineCommand);
-    commandQueue.addCommand(datanode2UUID, createPipelineCommand);
+    commandQueue.addCommand(datanode2ID, closeContainerCommand);
+    commandQueue.addCommand(datanode2ID, createPipelineCommand);
+    commandQueue.addCommand(datanode2ID, createPipelineCommand);
 
     // Check zero returned for unknown DN
     assertEquals(0, commandQueue.getDatanodeCommandCount(
-        UUID.randomUUID(), SCMCommandProto.Type.closeContainerCommand));
+        DatanodeID.randomID(), SCMCommandProto.Type.closeContainerCommand));
 
     assertEquals(2, commandQueue.getDatanodeCommandCount(
-        datanode1UUID, SCMCommandProto.Type.closeContainerCommand));
+        datanode1ID, SCMCommandProto.Type.closeContainerCommand));
     assertEquals(1, commandQueue.getDatanodeCommandCount(
-        datanode1UUID, SCMCommandProto.Type.createPipelineCommand));
+        datanode1ID, SCMCommandProto.Type.createPipelineCommand));
     assertEquals(0, commandQueue.getDatanodeCommandCount(
-        datanode1UUID, SCMCommandProto.Type.closePipelineCommand));
+        datanode1ID, SCMCommandProto.Type.closePipelineCommand));
     assertEquals(1, commandQueue.getDatanodeCommandCount(
-        datanode1UUID, SCMCommandProto.Type.replicateContainerCommand));
+        datanode1ID, SCMCommandProto.Type.replicateContainerCommand));
 
     Map<SCMCommandProto.Type, Integer> commandSummary =
-        commandQueue.getDatanodeCommandSummary(datanode1UUID);
+        commandQueue.getDatanodeCommandSummary(datanode1ID);
     assertEquals(3, commandSummary.size());
     assertEquals(Integer.valueOf(2),
         commandSummary.get(SCMCommandProto.Type.closeContainerCommand));
@@ -96,34 +96,34 @@ public void testSummaryUpdated() {
         commandSummary.get(SCMCommandProto.Type.replicateContainerCommand));
 
     assertEquals(1, commandQueue.getDatanodeCommandCount(
-        datanode2UUID, SCMCommandProto.Type.closeContainerCommand));
+        datanode2ID, SCMCommandProto.Type.closeContainerCommand));
     assertEquals(2, commandQueue.getDatanodeCommandCount(
-        datanode2UUID, SCMCommandProto.Type.createPipelineCommand));
+        datanode2ID, SCMCommandProto.Type.createPipelineCommand));
 
     // Ensure the counts are cleared when the commands are retrieved
-    List<SCMCommand<?>> cmds = commandQueue.getCommand(datanode1UUID);
+    List<SCMCommand<?>> cmds = commandQueue.getCommand(datanode1ID);
     assertEquals(5, cmds.size());
 
     assertEquals(0, commandQueue.getDatanodeCommandCount(
-        datanode1UUID, SCMCommandProto.Type.closeContainerCommand));
+        datanode1ID, SCMCommandProto.Type.closeContainerCommand));
     assertEquals(0, commandQueue.getDatanodeCommandCount(
-        datanode1UUID, SCMCommandProto.Type.createPipelineCommand));
+        datanode1ID, SCMCommandProto.Type.createPipelineCommand));
     assertEquals(0, commandQueue.getDatanodeCommandCount(
-        datanode1UUID, SCMCommandProto.Type.closePipelineCommand));
+        datanode1ID, SCMCommandProto.Type.closePipelineCommand));
     assertEquals(0, commandQueue.getDatanodeCommandCount(
-        datanode1UUID, SCMCommandProto.Type.replicateContainerCommand));
+        datanode1ID, SCMCommandProto.Type.replicateContainerCommand));
 
     assertEquals(1, commandQueue.getDatanodeCommandCount(
-        datanode2UUID, SCMCommandProto.Type.closeContainerCommand));
+        datanode2ID, SCMCommandProto.Type.closeContainerCommand));
     assertEquals(2, commandQueue.getDatanodeCommandCount(
-        datanode2UUID, SCMCommandProto.Type.createPipelineCommand));
+        datanode2ID, SCMCommandProto.Type.createPipelineCommand));
 
     // Ensure the commands are zeroed when the queue is cleared
     commandQueue.clear();
     assertEquals(0, commandQueue.getDatanodeCommandCount(
-        datanode2UUID, SCMCommandProto.Type.closeContainerCommand));
+        datanode2ID, SCMCommandProto.Type.closeContainerCommand));
     assertEquals(0, commandQueue.getDatanodeCommandCount(
-        datanode2UUID, SCMCommandProto.Type.createPipelineCommand));
+        datanode2ID, SCMCommandProto.Type.createPipelineCommand));
   }
 
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 530fa7782c..775fd6f4ca 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -263,7 +263,7 @@ public void testOnMessage(@TempDir File tempDir) throws Exception {
     //deadNodeHandler.onMessage call will not change this
     assertFalse(
         nodeManager.getClusterNetworkTopologyMap().contains(datanode1));
-    assertEquals(0, nodeManager.getCommandQueueCount(datanode1.getUuid(), cmd.getType()));
+    assertEquals(0, nodeManager.getCommandQueueCount(datanode1.getID(), cmd.getType()));
 
     verify(publisher).fireEvent(SCMEvents.REPLICATION_MANAGER_NOTIFY, datanode1);
     verify(deletedBlockLog).onDatanodeDead(datanode1.getID());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 0e05e91a77..0fbdb1ffe9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -61,7 +61,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
@@ -733,7 +732,7 @@ public void testScmDetectStaleAndDeadNode()
           "Expected to find 1 stale node");
       assertEquals(1, staleNodeList.size(),
           "Expected to find 1 stale node");
-      assertEquals(staleNode.getUuid(), staleNodeList.get(0).getUuid(),
+      assertEquals(staleNode.getID(), staleNodeList.get(0).getID(),
           "Stale node is not the expected ID");
 
       Map<String, Map<String, Integer>> nodeCounts = nodeManager.getNodeCount();
@@ -771,7 +770,7 @@ public void testScmDetectStaleAndDeadNode()
       assertEquals(1,
           nodeCounts.get(HddsProtos.NodeOperationalState.IN_SERVICE.name())
               .get(HddsProtos.NodeState.DEAD.name()).intValue());
-      assertEquals(staleNode.getUuid(), deadNodeList.get(0).getUuid(),
+      assertEquals(staleNode.getID(), deadNodeList.get(0).getID(),
           "Dead node is not the expected ID");
     }
   }
@@ -1058,8 +1057,8 @@ public void testCommandCount()
       throws AuthenticationException, IOException {
     SCMNodeManager nodeManager = createNodeManager(getConf());
 
-    UUID datanode1 = UUID.randomUUID();
-    UUID datanode2 = UUID.randomUUID();
+    DatanodeID datanode1 = DatanodeID.randomID();
+    DatanodeID datanode2 = DatanodeID.randomID();
     long containerID = 1;
 
     SCMCommand<?> closeContainerCommand =
@@ -1070,11 +1069,11 @@ public void testCommandCount()
             HddsProtos.ReplicationFactor.THREE, emptyList());
 
     nodeManager.onMessage(
-        new CommandForDatanode<>(DatanodeID.of(datanode1), closeContainerCommand), null);
+        new CommandForDatanode<>(datanode1, closeContainerCommand), null);
     nodeManager.onMessage(
-        new CommandForDatanode<>(DatanodeID.of(datanode1), closeContainerCommand), null);
+        new CommandForDatanode<>(datanode1, closeContainerCommand), null);
     nodeManager.onMessage(
-        new CommandForDatanode<>(DatanodeID.of(datanode1), createPipelineCommand), null);
+        new CommandForDatanode<>(datanode1, createPipelineCommand), null);
 
     assertEquals(2, nodeManager.getCommandQueueCount(
         datanode1, SCMCommandProto.Type.closeContainerCommand));
@@ -1215,7 +1214,7 @@ public void testScmClusterIsInExpectedState1()
       List<DatanodeDetails> healthyList = nodeManager.getNodes(
           NodeStatus.inServiceHealthy());
       assertEquals(1, healthyList.size(), "Expected one healthy node");
-      assertEquals(healthyNode.getUuid(), healthyList.get(0).getUuid(),
+      assertEquals(healthyNode.getID(), healthyList.get(0).getID(),
           "Healthy node is not the expected ID");
 
       assertEquals(2, nodeManager.getNodeCount(NodeStatus.inServiceStale()));
@@ -1246,15 +1245,15 @@ public void testScmClusterIsInExpectedState1()
       assertEquals(1, nodeManager.getNodeCount(NodeStatus.inServiceDead()));
 
       assertEquals(1, healthyList.size(), "Expected one healthy node");
-      assertEquals(healthyNode.getUuid(), healthyList.get(0).getUuid(),
+      assertEquals(healthyNode.getID(), healthyList.get(0).getID(),
           "Healthy node is not the expected ID");
 
       assertEquals(1, staleList.size(), "Expected one stale node");
-      assertEquals(staleNode.getUuid(), staleList.get(0).getUuid(),
+      assertEquals(staleNode.getID(), staleList.get(0).getID(),
           "Stale node is not the expected ID");
 
       assertEquals(1, deadList.size(), "Expected one dead node");
-      assertEquals(deadNode.getUuid(), deadList.get(0).getUuid(),
+      assertEquals(deadNode.getID(), deadList.get(0).getID(),
           "Dead node is not the expected ID");
       /**
        * Cluster State : let us heartbeat all the nodes and verify that we get
@@ -1818,7 +1817,7 @@ public void testScmRegisterNodeWith4LayerNetworkTopology()
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       for (int i = 0; i < nodeCount; i++) {
         DatanodeDetails node = createDatanodeDetails(
-            UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
+            DatanodeID.randomID(), hostNames[i], ipAddress[i], null);
         nodeManager.register(node, null, null);
       }
 
@@ -1861,7 +1860,7 @@ void testScmRegisterNodeWithNetworkTopology(boolean useHostname)
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       for (int i = 0; i < nodeCount; i++) {
         DatanodeDetails node = createDatanodeDetails(
-            UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
+            DatanodeID.randomID(), hostNames[i], ipAddress[i], null);
         nodeManager.register(node, null, null);
       }
 
@@ -1968,7 +1967,7 @@ void testGetNodesByAddress(boolean useHostname)
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       for (int i = 0; i < nodeCount; i++) {
         DatanodeDetails node = createDatanodeDetails(
-            UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
+            DatanodeID.randomID(), hostNames[i], ipAddress[i], null);
         nodeManager.register(node, null, null);
       }
       // test get node
@@ -2007,9 +2006,9 @@ public void testScmRegisterNodeWithUpdatedIpAndHostname()
 
     // use default IP address to resolve node
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      String nodeUuid = UUID.randomUUID().toString();
+      DatanodeID nodeId = DatanodeID.randomID();
       DatanodeDetails node = createDatanodeDetails(
-              nodeUuid, hostName, ipAddress, null);
+          nodeId, hostName, ipAddress, null);
       nodeManager.register(node, null, null);
 
       // verify network topology cluster has all the registered nodes
@@ -2033,7 +2032,7 @@ public void testScmRegisterNodeWithUpdatedIpAndHostname()
       String updatedIpAddress = "2.3.4.5";
       String updatedHostName = "host2";
       DatanodeDetails updatedNode = createDatanodeDetails(
-              nodeUuid, updatedHostName, updatedIpAddress, null);
+          nodeId, updatedHostName, updatedIpAddress, null);
       nodeManager.register(updatedNode, null, null);
 
       assertEquals(1, nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
@@ -2103,7 +2102,7 @@ scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf),
     verify(eventPublisher, times(0)).fireEvent(SCMEvents.REPLICATION_MANAGER_NOTIFY, datanode);
 
     DatanodeDetails reportedDatanode = MockDatanodeDetails.createDatanodeDetails(
-        datanode.getUuid());
+        datanode.getID());
     reportedDatanode.setPersistedOpState(newState);
 
     nodeManager.processHeartbeat(reportedDatanode);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
index 934cd21088..4577a6b39b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
@@ -60,6 +60,7 @@
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
@@ -393,7 +394,7 @@ private ClientTrustManager aClientTrustManager() throws IOException {
 
   private DatanodeDetails aDatanode() {
     return MockDatanodeDetails.createDatanodeDetails(
-        UUID.randomUUID().toString(), "localhost", "0.0.0.0",
+        DatanodeID.randomID(), "localhost", "0.0.0.0",
         "/default-rack");
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
index a53f5a926f..e67cbfac2f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
@@ -49,7 +49,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicLong;
@@ -62,6 +61,7 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
@@ -148,15 +148,15 @@ public class TestOmContainerLocationCache {
   private static XceiverClientGrpc mockDn2Protocol;
   private static XceiverClientGrpc mockDnEcProtocol;
   private static final DatanodeDetails DN1 =
-      MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID());
+      MockDatanodeDetails.createDatanodeDetails(DatanodeID.randomID());
   private static final DatanodeDetails DN2 =
-      MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID());
+      MockDatanodeDetails.createDatanodeDetails(DatanodeID.randomID());
   private static final DatanodeDetails DN3 =
-      MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID());
+      MockDatanodeDetails.createDatanodeDetails(DatanodeID.randomID());
   private static final DatanodeDetails DN4 =
-      MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID());
+      MockDatanodeDetails.createDatanodeDetails(DatanodeID.randomID());
   private static final DatanodeDetails DN5 =
-      MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID());
+      MockDatanodeDetails.createDatanodeDetails(DatanodeID.randomID());
   private static final AtomicLong CONTAINER_ID = new AtomicLong(1);
   private static OzoneClient ozoneClient;
 



