This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new ce65c678496 HDDS-14048. Optimise NodesOutOfSpace metric logic (#9418)
ce65c678496 is described below

commit ce65c67849642d96544f80e7230678d3ad0937bb
Author: Sarveksha Yeshavantha Raju <[email protected]>
AuthorDate: Fri Dec 5 13:23:25 2025 +0530

    HDDS-14048. Optimise NodesOutOfSpace metric logic (#9418)
---
 .../hadoop/hdds/scm/node/SCMNodeManager.java       | 26 +++++++++-------------
 .../hadoop/hdds/scm/node/SCMNodeMetrics.java       |  5 ++---
 2 files changed, 12 insertions(+), 19 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 90fccad9e48..15b545986df 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -55,7 +55,6 @@
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeID;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
@@ -70,7 +69,6 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.VersionInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -1387,29 +1385,25 @@ private void nodeOutOfSpaceStatistics(Map<String, String> nodeStatics) {
 
     int nodeOutOfSpaceCount = (int) allNodes.parallelStream()
         .filter(dn -> !hasEnoughSpace(dn, minRatisVolumeSizeBytes, containerSize, conf)
-            && !hasContainerWithSpace(dn, blockSize, containerSize))
+            && !hasEnoughCommittedVolumeSpace(dn, blockSize))
         .count();
 
     nodeStatics.put("NodesOutOfSpace", String.valueOf(nodeOutOfSpaceCount));
   }
   
   /**
-   * Check if a datanode has any OPEN container with enough space to accept new blocks.
+   * Check if any volume in the datanode has committed space >= blockSize.
+   *
+   * @return true if any volume has committed space >= blockSize, false otherwise
    */
-  private boolean hasContainerWithSpace(DatanodeInfo dnInfo, long blockSize, long containerSize) {
-    try {
-      Set<ContainerID> containers = getContainers(dnInfo);
-      for (ContainerID containerID : containers) {
-        ContainerInfo containerInfo = scmContext.getScm().getContainerManager().getContainer(containerID);
-        
-        if (containerInfo.getState() == HddsProtos.LifeCycleState.OPEN &&
-            containerInfo.getUsedBytes() + blockSize <= containerSize) {
-          return true;
-        }
+  private boolean hasEnoughCommittedVolumeSpace(DatanodeInfo dnInfo, long blockSize) {
+    for (StorageReportProto reportProto : dnInfo.getStorageReports()) {
+      if (reportProto.getCommitted() >= blockSize) {
+        return true;
       }
-    } catch (Exception e) {
-      LOG.debug("Error checking containers for datanode {}: {}", dnInfo.getID(), e.getMessage());
     }
+    LOG.debug("Datanode {} has no volumes with committed space >= {} bytes",
+        dnInfo.getID(), blockSize);
     return false;
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
index 133727490eb..b43256e92b9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
@@ -160,9 +160,8 @@ public void getMetrics(MetricsCollector collector, boolean all) {
     String nodesOutOfSpace = nodeStatistics.get("NodesOutOfSpace");
     if (nodesOutOfSpace != null) {
       metrics.addGauge(
-          Interns.info("NodesOutOfSpace", "Number of datanodes that cannot accept new writes because " +
-              "they lack either sufficient metadata space, data volume space for creating new containers " +
-              "or free space in existing open containers."),
+          Interns.info("NodesOutOfSpace", "Number of datanodes that are out of space because " +
+              "they cannot allocate new containers or write to existing ones."),
           Integer.parseInt(nodesOutOfSpace));
     }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to