This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 1433d0c047 HDDS-1480. Prefer resolved datanode ip address over persisted ip address (#7495)
1433d0c047 is described below

commit 1433d0c047bc561607721c83eefd1a437d2e62b6
Author: Rishabh Patel <[email protected]>
AuthorDate: Tue Apr 1 14:08:03 2025 -0700

    HDDS-1480. Prefer resolved datanode ip address over persisted ip address (#7495)
---
 .../hadoop/hdds/protocol/DatanodeDetails.java      | 35 ++++++++-
 .../apache/hadoop/ozone/HddsDatanodeService.java   |  6 +-
 .../container/common/helpers/ContainerUtils.java   |  1 +
 .../common/helpers/TestContainerUtils.java         | 87 +++++++++++++++-------
 .../compose/ozone-om-prepare/docker-compose.yaml   |  3 +
 5 files changed, 99 insertions(+), 33 deletions(-)
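
In short: at startup the datanode now re-resolves its IP address from its hostname and prefers that value over the one persisted in the datanode ID file, keeping the persisted value only if resolution fails. A minimal standalone sketch of that preference logic (class and method names here are illustrative, not the actual Ozone API):

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    final class PreferResolvedIp {
      // Hypothetical helper mirroring the behaviour added by this commit:
      // prefer the address resolved from the hostname, and keep the persisted
      // one only when resolution fails or yields nothing usable.
      static String preferredIp(String hostname, String persistedIp) {
        try {
          String resolved = InetAddress.getByName(hostname).getHostAddress();
          return (resolved == null || resolved.isEmpty()) ? persistedIp : resolved;
        } catch (UnknownHostException e) {
          return persistedIp;
        }
      }

      public static void main(String[] args) {
        // "localhost" typically resolves to 127.0.0.1, overriding the stale value.
        System.out.println(preferredIp("localhost", "10.9.0.11"));
      }
    }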

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index efc8be0503..0935eb7275 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -27,6 +27,8 @@
 import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableSet;
 import com.google.protobuf.ByteString;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EnumSet;
@@ -34,6 +36,7 @@
 import java.util.Objects;
 import java.util.Set;
 import java.util.UUID;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.DatanodeVersion;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
@@ -175,13 +178,41 @@ public void setIpAddress(String ip) {
     this.ipAddress = StringWithByteString.valueOf(ip);
   }
 
+  /**
+   * Resolves and validates the IP address of the datanode based on its hostname.
+   * If the resolved IP address differs from the current IP address,
+   * it updates the IP address to the newly resolved value.
+   */
+  public void validateDatanodeIpAddress() {
+    final String oldIP = getIpAddress();
+    final String hostname = getHostName();
+    if (StringUtils.isBlank(hostname)) {
+      LOG.warn("Could not resolve IP address of datanode '{}'", this);
+      return;
+    }
+
+    try {
+      final String newIP = InetAddress.getByName(hostname).getHostAddress();
+      if (StringUtils.isBlank(newIP)) {
+        throw new UnknownHostException("New IP address is invalid: " + newIP);
+      }
+
+      if (!newIP.equals(oldIP)) {
+        LOG.info("Updating IP address of datanode {} to {}", this, newIP);
+        setIpAddress(newIP);
+      }
+    } catch (UnknownHostException e) {
+      LOG.warn("Could not resolve IP address of datanode '{}'", this, e);
+    }
+  }
+
   /**
    * Returns IP address of DataNode.
    *
    * @return IP address
    */
   public String getIpAddress() {
-    return ipAddress.getString();
+    return ipAddress == null ? null : ipAddress.getString();
   }
 
   /**
@@ -208,7 +239,7 @@ public void setHostName(String host) {
    * @return Hostname
    */
   public String getHostName() {
-    return hostName.getString();
+    return hostName == null ? null : hostName.getString();
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 5df8a23505..9b8572a870 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -34,7 +34,6 @@
 import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.IOException;
-import java.net.InetAddress;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -225,11 +224,10 @@ public String getNamespace() {
     HddsServerUtil.initializeMetrics(conf, "HddsDatanode");
     try {
       String hostname = HddsUtils.getHostName(conf);
-      String ip = InetAddress.getByName(hostname).getHostAddress();
       datanodeDetails = initializeDatanodeDetails();
       datanodeDetails.setHostName(hostname);
       serviceRuntimeInfo.setHostName(hostname);
-      datanodeDetails.setIpAddress(ip);
+      datanodeDetails.validateDatanodeIpAddress();
       datanodeDetails.setVersion(
           HddsVersionInfo.HDDS_VERSION_INFO.getVersion());
       datanodeDetails.setSetupTime(Time.now());
@@ -238,7 +236,7 @@ public String getNamespace() {
       TracingUtil.initTracing(
           "HddsDatanodeService." + datanodeDetails.getUuidString()
               .substring(0, 8), conf);
-      LOG.info("HddsDatanodeService host:{} ip:{}", hostname, ip);
+      LOG.info("HddsDatanodeService {}", datanodeDetails);
       // Authenticate Hdds Datanode service if security is enabled
       if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
         component = "dn-" + datanodeDetails.getUuidString();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index fd0f2196f5..e5dec0846b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -162,6 +162,7 @@ public static synchronized void writeDatanodeDetailsTo(
 
   /**
    * Read {@link DatanodeDetails} from a local ID file.
+   * Use {@link DatanodeDetails#validateDatanodeIpAddress()} to ensure that the IP address matches the hostname.
    *
    * @param path ID file local path
    * @return {@link DatanodeDetails}
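
A minimal sketch of the call sequence that javadoc suggests for callers loading persisted details (the wrapper class and its method name are illustrative):

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;

    final class ReadAndRevalidate {
      // Read the persisted details, then refresh the IP from the current hostname.
      static DatanodeDetails load(File idFile) throws IOException {
        DatanodeDetails details = ContainerUtils.readDatanodeDetailsFrom(idFile);
        details.validateDatanodeIpAddress();
        return details;
      }
    }
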
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java
index ca7427db82..a42fb2cf18 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java
@@ -25,10 +25,14 @@
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getDummyCommandRequestProto;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.when;
 
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.net.InetAddress;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
@@ -45,6 +49,7 @@
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
+import org.mockito.MockedStatic;
 
 /**
  * Test for {@link ContainerUtils}.
@@ -91,34 +96,50 @@ public void testTarName() throws IOException {
   public void testDatanodeIDPersistent(@TempDir File tempDir) throws Exception {
     // Generate IDs for testing
     DatanodeDetails id1 = randomDatanodeDetails();
-    id1.setPort(DatanodeDetails.newStandalonePort(1));
-    assertWriteRead(tempDir, id1);
-
-    // Add certificate serial  id.
-    id1.setCertSerialId(String.valueOf(RandomUtils.secure().randomLong()));
-    assertWriteRead(tempDir, id1);
-
-    // Read should return an empty value if file doesn't exist
-    File nonExistFile = new File(tempDir, "non_exist.id");
-    assertThrows(IOException.class,
-        () -> ContainerUtils.readDatanodeDetailsFrom(nonExistFile));
-
-    // Read should fail if the file is malformed
-    File malformedFile = new File(tempDir, "malformed.id");
-    createMalformedIDFile(malformedFile);
-    assertThrows(IOException.class,
-        () -> ContainerUtils.readDatanodeDetailsFrom(malformedFile));
-
-    // Test upgrade scenario - protobuf file instead of yaml
-    File protoFile = new File(tempDir, "valid-proto.id");
-    try (OutputStream out = Files.newOutputStream(protoFile.toPath())) {
-      HddsProtos.DatanodeDetailsProto proto = id1.getProtoBufMessage();
-      proto.writeTo(out);
+    try (MockedStatic<InetAddress> mockedStaticInetAddress = mockStatic(InetAddress.class)) {
+      InetAddress mockedInetAddress = mock(InetAddress.class);
+      mockedStaticInetAddress.when(() -> InetAddress.getByName(id1.getHostName()))
+          .thenReturn(mockedInetAddress);
+
+      // If persisted ip address is different from resolved ip address,
+      // DatanodeDetails should return the persisted ip address.
+      // Upon validation of the ip address, DatanodeDetails should return the resolved ip address.
+      when(mockedInetAddress.getHostAddress())
+          .thenReturn("127.0.0.1");
+      assertWriteReadWithChangedIpAddress(tempDir, id1);
+
+      when(mockedInetAddress.getHostAddress())
+          .thenReturn(id1.getIpAddress());
+
+      id1.setPort(DatanodeDetails.newStandalonePort(1));
+      assertWriteRead(tempDir, id1);
+
+      // Add certificate serial  id.
+      id1.setCertSerialId(String.valueOf(RandomUtils.secure().randomLong()));
+      assertWriteRead(tempDir, id1);
+
+      // Read should return an empty value if file doesn't exist
+      File nonExistFile = new File(tempDir, "non_exist.id");
+      assertThrows(IOException.class,
+          () -> ContainerUtils.readDatanodeDetailsFrom(nonExistFile));
+
+      // Read should fail if the file is malformed
+      File malformedFile = new File(tempDir, "malformed.id");
+      createMalformedIDFile(malformedFile);
+      assertThrows(IOException.class,
+          () -> ContainerUtils.readDatanodeDetailsFrom(malformedFile));
+
+      // Test upgrade scenario - protobuf file instead of yaml
+      File protoFile = new File(tempDir, "valid-proto.id");
+      try (OutputStream out = Files.newOutputStream(protoFile.toPath())) {
+        HddsProtos.DatanodeDetailsProto proto = id1.getProtoBufMessage();
+        proto.writeTo(out);
+      }
+      assertDetailsEquals(id1, ContainerUtils.readDatanodeDetailsFrom(protoFile));
+
+      id1.setInitialVersion(1);
+      assertWriteRead(tempDir, id1);
     }
-    assertDetailsEquals(id1, ContainerUtils.readDatanodeDetailsFrom(protoFile));
-
-    id1.setInitialVersion(1);
-    assertWriteRead(tempDir, id1);
   }
 
   private void assertWriteRead(@TempDir File tempDir,
@@ -133,6 +154,17 @@ private void assertWriteRead(@TempDir File tempDir,
     assertEquals(details.getCurrentVersion(), read.getCurrentVersion());
   }
 
+  private void assertWriteReadWithChangedIpAddress(@TempDir File tempDir,
+      DatanodeDetails details) throws IOException {
+    // Write a single ID to the file and read it out
+    File file = new File(tempDir, "valid-values.id");
+    ContainerUtils.writeDatanodeDetailsTo(details, file, conf);
+    DatanodeDetails read = ContainerUtils.readDatanodeDetailsFrom(file);
+    assertEquals(details.getIpAddress(), read.getIpAddress());
+    read.validateDatanodeIpAddress();
+    assertEquals("127.0.0.1", read.getIpAddress());
+  }
+
   private void createMalformedIDFile(File malformedFile)
       throws IOException {
     DatanodeDetails id = randomDatanodeDetails();
@@ -149,5 +181,6 @@ private static void assertDetailsEquals(DatanodeDetails expected,
     assertEquals(expected.getCertSerialId(), actual.getCertSerialId());
     assertEquals(expected.getProtoBufMessage(), actual.getProtoBufMessage());
     assertEquals(expected.getInitialVersion(), actual.getInitialVersion());
+    assertEquals(expected.getIpAddress(), actual.getIpAddress());
   }
 }
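
The test above stubs InetAddress.getByName via Mockito's MockedStatic; note that static mocking needs the inline mock maker (mockito-inline, or Mockito 5.x where it is the default). A standalone sketch of the same stubbing pattern, using a hypothetical hostname:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.mockStatic;
    import static org.mockito.Mockito.when;

    import java.net.InetAddress;
    import org.mockito.MockedStatic;

    final class InetAddressStubSketch {
      public static void main(String[] args) throws Exception {
        // Static stubbing is scoped to this try-with-resources block.
        try (MockedStatic<InetAddress> staticMock = mockStatic(InetAddress.class)) {
          InetAddress fake = mock(InetAddress.class);
          when(fake.getHostAddress()).thenReturn("127.0.0.1");
          staticMock.when(() -> InetAddress.getByName("dn1.example.org")).thenReturn(fake);
          // Prints 127.0.0.1, the stubbed address, instead of performing a real lookup.
          System.out.println(InetAddress.getByName("dn1.example.org").getHostAddress());
        }
      }
    }
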
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-compose.yaml
index 599de3954e..228f78c968 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-compose.yaml
@@ -49,6 +49,7 @@ x-om:
 services:
   dn1:
     <<: *datanode
+    hostname: dn1
     networks:
      net:
        ipv4_address: 10.9.0.11
@@ -57,6 +58,7 @@ services:
       - ../..:${OZONE_DIR}
   dn2:
     <<: *datanode
+    hostname: dn2
     networks:
      net:
        ipv4_address: 10.9.0.12
@@ -65,6 +67,7 @@ services:
       - ../..:${OZONE_DIR}
   dn3:
     <<: *datanode
+    hostname: dn3
     networks:
      net:
        ipv4_address: 10.9.0.13


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
