This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 71562efcde9 branch-3.0: [fix](cloud) Fix modify cluster network not 
working in follower introduced by #52294 #53568 (#53704)
71562efcde9 is described below

commit 71562efcde997cf747ff26d00c46bf13fb48b7c0
Author: deardeng <[email protected]>
AuthorDate: Wed Jul 23 09:50:37 2025 +0800

    branch-3.0: [fix](cloud) Fix modify cluster network not working in follower 
introduced by #52294 #53568 (#53704)
    
    cherry-picked from #53568
---
 .../doris/cloud/catalog/CloudClusterChecker.java   |   8 +-
 .../multi_cluster/test_change_node_net.groovy      | 103 ++++++++++++---------
 2 files changed, 62 insertions(+), 49 deletions(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java
 
b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java
index ca81b165cb9..a87582e1d83 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java
@@ -209,7 +209,8 @@ public class CloudClusterChecker extends MasterDaemon {
         boolean netChanged = false;
         String remotePublicEndpoint = remoteClusterPb.getPublicEndpoint();
         String localPublicEndpoint = 
be.getTagMap().get(Tag.CLOUD_CLUSTER_PUBLIC_ENDPOINT);
-        if (!localPublicEndpoint.equals(remotePublicEndpoint)) {
+        if ((localPublicEndpoint == null && 
!Strings.isNullOrEmpty(remotePublicEndpoint))
+                || (localPublicEndpoint != null && 
!localPublicEndpoint.equals(remotePublicEndpoint))) {
             LOG.info("be {} has changed public_endpoint from {} to {}",
                     be, localPublicEndpoint, remotePublicEndpoint);
             be.getTagMap().put(Tag.CLOUD_CLUSTER_PUBLIC_ENDPOINT, 
remotePublicEndpoint);
@@ -218,7 +219,8 @@ public class CloudClusterChecker extends MasterDaemon {
 
         String remotePrivateEndpoint = remoteClusterPb.getPrivateEndpoint();
         String localPrivateEndpoint = 
be.getTagMap().get(Tag.CLOUD_CLUSTER_PRIVATE_ENDPOINT);
-        if (!localPrivateEndpoint.equals(remotePrivateEndpoint)) {
+        if (localPrivateEndpoint == null && 
!Strings.isNullOrEmpty(remotePrivateEndpoint)
+                || (localPrivateEndpoint != null && 
!localPrivateEndpoint.equals(remotePrivateEndpoint))) {
             LOG.info("be {} has changed private_endpoint from {} to {}",
                     be, localPrivateEndpoint, remotePrivateEndpoint);
             be.getTagMap().put(Tag.CLOUD_CLUSTER_PRIVATE_ENDPOINT, 
remotePrivateEndpoint);
@@ -226,7 +228,7 @@ public class CloudClusterChecker extends MasterDaemon {
         }
         if (netChanged) {
             // edit log
-            Env.getCurrentEnv().getEditLog().logBackendStateChange(be);
+            Env.getCurrentEnv().getEditLog().logModifyBackend(be);
         }
     }
 
diff --git 
a/regression-test/suites/cloud_p0/multi_cluster/test_change_node_net.groovy 
b/regression-test/suites/cloud_p0/multi_cluster/test_change_node_net.groovy
index 3cc6991fa20..991e73cd9c0 100644
--- a/regression-test/suites/cloud_p0/multi_cluster/test_change_node_net.groovy
+++ b/regression-test/suites/cloud_p0/multi_cluster/test_change_node_net.groovy
@@ -20,11 +20,19 @@ import groovy.json.JsonSlurper
 import groovy.json.JsonOutput
 
 suite('test_change_node_net', 'multi_cluster,docker') {
-    def options = new ClusterOptions()
-    options.feConfigs += [
-        'cloud_cluster_check_interval_second=5',
+    def clusterOptions = [
+        new ClusterOptions(),
+        new ClusterOptions(),
     ]
-    options.cloudMode = true
+    for (options in clusterOptions) {
+        options.feConfigs += [
+            'cloud_cluster_check_interval_second=5',
+        ]
+        options.feNum = 3
+        options.cloudMode = true
+    }
+    clusterOptions[0].connectToFollower = true
+    clusterOptions[1].connectToFollower = false
 
     def token = "greedisgood9999"
     def update_cluster_endpoint_api = { msHttpPort, request_body, check_func ->
@@ -37,6 +45,8 @@ suite('test_change_node_net', 'multi_cluster,docker') {
     }
 
     def showClusterBackends = { clusterName ->
+        // The new optimizer has a bug, all queries are forwarded to the 
master. Including show backend
+        sql """set forward_to_master=false"""
         def bes = sql_return_maparray "show backends"
         def clusterBes = bes.findAll { be -> be.Tag.contains(clusterName) }
         def backendMap = clusterBes.collectEntries { be ->
@@ -45,58 +55,59 @@ suite('test_change_node_net', 'multi_cluster,docker') {
         logger.info("Collected BackendId and Tag map: {}", backendMap)
         backendMap
     }
+    for (options in clusterOptions) {
+        docker(options) {
+            def ms = cluster.getAllMetaservices().get(0)
+            def msHttpPort = ms.host + ":" + ms.httpPort
+            logger.info("ms addr={}, port={}, ms endpoint={}", ms.host, 
ms.httpPort, msHttpPort)
 
-    docker(options) {
-        def ms = cluster.getAllMetaservices().get(0)
-        def msHttpPort = ms.host + ":" + ms.httpPort
-        logger.info("ms addr={}, port={}, ms endpoint={}", ms.host, 
ms.httpPort, msHttpPort)
-
-        def clusterName = "newcluster1"
-        // 添加一个新的cluster add_new_cluster
-        cluster.addBackend(3, clusterName)
-       
-        def result = sql """show clusters"""
-        logger.info("show cluster1 : {}", result)
+            def clusterName = "newcluster1"
+            // 添加一个新的cluster add_new_cluster
+            cluster.addBackend(3, clusterName)
+        
+            def result = sql """show clusters"""
+            logger.info("show cluster1 : {}", result)
 
-        def beforeBackendMap = showClusterBackends.call(clusterName)
+            def beforeBackendMap = showClusterBackends.call(clusterName)
 
-        def tag = beforeBackendMap.entrySet().iterator().next().Value
-        assertNotNull(tag)
-        def jsonSlurper = new JsonSlurper()
-        def jsonObject = jsonSlurper.parseText(tag)
-        def cloudUniqueId = jsonObject.cloud_unique_id
-        def clusterId = jsonObject.compute_group_id
-        def before_public_endpoint = jsonObject.public_endpoint
-        def after_private_endpoint = jsonObject.private_endpoint
+            def tag = beforeBackendMap.entrySet().iterator().next().Value
+            assertNotNull(tag)
+            def jsonSlurper = new JsonSlurper()
+            def jsonObject = jsonSlurper.parseText(tag)
+            def cloudUniqueId = jsonObject.cloud_unique_id
+            def clusterId = jsonObject.compute_group_id
+            def before_public_endpoint = jsonObject.public_endpoint
+            def after_private_endpoint = jsonObject.private_endpoint
 
 
-        def changeCluster = [cluster_id: "${clusterId}", public_endpoint: 
"test_public_endpoint", private_endpoint: "test_private_endpoint"]
-        def updateClusterEndpointBody = [cloud_unique_id: "${cloudUniqueId}", 
cluster: changeCluster]
-        def jsonOutput = new JsonOutput()
-        def updateClusterEndpointJson = 
jsonOutput.toJson(updateClusterEndpointBody)
+            def changeCluster = [cluster_id: "${clusterId}", public_endpoint: 
"test_public_endpoint", private_endpoint: "test_private_endpoint"]
+            def updateClusterEndpointBody = [cloud_unique_id: 
"${cloudUniqueId}", cluster: changeCluster]
+            def jsonOutput = new JsonOutput()
+            def updateClusterEndpointJson = 
jsonOutput.toJson(updateClusterEndpointBody)
 
-        update_cluster_endpoint_api.call(msHttpPort, 
updateClusterEndpointJson) {
-            respCode, body ->
-                def json = parseJson(body)
-                log.info("update cluster endpoint result: ${body} ${respCode} 
${json}".toString())
-        }
+            update_cluster_endpoint_api.call(msHttpPort, 
updateClusterEndpointJson) {
+                respCode, body ->
+                    def json = parseJson(body)
+                    log.info("update cluster endpoint result: ${body} 
${respCode} ${json}".toString())
+            }
 
-        def futrue = thread {
-            // check 15s
-            for (def i = 0; i < 15; i++) {
-                def afterBackendMap = showClusterBackends.call(clusterName)
-                if (i > 5) {
-                    // cloud_cluster_check_interval_second = 5
-                    afterBackendMap.each { key, value ->
-                        assert value.contains("test_public_endpoint") : "Value 
for key ${key} does not contain 'test_public_endpoint'"
-                        assert value.contains("test_private_endpoint") : 
"Value for key ${key} does not contain 'test_private_endpoint'"
+            def futrue = thread {
+                // check 15s
+                for (def i = 0; i < 15; i++) {
+                    def afterBackendMap = showClusterBackends.call(clusterName)
+                    if (i > 5) {
+                        // cloud_cluster_check_interval_second = 5
+                        afterBackendMap.each { key, value ->
+                            assert value.contains("test_public_endpoint") : 
"Value for key ${key} does not contain 'test_public_endpoint'"
+                            assert value.contains("test_private_endpoint") : 
"Value for key ${key} does not contain 'test_private_endpoint'"
+                        }
                     }
+                    // check beid not changed
+                    assertEquals(afterBackendMap.keySet(), 
beforeBackendMap.keySet())
+                    sleep(1 * 1000)
                 }
-                // check beid not changed
-                assertEquals(afterBackendMap.keySet(), 
beforeBackendMap.keySet())
-                sleep(1 * 1000)
             }
+            futrue.get()
         }
-        futrue.get()
     }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to