This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new a9d6def4142 branch-3.0: [fix](docker case) Fix cluster id check causes cloud be startup abnormality #53444 (#53593)
a9d6def4142 is described below

commit a9d6def4142b864ebd465dd448c9f527d094afbb
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Mon Jul 21 20:11:03 2025 +0800

    branch-3.0: [fix](docker case) Fix cluster id check causes cloud be startup abnormality #53444 (#53593)
    
    Cherry-picked from #53444
    
    Co-authored-by: deardeng <[email protected]>
---
 be/src/cloud/cloud_storage_engine.cpp              | 14 ++---
 docker/runtime/doris-compose/command.py            |  4 +-
 .../doris/regression/suite/SuiteCluster.groovy     |  8 +--
 .../cloud_p0/multi_cluster/test_auto_start.groovy  |  2 +-
 .../suites/cloud_p0/node_mgr/test_ms_api.groovy    | 66 +++++++++++-----------
 .../node_mgr/test_rename_compute_group.groovy      |  6 +-
 .../node_mgr/test_sql_mode_node_mgr.groovy         | 44 ++++++++++++---
 7 files changed, 87 insertions(+), 57 deletions(-)

diff --git a/be/src/cloud/cloud_storage_engine.cpp b/be/src/cloud/cloud_storage_engine.cpp
index 6cac3c7f8d0..90b3ca0bfa6 100644
--- a/be/src/cloud/cloud_storage_engine.cpp
+++ b/be/src/cloud/cloud_storage_engine.cpp
@@ -1175,7 +1175,7 @@ Status CloudStorageEngine::_check_all_root_path_cluster_id() {
             return Status::OK();
         } else {
             // If no cluster id file exists, use the configured cluster id
-            RETURN_IF_ERROR(set_cluster_id(_effective_cluster_id));
+            return set_cluster_id(_effective_cluster_id);
         }
     }
     if (cluster_ids.size() > 1) {
@@ -1184,12 +1184,12 @@ Status CloudStorageEngine::_check_all_root_path_cluster_id() {
                 "different cluster ids: {}",
                 fmt::join(cluster_ids, ", "));
     }
-    if (_effective_cluster_id != -1 && *cluster_ids.begin() != _effective_cluster_id) {
-        RETURN_NOT_OK_STATUS_WITH_WARN(
-                Status::Corruption("multiple cluster ids is not equal. config::cluster_id={}, "
-                                   "storage path cluster_id={}",
-                                   _effective_cluster_id, *cluster_ids.begin()),
-                "cluster id not equal");
+    if (_effective_cluster_id != -1 && !cluster_ids.empty() &&
+        *cluster_ids.begin() != _effective_cluster_id) {
+        return Status::Corruption(
+                "multiple cluster ids is not equal. config::cluster_id={}, "
+                "storage path cluster_id={}",
+                _effective_cluster_id, *cluster_ids.begin());
     }
     return Status::OK();
 }
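
For readers skimming the hunk above, here is a minimal, self-contained sketch (not part of the commit) of how the cluster id check behaves after this change; Status and check_cluster_id below are illustrative stand-ins for the real CloudStorageEngine code:

#include <cstdio>
#include <set>
#include <string>

// Illustrative stand-in for doris::Status.
struct Status {
    bool ok;
    std::string msg;
    static Status OK() { return {true, ""}; }
    static Status Corruption(std::string m) { return {false, std::move(m)}; }
};

// Sketch of the check flow visible in the hunks above:
//  - no cluster_id file on disk   -> adopt the configured id and start up
//  - conflicting ids across paths -> Corruption
//  - config id set but different  -> Corruption (plain return, no warn macro)
Status check_cluster_id(int effective_cluster_id, const std::set<int>& cluster_ids) {
    if (cluster_ids.empty()) {
        // Real code: return set_cluster_id(_effective_cluster_id);
        return Status::OK();
    }
    if (cluster_ids.size() > 1) {
        return Status::Corruption("root paths have different cluster ids");
    }
    if (effective_cluster_id != -1 && *cluster_ids.begin() != effective_cluster_id) {
        return Status::Corruption("config cluster_id does not match storage path cluster_id");
    }
    return Status::OK();
}

int main() {
    Status s = check_cluster_id(/*effective_cluster_id=*/123, {123});
    std::printf("ok=%d msg=%s\n", s.ok ? 1 : 0, s.msg.c_str());
    return 0;
}
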
diff --git a/docker/runtime/doris-compose/command.py b/docker/runtime/doris-compose/command.py
index 1194b3d1f15..57f302dcc45 100644
--- a/docker/runtime/doris-compose/command.py
+++ b/docker/runtime/doris-compose/command.py
@@ -483,14 +483,14 @@ class UpCommand(Command):
         if self._support_boolean_action():
             parser.add_argument(
                 "--be-cluster-id",
-                default=True,
+                default=False,
                 action=self._get_parser_bool_action(False),
                 help="Do not set BE cluster ID in conf. Default is False.")
         else:
             parser.add_argument(
                 "--no-be-cluster-id",
                 dest='be_cluster_id',
-                default=True,
+                default=False,
                 action=self._get_parser_bool_action(False),
                 help="Do not set BE cluster ID in conf. Default is False.")
 
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy
index 7e2bc4d681c..61758f9c5dd 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy
@@ -128,7 +128,7 @@ class ServerNode {
     static void fromCompose(ServerNode node, ListHeader header, int index, List<Object> fields) {
         node.index = index
         node.host = (String) fields.get(header.indexOf('IP'))
-        node.httpPort = (Integer) fields.get(header.indexOf('http_port'))
+        node.httpPort = (int) toLongOrDefault(fields.get(header.indexOf('http_port')), -1)
         node.alive = fields.get(header.indexOf('alive')) == 'true'
         node.path = (String) fields.get(header.indexOf('path'))
     }
@@ -179,8 +179,8 @@ class Frontend extends ServerNode {
     static Frontend fromCompose(ListHeader header, int index, List<Object> fields) {
         Frontend fe = new Frontend()
         ServerNode.fromCompose(fe, header, index, fields)
-        fe.queryPort = (Integer) fields.get(header.indexOf('query_port'))
-        fe.editLogPort = (Integer) fields.get(header.indexOf('edit_log_port'))
+        fe.queryPort = (int) toLongOrDefault(fields.get(header.indexOf('query_port')), -1)
+        fe.editLogPort = (int) toLongOrDefault(fields.get(header.indexOf('edit_log_port')), -1)
         fe.isMaster = fields.get(header.indexOf('is_master')) == 'true'
         return fe
     }
@@ -208,7 +208,7 @@ class Backend extends ServerNode {
     static Backend fromCompose(ListHeader header, int index, List<Object> fields) {
         Backend be = new Backend()
         ServerNode.fromCompose(be, header, index, fields)
-        be.heartbeatPort = (Integer) fields.get(header.indexOf('heartbeat_port'))
+        be.heartbeatPort = (int) toLongOrDefault(fields.get(header.indexOf('heartbeat_port')), -1)
         be.backendId = toLongOrDefault(fields.get(header.indexOf('backend_id')), -1L)
         be.tabletNum = (int) toLongOrDefault(fields.get(header.indexOf('tablet_num')), 0L)
 
diff --git a/regression-test/suites/cloud_p0/multi_cluster/test_auto_start.groovy b/regression-test/suites/cloud_p0/multi_cluster/test_auto_start.groovy
index a02481dcebf..ef49d7ee217 100644
--- a/regression-test/suites/cloud_p0/multi_cluster/test_auto_start.groovy
+++ b/regression-test/suites/cloud_p0/multi_cluster/test_auto_start.groovy
@@ -152,7 +152,7 @@ suite('test_auto_start_in_cloud', 'multi_cluster, docker') {
         // insert
    
         // cloud control
-        future2 = thread {
+        def future2 = thread {
             // check cluster "TO_RESUME"
             awaitUntil(5) {
                 tag = getCloudBeTagByName(clusterName)
diff --git a/regression-test/suites/cloud_p0/node_mgr/test_ms_api.groovy b/regression-test/suites/cloud_p0/node_mgr/test_ms_api.groovy
index b094b5354af..9cdc56e561a 100644
--- a/regression-test/suites/cloud_p0/node_mgr/test_ms_api.groovy
+++ b/regression-test/suites/cloud_p0/node_mgr/test_ms_api.groovy
@@ -429,7 +429,7 @@ suite('test_ms_api', 'p0, docker') {
                                     obj:[ak:"test-ak2", sk:"test-sk2", bucket:"test-bucket",
                                         prefix: "test-prefix", endpoint: "test-endpoint", region:"test-region", provider:"COS"]]
         jsonOutput = new JsonOutput()
-        addObjInfoBody = jsonOutput.toJson(add_obj_info_api_body)
+        def addObjInfoBody = jsonOutput.toJson(add_obj_info_api_body)
 
 
         add_obj_info_api.call(msHttpPort, addObjInfoBody) {
@@ -625,7 +625,7 @@ suite('test_ms_api', 'p0, docker') {
         def clusterName2 = "cluster_name2"
         def clusterId2 = "cluster_id2"
         def nodeList1 = [node1]
-        clusterMap1 = [cluster_name: "${clusterName2}", cluster_id:"${clusterId2}", type:"COMPUTE", nodes:nodeList1]
+        def clusterMap1 = [cluster_name: "${clusterName2}", cluster_id:"${clusterId2}", type:"COMPUTE", nodes:nodeList1]
         instance = [instance_id: "${instance_id}", cluster: clusterMap1]
         jsonOutput = new JsonOutput()
         def addNewComputeGroupBody = jsonOutput.toJson(instance)
@@ -1091,9 +1091,9 @@ suite('test_ms_api', 'p0, docker') {
             def compute_ip1 = "182.0.0.1" 
             def heartbeatPort = 9050
             def nodeMap = [cloud_unique_id: "${cloudUniqueId}", ip: "${compute_ip1}", heartbeat_port: "${heartbeatPort}"]
-            nodeList = [nodeMap]
-            clusterMap = [cluster_name: "${clusterName}", cluster_id:"${clusterId}", type:"COMPUTE", nodes:nodeList]
-            instance = [instance_id: "${instance_id}", cluster: clusterMap]
+            def nodeList = [nodeMap]
+            def clusterMap = [cluster_name: "${clusterName}", cluster_id:"${clusterId}", type:"COMPUTE", nodes:nodeList]
+            def instance = [instance_id: "${instance_id}", cluster: clusterMap]
             def addComputeGroupBody = jsonOutput.toJson(instance)
             add_cluster_api.call(msHttpPort, addComputeGroupBody) {
                 respCode, body ->
@@ -1104,18 +1104,18 @@ suite('test_ms_api', 'p0, docker') {
             
             // 1. Test that a node cannot be repeatedly added to multiple clusters
             // 1.1 compute node
-            node1 = [cloud_unique_id: "${cloudUniqueId}", ip : "${compute_ip1}", heartbeat_port: 9050]
-            add_nodes = [node1]
+            def node1 = [cloud_unique_id: "${cloudUniqueId}", ip : "${compute_ip1}", heartbeat_port: 9050]
+            def add_nodes = [node1]
             def otherClusterName = "compute_name_1_other"
             def otherClusterId = "compute_id_1_other"
-            add_nodes_cluster = [cluster_name: "${otherClusterName}", cluster_id: "${otherClusterId}", type: "COMPUTE", nodes: add_nodes]
+            def add_nodes_cluster = [cluster_name: "${otherClusterName}", cluster_id: "${otherClusterId}", type: "COMPUTE", nodes: add_nodes]
             def addNodeToOtherCluster = [instance_id: "${instance_id}", cluster: add_nodes_cluster]
             jsonOutput = new JsonOutput()
-            addNodeToOtherClusterbody = jsonOutput.toJson(addNodeToOtherCluster)
+            def addNodeToOtherClusterbody = jsonOutput.toJson(addNodeToOtherCluster)
             add_cluster_api.call(msHttpPort, addNodeToOtherClusterbody) {
                 respCode, body ->
                     log.info("add node to other compute group http cli result: 
${body} ${respCode}".toString())
-                    json = parseJson(body)
+                    def json = parseJson(body)
                     assertTrue(json.code.equalsIgnoreCase("ALREADY_EXISTED"))
                     assertTrue(json.msg.contains("compute node endpoint has 
been added"))
             }
@@ -1143,18 +1143,18 @@ suite('test_ms_api', 'p0, docker') {
                         assertTrue(json.code.equalsIgnoreCase("OK"))
             }
             
-            node_fe_other = [cloud_unique_id: "${cloudUniqueId}", ip : "${ip3}", edit_log_port: 8050, node_type:"FE_FOLLOWER"]
+            def node_fe_other = [cloud_unique_id: "${cloudUniqueId}", ip : "${ip3}", edit_log_port: 8050, node_type:"FE_FOLLOWER"]
             add_nodes = [node_fe_other]
             otherClusterName = "RESERVED_CLUSTER_ID_FOR_SQL_SERVER_OTHER"
             otherClusterId = "RESERVED_CLUSTER_ID_FOR_SQL_SERVER_OTHER"
             add_nodes_cluster = [cluster_name: "${otherClusterName}", cluster_id: "${otherClusterId}", type:"SQL", nodes: add_nodes]
             def addNodeToOtherClusterFE = [instance_id: "${instance_id}", cluster: add_nodes_cluster]
             jsonOutput = new JsonOutput()
-            addNodeToOtherFEClusterbody = jsonOutput.toJson(addNodeToOtherClusterFE)
+            def addNodeToOtherFEClusterbody = jsonOutput.toJson(addNodeToOtherClusterFE)
             add_cluster_api.call(msHttpPort, addNodeToOtherFEClusterbody) {
                 respCode, body ->
                     log.info("add node to other compute group http cli result: 
${body} ${respCode}".toString())
-                    json = parseJson(body)
+                    def json = parseJson(body)
                     assertTrue(json.code.equalsIgnoreCase("ALREADY_EXISTED"))
                     assertTrue(json.msg.contains("sql node endpoint has been 
added"))
             }
@@ -1174,7 +1174,7 @@ suite('test_ms_api', 'p0, docker') {
             add_cluster_api.call(msHttpPort, addNodesClusterFailedBody) {
                 respCode, body ->
                     log.info("add two observer fe failed test http cli result: 
${body} ${respCode}".toString())
-                    json = parseJson(body)
+                    def json = parseJson(body)
                     assertTrue(json.code.equalsIgnoreCase("INVALID_ARGUMENT"))
                     assertTrue(json.msg.contains("cluster is SQL type, but not 
set master and follower node, master count=0 follower count=0 so sql cluster 
can't get a Master node"))
             }
@@ -1198,7 +1198,7 @@ suite('test_ms_api', 'p0, docker') {
 
             add_node_api.call(msHttpPort, addSomeFENodesFailed) {
                 respCode, body ->
-                    json = parseJson(body)
+                    def json = parseJson(body)
                     // failed, due to two master node
                    // if force_change_to_multi_follower_mode == false, check type not changed, FE_MASTER
                    log.info("add some fe failed nodes http cli result: ${body} ${respCode} ${json}".toString())
@@ -1208,7 +1208,7 @@ suite('test_ms_api', 'p0, docker') {
 
             add_node_api.call(msHttpPort, addSomeFENodesSucc) {
                 respCode, body ->
-                    json = parseJson(body)
+                    def json = parseJson(body)
                     log.info("add some fe nodes http cli result: ${body} 
${respCode} ${json}".toString())
                     assertTrue(json.code.equalsIgnoreCase("OK"))
             }
@@ -1226,7 +1226,7 @@ suite('test_ms_api', 'p0, docker') {
 
             drop_node_api.call(msHttpPort, dropAllFeNodesFailedJson) {
                 respCode, body ->
-                    json = parseJson(body)
+                    def json = parseJson(body)
                     log.info("drop all fe nodes failed http cli result: 
${body} ${respCode} ${json}".toString())
                     assertTrue(json.code.equalsIgnoreCase("INTERNAL_ERROR"))
                     assertTrue(json.msg.contains("instance invalid, cant 
modify, plz check")) 
@@ -1235,7 +1235,7 @@ suite('test_ms_api', 'p0, docker') {
             get_instance_api.call(msHttpPort, instance_id) {
                 respCode, body ->
                     log.info("add Master-observer mode get instance resp: 
${body} ${respCode}".toString())
-                    json = parseJson(body)
+                    def json = parseJson(body)
                     assertTrue(json.code.equalsIgnoreCase("OK"))
                     def result = json.result
                     def FECluster = result.clusters.find {
@@ -1314,7 +1314,7 @@ suite('test_ms_api', 'p0, docker') {
         def feNodeMap2 = [cloud_unique_id: "${cloudUniqueId}", ip: "${ip2}", edit_log_port: "${edit_log_port}", node_type:"FE_OBSERVER"]
         def feNodeList = [feNodeMap1, feNodeMap2]
         def feClusterMap = [cluster_name: "${feClusterName}", cluster_id:"${feClusterId}", type:"SQL", nodes:feNodeList]
-        instance = [instance_id: "${instance_id}", cluster: feClusterMap]
+        def instance = [instance_id: "${instance_id}", cluster: feClusterMap]
         jsonOutput = new JsonOutput()
         def addSqlGroupBody = jsonOutput.toJson(instance) 
 
@@ -1327,7 +1327,7 @@ suite('test_ms_api', 'p0, docker') {
         get_instance_api.call(msHttpPort, instance_id) {
             respCode, body ->
                 log.info("add Master-observer mode get instance resp: ${body} 
${respCode}".toString())
-                json = parseJson(body)
+                def json = parseJson(body)
                 assertTrue(json.code.equalsIgnoreCase("OK"))
                 def result = json.result
                 def FECluster = result.clusters.find {
@@ -1354,7 +1354,7 @@ suite('test_ms_api', 'p0, docker') {
 
         get_cluster_api.call(msHttpPort, getClusterByNameBody) {
             respCode, body ->
-                json = parseJson(body)
+                def json = parseJson(body)
                 log.info("get FE cluster http cli result: ${body} ${respCode} 
${json}".toString())
                 assertTrue(json.code.equalsIgnoreCase("OK"))
                 def result = json.result
@@ -1369,7 +1369,7 @@ suite('test_ms_api', 'p0, docker') {
         get_instance_api.call(msHttpPort, instance_id) {
             respCode, body ->
                 log.info("after get cluster get instance resp: ${body} 
${respCode}".toString())
-                json = parseJson(body)
+                def json = parseJson(body)
                 assertTrue(json.code.equalsIgnoreCase("OK"))
                 def result = json.result
                 def FECluster = result.clusters.find {
@@ -1394,7 +1394,7 @@ suite('test_ms_api', 'p0, docker') {
 
         drop_node_api.call(msHttpPort, delFeObserverNodesBody) {
             respCode, body ->
-                json = parseJson(body)
+                def json = parseJson(body)
                 log.info("drop fe observer node http cli result: ${body} 
${respCode} ${json}".toString())
                 assertTrue(json.code.equalsIgnoreCase("INTERNAL_ERROR"))
                 assertTrue(json.msg.contains("drop fe node not in safe time, 
try later"))
@@ -1411,14 +1411,14 @@ suite('test_ms_api', 'p0, docker') {
         drop_cluster_api.call(msHttpPort, dropFeClusterBody) {
             respCode, body ->
                 log.info("drop fe cluster http cli result: ${body} 
${respCode}".toString())
-                json = parseJson(body)
+                def json = parseJson(body)
                 assertTrue(json.code.equalsIgnoreCase("NOT_FOUND"))
                 assertTrue(json.msg.contains("drop fe cluster not in safe 
time, try later"))
         }
 
         get_cluster_api.call(msHttpPort, getClusterByNameBody) {
             respCode, body ->
-                json = parseJson(body)
+                def json = parseJson(body)
                 log.info("get FE cluster after drop observer http cli result: 
${body} ${respCode} ${json}".toString())
                 assertTrue(json.code.equalsIgnoreCase("OK"))
                 def result = json.result
@@ -1442,14 +1442,14 @@ suite('test_ms_api', 'p0, docker') {
         // after inject, drop fe node, drop fe cluster all succ
         drop_node_api.call(msHttpPort, delFeObserverNodesBody) {
             respCode, body ->
-                json = parseJson(body)
+                def json = parseJson(body)
                 log.info("after inject drop fe observer nodeshttp cli result: 
${body} ${respCode} ${json}".toString())
                 assertTrue(json.code.equalsIgnoreCase("OK"))
         } 
 
         get_cluster_api.call(msHttpPort, getClusterByNameBody) {
             respCode, body ->
-                json = parseJson(body)
+                def json = parseJson(body)
                 log.info("get FE cluster after drop observer http cli result: 
${body} ${respCode} ${json}".toString())
                 assertTrue(json.code.equalsIgnoreCase("OK"))
                 def result = json.result
@@ -1462,14 +1462,14 @@ suite('test_ms_api', 'p0, docker') {
         drop_cluster_api.call(msHttpPort, dropFeClusterBody) {
             respCode, body ->
                 log.info("drop fe cluster http cli result: ${body} 
${respCode}".toString())
-                json = parseJson(body)
+                def json = parseJson(body)
                 assertTrue(json.code.equalsIgnoreCase("OK"))
         }
 
         get_instance_api.call(msHttpPort, instance_id) {
             respCode, body ->
                 log.info("after get cluster get instance resp: ${body} 
${respCode}".toString())
-                json = parseJson(body)
+                def json = parseJson(body)
                 assertTrue(json.code.equalsIgnoreCase("OK"))
                 def result = json.result
                 def FECluster = result.clusters.find {
@@ -1482,8 +1482,8 @@ suite('test_ms_api', 'p0, docker') {
         def compute_ip1 = "182.0.0.1" 
         def heartbeatPort = 9050
         def nodeMap = [cloud_unique_id: "${cloudUniqueId}", ip: "${compute_ip1}", heartbeat_port: "${heartbeatPort}"]
-        nodeList = [nodeMap]
-        clusterMap = [cluster_name: "${clusterName}", cluster_id:"${clusterId}", type:"COMPUTE", nodes:nodeList]
+        def nodeList = [nodeMap]
+        def clusterMap = [cluster_name: "${clusterName}", cluster_id:"${clusterId}", type:"COMPUTE", nodes:nodeList]
         instance = [instance_id: "${instance_id}", cluster: clusterMap]
         def addComputeGroupBody = jsonOutput.toJson(instance)
         add_cluster_api.call(msHttpPort, addComputeGroupBody) {
@@ -1496,7 +1496,7 @@ suite('test_ms_api', 'p0, docker') {
         get_instance_api.call(msHttpPort, instance_id) {
             respCode, body ->
                 log.info("after get cluster get instance resp: ${body} 
${respCode}".toString())
-                json = parseJson(body)
+                def json = parseJson(body)
                 assertTrue(json.code.equalsIgnoreCase("OK"))
         } 
 
@@ -1512,7 +1512,7 @@ suite('test_ms_api', 'p0, docker') {
             respCode, body ->
                 log.info("drop compute group http cli result: ${body} 
${respCode}".toString())
                 assertEquals(404, respCode)
-                json = parseJson(body)
+                def json = parseJson(body)
                 assertTrue(json.code.equalsIgnoreCase("NOT_FOUND"))
         }
     }
diff --git a/regression-test/suites/cloud_p0/node_mgr/test_rename_compute_group.groovy b/regression-test/suites/cloud_p0/node_mgr/test_rename_compute_group.groovy
index fd97b15eb51..a59ab605447 100644
--- a/regression-test/suites/cloud_p0/node_mgr/test_rename_compute_group.groovy
+++ b/regression-test/suites/cloud_p0/node_mgr/test_rename_compute_group.groovy
@@ -159,7 +159,7 @@ suite('test_rename_compute_group', 'docker, p0') {
 
         // tag = {"cloud_unique_id" : "compute_node_4", "compute_group_status" : "NORMAL", "private_endpoint" : "", "compute_group_name" : "newcluster1", "location" : "default", "public_endpoint" : "", "compute_group_id" : "newcluster1_id"}
         def toDropIP = cluster.getBeByIndex(4).host
-        toDropUniqueId = findToDropUniqueId.call(cloudClusterId, toDropIP, ms)
+        def toDropUniqueId = findToDropUniqueId.call(cloudClusterId, toDropIP, ms)
         drop_node(toDropUniqueId, toDropIP, 9050,
                 0, "", clusterName, cloudClusterId, ms)
         // check have empty compute group
@@ -168,7 +168,7 @@ suite('test_rename_compute_group', 'docker, p0') {
         get_instance_api(msHttpPort, "default_instance_id") {
             respCode, body ->
                 log.info("before drop node get instance resp: ${body} 
${respCode}".toString())
-                json = parseJson(body)
+                def json = parseJson(body)
                 assertTrue(json.code.equalsIgnoreCase("OK"))
                 def clusters = json.result.clusters
                 assertTrue(clusters.any { cluster -> 
@@ -197,7 +197,7 @@ suite('test_rename_compute_group', 'docker, p0') {
         get_instance_api(msHttpPort, "default_instance_id") {
             respCode, body ->
                 log.info("after drop node get instance resp: ${body} 
${respCode}".toString())
-                json = parseJson(body)
+                def json = parseJson(body)
                 assertTrue(json.code.equalsIgnoreCase("OK"))
                 def clusters = json.result.clusters
                 assertTrue(clusters.any { cluster -> 
diff --git a/regression-test/suites/cloud_p0/node_mgr/test_sql_mode_node_mgr.groovy b/regression-test/suites/cloud_p0/node_mgr/test_sql_mode_node_mgr.groovy
index 0d792950105..daf83add6d2 100644
--- a/regression-test/suites/cloud_p0/node_mgr/test_sql_mode_node_mgr.groovy
+++ b/regression-test/suites/cloud_p0/node_mgr/test_sql_mode_node_mgr.groovy
@@ -28,6 +28,8 @@ suite('test_sql_mode_node_mgr', 'multi_cluster,docker,p1') {
         new ClusterOptions(),
         new ClusterOptions(),
         new ClusterOptions(),
+        new ClusterOptions(),
+        new ClusterOptions(),
     ]
 
     for (options in clusterOptions) {
@@ -46,21 +48,49 @@ suite('test_sql_mode_node_mgr', 'multi_cluster,docker,p1') {
                 "heartbeat_interval_second=1",]
     }
 
+    // Private deployment
+    // fe cluster id generated by docker compose, be cluster id empty, ms endpoint taken from conf
     clusterOptions[0].sqlModeNodeMgr = true;
-    clusterOptions[0].beClusterId = true;
+    clusterOptions[0].beClusterId = false;
     clusterOptions[0].beMetaServiceEndpoint = true;
 
+    // fe cluster id generated by docker compose, be cluster id empty, ms endpoint pushed down by fe
     clusterOptions[1].sqlModeNodeMgr = true;
     clusterOptions[1].beClusterId = false;
     clusterOptions[1].beMetaServiceEndpoint = false;
 
-    clusterOptions[2].sqlModeNodeMgr = false;
+    // fe cluster id generated by docker compose, be cluster id generated by docker compose, ms endpoint pushed down by fe
+    clusterOptions[2].sqlModeNodeMgr = true;
     clusterOptions[2].beClusterId = true;
-    clusterOptions[2].beMetaServiceEndpoint = true;
-
-    clusterOptions[3].sqlModeNodeMgr = false;
-    clusterOptions[3].beClusterId = false;
-    clusterOptions[3].beMetaServiceEndpoint = false;
+    clusterOptions[2].beMetaServiceEndpoint = false;
+
+    // fe cluster id generated by docker compose, be cluster id generated by docker compose, ms endpoint taken from conf
+    clusterOptions[3].sqlModeNodeMgr = true;
+    clusterOptions[3].beClusterId = true;
+    clusterOptions[3].beMetaServiceEndpoint = true;
+
+    // saas
+    // fe cluster id randomly generated, be cluster id empty, ms endpoint taken from conf
+    clusterOptions[4].sqlModeNodeMgr = false;
+    clusterOptions[4].beClusterId = false;
+    clusterOptions[4].beMetaServiceEndpoint = true;
+    
+    // fe cluster id randomly generated, be cluster id empty, ms endpoint pushed down by fe
+    clusterOptions[5].sqlModeNodeMgr = false;
+    clusterOptions[5].beClusterId = false;
+    clusterOptions[5].beMetaServiceEndpoint = false;
+
+    /*
+    fe cluster id (randomly generated) does not equal be cluster id (1234567, set by docker compose)
+    clusterOptions[].sqlModeNodeMgr = false;
+    clusterOptions[].beClusterId = true;
+    clusterOptions[].beMetaServiceEndpoint = true;
+
+    fe cluster id (randomly generated) does not equal be cluster id (1234567, set by docker compose)
+    clusterOptions[].sqlModeNodeMgr = false;
+    clusterOptions[].beClusterId = true;
+    clusterOptions[].beMetaServiceEndpoint = false;
+    */
 
     def inject_to_ms_api = { msHttpPort, key, value, check_func ->
         httpTest {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
