This is an automated email from the ASF dual-hosted git repository.
morrysnow pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-3.1 by this push:
new e5137caf4ff branch-3.1: [fix](regression) fix multi-az failover regression case #53805 #54709 (#54710)
e5137caf4ff is described below
commit e5137caf4ff320659d15ab2b398e684cbf78bc15
Author: Luwei <[email protected]>
AuthorDate: Thu Aug 14 11:57:12 2025 +0800
branch-3.1: [fix](regression) fix multi-az failover regression case #53805 #54709 (#54710)
pick 3.0 #53805 #54709
---
.../data/cloud/multi_cluster/vcluster/vcluster.out | Bin 129 -> 0 bytes
.../use_default_vcg_read_write.out | Bin 197 -> 299 bytes
.../use_vcg_read_write_routine_load.out | Bin 0 -> 3581 bytes
.../cloud/multi_cluster/vcluster/vcluster.groovy | 173 ---------------------
.../default_vcg_auto_failover.groovy | 8 +-
.../failover_standby_disable_compaction.groovy | 28 +++-
.../standby_disable_compaction.groovy | 3 +-
.../use_default_vcg_read_write.groovy | 11 +-
.../use_vcg_read_write_unhealthy_node_50.groovy | 18 ++-
9 files changed, 57 insertions(+), 184 deletions(-)
diff --git a/regression-test/data/cloud/multi_cluster/vcluster/vcluster.out b/regression-test/data/cloud/multi_cluster/vcluster/vcluster.out
deleted file mode 100644
index 369f2e3af78..00000000000
Binary files a/regression-test/data/cloud/multi_cluster/vcluster/vcluster.out and /dev/null differ
diff --git a/regression-test/data/cloud_p0/multi_cluster/virtual_compute_group/use_default_vcg_read_write.out b/regression-test/data/cloud_p0/multi_cluster/virtual_compute_group/use_default_vcg_read_write.out
index 3900f080bd0..0abde7ca45b 100644
Binary files a/regression-test/data/cloud_p0/multi_cluster/virtual_compute_group/use_default_vcg_read_write.out and b/regression-test/data/cloud_p0/multi_cluster/virtual_compute_group/use_default_vcg_read_write.out differ
diff --git a/regression-test/data/cloud_p0/multi_cluster/virtual_compute_group/use_vcg_read_write_routine_load.out b/regression-test/data/cloud_p0/multi_cluster/virtual_compute_group/use_vcg_read_write_routine_load.out
new file mode 100644
index 00000000000..6006a85a705
Binary files /dev/null and b/regression-test/data/cloud_p0/multi_cluster/virtual_compute_group/use_vcg_read_write_routine_load.out differ
diff --git a/regression-test/suites/cloud/multi_cluster/vcluster/vcluster.groovy b/regression-test/suites/cloud/multi_cluster/vcluster/vcluster.groovy
deleted file mode 100644
index 390e65d4b39..00000000000
--- a/regression-test/suites/cloud/multi_cluster/vcluster/vcluster.groovy
+++ /dev/null
@@ -1,173 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-import groovy.json.JsonOutput
-
-suite("vcluster") {
- def token = context.config.metaServiceToken
- def instance_id = context.config.multiClusterInstance
- String tableName = "test_all_vcluster"
-
- List<String> ipList = new ArrayList<>()
- List<String> hbPortList = new ArrayList<>()
- List<String> httpPortList = new ArrayList<>()
- List<String> beUniqueIdList = new ArrayList<>()
- List<String> bePortList = new ArrayList<>()
-
- String[] bes = context.config.multiClusterBes.split(',');
- println("the value is " + context.config.multiClusterBes);
- for(String values : bes) {
- println("the value is " + values);
- String[] beInfo = values.split(':');
- ipList.add(beInfo[0]);
- hbPortList.add(beInfo[1]);
- httpPortList.add(beInfo[2]);
- beUniqueIdList.add(beInfo[3]);
- }
-
- println("the ip is " + ipList);
- println("the heartbeat port is " + hbPortList);
- println("the http port is " + httpPortList);
- println("the be unique id is " + beUniqueIdList);
-
- for (unique_id : beUniqueIdList) {
- resp = get_cluster.call(unique_id);
- for (cluster : resp) {
- log.info("lw test drop : ${cluster.type} ".toString())
- if (cluster.type == "COMPUTE" || cluster.type == "VIRTUAL") {
- drop_cluster.call(cluster.cluster_name, cluster.cluster_id);
- }
- }
- }
- wait_cluster_change()
-
- List<List<Object>> result = sql "show clusters"
- assertTrue(result.size() == 0);
-
- add_cluster.call(beUniqueIdList[0], ipList[0], hbPortList[0], "regression_cluster_name0", "regression_cluster_id0");
- add_cluster.call(beUniqueIdList[1], ipList[1], hbPortList[1], "regression_cluster_name1", "regression_cluster_id1");
- add_vcluster.call("regression_vcluster_name0", "regression_vcluster_id0", "regression_cluster_name1", "regression_cluster_name0");
- wait_cluster_change()
-
- result = sql "show clusters"
- assertTrue(result.size() == 3);
-
- for (row : result) {
- println row
- }
-
- try {
- sql """ use @regression_vcluster_name0 """
- sql """ drop table if exists ${tableName} """
-
- sql """
- CREATE TABLE IF NOT EXISTS ${tableName} (
- `k1` int(11) NULL,
- `k2` tinyint(4) NULL,
- `k3` smallint(6) NULL,
- `k4` bigint(20) NULL,
- `k5` largeint(40) NULL,
- `k6` float NULL,
- `k7` double NULL,
- `k8` decimal(9, 0) NULL,
- `k9` char(10) NULL,
- `k10` varchar(1024) NULL,
- `k11` text NULL,
- `k12` date NULL,
- `k13` datetime NULL
- ) ENGINE=OLAP
- DISTRIBUTED BY HASH(`k1`) BUCKETS 3
- ;
- """
-
- sql """ set enable_profile = true """
-
- before_cluster0_load_rows = get_be_metric(ipList[0], httpPortList[0], "load_rows");
- log.info("before_cluster0_load_rows : ${before_cluster0_load_rows}".toString())
- before_cluster0_flush = get_be_metric(ipList[0], httpPortList[0], "memtable_flush_total");
- log.info("before_cluster0_flush : ${before_cluster0_flush}".toString())
-
- before_cluster1_load_rows = get_be_metric(ipList[1], httpPortList[1], "load_rows");
- log.info("before_cluster1_load_rows : ${before_cluster1_load_rows}".toString())
- before_cluster1_flush = get_be_metric(ipList[1], httpPortList[1], "memtable_flush_total");
- log.info("before_cluster1_flush : ${before_cluster1_flush}".toString())
-
- txnId = -1;
- streamLoad {
- table "${tableName}"
-
- set 'column_separator', ','
- set 'cloud_cluster', 'regression_vcluster_name0'
-
- file 'all_types.csv'
- time 10000 // limit inflight 10s
-
- check { loadResult, exception, startTime, endTime ->
- if (exception != null) {
- throw exception
- }
- log.info("Stream load result: ${loadResult}".toString())
- def json = parseJson(loadResult)
- assertEquals("success", json.Status.toLowerCase())
- assertEquals(20, json.NumberTotalRows)
- assertEquals(0, json.NumberFilteredRows)
- txnId = json.TxnId
- }
- }
- sql "sync"
- order_qt_all11 "SELECT count(*) FROM ${tableName}" // 20
- order_qt_all12 "SELECT count(*) FROM ${tableName} where k1 <= 10" // 11
-
- after_cluster0_load_rows = get_be_metric(ipList[0], httpPortList[0], "load_rows");
- log.info("after_cluster0_load_rows : ${after_cluster0_load_rows}".toString())
- after_cluster0_flush = get_be_metric(ipList[0], httpPortList[0], "memtable_flush_total");
- log.info("after_cluster0_flush : ${after_cluster0_flush}".toString())
-
- after_cluster1_load_rows = get_be_metric(ipList[1], httpPortList[1], "load_rows");
- log.info("after_cluster1_load_rows : ${after_cluster1_load_rows}".toString())
- after_cluster1_flush = get_be_metric(ipList[1], httpPortList[1], "memtable_flush_total");
- log.info("after_cluster1_flush : ${after_cluster1_flush}".toString())
-
- assertTrue(before_cluster0_load_rows == after_cluster0_load_rows)
- assertTrue(before_cluster0_flush == after_cluster0_flush)
-
- assertTrue(before_cluster1_load_rows < after_cluster1_load_rows)
- assertTrue(before_cluster1_flush < after_cluster1_flush)
-
- // fill bePortList
- for (int i = 0; i < ipList.size(); ++i) {
- result = sql """show backends"""
- for (row : result) {
- println row
- println row[2]
- if (ipList[i] == row[1] && hbPortList[i] == row[2]) {
- bePortList.add(row[5]);
- }
- }
- }
-
- set = [ipList[1] + ":" +bePortList[1]] as Set
- sql """ select count(k2) AS theCount, k3 from test_all_vcluster group
by k3 order by theCount limit 1 """
- checkProfileNew.call(set)
- //checkProfileNew1.call("e329bc41f42c49f5-9326cb8429dacc06")
- } finally {
- sql """ drop table if exists ${tableName} """
- }
-}
diff --git a/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/default_vcg_auto_failover.groovy b/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/default_vcg_auto_failover.groovy
index cf9934469c6..06a52a31b8b 100644
--- a/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/default_vcg_auto_failover.groovy
+++ b/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/default_vcg_auto_failover.groovy
@@ -130,7 +130,13 @@ suite('default_vcg_auto_failover', 'multi_cluster,docker') {
def reconnectFe = {
sleep(10000)
logger.info("Reconnecting to a new frontend...")
- def newFe = cluster.getMasterFe()
+ def newFe
+ if (options.connectToFollower) {
+ newFe = cluster.getOneFollowerFe()
+ } else {
+ newFe = cluster.getMasterFe()
+ }
+
if (newFe) {
logger.info("New frontend found:
${newFe.host}:${newFe.httpPort}")
def url = String.format(
diff --git a/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/failover_standby_disable_compaction.groovy b/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/failover_standby_disable_compaction.groovy
index a92d5b7a920..1404b6c1e2e 100644
--- a/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/failover_standby_disable_compaction.groovy
+++ b/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/failover_standby_disable_compaction.groovy
@@ -153,6 +153,31 @@ suite('failover_standby_disable_compaction', 'multi_cluster,docker') {
"""
cluster.stopBackends(4, 5)
+
+ def reconnectFe = {
+ sleep(10000)
+ logger.info("Reconnecting to a new frontend...")
+ def newFe
+ if (options.connectToFollower) {
+ newFe = cluster.getOneFollowerFe()
+ } else {
+ newFe = cluster.getMasterFe()
+ }
+ if (newFe) {
+ logger.info("New frontend found:
${newFe.host}:${newFe.httpPort}")
+ def url = String.format(
+ "jdbc:mysql://%s:%s/?useLocalSessionState=true&allowLoadLocalInfile=false",
+ newFe.host, newFe.queryPort)
+ url = context.config.buildUrlWithDb(url, context.dbName)
+ context.connectTo(url, context.config.jdbcUser, context.config.jdbcPassword)
+ logger.info("Successfully reconnected to the new frontend")
+ } else {
+ logger.error("No new frontend found to reconnect")
+ }
+ }
+
+ reconnectFe()
+
sleep(30000)
sql """ insert into ${tbl} (k1, k2) values (1, "10") """
sql """ SELECT count(*) FROM ${tableName} """
@@ -274,9 +299,8 @@ suite('failover_standby_disable_compaction', 'multi_cluster,docker') {
assertTrue(before_cluster2_be0_compaction < after_cluster2_be0_compaction || before_cluster2_be1_compaction < after_cluster2_be1_compaction )
- def set = [cluster1Ips[0] + ":" + "8060", cluster1Ips[1] + ":" + "8060"] as Set
+ def addrSet = [cluster2Ips[0] + ":" + "8060", cluster2Ips[1] + ":" + "8060"] as Set
sql """ select count(k2) AS theCount, k3 from test_all_vcluster group by k3 order by theCount limit 1 """
- checkProfileNew.call(set, false)
if (options.connectToFollower) {
checkProfileNew.call(cluster.getOneFollowerFe(), addrSet)
} else {
diff --git a/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/standby_disable_compaction.groovy b/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/standby_disable_compaction.groovy
index dc39526bfe0..2393e27ef0e 100644
--- a/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/standby_disable_compaction.groovy
+++ b/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/standby_disable_compaction.groovy
@@ -261,9 +261,8 @@ suite('standby_disable_compaction', 'multi_cluster,docker') {
assertTrue(before_cluster2_be0_compaction < after_cluster2_be0_compaction || before_cluster2_be1_compaction < after_cluster2_be1_compaction )
- def set = [cluster1Ips[0] + ":" + "8060", cluster1Ips[1] + ":" + "8060"] as Set
+ def addrSet = [cluster2Ips[0] + ":" + "8060", cluster2Ips[1] + ":" + "8060"] as Set
sql """ select count(k2) AS theCount, k3 from test_all_vcluster group by k3 order by theCount limit 1 """
- checkProfileNew.call(set, false)
if (options.connectToFollower) {
checkProfileNew.call(cluster.getOneFollowerFe(), addrSet)
} else {
diff --git a/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/use_default_vcg_read_write.groovy b/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/use_default_vcg_read_write.groovy
index 707ea42ce89..77080ee3711 100644
--- a/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/use_default_vcg_read_write.groovy
+++ b/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/use_default_vcg_read_write.groovy
@@ -129,7 +129,12 @@ suite('use_default_vcg_read_write', 'multi_cluster,docker') {
def reconnectFe = {
sleep(10000)
logger.info("Reconnecting to a new frontend...")
- def newFe = cluster.getMasterFe()
+ def newFe
+ if (options.connectToFollower) {
+ newFe = cluster.getOneFollowerFe()
+ } else {
+ newFe = cluster.getMasterFe()
+ }
if (newFe) {
logger.info("New frontend found:
${newFe.host}:${newFe.httpPort}")
def url = String.format(
@@ -428,7 +433,7 @@ suite('use_default_vcg_read_write', 'multi_cluster,docker') {
checkProfileNew.call(cluster.getMasterFe(), addrSet)
}
- sleep(16000)
+ sleep(21000)
sql """
insert into ${tbl} (k1, k2) values (1, "10");
@@ -439,7 +444,7 @@ suite('use_default_vcg_read_write', 'multi_cluster,docker') {
log.info("show compute group {}", showComputeGroup)
vcgInShow = showComputeGroup.find { it.Name == normalVclusterName }
assertNotNull(vcgInShow)
- assertTrue(vcgInShow.Policy.contains('"activeComputeGroup":"newcluster1","standbyComputeGroup":"newcluster2"'))
+ assertTrue(vcgInShow.Policy.contains('"activeComputeGroup":"newcluster2","standbyComputeGroup":"newcluster1"'))
}
// connect to follower, run again
options.connectToFollower = true
diff --git a/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/use_vcg_read_write_unhealthy_node_50.groovy b/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/use_vcg_read_write_unhealthy_node_50.groovy
index e1f2923814e..40a875f64b8 100644
--- a/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/use_vcg_read_write_unhealthy_node_50.groovy
+++ b/regression-test/suites/cloud_p0/multi_cluster/virtual_compute_group/use_vcg_read_write_unhealthy_node_50.groovy
@@ -243,7 +243,11 @@ suite('use_vcg_read_write_unhealthy_node_50', 'multi_cluster,docker') {
def addrSet = [cluster1Ips[0] + ":" + "8060", cluster1Ips[1] + ":" + "8060"] as Set
sql """ select count(k2) AS theCount, k3 from test_all_vcluster group by k3 order by theCount limit 1 """
- checkProfileNew.call(addrSet)
+ if (options.connectToFollower) {
+ checkProfileNew.call(cluster.getOneFollowerFe(), addrSet)
+ } else {
+ checkProfileNew.call(cluster.getMasterFe(), addrSet)
+ }
cluster.stopBackends(4)
sleep(10000)
@@ -333,7 +337,11 @@ suite('use_vcg_read_write_unhealthy_node_50', 'multi_cluster,docker') {
addrSet = [cluster2Ips[0] + ":" + "8060", cluster2Ips[1] + ":" + "8060"] as Set
sql """ select count(k2) AS theCount, k3 from test_all_vcluster group by k3 order by theCount limit 1 """
- checkProfileNew.call(addrSet)
+ if (options.connectToFollower) {
+ checkProfileNew.call(cluster.getOneFollowerFe(), addrSet)
+ } else {
+ checkProfileNew.call(cluster.getMasterFe(), addrSet)
+ }
sleep(16000)
sql """
@@ -406,7 +414,11 @@ suite('use_vcg_read_write_unhealthy_node_50', 'multi_cluster,docker') {
addrSet = [cluster2Ips[0] + ":" + "8060", cluster2Ips[1] + ":" + "8060"] as Set
sql """ select count(k2) AS theCount, k3 from test_all_vcluster group by k3 order by theCount limit 1 """
- checkProfileNew.call(addrSet)
+ if (options.connectToFollower) {
+ checkProfileNew.call(cluster.getOneFollowerFe(), addrSet)
+ } else {
+ checkProfileNew.call(cluster.getMasterFe(), addrSet)
+ }
sleep(16000)
// show cluster