This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new 41b5866c3e4 [fix](regression) fix the failed of cold_heat_separation_p2 (#49625) (#50090)
41b5866c3e4 is described below

commit 41b5866c3e4aabe4d84b973866ca3194a92f653c
Author: yagagagaga <zhangmi...@selectdb.com>
AuthorDate: Thu Apr 17 09:27:54 2025 +0800

    [fix](regression) fix the failed of cold_heat_separation_p2 (#49625) (#50090)
    
    (cherry picked from #49625 commit
    5f000e63fde699d52e63e41d710462603b80c9b2)
---
 .../add_drop_partition.groovy                      |  41 +-
 .../add_drop_partition_by_hdfs.groovy              |  60 +--
 .../create_table_use_dynamic_partition.groovy      |  38 +-
 ...eate_table_use_dynamic_partition_by_hdfs.groovy |  55 +-
 .../create_table_use_partition_policy.groovy       |  17 +-
 ...reate_table_use_partition_policy_by_hdfs.groovy |  25 +-
 .../create_table_use_policy.groovy                 |  17 +-
 .../create_table_use_policy_by_hdfs.groovy         |  27 +-
 .../load_colddata_to_hdfs.groovy                   |  22 +-
 .../modify_replica_use_partition.groovy            | 582 +++++++++++----------
 .../modify_replica_use_partition_by_hdfs.groovy    | 579 ++++++++++----------
 .../table_modify_resouce_and_policy.groovy         |  15 +-
 .../table_modify_resouce_and_policy_by_hdfs.groovy |  23 +-
 .../test_show_storage_policy_using.groovy          |   2 +-
 14 files changed, 774 insertions(+), 729 deletions(-)
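
The changes below repeat the same two adjustments in each suite: test object names (tables, resources, storage policies) get a per-run suffix so repeated or concurrent runs no longer collide, and the unbounded "wait until remote data size is non-zero" loops become bounded retries followed by an assertion. A minimal sketch of that pattern in plain Groovy, where queryRemoteDataSize() is a hypothetical stand-in for the SHOW TABLETS / fetchDataSize calls used by the suites:

    def suffix = UUID.randomUUID().hashCode().abs()     // unique per run, avoids name clashes
    def tableName = "tbl1${suffix}"

    long remoteDataSize = 0
    int retry = 100                                      // give up after 100 * 10s instead of waiting forever
    while (remoteDataSize == 0 && retry-- > 0) {
        sleep(10000)                                     // wait for cold data to reach remote storage
        remoteDataSize = queryRemoteDataSize(tableName)  // hypothetical helper, not part of the patch
    }
    assert remoteDataSize != 0 : 'remote size is still zero, maybe some error occurred'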

diff --git a/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy b/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy
index f53d5c27b36..2250607465e 100644
--- a/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy
@@ -48,7 +48,8 @@ suite("add_drop_partition") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "tbl1"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "tbl1${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
 
     def check_storage_policy_exist = { name->
@@ -63,8 +64,8 @@ suite("add_drop_partition") {
         return false;
     }
 
-    def resource_name = "test_add_drop_partition_resource"
-    def policy_name= "test_add_drop_partition_policy"
+    def resource_name = "test_add_drop_partition_resource${suffix}"
+    def policy_name= "test_add_drop_partition_policy${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -159,7 +160,8 @@ suite("add_drop_partition") {
     """
     log.info( "test tablets not empty")
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    def retry = 100
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -167,6 +169,7 @@ suite("add_drop_partition") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     LocalDataSize1 = sizes[0]
     RemoteDataSize1 = sizes[1]
@@ -193,16 +196,14 @@ suite("add_drop_partition") {
         assertTrue(par[12] == "${policy_name}")
     }
 
-    try_sql """
-    drop storage policy add_policy;
-    """
+    def add_resource = "add_resource${suffix}"
 
     try_sql """
-    drop resource add_resource;
+    drop resource ${add_resource};
     """
 
     sql """
-        CREATE RESOURCE IF NOT EXISTS "add_resource"
+        CREATE RESOURCE IF NOT EXISTS "${add_resource}"
         PROPERTIES(
             "type"="s3",
             "AWS_ENDPOINT" = "${getS3Endpoint()}",
@@ -218,11 +219,6 @@ suite("add_drop_partition") {
         );
     """
 
-    try_sql """
-    create storage policy tmp_policy
-    PROPERTIES( "storage_resource" = "add_resource", "cooldown_ttl" = "300");
-    """
-
     // can not set to one policy with different resource
     try {
         sql """alter table ${tableName} set ("storage_policy" = 
"add_policy");"""
@@ -230,22 +226,23 @@ suite("add_drop_partition") {
         assertTrue(true)
     }
 
+    def add_policy1 = "add_policy1${suffix}"
     sql """
-        CREATE STORAGE POLICY IF NOT EXISTS add_policy1
+        CREATE STORAGE POLICY IF NOT EXISTS ${add_policy1}
         PROPERTIES(
             "storage_resource" = "${resource_name}",
             "cooldown_ttl" = "60"
         )
     """
 
-    sql """alter table ${tableName} set ("storage_policy" = "add_policy1");"""
+    sql """alter table ${tableName} set ("storage_policy" = 
"${add_policy1}");"""
 
     // wait for report
     sleep(300000)
     
     partitions = sql "show partitions from ${tableName}"
     for (par in partitions) {
-        assertTrue(par[12] == "add_policy1")
+        assertTrue(par[12] == "${add_policy1}")
     }
 
     
@@ -260,7 +257,7 @@ suite("add_drop_partition") {
 
     partitions = sql "show partitions from ${tableName}"
     for (par in partitions) {
-        assertTrue(par[12] == "add_policy1")
+        assertTrue(par[12] == "${add_policy1}")
     }
 
     sql """
@@ -271,16 +268,12 @@ suite("add_drop_partition") {
     DROP TABLE ${tableName}
     """
 
-    try_sql """
-    drop storage policy add_policy;
-    """
-
     sql """
-    drop storage policy add_policy1;
+    drop storage policy ${add_policy1};
     """
 
     sql """
-    drop resource add_resource;
+    drop resource ${add_resource};
     """
 
 
diff --git a/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy
index f42ddc22503..721c12d7dd4 100644
--- a/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy
@@ -19,6 +19,9 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
 import java.time.LocalDate;
 
 suite("add_drop_partition_by_hdfs") {
+    if (!enableHdfs()) {
+        logger.info("skip this case because hdfs is not enabled");
+    }
     def fetchBeHttp = { check_func, meta_url ->
         def i = meta_url.indexOf("/api")
         String endPoint = meta_url.substring(0, i)
@@ -48,7 +51,8 @@ suite("add_drop_partition_by_hdfs") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "tbl1"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "tbl1${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
 
     def check_storage_policy_exist = { name->
@@ -63,8 +67,8 @@ suite("add_drop_partition_by_hdfs") {
         return false;
     }
 
-    def resource_name = "test_add_drop_partition_resource"
-    def policy_name= "test_add_drop_partition_policy"
+    def resource_name = "test_add_drop_partition_resource${suffix}"
+    def policy_name= "test_add_drop_partition_policy${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -87,12 +91,7 @@ suite("add_drop_partition_by_hdfs") {
             "type"="hdfs",
             "fs.defaultFS"="${getHdfsFs()}",
             "hadoop.username"="${getHdfsUser()}",
-            "hadoop.password"="${getHdfsPasswd()}",
-            "dfs.nameservices" = "my_ha",
-            "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
-            "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
-            "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
-            "dfs.client.failover.proxy.provider" = 
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+            "hadoop.password"="${getHdfsPasswd()}"
         );
     """
 
@@ -157,7 +156,8 @@ suite("add_drop_partition_by_hdfs") {
     """
     log.info( "test tablets not empty")
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    def retry = 100
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -165,6 +165,7 @@ suite("add_drop_partition_by_hdfs") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     LocalDataSize1 = sizes[0]
     RemoteDataSize1 = sizes[1]
@@ -191,34 +192,22 @@ suite("add_drop_partition_by_hdfs") {
         assertTrue(par[12] == "${policy_name}")
     }
 
-    try_sql """
-    drop storage policy add_policy;
-    """
+    def add_resource = "add_resource${suffix}"
 
     try_sql """
-    drop resource add_resource;
+    drop resource ${add_resource};
     """
 
     sql """
-        CREATE RESOURCE IF NOT EXISTS "add_resource"
+        CREATE RESOURCE IF NOT EXISTS "${add_resource}"
         PROPERTIES(
             "type"="hdfs",
             "fs.defaultFS"="${getHdfsFs()}",
             "hadoop.username"="${getHdfsUser()}",
-            "hadoop.password"="${getHdfsPasswd()}",
-            "dfs.nameservices" = "my_ha",
-            "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
-            "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
-            "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
-            "dfs.client.failover.proxy.provider" = 
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+            "hadoop.password"="${getHdfsPasswd()}"
         );
     """
 
-    try_sql """
-    create storage policy tmp_policy
-    PROPERTIES( "storage_resource" = "add_resource", "cooldown_ttl" = "300");
-    """
-
     // can not set to one policy with different resource
     try {
         sql """alter table ${tableName} set ("storage_policy" = 
"add_policy");"""
@@ -226,22 +215,23 @@ suite("add_drop_partition_by_hdfs") {
         assertTrue(true)
     }
 
+    def add_policy1 = "add_policy1${suffix}"
     sql """
-        CREATE STORAGE POLICY IF NOT EXISTS add_policy1
+        CREATE STORAGE POLICY IF NOT EXISTS ${add_policy1}
         PROPERTIES(
             "storage_resource" = "${resource_name}",
             "cooldown_ttl" = "60"
         )
     """
 
-    sql """alter table ${tableName} set ("storage_policy" = "add_policy1");"""
+    sql """alter table ${tableName} set ("storage_policy" = 
"${add_policy1}");"""
 
     // wait for report
     sleep(300000)
 
     partitions = sql "show partitions from ${tableName}"
     for (par in partitions) {
-        assertTrue(par[12] == "add_policy1")
+        assertTrue(par[12] == "${add_policy1}")
     }
 
 
@@ -251,12 +241,12 @@ suite("add_drop_partition_by_hdfs") {
     """
 
     sql """
-        insert into ${tableName} values(1, "2017-01-01");
+        insert into ${tableName} values(1, "2016-01-01");
     """
 
     partitions = sql "show partitions from ${tableName}"
     for (par in partitions) {
-        assertTrue(par[12] == "add_policy1")
+        assertTrue(par[12] == "${add_policy1}")
     }
 
     sql """
@@ -267,16 +257,12 @@ suite("add_drop_partition_by_hdfs") {
     DROP TABLE ${tableName}
     """
 
-    try_sql """
-    drop storage policy add_policy;
-    """
-
     sql """
-    drop storage policy add_policy1;
+    drop storage policy ${add_policy1};
     """
 
     sql """
-    drop resource add_resource;
+    drop resource ${add_resource};
     """
 
 
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy
index 580946bd7a8..bcd17146b3d 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy
@@ -48,7 +48,8 @@ suite("cold_heat_dynamic_partition") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "tbl2"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "tbl2${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
 
     def check_storage_policy_exist = { name->
@@ -63,8 +64,8 @@ suite("cold_heat_dynamic_partition") {
         return false;
     }
 
-    def resource_name = "test_dynamic_partition_resource"
-    def policy_name= "test_dynamic_partition_policy"
+    def resource_name = "test_dynamic_partition_resource${suffix}"
+    def policy_name= "test_dynamic_partition_policy${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -169,7 +170,8 @@ suite("cold_heat_dynamic_partition") {
     """
     log.info( "test tablets not empty")
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    def retry = 100
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -177,6 +179,7 @@ suite("cold_heat_dynamic_partition") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     LocalDataSize1 = sizes[0]
     RemoteDataSize1 = sizes[1]
@@ -203,16 +206,18 @@ suite("cold_heat_dynamic_partition") {
         assertTrue(par[12] == "${policy_name}")
     }
 
+    def tmp_policy = "tmp_policy${suffix}"
     try_sql """
-    drop storage policy tmp_policy;
+    drop storage policy ${tmp_policy};
     """
 
+    def tmp_resource = "tmp_resource${suffix}"
     try_sql """
-    drop resource tmp_resource;
+    drop resource ${tmp_resource};
     """
 
     sql """
-        CREATE RESOURCE IF NOT EXISTS "tmp_resource"
+        CREATE RESOURCE IF NOT EXISTS "${tmp_resource}"
         PROPERTIES(
             "type"="s3",
             "AWS_ENDPOINT" = "${getS3Endpoint()}",
@@ -229,33 +234,34 @@ suite("cold_heat_dynamic_partition") {
     """
 
     try_sql """
-    create storage policy tmp_policy
-    PROPERTIES( "storage_resource" = "tmp_resource", "cooldown_ttl" = "300");
+    create storage policy ${tmp_policy}
+    PROPERTIES( "storage_resource" = "${tmp_resource}", "cooldown_ttl" = "300");
     """
 
     // can not set to one policy with different resource
     try {
-        sql """alter table ${tableName} set ("storage_policy" = 
"tmp_policy");"""
+        sql """alter table ${tableName} set ("storage_policy" = 
"${tmp_policy}");"""
     } catch (java.sql.SQLException t) {
         assertTrue(true)
     }
 
+    def tmp_policy1 = "tmp_policy1${suffix}"
     sql """
-        CREATE STORAGE POLICY IF NOT EXISTS tmp_policy1
+        CREATE STORAGE POLICY IF NOT EXISTS ${tmp_policy1}
         PROPERTIES(
             "storage_resource" = "${resource_name}",
             "cooldown_ttl" = "60"
         )
     """
 
-    sql """alter table ${tableName} set ("storage_policy" = "tmp_policy1");"""
+    sql """alter table ${tableName} set ("storage_policy" = 
"${tmp_policy1}");"""
 
     // wait for report
     sleep(300000)
     
     partitions = sql "show partitions from ${tableName}"
     for (par in partitions) {
-        assertTrue(par[12] == "tmp_policy1")
+        assertTrue(par[12] == "${tmp_policy1}")
     }
 
     sql """
@@ -267,15 +273,15 @@ suite("cold_heat_dynamic_partition") {
     """
 
     sql """
-    drop storage policy tmp_policy;
+    drop storage policy ${tmp_policy};
     """
 
     sql """
-    drop storage policy tmp_policy1;
+    drop storage policy ${tmp_policy1};
     """
 
     sql """
-    drop resource tmp_resource;
+    drop resource ${tmp_resource};
     """
 
 
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy
index d099e43d7be..0e64132ba51 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy
@@ -19,6 +19,9 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
 import java.time.LocalDate;
 
 suite("cold_heat_dynamic_partition_by_hdfs") {
+    if (!enableHdfs()) {
+        logger.info("skip this case because hdfs is not enabled");
+    }
     def fetchBeHttp = { check_func, meta_url ->
         def i = meta_url.indexOf("/api")
         String endPoint = meta_url.substring(0, i)
@@ -48,7 +51,8 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "tbl2"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "tbl2${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
 
     def check_storage_policy_exist = { name->
@@ -63,8 +67,8 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
         return false;
     }
 
-    def resource_name = "test_dynamic_partition_resource"
-    def policy_name= "test_dynamic_partition_policy"
+    def resource_name = "test_dynamic_partition_resource${suffix}"
+    def policy_name= "test_dynamic_partition_policy${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -87,12 +91,7 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
             "type"="hdfs",
             "fs.defaultFS"="${getHdfsFs()}",
             "hadoop.username"="${getHdfsUser()}",
-            "hadoop.password"="${getHdfsPasswd()}",
-            "dfs.nameservices" = "my_ha",
-            "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
-            "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
-            "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
-            "dfs.client.failover.proxy.provider" = 
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+            "hadoop.password"="${getHdfsPasswd()}"
         );
     """
 
@@ -167,7 +166,8 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
     """
     log.info( "test tablets not empty")
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    def retry = 100
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -175,6 +175,7 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     LocalDataSize1 = sizes[0]
     RemoteDataSize1 = sizes[1]
@@ -201,57 +202,55 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
         assertTrue(par[12] == "${policy_name}")
     }
 
+    def tmp_policy = "tmp_policy${suffix}"
     try_sql """
-    drop storage policy tmp_policy;
+    drop storage policy ${tmp_policy};
     """
 
+    def tmp_resource = "tmp_resource${suffix}"
     try_sql """
-    drop resource tmp_resource;
+    drop resource ${tmp_resource};
     """
 
     sql """
-        CREATE RESOURCE IF NOT EXISTS "tmp_resource"
+        CREATE RESOURCE IF NOT EXISTS "${tmp_resource}"
         PROPERTIES(
             "type"="hdfs",
             "fs.defaultFS"="${getHdfsFs()}",
             "hadoop.username"="${getHdfsUser()}",
-            "hadoop.password"="${getHdfsPasswd()}",
-            "dfs.nameservices" = "my_ha",
-            "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
-            "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
-            "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
-            "dfs.client.failover.proxy.provider" = 
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+            "hadoop.password"="${getHdfsPasswd()}"
         );
     """
 
     try_sql """
-    create storage policy tmp_policy
-    PROPERTIES( "storage_resource" = "tmp_resource", "cooldown_ttl" = "300");
+    create storage policy ${tmp_policy}
+    PROPERTIES( "storage_resource" = "${tmp_resource}", "cooldown_ttl" = "300");
     """
 
     // can not set to one policy with different resource
     try {
-        sql """alter table ${tableName} set ("storage_policy" = 
"tmp_policy");"""
+        sql """alter table ${tableName} set ("storage_policy" = 
"${tmp_policy}");"""
     } catch (java.sql.SQLException t) {
         assertTrue(true)
     }
 
+    def tmp_policy1 = "tmp_policy1${suffix}"
     sql """
-        CREATE STORAGE POLICY IF NOT EXISTS tmp_policy1
+        CREATE STORAGE POLICY IF NOT EXISTS ${tmp_policy1}
         PROPERTIES(
             "storage_resource" = "${resource_name}",
             "cooldown_ttl" = "60"
         )
     """
 
-    sql """alter table ${tableName} set ("storage_policy" = "tmp_policy1");"""
+    sql """alter table ${tableName} set ("storage_policy" = 
"${tmp_policy1}");"""
 
     // wait for report
     sleep(300000)
 
     partitions = sql "show partitions from ${tableName}"
     for (par in partitions) {
-        assertTrue(par[12] == "tmp_policy1")
+        assertTrue(par[12] == "${tmp_policy1}")
     }
 
     sql """
@@ -263,15 +262,15 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
     """
 
     sql """
-    drop storage policy tmp_policy;
+    drop storage policy ${tmp_policy};
     """
 
     sql """
-    drop storage policy tmp_policy1;
+    drop storage policy ${tmp_policy1};
     """
 
     sql """
-    drop resource tmp_resource;
+    drop resource ${tmp_resource};
     """
 
 
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy
index 97d83ec64e0..25eaa5e7a30 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy
@@ -47,7 +47,8 @@ suite("create_table_use_partition_policy") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "lineitem1"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "lineitem1${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
     def stream_load_one_part = { partnum ->
         streamLoad {
@@ -90,13 +91,13 @@ suite("create_table_use_partition_policy") {
     def load_lineitem_table = {
         stream_load_one_part("00")
         stream_load_one_part("01")
-        def tablets = sql """
+        def tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
-        while (tablets[0][8] == "0") {
+        while (tablets[0].LocalDataSize == "0") {
             log.info( "test local size is zero, sleep 10s")
             sleep(10000)
-            tablets = sql """
+            tablets = sql_return_maparray """
             SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
             """
         }
@@ -114,8 +115,8 @@ suite("create_table_use_partition_policy") {
         return false;
     }
 
-    def resource_name = "test_table_partition_with_data_resource"
-    def policy_name= "test_table_partition_with_data_policy"
+    def resource_name = "test_table_partition_with_data_resource${suffix}"
+    def policy_name= "test_table_partition_with_data_policy${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -217,7 +218,8 @@ suite("create_table_use_partition_policy") {
     """
     log.info( "test tablets not empty")
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    def retry = 100
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -225,6 +227,7 @@ suite("create_table_use_partition_policy") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     LocalDataSize1 = sizes[0]
     RemoteDataSize1 = sizes[1]
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy
index 5c03aeba0c1..ce4264480a8 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy
@@ -18,6 +18,9 @@ import groovy.json.JsonSlurper
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
 suite("create_table_use_partition_policy_by_hdfs") {
+    if (!enableHdfs()) {
+        logger.info("skip this case because hdfs is not enabled");
+    }
     def fetchBeHttp = { check_func, meta_url ->
         def i = meta_url.indexOf("/api")
         String endPoint = meta_url.substring(0, i)
@@ -47,7 +50,8 @@ suite("create_table_use_partition_policy_by_hdfs") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "lineitem1"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "lineitem1${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
     def stream_load_one_part = { partnum ->
         streamLoad {
@@ -90,13 +94,13 @@ suite("create_table_use_partition_policy_by_hdfs") {
     def load_lineitem_table = {
         stream_load_one_part("00")
         stream_load_one_part("01")
-        def tablets = sql """
+        def tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
         while (tablets[0].LocalDataSize == "0") {
             log.info( "test local size is zero, sleep 10s")
             sleep(10000)
-            tablets = sql """
+            tablets = sql_return_maparray """
             SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
             """
         }
@@ -114,8 +118,8 @@ suite("create_table_use_partition_policy_by_hdfs") {
         return false;
     }
 
-    def resource_name = "test_table_partition_with_data_resource"
-    def policy_name= "test_table_partition_with_data_policy"
+    def resource_name = "test_table_partition_with_data_resource${suffix}"
+    def policy_name= "test_table_partition_with_data_policy${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -138,12 +142,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
             "type"="hdfs",
             "fs.defaultFS"="${getHdfsFs()}",
             "hadoop.username"="${getHdfsUser()}",
-            "hadoop.password"="${getHdfsPasswd()}",
-            "dfs.nameservices" = "my_ha",
-            "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
-            "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
-            "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
-            "dfs.client.failover.proxy.provider" = 
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+            "hadoop.password"="${getHdfsPasswd()}"
         );
     """
 
@@ -318,8 +317,9 @@ suite("create_table_use_partition_policy_by_hdfs") {
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
     """
     log.info( "test tablets not empty")
+    def retry = 100
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -327,6 +327,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     LocalDataSize1 = sizes[0]
     RemoteDataSize1 = sizes[1]
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy
index 4073a5c67b8..f7b024795f5 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy
@@ -47,7 +47,8 @@ suite("create_table_use_policy") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "lineitem2"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "lineitem2${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
     def stream_load_one_part = { partnum ->
         streamLoad {
@@ -114,8 +115,8 @@ suite("create_table_use_policy") {
         return false;
     }
 
-    def resource_name = "test_table_with_data_resource"
-    def policy_name= "test_table_with_data_policy"
+    def resource_name = "test_table_with_data_resource${suffix}"
+    def policy_name= "test_table_with_data_policy${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -189,7 +190,7 @@ suite("create_table_use_policy") {
     load_lineitem_table()
 
     // show tablets from table, 获取第一个tablet的 LocalDataSize1
-    tablets = sql_return_maparray """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -206,7 +207,8 @@ suite("create_table_use_policy") {
     """
     log.info( "test tablets not empty")
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    def retry = 100
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -214,6 +216,7 @@ suite("create_table_use_policy") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     log.info( "test remote size not zero")
     assertEquals(LocalDataSize1, sizes[1])
@@ -272,7 +275,8 @@ suite("create_table_use_policy") {
     """
     log.info( "test tablets not empty")
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    retry = 100
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -280,6 +284,7 @@ suite("create_table_use_policy") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     log.info( "test remote size not zero")
     assertEquals(LocalDataSize1, sizes[1])
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy
index 9ff61a35e04..408f2e6275b 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy
@@ -18,6 +18,9 @@ import groovy.json.JsonSlurper
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
 suite("create_table_use_policy_by_hdfs") {
+    if (!enableHdfs()) {
+        logger.info("skip this case because hdfs is not enabled");
+    }
     def fetchBeHttp = { check_func, meta_url ->
         def i = meta_url.indexOf("/api")
         String endPoint = meta_url.substring(0, i)
@@ -47,7 +50,8 @@ suite("create_table_use_policy_by_hdfs") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "lineitem2"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "lineitem2${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
     def stream_load_one_part = { partnum ->
         streamLoad {
@@ -114,8 +118,8 @@ suite("create_table_use_policy_by_hdfs") {
         return false;
     }
 
-    def resource_name = "test_table_with_data_resource"
-    def policy_name= "test_table_with_data_policy"
+    def resource_name = "test_table_with_data_resource${suffix}"
+    def policy_name= "test_table_with_data_policy${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -138,12 +142,7 @@ suite("create_table_use_policy_by_hdfs") {
             "type"="hdfs",
             "fs.defaultFS"="${getHdfsFs()}",
             "hadoop.username"="${getHdfsUser()}",
-            "hadoop.password"="${getHdfsPasswd()}",
-            "dfs.nameservices" = "my_ha",
-            "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
-            "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
-            "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
-            "dfs.client.failover.proxy.provider" = 
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+            "hadoop.password"="${getHdfsPasswd()}"
         );
     """
 
@@ -187,7 +186,7 @@ suite("create_table_use_policy_by_hdfs") {
     load_lineitem_table()
 
     // show tablets from table, 获取第一个tablet的 LocalDataSize1
-    tablets = sql_return_maparray """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -204,7 +203,8 @@ suite("create_table_use_policy_by_hdfs") {
     """
     log.info( "test tablets not empty")
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    def retry = 100
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -212,6 +212,7 @@ suite("create_table_use_policy_by_hdfs") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     log.info( "test remote size not zero")
     assertEquals(LocalDataSize1, sizes[1])
@@ -270,7 +271,8 @@ suite("create_table_use_policy_by_hdfs") {
     """
     log.info( "test tablets not empty")
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    retry = 100
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -278,6 +280,7 @@ suite("create_table_use_policy_by_hdfs") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     log.info( "test remote size not zero")
     assertEquals(LocalDataSize1, sizes[1])
diff --git a/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy
index 673fa9d39c8..8aa2ded305a 100644
--- a/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy
@@ -51,7 +51,8 @@ suite("load_colddata_to_hdfs") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "lineitem2"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "lineitem2${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
     def stream_load_one_part = { partnum ->
         streamLoad {
@@ -117,8 +118,8 @@ suite("load_colddata_to_hdfs") {
         return false;
     }
 
-    def resource_name = "test_table_with_data_resource"
-    def policy_name= "test_table_with_data_policy"
+    def resource_name = "test_table_with_data_resource${suffix}"
+    def policy_name= "test_table_with_data_policy${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -141,13 +142,8 @@ suite("load_colddata_to_hdfs") {
         PROPERTIES (
             "type"="hdfs",
             "fs.defaultFS"="${hdfsFs}",
-            "hadoop.username"="hive",
-            "hadoop.password"="hive",
-            "dfs.nameservices" = "my_ha",
-            "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
-            "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
-            "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
-            "dfs.client.failover.proxy.provider" = 
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+            "hadoop.username"="${getHdfsUser()}",
+            "hadoop.password"="${getHdfsPasswd()}"
         );
     """
 
@@ -191,7 +187,7 @@ suite("load_colddata_to_hdfs") {
     load_lineitem_table()
 
     // show tablets from table, 获取第一个tablet的 LocalDataSize1
-    tablets = sql_return_maparray """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -208,7 +204,8 @@ suite("load_colddata_to_hdfs") {
     """
     log.info( "test tablets not empty")
     fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
+    def retry = 100
+    while (sizes[1] == 0 && retry --> 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
         tablets = sql_return_maparray """
@@ -216,6 +213,7 @@ suite("load_colddata_to_hdfs") {
         """
         fetchDataSize(sizes, tablets[0])
     }
+    assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
     assertTrue(tablets.size() > 0)
     log.info( "test remote size not zero")
     assertEquals(LocalDataSize1, sizes[1])
diff --git a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy
index 65b333d9c4c..9be6b351bee 100644
--- a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy
@@ -18,136 +18,140 @@ import groovy.json.JsonSlurper
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
 suite("modify_replica_use_partition") {
-    def fetchBeHttp = { check_func, meta_url ->
-        def i = meta_url.indexOf("/api")
-        String endPoint = meta_url.substring(0, i)
-        String metaUri = meta_url.substring(i)
-        i = endPoint.lastIndexOf('/')
-        endPoint = endPoint.substring(i + 1)
-        httpTest {
-            endpoint endPoint
-            uri metaUri
-            op "get"
-            check check_func
+
+    def replicaNum = getFeConfig("force_olap_table_replication_num")
+    setFeConfig("force_olap_table_replication_num", 0)
+    try {
+        def fetchBeHttp = { check_func, meta_url ->
+            def i = meta_url.indexOf("/api")
+            String endPoint = meta_url.substring(0, i)
+            String metaUri = meta_url.substring(i)
+            i = endPoint.lastIndexOf('/')
+            endPoint = endPoint.substring(i + 1)
+            httpTest {
+                endpoint endPoint
+                uri metaUri
+                op "get"
+                check check_func
+            }
         }
-    }
-    // data_sizes is one arrayList<Long>, t is tablet
-    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
-        def tabletId = t.TabletId
-        String meta_url = t.MetaUrl
-        def clos = {  respCode, body ->
-            logger.info("test ttl expired resp Code {}", "${respCode}".toString())
-            assertEquals("${respCode}".toString(), "200")
-            String out = "${body}".toString()
-            def obj = new JsonSlurper().parseText(out)
-            data_sizes[0] = obj.local_data_size
-            data_sizes[1] = obj.remote_data_size
+        // data_sizes is one arrayList<Long>, t is tablet
+        def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+            def tabletId = t.TabletId
+            String meta_url = t.MetaUrl
+            def clos = {  respCode, body ->
+                logger.info("test ttl expired resp Code {}", "${respCode}".toString())
+                assertEquals("${respCode}".toString(), "200")
+                String out = "${body}".toString()
+                def obj = new JsonSlurper().parseText(out)
+                data_sizes[0] = obj.local_data_size
+                data_sizes[1] = obj.remote_data_size
+            }
+            fetchBeHttp(clos, meta_url.replace("header", "data_size"))
         }
-        fetchBeHttp(clos, meta_url.replace("header", "data_size"))
-    }
-    // used as passing out parameter to fetchDataSize
-    List<Long> sizes = [-1, -1]
-    def get_meta = { url ->
-        StringBuilder sb = new StringBuilder();
-        sb.append("curl ")
-        sb.append(url)
-        String command = sb.toString()
-        log.info(command)
-        def process = command.execute()
-        int code = process.waitFor()
-        def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
-        def out = process.getText()
-        logger.info("Run compaction: code=" + code + ", err=" + err)
-        assertEquals(code, 0)
-        return out
-    }
-
-    def tableName = "lineitem3"
-    sql """ DROP TABLE IF EXISTS ${tableName} """
-    def stream_load_one_part = { partnum ->
-        streamLoad {
-            table tableName
-            // a default db 'regression_test' is specified in
-            // ${DORIS_HOME}/conf/regression-conf.groovy
-
-            // default label is UUID:
-            // set 'label' UUID.randomUUID().toString()
-
-            // default column_separator is specify in doris fe config, usually is '\t'.
-            // this line change to ','
-            set 'column_separator', '|'
-            set 'compress_type', 'GZ'
-
-
-            // relate to ${DORIS_HOME}/regression-test/data/demo/streamload_input.csv.
-            // also, you can stream load a http stream, e.g. http://xxx/some.csv
-            file """${getS3Url()}/regression/tpch/sf1/lineitem.csv.split${partnum}.gz"""
-
-            time 10000 // limit inflight 10s
-
-            // stream load action will check result, include Success status, and NumberTotalRows == NumberLoadedRows
-
-            // if declared a check callback, the default check condition will ignore.
-            // So you must check all condition
-            check { result, exception, startTime, endTime ->
-                if (exception != null) {
-                    throw exception
+        // used as passing out parameter to fetchDataSize
+        List<Long> sizes = [-1, -1]
+        def get_meta = { url ->
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl ")
+            sb.append(url)
+            String command = sb.toString()
+            log.info(command)
+            def process = command.execute()
+            int code = process.waitFor()
+            def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+            def out = process.getText()
+            logger.info("Run compaction: code=" + code + ", err=" + err)
+            assertEquals(code, 0)
+            return out
+        }
+        def suffix = UUID.randomUUID().hashCode().abs()
+        def tableName = "lineitem3${suffix}"
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        def stream_load_one_part = { partnum ->
+            streamLoad {
+                table tableName
+                // a default db 'regression_test' is specified in
+                // ${DORIS_HOME}/conf/regression-conf.groovy
+
+                // default label is UUID:
+                // set 'label' UUID.randomUUID().toString()
+
+                // default column_separator is specify in doris fe config, usually is '\t'.
+                // this line change to ','
+                set 'column_separator', '|'
+                set 'compress_type', 'GZ'
+
+
+                // relate to ${DORIS_HOME}/regression-test/data/demo/streamload_input.csv.
+                // also, you can stream load a http stream, e.g. http://xxx/some.csv
+                file """${getS3Url()}/regression/tpch/sf1/lineitem.csv.split${partnum}.gz"""
+
+                time 10000 // limit inflight 10s
+
+                // stream load action will check result, include Success status, and NumberTotalRows == NumberLoadedRows
+
+                // if declared a check callback, the default check condition will ignore.
+                // So you must check all condition
+                check { result, exception, startTime, endTime ->
+                    if (exception != null) {
+                        throw exception
+                    }
+                    log.info("Stream load result: ${result}".toString())
+                    def json = parseJson(result)
+                    assertEquals("success", json.Status.toLowerCase())
+                    assertEquals(json.NumberTotalRows, json.NumberLoadedRows)
+                    assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
                 }
-                log.info("Stream load result: ${result}".toString())
-                def json = parseJson(result)
-                assertEquals("success", json.Status.toLowerCase())
-                assertEquals(json.NumberTotalRows, json.NumberLoadedRows)
-                assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
             }
         }
-    }
 
-    def load_lineitem_table = {
-        stream_load_one_part("00")
-        stream_load_one_part("01")
-        def tablets = sql_return_maparray """
+        def load_lineitem_table = {
+            stream_load_one_part("00")
+            stream_load_one_part("01")
+            def tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        while (tablets[0].LocalDataSize == "0") {
-            log.info( "test local size is zero, sleep 10s")
-            sleep(10000)
-            tablets = sql_return_maparray """
+            while (tablets[0].LocalDataSize == "0") {
+                log.info( "test local size is zero, sleep 10s")
+                sleep(10000)
+                tablets = sql_return_maparray """
             SHOW TABLETS FROM ${tableName}
             """
+            }
         }
-    }
 
-    def check_storage_policy_exist = { name->
-        def polices = sql"""
+        def check_storage_policy_exist = { name->
+            def polices = sql"""
         show storage policy;
         """
-        for (p in polices) {
-            if (name == p[0]) {
-                return true;
+            for (p in polices) {
+                if (name == p[0]) {
+                    return true;
+                }
             }
+            return false;
         }
-        return false;
-    }
 
-    def resource_name = "test_table_replica_with_data_resource"
-    def policy_name= "test_table_replica_with_data_policy"
+        def resource_name = "test_table_replica_with_data_resource${suffix}"
+        def policy_name= "test_table_replica_with_data_policy${suffix}"
 
-    if (check_storage_policy_exist(policy_name)) {
-        sql """
+        if (check_storage_policy_exist(policy_name)) {
+            sql """
             DROP STORAGE POLICY ${policy_name}
         """
-    }
-    
-    def has_resouce = sql """
+        }
+
+        def has_resouce = sql """
         SHOW RESOURCES WHERE NAME = "${resource_name}";
     """
-    if (has_resouce.size() > 0) {
-        sql """
+        if (has_resouce.size() > 0) {
+            sql """
             DROP RESOURCE ${resource_name}
         """
-    }
+        }
 
-    sql """
+        sql """
         CREATE RESOURCE IF NOT EXISTS "${resource_name}"
         PROPERTIES(
             "type"="s3",
@@ -164,7 +168,7 @@ suite("modify_replica_use_partition") {
         );
     """
 
-    sql """
+        sql """
         CREATE STORAGE POLICY IF NOT EXISTS ${policy_name}
         PROPERTIES(
             "storage_resource" = "${resource_name}",
@@ -172,9 +176,9 @@ suite("modify_replica_use_partition") {
         )
     """
 
-    // test one replica then we will alter all the replica of all partitions to 3
-    sql """ DROP TABLE IF EXISTS ${tableName} """
-    sql """
+        // test one replica then we will alter all the replica of all partitions to 3
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
             CREATE TABLE IF NOT EXISTS ${tableName} (
             L_ORDERKEY    INTEGER NOT NULL,
             L_PARTKEY     INTEGER NOT NULL,
@@ -205,87 +209,96 @@ suite("modify_replica_use_partition") {
             "replication_num" = "1"
             )
         """
-    
-    load_lineitem_table()
 
-    // 等待10min,show tablets from table, 预期not_use_storage_policy_tablet_list 的 RemoteDataSize 为LocalDataSize1,LocalDataSize为0
-    sleep(600000)
+        load_lineitem_table()
+
+        // 等待10min,show tablets from table, 预期not_use_storage_policy_tablet_list 的 RemoteDataSize 为LocalDataSize1,LocalDataSize为0
+        log.info("wait for 10min")
+        sleep(600000)
 
 
-    def tablets = sql_return_maparray """
+        def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    log.info( "test tablets not empty")
-    fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
-        log.info( "test remote size is zero, sleep 10s")
-        sleep(10000)
-        tablets = sql_return_maparray """
+        log.info( "test tablets not empty")
+        fetchDataSize(sizes, tablets[0])
+        def retry = 100
+        while (sizes[1] == 0 && retry --> 0) {
+            log.info( "test remote size is zero, sleep 10s")
+            sleep(10000)
+            tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        fetchDataSize(sizes, tablets[0])
-    }
-    assertTrue(tablets.size() > 0)
-    def LocalDataSize1 = sizes[0]
-    def RemoteDataSize1 = sizes[1]
-    log.info( "test local size is zero")
-    assertEquals(LocalDataSize1, 0)
-    log.info( "test remote size not zero")
-    assertTrue(RemoteDataSize1 != 0)
-    def originSize = tablets.size()
-
-    // alter change replication num
-    sql """
-    ALTER TABLE ${tableName}
-    MODIFY PARTITION (p202301, p202302) SET("replication_num"="3");
-    """
+            fetchDataSize(sizes, tablets[0])
+        }
+        assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
+        assertTrue(tablets.size() > 0)
+        def LocalDataSize1 = sizes[0]
+        def RemoteDataSize1 = sizes[1]
+        log.info( "test local size is zero")
+        assertEquals(LocalDataSize1, 0)
+        log.info( "test remote size not zero")
+        assertTrue(RemoteDataSize1 != 0)
+        def originSize = tablets.size()
+        assertEquals(originSize, 6, "${tableName}'s tablets should be 6")
+
+        // alter change replication num
+        if (!isCloudMode()) {
+            sql """
+        ALTER TABLE ${tableName}
+        MODIFY PARTITION (p202301, p202302) SET("replication_num"="3", "storage_policy" = "${policy_name}");
+        """
+        }
 
-    sql """
+        sql """
     select count(*) from ${tableName}
     """
 
-    sql """
+        sql """
     select * from ${tableName} limit 10
     """
-    // wait one minute for migration to be completed
-    sleep(60000)
+        // wait one minute for migration to be completed
+        log.info("wait one minute for migration to be completed")
+        sleep(60000)
 
-    // 对比所有tablets的replicas的rowsets meta是否相同
-    tablets = sql_return_maparray """
+        // 对比所有tablets的replicas的rowsets meta是否相同
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    while (tablets.size() != 3 * originSize) {
-        log.info( "tablets clone not finished, sleep 10s")
-        sleep(10000)
-        tablets = sql_return_maparray """
+        retry = 100
+        while (tablets.size() != 3 * originSize && retry --> 0) {
+            log.info( "tablets clone not finished(tablets.size = ${tablets.size()}, originSize = ${originSize}), sleep 10s")
+            sleep(10000)
+            tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-    }
-    def compactionStatusIdx = tablets[0].size() - 1
-    // check rowsets inside the 3 replica
-    def iterate_num = tablets.size() / 3;
-    for (int i = 0; i < iterate_num; i++) {
-        int idx = i * 3;
-        def dst = tablets[idx].CompactionStatus
-        def text = get_meta(dst)
-        def obj = new JsonSlurper().parseText(text)
-        def rowsets = obj.rowsets
-        for (x in [1,2]) {
-            dst = tablets[idx + x].CompactionStatus
-            text = get_meta(dst)
-            obj = new JsonSlurper().parseText(text)
-            log.info( "test rowset meta is the same")
-            assertEquals(rowsets, obj.rowsets)
         }
-    }
+        assertTrue(tablets.size() == 3 * originSize, "tablets clone not finished, maybe some error occurred")
+        def compactionStatusIdx = tablets[0].size() - 1
+        // check rowsets inside the 3 replica
+        def iterate_num = tablets.size() / 3;
+        for (int i = 0; i < iterate_num; i++) {
+            int idx = i * 3;
+            def dst = tablets[idx].CompactionStatus
+            def text = get_meta(dst)
+            def obj = new JsonSlurper().parseText(text)
+            def rowsets = obj.rowsets
+            for (x in [1,2]) {
+                dst = tablets[idx + x].CompactionStatus
+                text = get_meta(dst)
+                obj = new JsonSlurper().parseText(text)
+                log.info( "test rowset meta is the same")
+                assertEquals(rowsets, obj.rowsets)
+            }
+        }
 
 
-    sql """
+        sql """
     DROP TABLE ${tableName}
     """
 
-    // test table with 3 replication, then we will alter all the replica of all partitions to 1
-    sql """
+        // test table with 3 replication, then we will alter all the replica of all partitions to 1
+        sql """
             CREATE TABLE IF NOT EXISTS ${tableName} (
             L_ORDERKEY    INTEGER NOT NULL,
             L_PARTKEY     INTEGER NOT NULL,
@@ -316,70 +329,75 @@ suite("modify_replica_use_partition") {
             "replication_num" = "3"
             )
         """
-    
-    load_lineitem_table()
 
-    // show tablets from table, 获取第一个tablet的 LocalDataSize1
-    tablets = sql_return_maparray """
+        load_lineitem_table()
+
+        // show tablets from table, 获取第一个tablet的 LocalDataSize1
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    fetchDataSize(sizes, tablets[0])
-    log.info( "test tablets not empty")
-    assertTrue(tablets.size() > 0)
-    LocalDataSize1 = sizes[0]
-    RemoteDataSize1 = sizes[1]
-    log.info( "test local size not zero")
-    assertTrue(LocalDataSize1 != 0)
-    log.info( "test remote size is zero")
-    assertEquals(RemoteDataSize1, 0)
+        fetchDataSize(sizes, tablets[0])
+        log.info( "test tablets not empty")
+        assertTrue(tablets.size() > 0)
+        LocalDataSize1 = sizes[0]
+        RemoteDataSize1 = sizes[1]
+        log.info( "test local size not zero")
+        assertTrue(LocalDataSize1 != 0)
+        log.info( "test remote size is zero")
+        assertEquals(RemoteDataSize1, 0)
 
-    // 等待10min,show tablets from table, 预期not_use_storage_policy_tablet_list 的 RemoteDataSize 为LocalDataSize1,LocalDataSize为0
-    sleep(600000)
+        // 等待10min,show tablets from table, 预期not_use_storage_policy_tablet_list 的 RemoteDataSize 为LocalDataSize1,LocalDataSize为0
+        log.info("wait for 10min")
+        sleep(600000)
 
 
-    tablets = sql_return_maparray """
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    log.info( "test tablets not empty")
-    assertTrue(tablets.size() > 0)
-    fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
-        log.info( "test remote size is zero, sleep 10s")
-        sleep(10000)
-        tablets = sql_return_maparray """
+        log.info( "test tablets not empty")
+        assertTrue(tablets.size() > 0)
+        fetchDataSize(sizes, tablets[0])
+        retry = 100
+        while (sizes[1] == 0 && retry --> 0) {
+            log.info( "test remote size is zero, sleep 10s")
+            sleep(10000)
+            tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        fetchDataSize(sizes, tablets[0])
-    }
-    LocalDataSize1 = sizes[0]
-    RemoteDataSize1 = sizes[1]
-    log.info( "test local size is zero")
-    assertEquals(LocalDataSize1, 0)
-    log.info( "test remote size not zero")
-    assertTrue(RemoteDataSize1 != 0)
-
-    // alter change replication num
-    sql """
-    ALTER TABLE ${tableName}
-    MODIFY PARTITION (p202301, p202302) SET("replication_num"="1");
-    """
+            fetchDataSize(sizes, tablets[0])
+        }
+        assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
+        LocalDataSize1 = sizes[0]
+        RemoteDataSize1 = sizes[1]
+        log.info( "test local size is zero")
+        assertEquals(LocalDataSize1, 0)
+        log.info( "test remote size not zero")
+        assertTrue(RemoteDataSize1 != 0)
+
+        // alter change replication num
+        if (!isCloudMode()) {
+            sql """
+        ALTER TABLE ${tableName}
+        MODIFY PARTITION (p202301, p202302) SET("replication_num"="1", "storage_policy" = "${policy_name}");
+        """
+        }
 
-    sql """
+        sql """
     select count(*) from ${tableName}
     """
 
-    sql """
+        sql """
     select * from ${tableName} limit 10
     """
 
-    sql """
+        sql """
     DROP TABLE ${tableName}
     """
 
 
-    // test table with one partition p1 3 replication and another p2 with 1 replication,
-    // then we will alter p1 to one replica and p2 to 3
-    sql """
+        // test table with one partition p1 3 replication and another p2 with 1 replication,
+        // then we will alter p1 to one replica and p2 to 3
+        sql """
             CREATE TABLE IF NOT EXISTS ${tableName} (
             L_ORDERKEY    INTEGER NOT NULL,
             L_PARTKEY     INTEGER NOT NULL,
@@ -409,100 +427,108 @@ suite("modify_replica_use_partition") {
             "storage_policy" = "${policy_name}"
             )
         """
-    
-    load_lineitem_table()
 
-    // show tablets from table, get the LocalDataSize1 of the first tablet
-    tablets = sql_return_maparray """
+        load_lineitem_table()
+
+        // show tablets from table, get the LocalDataSize1 of the first tablet
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    fetchDataSize(sizes, tablets[0])
-    log.info( "test tablets not empty")
-    assertTrue(tablets.size() > 0)
-    LocalDataSize1 = sizes[0]
-    RemoteDataSize1 = sizes[1]
-    log.info( "test local size not zero")
-    assertTrue(LocalDataSize1 != 0)
-    log.info( "test remote size is zero")
-    assertEquals(RemoteDataSize1, 0)
+        fetchDataSize(sizes, tablets[0])
+        log.info( "test tablets not empty")
+        assertTrue(tablets.size() > 0)
+        LocalDataSize1 = sizes[0]
+        RemoteDataSize1 = sizes[1]
+        log.info( "test local size not zero")
+        assertTrue(LocalDataSize1 != 0)
+        log.info( "test remote size is zero")
+        assertEquals(RemoteDataSize1, 0)
 
-    // wait 10 minutes, then show tablets from table; expect RemoteDataSize of not_use_storage_policy_tablet_list to be LocalDataSize1 and LocalDataSize to be 0
-    sleep(600000)
+        // wait 10 minutes, then show tablets from table; expect RemoteDataSize of not_use_storage_policy_tablet_list to be LocalDataSize1 and LocalDataSize to be 0
+        log.info("wait for 10min")
+        sleep(600000)
 
 
-    tablets = sql_return_maparray """
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    log.info( "test tablets not empty")
-    assertTrue(tablets.size() > 0)
-    fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
-        log.info( "test remote size is zero, sleep 10s")
-        sleep(10000)
-        tablets = sql_return_maparray """
+        log.info( "test tablets not empty")
+        assertTrue(tablets.size() > 0)
+        fetchDataSize(sizes, tablets[0])
+        retry = 100
+        while (sizes[1] == 0 && retry --> 0) {
+            log.info( "test remote size is zero, sleep 10s")
+            sleep(10000)
+            tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        fetchDataSize(sizes, tablets[0])
-    }
-    LocalDataSize1 = sizes[0]
-    RemoteDataSize1 = sizes[1]
-    log.info( "test local size is zero")
-    assertEquals(LocalDataSize1, 0)
-    log.info( "test remote size not zero")
-    assertTrue(RemoteDataSize1 != 0)
-
-    // alter change replication num
-    sql """
-    ALTER TABLE ${tableName}
-    MODIFY PARTITION (p202301) SET("replication_num"="1");
-    """
+            fetchDataSize(sizes, tablets[0])
+        }
+        assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
+        LocalDataSize1 = sizes[0]
+        RemoteDataSize1 = sizes[1]
+        log.info( "test local size is zero")
+        assertEquals(LocalDataSize1, 0)
+        log.info( "test remote size not zero")
+        assertTrue(RemoteDataSize1 != 0)
+
+        // alter change replication num
+        if (!isCloudMode()) {
+            sql """
+        ALTER TABLE ${tableName}
+        MODIFY PARTITION (p202301) SET("replication_num"="1", "storage_policy" = "${policy_name}");
+        """
 
-    sql """
-    ALTER TABLE ${tableName}
-    MODIFY PARTITION (p202302) SET("replication_num"="3");
-    """
+            sql """
+        ALTER TABLE ${tableName}
+        MODIFY PARTITION (p202302) SET("replication_num"="3", "storage_policy" = "${policy_name}");
+        """
+        }
 
-    sql """
+        sql """
     select count(*) from ${tableName}
     """
 
-    sql """
+        sql """
     select * from ${tableName} limit 10
     """
 
-    // wait one minute for migration to be completed
-    sleep(60000)
-    // compare whether the rowsets meta of all tablet replicas in the 3-replica partition are the same
-    tablets = sql_return_maparray """
+        log.info("wait one minute for migration to be completed")
+        // wait one minute for migration to be completed
+        sleep(60000)
+        // compare whether the rowsets meta of all tablet replicas in the 3-replica partition are the same
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
-    // sleep to wait for the report
-    sleep(15000)
-    tablets = sql_return_maparray """
+        // sleep to wait for the report
+        sleep(15000)
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
-    compactionStatusIdx = tablets[0].size() - 1
-    // check rowsets inside the 3 replica
-    iterate_num = tablets.size() / 3;
-    for (int i = 0; i < iterate_num; i++) {
-        int idx = i * 3;
-        def dst = tablets[idx].CompactionStatus
-        def text = get_meta(dst)
-        def obj = new JsonSlurper().parseText(text)
-        def rowsets = obj.rowsets
-        for (x in [1,2]) {
-            dst = tablets[idx + x].CompactionStatus
-            text = get_meta(dst)
-            obj = new JsonSlurper().parseText(text)
-            log.info( "test rowset meta is the same")
-            assertEquals(rowsets, obj.rowsets)
+        compactionStatusIdx = tablets[0].size() - 1
+        // check rowsets inside the 3 replica
+        iterate_num = tablets.size() / 3;
+        for (int i = 0; i < iterate_num; i++) {
+            int idx = i * 3;
+            def dst = tablets[idx].CompactionStatus
+            def text = get_meta(dst)
+            def obj = new JsonSlurper().parseText(text)
+            def rowsets = obj.rowsets
+            for (x in [1,2]) {
+                dst = tablets[idx + x].CompactionStatus
+                text = get_meta(dst)
+                obj = new JsonSlurper().parseText(text)
+                log.info( "test rowset meta is the same")
+                assertEquals(rowsets, obj.rowsets)
+            }
         }
-    }
 
 
-    sql """
+        sql """
     DROP TABLE ${tableName}
     """
+    } finally {
+        setFeConfig("force_olap_table_replication_num", replicaNum)
+    }
 
-
-}
\ No newline at end of file
+}
diff --git a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy
index cda59eb1cd8..a55ead48705 100644
--- a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy
@@ -18,151 +18,154 @@ import groovy.json.JsonSlurper
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
 suite("modify_replica_use_partition_by_hdfs") {
-    def fetchBeHttp = { check_func, meta_url ->
-        def i = meta_url.indexOf("/api")
-        String endPoint = meta_url.substring(0, i)
-        String metaUri = meta_url.substring(i)
-        i = endPoint.lastIndexOf('/')
-        endPoint = endPoint.substring(i + 1)
-        httpTest {
-            endpoint endPoint
-            uri metaUri
-            op "get"
-            check check_func
-        }
+    if (!enableHdfs()) {
+        logger.info("skip this case because hdfs is not enabled");
     }
-    // data_sizes is one arrayList<Long>, t is tablet
-    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
-        def tabletId = t.TabletId
-        String meta_url = t.MetaUrl
-        def clos = {  respCode, body ->
-            logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
-            assertEquals("${respCode}".toString(), "200")
-            String out = "${body}".toString()
-            def obj = new JsonSlurper().parseText(out)
-            data_sizes[0] = obj.local_data_size
-            data_sizes[1] = obj.remote_data_size
+    def replicaNum = getFeConfig("force_olap_table_replication_num")
+    setFeConfig("force_olap_table_replication_num", 0)
+    try {
+
+        def fetchBeHttp = { check_func, meta_url ->
+            def i = meta_url.indexOf("/api")
+            String endPoint = meta_url.substring(0, i)
+            String metaUri = meta_url.substring(i)
+            i = endPoint.lastIndexOf('/')
+            endPoint = endPoint.substring(i + 1)
+            httpTest {
+                endpoint endPoint
+                uri metaUri
+                op "get"
+                check check_func
+            }
+        }
+        // data_sizes is one arrayList<Long>, t is tablet
+        def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+            def tabletId = t.TabletId
+            String meta_url = t.MetaUrl
+            def clos = {  respCode, body ->
+                logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
+                assertEquals("${respCode}".toString(), "200")
+                String out = "${body}".toString()
+                def obj = new JsonSlurper().parseText(out)
+                data_sizes[0] = obj.local_data_size
+                data_sizes[1] = obj.remote_data_size
+            }
+            fetchBeHttp(clos, meta_url.replace("header", "data_size"))
+        }
+        // used as passing out parameter to fetchDataSize
+        List<Long> sizes = [-1, -1]
+        def get_meta = { url ->
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl ")
+            sb.append(url)
+            String command = sb.toString()
+            log.info(command)
+            def process = command.execute()
+            int code = process.waitFor()
+            def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+            def out = process.getText()
+            logger.info("Run compaction: code=" + code + ", err=" + err)
+            assertEquals(code, 0)
+            return out
         }
-        fetchBeHttp(clos, meta_url.replace("header", "data_size"))
-    }
-    // used as passing out parameter to fetchDataSize
-    List<Long> sizes = [-1, -1]
-    def get_meta = { url ->
-        StringBuilder sb = new StringBuilder();
-        sb.append("curl ")
-        sb.append(url)
-        String command = sb.toString()
-        log.info(command)
-        def process = command.execute()
-        int code = process.waitFor()
-        def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
-        def out = process.getText()
-        logger.info("Run compaction: code=" + code + ", err=" + err)
-        assertEquals(code, 0)
-        return out
-    }
-
-    def tableName = "lineitem3"
-    sql """ DROP TABLE IF EXISTS ${tableName} """
-    def stream_load_one_part = { partnum ->
-        streamLoad {
-            table tableName
-            // a default db 'regression_test' is specified in
-            // ${DORIS_HOME}/conf/regression-conf.groovy
-
-            // default label is UUID:
-            // set 'label' UUID.randomUUID().toString()
-
-            // default column_separator is specify in doris fe config, usually is '\t'.
-            // this line change to ','
-            set 'column_separator', '|'
-            set 'compress_type', 'GZ'
-
-
-            // relate to ${DORIS_HOME}/regression-test/data/demo/streamload_input.csv.
-            // also, you can stream load a http stream, e.g. http://xxx/some.csv
-            file """${getS3Url()}/regression/tpch/sf1/lineitem.csv.split${partnum}.gz"""
-
-            time 10000 // limit inflight 10s
-
-            // stream load action will check result, include Success status, and NumberTotalRows == NumberLoadedRows
 
-            // if declared a check callback, the default check condition will ignore.
-            // So you must check all condition
-            check { result, exception, startTime, endTime ->
-                if (exception != null) {
-                    throw exception
+        def suffix = UUID.randomUUID().hashCode().abs()
+        def tableName = "lineitem3${suffix}"
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        def stream_load_one_part = { partnum ->
+            streamLoad {
+                table tableName
+                // a default db 'regression_test' is specified in
+                // ${DORIS_HOME}/conf/regression-conf.groovy
+
+                // default label is UUID:
+                // set 'label' UUID.randomUUID().toString()
+
+                // default column_separator is specify in doris fe config, usually is '\t'.
+                // this line change to ','
+                set 'column_separator', '|'
+                set 'compress_type', 'GZ'
+
+
+                // relate to ${DORIS_HOME}/regression-test/data/demo/streamload_input.csv.
+                // also, you can stream load a http stream, e.g. http://xxx/some.csv
+                file """${getS3Url()}/regression/tpch/sf1/lineitem.csv.split${partnum}.gz"""
+
+                time 10000 // limit inflight 10s
+
+                // stream load action will check result, include Success status, and NumberTotalRows == NumberLoadedRows
+
+                // if declared a check callback, the default check condition will ignore.
+                // So you must check all condition
+                check { result, exception, startTime, endTime ->
+                    if (exception != null) {
+                        throw exception
+                    }
+                    log.info("Stream load result: ${result}".toString())
+                    def json = parseJson(result)
+                    assertEquals("success", json.Status.toLowerCase())
+                    assertEquals(json.NumberTotalRows, json.NumberLoadedRows)
+                    assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
                 }
-                log.info("Stream load result: ${result}".toString())
-                def json = parseJson(result)
-                assertEquals("success", json.Status.toLowerCase())
-                assertEquals(json.NumberTotalRows, json.NumberLoadedRows)
-                assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
             }
         }
-    }
 
-    def load_lineitem_table = {
-        stream_load_one_part("00")
-        stream_load_one_part("01")
-        def tablets = sql_return_maparray """
+        def load_lineitem_table = {
+            stream_load_one_part("00")
+            stream_load_one_part("01")
+            def tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        while (tablets[0].LocalDataSize == "0") {
-            log.info( "test local size is zero, sleep 10s")
-            sleep(10000)
-            tablets = sql_return_maparray """
+            while (tablets[0].LocalDataSize == "0") {
+                log.info( "test local size is zero, sleep 10s")
+                sleep(10000)
+                tablets = sql_return_maparray """
             SHOW TABLETS FROM ${tableName}
             """
+            }
         }
-    }
 
-    def check_storage_policy_exist = { name->
-        def polices = sql"""
+        def check_storage_policy_exist = { name->
+            def polices = sql"""
         show storage policy;
         """
-        for (p in polices) {
-            if (name == p[0]) {
-                return true;
+            for (p in polices) {
+                if (name == p[0]) {
+                    return true;
+                }
             }
+            return false;
         }
-        return false;
-    }
 
-    def resource_name = "test_table_replica_with_data_resource"
-    def policy_name= "test_table_replica_with_data_policy"
+        def resource_name = "test_table_replica_with_data_resource${suffix}"
+        def policy_name= "test_table_replica_with_data_policy${suffix}"
 
-    if (check_storage_policy_exist(policy_name)) {
-        sql """
+        if (check_storage_policy_exist(policy_name)) {
+            sql """
             DROP STORAGE POLICY ${policy_name}
         """
-    }
+        }
 
-    def has_resouce = sql """
+        def has_resouce = sql """
         SHOW RESOURCES WHERE NAME = "${resource_name}";
     """
-    if (has_resouce.size() > 0) {
-        sql """
+        if (has_resouce.size() > 0) {
+            sql """
             DROP RESOURCE ${resource_name}
         """
-    }
+        }
 
-    sql """
+        sql """
         CREATE RESOURCE IF NOT EXISTS "${resource_name}"
         PROPERTIES(
             "type"="hdfs",
             "fs.defaultFS"="${getHdfsFs()}",
             "hadoop.username"="${getHdfsUser()}",
-            "hadoop.password"="${getHdfsPasswd()}",
-            "dfs.nameservices" = "my_ha",
-            "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
-            "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
-            "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
-            "dfs.client.failover.proxy.provider" = 
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+            "hadoop.password"="${getHdfsPasswd()}"
         );
     """
 
-    sql """
+        sql """
         CREATE STORAGE POLICY IF NOT EXISTS ${policy_name}
         PROPERTIES(
             "storage_resource" = "${resource_name}",
@@ -170,9 +173,9 @@ suite("modify_replica_use_partition_by_hdfs") {
         )
     """
 
-    // test one replica then we will alter all the replica of all partitions to 3
-    sql """ DROP TABLE IF EXISTS ${tableName} """
-    sql """
+        // test one replica then we will alter all the replica of all partitions to 3
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
             CREATE TABLE IF NOT EXISTS ${tableName} (
             L_ORDERKEY    INTEGER NOT NULL,
             L_PARTKEY     INTEGER NOT NULL,
@@ -204,86 +207,94 @@ suite("modify_replica_use_partition_by_hdfs") {
             )
         """
 
-    load_lineitem_table()
+        load_lineitem_table()
 
-    // wait 10 minutes, then show tablets from table; expect RemoteDataSize of not_use_storage_policy_tablet_list to be LocalDataSize1 and LocalDataSize to be 0
-    sleep(600000)
+        // wait 10 minutes, then show tablets from table; expect RemoteDataSize of not_use_storage_policy_tablet_list to be LocalDataSize1 and LocalDataSize to be 0
+        log.info("wait for 10min")
+        sleep(600000)
 
 
-    def tablets = sql_return_maparray """
+        def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    log.info( "test tablets not empty")
-    fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
-        log.info( "test remote size is zero, sleep 10s")
-        sleep(10000)
-        tablets = sql_return_maparray """
+        log.info( "test tablets not empty")
+        fetchDataSize(sizes, tablets[0])
+        def retry = 100
+        while (sizes[1] == 0 && retry --> 0) {
+            log.info( "test remote size is zero, sleep 10s")
+            sleep(10000)
+            tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        fetchDataSize(sizes, tablets[0])
-    }
-    assertTrue(tablets.size() > 0)
-    def LocalDataSize1 = sizes[0]
-    def RemoteDataSize1 = sizes[1]
-    log.info( "test local size is zero")
-    assertEquals(LocalDataSize1, 0)
-    log.info( "test remote size not zero")
-    assertTrue(RemoteDataSize1 != 0)
-    def originSize = tablets.size()
-
-    // alter change replication num
-    sql """
-    ALTER TABLE ${tableName}
-    MODIFY PARTITION (p202301, p202302) SET("replication_num"="3");
-    """
+            fetchDataSize(sizes, tablets[0])
+        }
+        assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
+        assertTrue(tablets.size() > 0)
+        def LocalDataSize1 = sizes[0]
+        def RemoteDataSize1 = sizes[1]
+        log.info( "test local size is zero")
+        assertEquals(LocalDataSize1, 0)
+        log.info( "test remote size not zero")
+        assertTrue(RemoteDataSize1 != 0)
+        def originSize = tablets.size()
+
+        // alter change replication num
+        if (!isCloudMode()) {
+            sql """
+        ALTER TABLE ${tableName}
+        MODIFY PARTITION (p202301, p202302) SET("replication_num"="3", "storage_policy" = "${policy_name}");
+        """
+        }
 
-    sql """
+        sql """
     select count(*) from ${tableName}
     """
 
-    sql """
+        sql """
     select * from ${tableName} limit 10
     """
-    // wait one minute for migration to be completed
-    sleep(60000)
+        // wait one minute for migration to be completed
+        // sleep(60000)
+        log.info("wait one minute for migration to be completed")
 
-    // compare whether the rowsets meta of all tablet replicas are the same
-    tablets = sql_return_maparray """
+        // compare whether the rowsets meta of all tablet replicas are the same
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    while (tablets.size() != 3 * originSize) {
-        log.info( "tablets clone not finished, sleep 10s")
-        sleep(10000)
-        tablets = sql_return_maparray """
+        retry = 100
+        while (tablets.size() != 3 * originSize && retry --> 0) {
+            log.info( "tablets clone not finished(tablets.size = 
${tablets.size()}, originSize = ${originSize}), sleep 10s")
+            sleep(10000)
+            tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-    }
-    def compactionStatusIdx = tablets[0].size() - 1
-    // check rowsets inside the 3 replica
-    def iterate_num = tablets.size() / 3;
-    for (int i = 0; i < iterate_num; i++) {
-        int idx = i * 3;
-        def dst = tablets[idx].CompactionStatus
-        def text = get_meta(dst)
-        def obj = new JsonSlurper().parseText(text)
-        def rowsets = obj.rowsets
-        for (x in [1,2]) {
-            dst = tablets[idx + x].CompactionStatus
-            text = get_meta(dst)
-            obj = new JsonSlurper().parseText(text)
-            log.info( "test rowset meta is the same")
-            assertEquals(rowsets, obj.rowsets)
         }
-    }
+        assertTrue(tablets.size() == 3 * originSize, "tablets clone not finished, maybe some error occurred")
+        def compactionStatusIdx = tablets[0].size() - 1
+        // check rowsets inside the 3 replica
+        def iterate_num = tablets.size() / 3;
+        for (int i = 0; i < iterate_num; i++) {
+            int idx = i * 3;
+            def dst = tablets[idx].CompactionStatus
+            def text = get_meta(dst)
+            def obj = new JsonSlurper().parseText(text)
+            def rowsets = obj.rowsets
+            for (x in [1,2]) {
+                dst = tablets[idx + x].CompactionStatus
+                text = get_meta(dst)
+                obj = new JsonSlurper().parseText(text)
+                log.info( "test rowset meta is the same")
+                assertEquals(rowsets, obj.rowsets)
+            }
+        }
 
 
-    sql """
+        sql """
     DROP TABLE ${tableName}
     """
 
-    // test table with 3 replication, then we will alter all the replica of all partitions to 1
-    sql """
+        // test table with 3 replication, then we will alter all the replica of all partitions to 1
+        sql """
             CREATE TABLE IF NOT EXISTS ${tableName} (
             L_ORDERKEY    INTEGER NOT NULL,
             L_PARTKEY     INTEGER NOT NULL,
@@ -315,69 +326,74 @@ suite("modify_replica_use_partition_by_hdfs") {
             )
         """
 
-    load_lineitem_table()
+        load_lineitem_table()
 
-    // show tablets from table, get the LocalDataSize1 of the first tablet
-    tablets = sql_return_maparray """
+        // show tablets from table, get the LocalDataSize1 of the first tablet
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    fetchDataSize(sizes, tablets[0])
-    log.info( "test tablets not empty")
-    assertTrue(tablets.size() > 0)
-    LocalDataSize1 = sizes[0]
-    RemoteDataSize1 = sizes[1]
-    log.info( "test local size not zero")
-    assertTrue(LocalDataSize1 != 0)
-    log.info( "test remote size is zero")
-    assertEquals(RemoteDataSize1, 0)
+        fetchDataSize(sizes, tablets[0])
+        log.info( "test tablets not empty")
+        assertTrue(tablets.size() > 0)
+        LocalDataSize1 = sizes[0]
+        RemoteDataSize1 = sizes[1]
+        log.info( "test local size not zero")
+        assertTrue(LocalDataSize1 != 0)
+        log.info( "test remote size is zero")
+        assertEquals(RemoteDataSize1, 0)
 
-    // wait 10 minutes, then show tablets from table; expect RemoteDataSize of not_use_storage_policy_tablet_list to be LocalDataSize1 and LocalDataSize to be 0
-    sleep(600000)
+        // wait 10 minutes, then show tablets from table; expect RemoteDataSize of not_use_storage_policy_tablet_list to be LocalDataSize1 and LocalDataSize to be 0
+        log.info("wait for 10min")
+        sleep(600000)
 
 
-    tablets = sql_return_maparray """
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    log.info( "test tablets not empty")
-    assertTrue(tablets.size() > 0)
-    fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
-        log.info( "test remote size is zero, sleep 10s")
-        sleep(10000)
-        tablets = sql_return_maparray """
+        log.info( "test tablets not empty")
+        assertTrue(tablets.size() > 0)
+        fetchDataSize(sizes, tablets[0])
+        retry = 100
+        while (sizes[1] == 0 && retry --> 0) {
+            log.info( "test remote size is zero, sleep 10s")
+            sleep(10000)
+            tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        fetchDataSize(sizes, tablets[0])
-    }
-    LocalDataSize1 = sizes[0]
-    RemoteDataSize1 = sizes[1]
-    log.info( "test local size is zero")
-    assertEquals(LocalDataSize1, 0)
-    log.info( "test remote size not zero")
-    assertTrue(RemoteDataSize1 != 0)
-
-    // alter change replication num
-    sql """
-    ALTER TABLE ${tableName}
-    MODIFY PARTITION (p202301, p202302) SET("replication_num"="1");
-    """
+            fetchDataSize(sizes, tablets[0])
+        }
+        assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
+        LocalDataSize1 = sizes[0]
+        RemoteDataSize1 = sizes[1]
+        log.info( "test local size is zero")
+        assertEquals(LocalDataSize1, 0)
+        log.info( "test remote size not zero")
+        assertTrue(RemoteDataSize1 != 0)
+
+        // alter change replication num
+        if (!isCloudMode()) {
+            sql """
+        ALTER TABLE ${tableName}
+        MODIFY PARTITION (p202301, p202302) SET("replication_num"="1", "storage_policy" = "${policy_name}");
+        """
+        }
 
-    sql """
+        sql """
     select count(*) from ${tableName}
     """
 
-    sql """
+        sql """
     select * from ${tableName} limit 10
     """
 
-    sql """
+        sql """
     DROP TABLE ${tableName}
     """
 
 
-    // test table with one partition p1 3 replication and another p2 with 1 replication,
-    // then we will alter p1 to one replica and p2 to 3
-    sql """
+        // test table with one partition p1 3 replication and another p2 with 1 replication,
+        // then we will alter p1 to one replica and p2 to 3
+        sql """
             CREATE TABLE IF NOT EXISTS ${tableName} (
             L_ORDERKEY    INTEGER NOT NULL,
             L_PARTKEY     INTEGER NOT NULL,
@@ -408,99 +424,108 @@ suite("modify_replica_use_partition_by_hdfs") {
             )
         """
 
-    load_lineitem_table()
+        load_lineitem_table()
 
-    // show tablets from table, get the LocalDataSize1 of the first tablet
-    tablets = sql_return_maparray """
+        // show tablets from table, get the LocalDataSize1 of the first tablet
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    fetchDataSize(sizes, tablets[0])
-    log.info( "test tablets not empty")
-    assertTrue(tablets.size() > 0)
-    LocalDataSize1 = sizes[0]
-    RemoteDataSize1 = sizes[1]
-    log.info( "test local size not zero")
-    assertTrue(LocalDataSize1 != 0)
-    log.info( "test remote size is zero")
-    assertEquals(RemoteDataSize1, 0)
+        fetchDataSize(sizes, tablets[0])
+        log.info( "test tablets not empty")
+        assertTrue(tablets.size() > 0)
+        LocalDataSize1 = sizes[0]
+        RemoteDataSize1 = sizes[1]
+        log.info( "test local size not zero")
+        assertTrue(LocalDataSize1 != 0)
+        log.info( "test remote size is zero")
+        assertEquals(RemoteDataSize1, 0)
 
-    // wait 10 minutes, then show tablets from table; expect RemoteDataSize of not_use_storage_policy_tablet_list to be LocalDataSize1 and LocalDataSize to be 0
-    sleep(600000)
+        // wait 10 minutes, then show tablets from table; expect RemoteDataSize of not_use_storage_policy_tablet_list to be LocalDataSize1 and LocalDataSize to be 0
+        log.info("wait for 10min")
+        sleep(600000)
 
 
-    tablets = sql_return_maparray """
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
-    log.info( "test tablets not empty")
-    assertTrue(tablets.size() > 0)
-    fetchDataSize(sizes, tablets[0])
-    while (sizes[1] == 0) {
-        log.info( "test remote size is zero, sleep 10s")
-        sleep(10000)
-        tablets = sql_return_maparray """
+        log.info( "test tablets not empty")
+        assertTrue(tablets.size() > 0)
+        fetchDataSize(sizes, tablets[0])
+        retry = 100
+        while (sizes[1] == 0 && retry --> 0) {
+            log.info( "test remote size is zero, sleep 10s")
+            sleep(10000)
+            tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        fetchDataSize(sizes, tablets[0])
-    }
-    LocalDataSize1 = sizes[0]
-    RemoteDataSize1 = sizes[1]
-    log.info( "test local size is zero")
-    assertEquals(LocalDataSize1, 0)
-    log.info( "test remote size not zero")
-    assertTrue(RemoteDataSize1 != 0)
-
-    // alter change replication num
-    sql """
-    ALTER TABLE ${tableName}
-    MODIFY PARTITION (p202301) SET("replication_num"="1");
-    """
+            fetchDataSize(sizes, tablets[0])
+        }
+        assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
+        LocalDataSize1 = sizes[0]
+        RemoteDataSize1 = sizes[1]
+        log.info( "test local size is zero")
+        assertEquals(LocalDataSize1, 0)
+        log.info( "test remote size not zero")
+        assertTrue(RemoteDataSize1 != 0)
+
+        // alter change replication num
+        if (!isCloudMode()) {
+            sql """
+        ALTER TABLE ${tableName}
+        MODIFY PARTITION (p202301) SET("replication_num"="1", "storage_policy" = "${policy_name}");
+        """
 
-    sql """
-    ALTER TABLE ${tableName}
-    MODIFY PARTITION (p202302) SET("replication_num"="3");
-    """
+            sql """
+        ALTER TABLE ${tableName}
+        MODIFY PARTITION (p202302) SET("replication_num"="3", "storage_policy" = "${policy_name}");
+        """
+        }
 
-    sql """
+        sql """
     select count(*) from ${tableName}
     """
 
-    sql """
+        sql """
     select * from ${tableName} limit 10
     """
 
-    // wait one minute for migration to be completed
-    sleep(60000)
-    // compare whether the rowsets meta of all tablet replicas in the 3-replica partition are the same
-    tablets = sql_return_maparray """
+        // wait one minute for migration to be completed
+        log.info("wait one minute for migration to be completed")
+        sleep(60000)
+        // compare whether the rowsets meta of all tablet replicas in the 3-replica partition are the same
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
-    // sleep to wait for the report
-    sleep(15000)
-    tablets = sql_return_maparray """
+        // sleep to wait for the report
+        sleep(15000)
+        tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
-    compactionStatusIdx = tablets[0].size() - 1
-    // check rowsets inside the 3 replica
-    iterate_num = tablets.size() / 3;
-    for (int i = 0; i < iterate_num; i++) {
-        int idx = i * 3;
-        def dst = tablets[idx].CompactionStatus
-        def text = get_meta(dst)
-        def obj = new JsonSlurper().parseText(text)
-        def rowsets = obj.rowsets
-        for (x in [1,2]) {
-            dst = tablets[idx + x].CompactionStatus
-            text = get_meta(dst)
-            obj = new JsonSlurper().parseText(text)
-            log.info( "test rowset meta is the same")
-            assertEquals(rowsets, obj.rowsets)
+        compactionStatusIdx = tablets[0].size() - 1
+        // check rowsets inside the 3 replica
+        iterate_num = tablets.size() / 3;
+        for (int i = 0; i < iterate_num; i++) {
+            int idx = i * 3;
+            def dst = tablets[idx].CompactionStatus
+            def text = get_meta(dst)
+            def obj = new JsonSlurper().parseText(text)
+            def rowsets = obj.rowsets
+            for (x in [1,2]) {
+                dst = tablets[idx + x].CompactionStatus
+                text = get_meta(dst)
+                obj = new JsonSlurper().parseText(text)
+                log.info( "test rowset meta is the same")
+                assertEquals(rowsets, obj.rowsets)
+            }
         }
-    }
 
 
-    sql """
+        sql """
     DROP TABLE ${tableName}
     """
 
+    } finally {
+        setFeConfig("force_olap_table_replication_num", replicaNum)
+    }
 
 }
diff --git a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy
index c23a9eea6df..ec8fcc8f255 100644
--- a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy
@@ -47,7 +47,8 @@ suite("table_modify_resouce") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "lineitem4"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "lineitem4${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
     def stream_load_one_part = { partnum ->
         streamLoad {
@@ -114,8 +115,8 @@ suite("table_modify_resouce") {
         return false;
     }
 
-    def resource_name = "test_table_with_data_resource_modify_1"
-    def policy_name= "test_table_with_data_policy_modify_1"
+    def resource_name = "test_table_with_data_resource_modify_1${suffix}"
+    def policy_name= "test_table_with_data_policy_modify_1${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -206,7 +207,7 @@ suite("table_modify_resouce") {
         """
         fetchDataSize(sizes, tablets[0])
         try_times -= 1
-        assertTrue(try_times > 0)
+        assertTrue(try_times > 0, "remote size is still zero, maybe some error occurred")
     }
 
     // modify resource and policy to new values, then check whether the remote data size matches
@@ -227,7 +228,7 @@ suite("table_modify_resouce") {
     """
 
 
-    def tablets2 = sql """
+    def tablets2 = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     // [8] local data size, [9] remote data size
@@ -289,7 +290,7 @@ suite("table_modify_resouce") {
         """
         fetchDataSize(sizes, tablets[0])
         try_times -= 1
-        assertTrue(try_times > 0)
+        assertTrue(try_times > 0, "remote size is still zero, maybe some error occurred")
     }
 
     // modify resource and policy to new values, then check whether the remote data size matches
@@ -317,7 +318,7 @@ suite("table_modify_resouce") {
     log.info( "test all remote size not zero")
     for (int i = 0; i < tablets2.size(); i++) {
         fetchDataSize(sizes, tablets2[i])
-        assertTrue(sizes[1] > 0)
+        assertTrue(sizes[1] > 0, tablets2[i].TabletId + " remote size is " + sizes[1] + ", no greater than 0, MetaUrl is " + tablets2[i].MetaUrl)
     }
 
 
diff --git a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy
index a2c6f32cd63..601d42ab0d2 100644
--- a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy
@@ -18,6 +18,9 @@ import groovy.json.JsonSlurper
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
 suite("table_modify_resouce_by_hdfs") {
+    if (!enableHdfs()) {
+        logger.info("skip this case because hdfs is not enabled");
+    }
     def fetchBeHttp = { check_func, meta_url ->
         def i = meta_url.indexOf("/api")
         String endPoint = meta_url.substring(0, i)
@@ -47,7 +50,8 @@ suite("table_modify_resouce_by_hdfs") {
     }
     // used as passing out parameter to fetchDataSize
     List<Long> sizes = [-1, -1]
-    def tableName = "lineitem4"
+    def suffix = UUID.randomUUID().hashCode().abs()
+    def tableName = "lineitem4${suffix}"
     sql """ DROP TABLE IF EXISTS ${tableName} """
     def stream_load_one_part = { partnum ->
         streamLoad {
@@ -114,8 +118,8 @@ suite("table_modify_resouce_by_hdfs") {
         return false;
     }
 
-    def resource_name = "test_table_with_data_resource_modify_1"
-    def policy_name= "test_table_with_data_policy_modify_1"
+    def resource_name = "test_table_with_data_resource_modify_1${suffix}"
+    def policy_name= "test_table_with_data_policy_modify_1${suffix}"
 
     if (check_storage_policy_exist(policy_name)) {
         sql """
@@ -138,12 +142,7 @@ suite("table_modify_resouce_by_hdfs") {
             "type"="hdfs",
             "fs.defaultFS"="${getHdfsFs()}",
             "hadoop.username"="${getHdfsUser()}",
-            "hadoop.password"="${getHdfsPasswd()}",
-            "dfs.nameservices" = "my_ha",
-            "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
-            "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
-            "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
-            "dfs.client.failover.proxy.provider" = 
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+            "hadoop.password"="${getHdfsPasswd()}"
         );
     """
 
@@ -204,7 +203,7 @@ suite("table_modify_resouce_by_hdfs") {
         """
         fetchDataSize(sizes, tablets[0])
         try_times -= 1
-        assertTrue(try_times > 0)
+        assertTrue(try_times > 0, "remote size is still zero, maybe some error occurred")
     }
 
     // modify resource and policy to new values, then check whether the remote data size matches
@@ -287,7 +286,7 @@ suite("table_modify_resouce_by_hdfs") {
         """
         fetchDataSize(sizes, tablets[0])
         try_times -= 1
-        assertTrue(try_times > 0)
+        assertTrue(try_times > 0, "remote size is still zero, maybe some error occurred")
     }
 
     // modify resource and policy to new values, then check whether the remote data size matches
@@ -315,7 +314,7 @@ suite("table_modify_resouce_by_hdfs") {
     log.info( "test all remote size not zero")
     for (int i = 0; i < tablets2.size(); i++) {
         fetchDataSize(sizes, tablets2[i])
-        assertTrue(sizes[1] > 0)
+        assertTrue(sizes[1] > 0, tablets2[i].TabletId + " remote size is " + sizes[1] + ", no greater than 0, MetaUrl is " + tablets2[i].MetaUrl)
     }
 
 
diff --git a/regression-test/suites/cold_heat_separation_p2/test_show_storage_policy_using.groovy b/regression-test/suites/cold_heat_separation_p2/test_show_storage_policy_using.groovy
index 6f0413f63ae..fcf8485eaf9 100644
--- a/regression-test/suites/cold_heat_separation_p2/test_show_storage_policy_using.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/test_show_storage_policy_using.groovy
@@ -120,7 +120,7 @@ suite("test_show_storage_policy_using") {
         );
     """
 
-    show_result = sql """
+    def show_result = sql """
         show storage policy using for ${policy_name}
     """
     assertEquals(show_result.size(), 2)


