This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 9fb783187b9 branch-3.0: [fix](regression) fix cache p0 regression tests #48515 (#48561)
9fb783187b9 is described below

commit 9fb783187b924d079758578df4fb9609fab8a90b
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Mon Mar 10 12:20:48 2025 +0800

    branch-3.0: [fix](regression) fix cache p0 regression tests #48515 (#48561)
    
    Cherry-picked from #48515
    
    Co-authored-by: zhengyu <zhangzhen...@selectdb.com>
---
 .../cache/http/test_list_cache_file.groovy         |  6 +-
 .../warm_up/table/test_warm_up_table.groovy        | 11 +--
 .../suites/cloud_p0/cache/ttl/alter_ttl_1.groovy   |  8 ++-
 .../suites/cloud_p0/cache/ttl/alter_ttl_2.groovy   |  5 +-
 .../suites/cloud_p0/cache/ttl/alter_ttl_3.groovy   |  6 +-
 .../suites/cloud_p0/cache/ttl/alter_ttl_4.groovy   |  6 +-
 .../cloud_p0/cache/ttl/alter_ttl_max_int64.groovy  |  6 +-
 .../cloud_p0/cache/ttl/alter_ttl_random.groovy     |  6 +-
 .../cloud_p0/cache/ttl/alter_ttl_seconds.groovy    |  6 +-
 .../cache/ttl/create_table_as_select.groovy        | 13 ++--
 .../cloud_p0/cache/ttl/create_table_like.groovy    |  7 +-
 .../suites/cloud_p0/cache/ttl/test_ttl.groovy      | 80 ++++++++++++++--------
 .../cloud_p0/cache/ttl/test_ttl_lru_evict.groovy   |  4 +-
 13 files changed, 101 insertions(+), 63 deletions(-)
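
The hunks below repeat one stabilization pattern across the TTL suites. As a reading aid, a minimal Groovy sketch of that pattern (the helper closures and sleeps appear in the hunks themselves; the table name here is illustrative):

    // Pin cache contents: disable background compaction right after the table
    // is created, so compaction cannot rewrite rowsets and skew the file cache
    // metrics the tests assert on ("no influence from compaction").
    sql """ alter table customer_ttl set ("disable_auto_compaction" = "true") """

    // Avoid racing the asynchronous cache clear: wait after clearing the file
    // cache before loading, so the clear cannot evict freshly cached blocks.
    clearFileCache.call() {
        respCode, body -> {}
    }
    sleep(30000)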

diff --git a/regression-test/suites/cloud_p0/cache/http/test_list_cache_file.groovy b/regression-test/suites/cloud_p0/cache/http/test_list_cache_file.groovy
index acd33a22f0c..cf562a3b92f 100644
--- a/regression-test/suites/cloud_p0/cache/http/test_list_cache_file.groovy
+++ b/regression-test/suites/cloud_p0/cache/http/test_list_cache_file.groovy
@@ -54,14 +54,14 @@ suite("test_list_cache_file") {
 
     def get_tablets = { String tbl_name ->
         def res = sql "show tablets from ${tbl_name}"
-        List<Integer> tablets = new ArrayList<>()
+        List<Long> tablets = new ArrayList<>()
         for (final def line in res) {
-            tablets.add(Integer.valueOf(line[0].toString()))
+            tablets.add(Long.valueOf(line[0].toString()))
         }
         return tablets
     }
 
-    def get_rowsets = { int tablet_id ->
+    def get_rowsets = { long tablet_id ->
         var ret = []
         httpTest {
             endpoint ""
diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy
index ffce02f4f64..3f9dc93d550 100644
--- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy
+++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy
@@ -60,8 +60,8 @@ suite("test_warm_up_table") {
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
 
     sql "use @regression_cluster_name0"
 
@@ -69,9 +69,11 @@ suite("test_warm_up_table") {
     sql new File("""${context.file.parent}/../ddl/${table}_delete.sql""").text
     // create table if not exists
     sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties)
+    sql """ alter table ${table} set ("disable_auto_compaction" = "true") """ 
// no influence from compaction
+
     sleep(10000)
 
-    def load_customer_once =  { 
+    def load_customer_once =  {
         def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
         def loadLabel = table + "_" + uniqueID
         // load data from cos
@@ -112,7 +114,8 @@ suite("test_warm_up_table") {
 
     clearFileCache.call(ipList[0], httpPortList[0]);
     clearFileCache.call(ipList[1], httpPortList[1]);
-    
+    sleep(30000)
+
     load_customer_once()
     load_customer_once()
     load_customer_once()
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy
index 7cbaca8f1df..6a07df14922 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy
@@ -66,14 +66,15 @@ suite("alter_ttl_1") {
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     def load_customer_ttl_once =  { String table ->
         def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
         // def table = "customer"
         // create table if not exists
         sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties)
+        sql """ alter table ${table} set ("disable_auto_compaction" = "true") 
""" // no influence from compaction
         def loadLabel = table + "_" + uniqueID
         // load data from cos
         def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName)
@@ -96,6 +97,7 @@ suite("alter_ttl_1") {
     clearFileCache.call() {
         respCode, body -> {}
     }
+    sleep(30000)
 
     load_customer_ttl_once("customer_ttl")
     sql """ select count(*) from customer_ttl """
@@ -139,7 +141,7 @@ suite("alter_ttl_1") {
                     assertEquals(line.substring(i).toLong(), 0)
 
                 }
-                
+
                 if (line.contains("normal_queue_cache_size")) {
                     if (line.startsWith("#")) {
                         continue
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy
index b0069cd704a..a3d83f19dab 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy
@@ -66,14 +66,15 @@ suite("alter_ttl_2") {
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     def load_customer_ttl_once =  { String table ->
         def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
         // def table = "customer"
         // create table if not exists
         sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties)
+        sql """ alter table ${table} set ("disable_auto_compaction" = "true") 
""" // no influence from compaction
         def loadLabel = table + "_" + uniqueID
         // load data from cos
         def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName)
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy
index bd88c5287f9..e889639490e 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy
@@ -66,8 +66,8 @@ suite("alter_ttl_3") {
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     def load_customer_ttl_once =  { String table ->
         def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
@@ -75,6 +75,7 @@ suite("alter_ttl_3") {
         // create table if not exists
         sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties)
         def loadLabel = table + "_" + uniqueID
+        sql """ alter table ${table} set ("disable_auto_compaction" = "true") 
""" // no influence from compaction
         // load data from cos
         def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName)
         loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties
@@ -96,6 +97,7 @@ suite("alter_ttl_3") {
     clearFileCache.call() {
         respCode, body -> {}
     }
+    sleep(30000)
 
     load_customer_ttl_once("customer_ttl")
     sql """ select count(*) from customer_ttl """
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy
index 58db039f20e..6edda04994c 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy
@@ -66,8 +66,8 @@ suite("alter_ttl_4") {
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     sql new File("""${context.file.parent}/../ddl/customer_delete.sql""").text
     def load_customer_ttl_once =  { String table ->
@@ -75,6 +75,7 @@ suite("alter_ttl_4") {
         // def table = "customer"
         // create table if not exists
         sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties)
+        sql """ alter table ${table} set ("disable_auto_compaction" = "true") 
""" // no influence from compaction
         def loadLabel = table + "_" + uniqueID
         // load data from cos
         def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName)
@@ -121,6 +122,7 @@ suite("alter_ttl_4") {
     clearFileCache.call() {
         respCode, body -> {}
     }
+    sleep(30000)
 
     load_customer_ttl_once("customer_ttl")
     sql """ select count(*) from customer_ttl """
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy
index 19f946e2998..ec1b02bfc87 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy
@@ -58,13 +58,14 @@ suite("test_ttl_max_int64") {
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     def load_customer_once =  { String table ->
         def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
         sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties)
         def loadLabel = table + "_" + uniqueID
+        sql """ alter table ${table} set ("disable_auto_compaction" = "true") 
""" // no influence from compaction
         // load data from cos
         def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName)
         loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties
@@ -95,6 +96,7 @@ suite("test_ttl_max_int64") {
     clearFileCache.call() {
         respCode, body -> {}
     }
+    sleep(30000)
 
     load_customer_once("customer_ttl")
     sleep(30000) // 30s
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy
index 7ad86c2cc53..3aa0a3a2c58 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy
@@ -58,13 +58,14 @@ suite("test_ttl_random") {
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     sql (new File("""${context.file.parent}/../ddl/customer_ttl.sql""").text + ttlProperties)
     def load_customer_once =  { String table ->
         def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
         def loadLabel = table + "_" + uniqueID
+        sql """ alter table ${table} set ("disable_auto_compaction" = "true") 
""" // no influence from compaction
         // load data from cos
         def loadSql = new File("""${context.file.parent}/../ddl/customer_ttl_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName)
         loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties
@@ -86,6 +87,7 @@ suite("test_ttl_random") {
     clearFileCache.call() {
         respCode, body -> {}
     }
+    sleep(30000)
 
     load_customer_once("customer_ttl")
 
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy
index 6d72e51c4c0..81d6894aeca 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy
@@ -58,13 +58,14 @@ suite("test_ttl_seconds") {
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     def load_customer_once =  { String table ->
         def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
         sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties)
         def loadLabel = table + "_" + uniqueID
+        sql """ alter table ${table} set ("disable_auto_compaction" = "true") 
""" // no influence from compaction
         // load data from cos
         def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName)
         loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties
@@ -95,6 +96,7 @@ suite("test_ttl_seconds") {
     clearFileCache.call() {
         respCode, body -> {}
     }
+    sleep(30000)
 
     load_customer_once("customer_ttl")
     sleep(30000) // 30s
diff --git a/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy b/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy
index 47c458971bb..689c6faa168 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy
@@ -57,8 +57,8 @@ def clearFileCache = { check_func ->
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     sql """ DROP TABLE IF EXISTS customer_ttl_as_select """
     sql """
@@ -74,7 +74,7 @@ def clearFileCache = { check_func ->
         )
         DUPLICATE KEY(C_CUSTKEY, C_NAME)
         DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 32
-        PROPERTIES("file_cache_ttl_seconds"="180")
+        PROPERTIES("file_cache_ttl_seconds"="180", "disable_auto_compaction" = 
"true")
     """
 
     def getMetricsMethod = { check_func ->
@@ -89,6 +89,7 @@ def clearFileCache = { check_func ->
     clearFileCache.call() {
         respCode, body -> {}
     }
+    sleep(30000)
 
     def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
     def loadLabel = "customer_ttl_load_" + uniqueID
@@ -130,9 +131,9 @@ def clearFileCache = { check_func ->
             }
             assertTrue(flag1)
     }
-    sql """ CREATE TABLE customer_ttl_as_select DUPLICATE KEY(C_CUSTKEY, 
C_NAME) 
-            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 32 
-            PROPERTIES("file_cache_ttl_seconds"="120") as select * from 
customer_ttl"""
+    sql """ CREATE TABLE customer_ttl_as_select DUPLICATE KEY(C_CUSTKEY, 
C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 32
+            
PROPERTIES("file_cache_ttl_seconds"="120","disable_auto_compaction" = "true") 
as select * from customer_ttl"""
 
     sleep(30000) // 30s
     getMetricsMethod.call() {
diff --git a/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy b/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy
index 9c927f5c025..60e169789b2 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy
@@ -57,8 +57,8 @@ def clearFileCache = { check_func ->
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     sql """ DROP TABLE IF EXISTS customer_ttl_like """
     sql """
@@ -74,7 +74,7 @@ def clearFileCache = { check_func ->
         )
         DUPLICATE KEY(C_CUSTKEY, C_NAME)
         DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 32
-        PROPERTIES("file_cache_ttl_seconds"="180")
+        PROPERTIES("file_cache_ttl_seconds"="180","disable_auto_compaction" = 
"true")
     """
     sql """ create table customer_ttl like customer_ttl_like """
 
@@ -90,6 +90,7 @@ def clearFileCache = { check_func ->
     clearFileCache.call() {
         respCode, body -> {}
     }
+    sleep(30000)
 
     def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
     def loadLabel = "customer_ttl_load_" + uniqueID
diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy b/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy
index d9f928ebd89..e58b2ef8b98 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy
@@ -38,12 +38,17 @@ suite("test_ttl") {
     def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
-        httpTest {
-            endpoint ""
-            uri url
-            op "get"
-            body ""
-            check check_func
+        try {
+            httpTest {
+                endpoint ""
+                uri url
+                op "get"
+                body ""
+                check check_func
+            }
+        } catch (Exception e) {
+            logger.error("Failed to clear file cache: ${e.message}")
+            throw e
         }
     }
 
@@ -58,46 +63,61 @@ suite("test_ttl") {
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     def load_customer_once =  { String table ->
-        def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
-        sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties)
-        def loadLabel = table + "_" + uniqueID
-        // load data from cos
-        def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName)
-        loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties
-        sql loadSql
+        try {
+            def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
+            sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties)
+            def loadLabel = table + "_" + uniqueID
+            sql """ alter table ${table} set ("disable_auto_compaction" = 
"true") """ // no influence from compaction
+            // load data from cos
+            def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName)
+            loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties
+            sql loadSql
 
-        // check load state
-        while (true) {
-            def stateResult = sql "show load where Label = '${loadLabel}'"
-            def loadState = stateResult[stateResult.size() - 1][2].toString()
-            if ("CANCELLED".equalsIgnoreCase(loadState)) {
-                throw new IllegalStateException("load ${loadLabel} failed.")
-            } else if ("FINISHED".equalsIgnoreCase(loadState)) {
-                break
+            // check load state
+            while (true) {
+                def stateResult = sql "show load where Label = '${loadLabel}'"
+                def loadState = stateResult[stateResult.size() - 1][2].toString()
+                if ("CANCELLED".equalsIgnoreCase(loadState)) {
+                    logger.error("Data load failed for label: ${loadLabel}")
+                    throw new IllegalStateException("load ${loadLabel} failed.")
+                } else if ("FINISHED".equalsIgnoreCase(loadState)) {
+                    logger.info("Data load completed successfully for label: 
${loadLabel}")
+                    break
+                }
+                logger.info("Waiting for data load to complete. Current state: 
${loadState}")
+                sleep(5000)
             }
-            sleep(5000)
+        } catch (Exception e) {
+            logger.error("Failed to load customer data: ${e.message}")
+            throw e
         }
     }
 
     def getMetricsMethod = { check_func ->
-        httpTest {
-            endpoint backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendBrpcPort.get(backendId)
-            uri "/brpc_metrics"
-            op "get"
-            check check_func
+        try {
+            httpTest {
+                endpoint backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendBrpcPort.get(backendId)
+                uri "/brpc_metrics"
+                op "get"
+                check check_func
+            }
+        } catch (Exception e) {
+            logger.error("Failed to get metrics: ${e.message}")
+            throw e
         }
     }
 
     clearFileCache.call() {
         respCode, body -> {}
     }
+    sleep(10000)
 
     load_customer_once("customer_ttl")
-    sleep(30000) // 30s
+    sleep(10000)
     getMetricsMethod.call() {
         respCode, body ->
             assertEquals("${respCode}".toString(), "200")
diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_lru_evict.groovy b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_lru_evict.groovy
index f80e0208a74..537845600b9 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_lru_evict.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_lru_evict.groovy
@@ -38,8 +38,8 @@ import org.apache.http.impl.client.LaxRedirectStrategy;
 //  - set smaller max_ttl_cache_ratio in this test
 
 suite("test_ttl_lru_evict") {
-    //sql """ use @regression_cluster_name1 """
-    sql """ use @compute_cluster """
+    sql """ use @regression_cluster_name1 """
+    // sql """ use @compute_cluster """
     def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="150") """
     String[][] backends = sql """ show backends """
     String backendId;

