This is an automated email from the ASF dual-hosted git repository.

morrysnow pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new eca967b3281 branch-3.1: [chore](regression-test) move test_cold_data_compaction to `nonConcurrent` #50699 (#52145)
eca967b3281 is described below

commit eca967b32816cfee62125da36bece870057f6476
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Tue Jun 24 10:50:39 2025 +0800

    branch-3.1: [chore](regression-test) move test_cold_data_compaction to `nonConcurrent` #50699 (#52145)
    
    Cherry-picked from #50699
    
    Co-authored-by: yagagagaga <[email protected]>
---
 .../cold_heat_separation/cold_data_compaction.groovy     | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/regression-test/suites/cold_heat_separation/cold_data_compaction.groovy b/regression-test/suites/cold_heat_separation/cold_data_compaction.groovy
index bf9e33e7528..c80e39b8a9d 100644
--- a/regression-test/suites/cold_heat_separation/cold_data_compaction.groovy
+++ b/regression-test/suites/cold_heat_separation/cold_data_compaction.groovy
@@ -18,7 +18,7 @@
 import com.amazonaws.services.s3.model.ListObjectsRequest
 import java.util.function.Supplier
 
-suite("test_cold_data_compaction") {
+suite("test_cold_data_compaction", "nonConcurrent") {
     def retryUntilTimeout = { int timeoutSecond, Supplier<Boolean> closure ->
         long start = System.currentTimeMillis()
         while (true) {
@@ -87,7 +87,7 @@ suite("test_cold_data_compaction") {
     """
 
     // wait until files upload to S3
-    retryUntilTimeout(1800, {
+    retryUntilTimeout(3600, {
         def res = sql_return_maparray "show data from t_recycle_in_s3"
         String size = ""
         String remoteSize = ""
@@ -112,9 +112,13 @@ suite("test_cold_data_compaction") {
 
     // trigger cold data compaction
     sql """alter table t_recycle_in_s3 set ("disable_auto_compaction" = 
"false")"""
+    def v = get_be_param("disable_auto_compaction").values().toArray()[0].toString()
+    if ("true" == v) {
+        set_be_param("disable_auto_compaction", "false")
+    }
 
     // wait until compaction finish
-    retryUntilTimeout(1800, {
+    retryUntilTimeout(3600, {
         def filesAfterCompaction = getS3Client().listObjects(
                 new ListObjectsRequest().withBucketName(getS3BucketName()).withPrefix(s3Prefix+ "/data/${tabletId}")).getObjectSummaries()
         logger.info("t_recycle_in_s3's remote file number is 
${filesAfterCompaction.size()}")
@@ -122,8 +126,12 @@ suite("test_cold_data_compaction") {
         return filesAfterCompaction.size() == 2
     })
 
+    if ("true" == v) {
+        set_be_param("disable_auto_compaction", "true")
+    }
+
     sql "drop table t_recycle_in_s3 force"
-    retryUntilTimeout(1800, {
+    retryUntilTimeout(3600, {
         def filesAfterDrop = getS3Client().listObjects(
                 new ListObjectsRequest().withBucketName(getS3BucketName()).withPrefix(s3Prefix+ "/data/${tabletId}")).getObjectSummaries()
         logger.info("after drop t_recycle_in_s3, remote file number is 
${filesAfterDrop.size()}")

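For context, the first hunk shows only the head of the retryUntilTimeout helper. A minimal sketch of a poll-until-true helper with that signature is below; only the closure signature and the first two body lines match the hunk, while the poll interval, timeout check, and failure mode are illustrative assumptions, not the suite's actual body.

    import java.util.function.Supplier

    // Sketch of a polling helper matching the signature in the hunk above.
    // Everything after the while(true) line is assumed for illustration.
    def retryUntilTimeout = { int timeoutSecond, Supplier<Boolean> closure ->
        long start = System.currentTimeMillis()
        while (true) {
            if (closure.get()) {
                return // condition satisfied, stop polling
            }
            if (System.currentTimeMillis() - start > timeoutSecond * 1000L) {
                throw new IllegalStateException("condition not met within ${timeoutSecond}s")
            }
            Thread.sleep(1000) // assumed poll interval
        }
    }

    // Usage in the same shape as the test: poll until the condition holds.
    int attempts = 0
    retryUntilTimeout(60, { ++attempts >= 3 })

Note that the patch doubles every wait from 1800 s to 3600 s, which only moves the failure threshold; the polling loop itself is unchanged.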

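The second change is a save/restore of the BE-level disable_auto_compaction parameter around the compaction wait, using the framework's get_be_param/set_be_param helpers exactly as they appear in the hunks. A condensed sketch of the pattern follows; the try/finally is an assumed hardening, not part of the patch, which restores the flag inline after the wait loop.

    // Condensed from the hunks above; get_be_param/set_be_param are
    // Doris regression-framework helpers. The try/finally wrapper is an
    // assumed refinement: it would restore the BE flag even if the wait
    // times out, whereas the patch restores it inline.
    def saved = get_be_param("disable_auto_compaction").values().toArray()[0].toString()
    if ("true" == saved) {
        set_be_param("disable_auto_compaction", "false") // let compaction run
    }
    try {
        // ... trigger cold data compaction and poll S3 until it finishes ...
    } finally {
        if ("true" == saved) {
            set_be_param("disable_auto_compaction", "true") // restore original value
        }
    }

Because set_be_param flips a global backend setting that would affect every concurrently running suite, moving the test into the `nonConcurrent` group keeps that toggle from racing with other tests, which is presumably the motivation for the regrouping in #50699.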