This is an automated email from the ASF dual-hosted git repository.

hellostephen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 83ac0caefd7 [ci](nonConcurrent) add new ci check (#51581)
83ac0caefd7 is described below

commit 83ac0caefd7861cba646fb28d6ae642bee435275
Author: Dongyang Li <lidongy...@selectdb.com>
AuthorDate: Thu Jun 12 16:08:21 2025 +0800

    [ci](nonConcurrent) add new ci check (#51581)
---
 .github/workflows/comment-to-trigger-teamcity.yml  |  23 +++-
 regression-test/pipeline/common/teamcity-utils.sh  |  12 +-
 .../pipeline/nonConcurrent/conf/be.conf            |  90 +++++++++++++
 .../pipeline/nonConcurrent/conf/fe.conf            |  94 ++++++++++++++
 .../nonConcurrent/conf/regression-conf.groovy      | 142 +++++++++++++++++++++
 .../window_functions/test_column_boundary.groovy   |   2 +-
 6 files changed, 356 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/comment-to-trigger-teamcity.yml b/.github/workflows/comment-to-trigger-teamcity.yml
index 3164f330c9c..ddaa3d6fd24 100644
--- a/.github/workflows/comment-to-trigger-teamcity.yml
+++ b/.github/workflows/comment-to-trigger-teamcity.yml
@@ -57,6 +57,7 @@ jobs:
             "${COMMENT_BODY}" == *'run cloud_p0'* ||
             "${COMMENT_BODY}" == *'run cloud_p1'* ||
             "${COMMENT_BODY}" == *'run vault_p0'* ||
+            "${COMMENT_BODY}" == *'run nonConcurrent'* ||
             "${COMMENT_BODY}" == *'run arm'* ||
             "${COMMENT_BODY}" == *'run performance'* ]]; then
             echo "comment_trigger=true" | tee -a "$GITHUB_OUTPUT"
@@ -87,7 +88,7 @@ jobs:
         echo "TARGET_BRANCH='${TARGET_BRANCH}'" | tee -a "$GITHUB_OUTPUT"
         echo "COMMENT_BODY='${COMMENT_BODY}'" | tee -a "$GITHUB_OUTPUT"
 
-        reg="run (buildall|compile|p0|p1|feut|beut|cloudut|external|clickbench|cloud_p0|cloud_p1|vault_p0|arm|performance)( [1-9]*[0-9]+)*"
+        reg="run (buildall|compile|p0|p1|feut|beut|cloudut|external|clickbench|cloud_p0|cloud_p1|vault_p0|nonConcurrent|arm|performance)( [1-9]*[0-9]+)*"
         COMMENT_TRIGGER_TYPE="$(echo -e "${COMMENT_BODY}" | xargs | grep -E "${reg}" | awk -F' ' '{print $2}' | sed -n 1p | sed 's/\r//g')"
         COMMENT_REPEAT_TIMES="$(echo -e "${COMMENT_BODY}" | xargs | grep -E "${reg}" | awk -F' ' '{print $3}' | sed -n 1p | sed 's/\r//g')"
         echo "COMMENT_TRIGGER_TYPE=${COMMENT_TRIGGER_TYPE}" | tee -a "$GITHUB_OUTPUT"
@@ -123,10 +124,12 @@ jobs:
             echo "changed_p0=true" | tee -a "$GITHUB_OUTPUT"
             echo "changed_external=true" | tee -a "$GITHUB_OUTPUT"
             echo "changed_arm=true" | tee -a "$GITHUB_OUTPUT"
+            echo "changed_nonConcurrent=true" | tee -a "$GITHUB_OUTPUT"
           else
             echo "changed_p0=false" | tee -a "$GITHUB_OUTPUT"
             echo "changed_external=false" | tee -a "$GITHUB_OUTPUT"
             echo "changed_arm=false" | tee -a "$GITHUB_OUTPUT"
+            echo "changed_nonConcurrent=false" | tee -a "$GITHUB_OUTPUT"
           fi
           if file_changed_regression_p1; then
             echo "changed_p1=true" | tee -a "$GITHUB_OUTPUT"
@@ -163,6 +166,7 @@ jobs:
           echo "changed_cloud_p0=true" | tee -a "$GITHUB_OUTPUT"
           echo "changed_cloud_p1=true" | tee -a "$GITHUB_OUTPUT"
           echo "changed_vault_p0=true" | tee -a "$GITHUB_OUTPUT"
+          echo "changed_nonConcurrent=true" | tee -a "$GITHUB_OUTPUT"
         fi
 
     # - name: "Setup tmate session"
@@ -309,6 +313,22 @@ jobs:
           "vault_p0" \
           "${{ steps.parse.outputs.COMMENT_REPEAT_TIMES }}"
 
+    - name: "Trigger or Skip nonConcurrent"
+      if: ${{ fromJSON(steps.parse.outputs.comment_trigger) && contains(fromJSON('["nonConcurrent", "buildall"]'), steps.parse.outputs.COMMENT_TRIGGER_TYPE) }}
+      run: |
+        source ./regression-test/pipeline/common/teamcity-utils.sh
+        if [[ ${{ steps.parse.outputs.COMMENT_TRIGGER_TYPE }} == "buildall" ]]; then
+          echo "COMMENT_TRIGGER_TYPE is buildall, trigger compile is enough, compile will trigger nonConcurrent" && exit
+        fi
+        set -x
+        trigger_or_skip_build \
+          "${{ steps.changes.outputs.changed_nonConcurrent }}" \
+          "${{ steps.parse.outputs.PULL_REQUEST_NUM }}" \
+          "${{ steps.parse.outputs.TARGET_BRANCH }}" \
+          "${{ steps.parse.outputs.COMMIT_ID_FROM_TRIGGER }}" \
+          "nonConcurrent" \
+          "${{ steps.parse.outputs.COMMENT_REPEAT_TIMES }}"
+
     - name: "Trigger or Skip cloud_p1"
       if: ${{ fromJSON(steps.parse.outputs.comment_trigger) && contains(fromJSON('["cloud_p1", "buildall"]'), steps.parse.outputs.COMMENT_TRIGGER_TYPE) }}
       run: |
@@ -367,3 +387,4 @@ jobs:
         skip_build "${{ steps.parse.outputs.COMMIT_ID_FROM_TRIGGER }}" cloud_p1
         skip_build "${{ steps.parse.outputs.COMMIT_ID_FROM_TRIGGER }}" cloudut
         skip_build "${{ steps.parse.outputs.COMMIT_ID_FROM_TRIGGER }}" vault_p0
+        skip_build "${{ steps.parse.outputs.COMMIT_ID_FROM_TRIGGER }}" nonConcurrent
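
For reference, a minimal standalone sketch (not part of the commit) of how the updated regex and parsing pipeline above behave; COMMENT_BODY here is a stand-in for the real PR comment the workflow receives:

    #!/usr/bin/env bash
    # Stand-in for the PR comment body passed to the workflow.
    COMMENT_BODY="run nonConcurrent 3"
    reg="run (buildall|compile|p0|p1|feut|beut|cloudut|external|clickbench|cloud_p0|cloud_p1|vault_p0|nonConcurrent|arm|performance)( [1-9]*[0-9]+)*"
    # Field 2 is the pipeline name, field 3 the optional repeat count.
    COMMENT_TRIGGER_TYPE="$(echo -e "${COMMENT_BODY}" | xargs | grep -E "${reg}" | awk -F' ' '{print $2}' | sed -n 1p | sed 's/\r//g')"
    COMMENT_REPEAT_TIMES="$(echo -e "${COMMENT_BODY}" | xargs | grep -E "${reg}" | awk -F' ' '{print $3}' | sed -n 1p | sed 's/\r//g')"
    echo "type=${COMMENT_TRIGGER_TYPE} times=${COMMENT_REPEAT_TIMES}"  # -> type=nonConcurrent times=3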
diff --git a/regression-test/pipeline/common/teamcity-utils.sh b/regression-test/pipeline/common/teamcity-utils.sh
index f6a1f79bee5..4952a29acf5 100644
--- a/regression-test/pipeline/common/teamcity-utils.sh
+++ b/regression-test/pipeline/common/teamcity-utils.sh
@@ -25,9 +25,9 @@
 # Controls which branches can run which pipelines
 declare -A targetBranch_to_pipelines
 targetBranch_to_pipelines=(
-    ['master']='feut beut cloudut compile p0 p1 external arm performance cloud_p0 cloud_p1 vault_p0'
-    ['branch-3.1']='feut beut cloudut compile p0 p1 external arm performance cloud_p0 cloud_p1 vault_p0'
-    ['branch-3.0']='feut beut cloudut compile p0 p1 external arm performance cloud_p0 cloud_p1 vault_p0'
+    ['master']='feut beut cloudut compile p0 p1 external arm performance cloud_p0 cloud_p1 vault_p0 nonConcurrent'
+    ['branch-3.1']='feut beut cloudut compile p0 p1 external arm performance cloud_p0 cloud_p1 vault_p0 nonConcurrent'
+    ['branch-3.0']='feut beut cloudut compile p0 p1 external arm performance cloud_p0 cloud_p1 vault_p0 nonConcurrent'
     ['branch-2.1']='feut beut compile p0 p1 external arm performance'
     ['branch-2.0']='feut beut compile p0 p1 external arm performance'
 )
@@ -51,6 +51,7 @@ comment_to_pipeline=(
     ['cloud_p0']='Doris_DorisRegression_CloudP0'
     ['cloud_p1']='Doris_DorisCloudRegression_CloudP1'
     ['vault_p0']='Doris_DorisCloudRegression_VaultP0'
+    ['nonConcurrent']='Doris_DorisRegression_NonConcurrentRegression'
 )
 
 # Names of the pipelines to be triggered by GitHub comments
@@ -72,7 +73,7 @@ conment_to_context=(
     ['cloud_p0']='cloud_p0 (Doris Cloud Regression)'
     ['cloud_p1']='cloud_p1 (Doris Cloud Regression)'
     ['vault_p0']='vault_p0 (Doris Cloud Regression)'
-)
+    ['nonConcurrent']='NonConcurrent Regression (Doris Regression)'
 
 get_commit_id_of_build() {
     # Get the commit id of a given build
@@ -304,13 +305,14 @@ trigger_or_skip_build() {
         cancel_queue_build "${PULL_REQUEST_NUM}" "${COMMENT_TRIGGER_TYPE}"
         skip_build "${COMMIT_ID_FROM_TRIGGER}" "${COMMENT_TRIGGER_TYPE}"
         if [[ ${COMMENT_TRIGGER_TYPE} == "compile" ]]; then
-            # when skipping compile, also skip p0 p1 external cloud_p0 cloud_p1 vault_p0
+            # when skipping compile, also skip p0 p1 external cloud_p0 cloud_p1 vault_p0 nonConcurrent
             skip_build "${COMMIT_ID_FROM_TRIGGER}" "p0"
             skip_build "${COMMIT_ID_FROM_TRIGGER}" "p1"
             skip_build "${COMMIT_ID_FROM_TRIGGER}" "external"
             skip_build "${COMMIT_ID_FROM_TRIGGER}" "cloud_p0"
             skip_build "${COMMIT_ID_FROM_TRIGGER}" "cloud_p1"
             skip_build "${COMMIT_ID_FROM_TRIGGER}" "vault_p0"
+            skip_build "${COMMIT_ID_FROM_TRIGGER}" "nonConcurrent"
         fi
     fi
 }
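
A minimal sketch of the allow-list gating that targetBranch_to_pipelines provides; branch_allows is a hypothetical helper written for illustration, not a function in the script:

    #!/usr/bin/env bash
    declare -A targetBranch_to_pipelines=(
        ['master']='feut beut cloudut compile p0 p1 external arm performance cloud_p0 cloud_p1 vault_p0 nonConcurrent'
        ['branch-2.1']='feut beut compile p0 p1 external arm performance'
    )
    # Hypothetical helper: succeed only if the pipeline appears in the branch's allow list.
    branch_allows() {
        local branch="$1" pipeline="$2"
        [[ " ${targetBranch_to_pipelines[$branch]} " == *" ${pipeline} "* ]]
    }
    branch_allows master nonConcurrent && echo "master: nonConcurrent enabled"
    branch_allows branch-2.1 nonConcurrent || echo "branch-2.1: nonConcurrent not enabled"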
diff --git a/regression-test/pipeline/nonConcurrent/conf/be.conf b/regression-test/pipeline/nonConcurrent/conf/be.conf
new file mode 100644
index 00000000000..961756b2e71
--- /dev/null
+++ b/regression-test/pipeline/nonConcurrent/conf/be.conf
@@ -0,0 +1,90 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+CUR_DATE=`date +%Y%m%d-%H%M%S`
+
+PPROF_TMPDIR="$DORIS_HOME/log/"
+
+# For jdk 8
+JAVA_OPTS="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xloggc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -Dcom.mysql.cj.disableAbandonedConnectionCleanup=true"
+
+# For jdk 17, this JAVA_OPTS will be used as default JVM options
+JAVA_OPTS_FOR_JDK_17="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xlog:gc*:$DORIS_HOME/log/be.gc.log.$CUR_DATE:time,uptime:filecount=10,filesize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -XX:+IgnoreUnrecognizedVMOptions --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.lang.invoke=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.io=AL [...]
+
+# Set your own JAVA_HOME
+# JAVA_HOME=/path/to/jdk/
+
+# https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
+# https://jemalloc.net/jemalloc.3.html
+JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:true,prof_active:false,lg_prof_interval:-1"
+JEMALLOC_PROF_PRFIX="jemalloc_heap_profile_"
+
+# INFO, WARNING, ERROR, FATAL
+sys_log_level = INFO
+sys_log_verbose_modules=query_context,runtime_query_statistics_mgr
+be_port = 9161
+webserver_port = 8141
+heartbeat_service_port = 9151
+brpc_port = 8161
+arrow_flight_sql_port = 8181
+
+path_gc_check_interval_second=1
+max_garbage_sweep_interval=180
+
+log_buffer_level = -1
+
+enable_stream_load_record = true
+stream_load_record_batch_size = 500
+storage_root_path=/mnt/ssd01/cluster_storage/doris.SSD/P0/cluster1;/mnt/ssd01/cluster_storage/doris.SSD
+
+priority_networks=172.19.0.0/24
+enable_fuzzy_mode=true
+max_depth_of_expr_tree=200
+enable_feature_binlog=true
+max_sys_mem_available_low_water_mark_bytes=69206016
+user_files_secure_path=/
+enable_debug_points=true
+# debug scanner context dead loop
+enable_debug_log_timeout_secs=0
+enable_missing_rows_correctness_check=true
+
+flush_thread_num_per_store = 24
+high_priority_flush_thread_num_per_store = 24
+
+trino_connector_plugin_dir=/tmp/trino_connector/connectors
+
+enable_jvm_monitor = true
+
+enable_be_proc_monitor = true
+be_proc_monitor_interval_ms = 30000
+webserver_num_workers = 128
+pipeline_task_leakage_detect_period_sec=1
+crash_in_memory_tracker_inaccurate = true
+#enable_table_size_correctness_check=true
+enable_brpc_connection_check=true
+enable_write_index_searcher_cache=true
+
+# enable download small files in batch, see apache/doris#45061 for details
+enable_batch_download = true
+
+remove_unused_remote_files_interval_sec=60
+cold_data_compaction_interval_sec=60
+large_cumu_compaction_task_min_thread_num=3
+
+# This feature has a bug, so it is disabled by default; enable it only in the pipeline for observation
+enable_parquet_page_index=true
+
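
A hedged spot-check (not part of the commit): a running BE exposes its effective configuration over the webserver port set above, so one can verify that a setting such as enable_debug_points took effect; the show_config endpoint follows the Doris BE configuration docs, and the host/port are assumptions taken from this conf:

    curl -s "http://127.0.0.1:8141/api/show_config" | grep enable_debug_points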
diff --git a/regression-test/pipeline/nonConcurrent/conf/fe.conf b/regression-test/pipeline/nonConcurrent/conf/fe.conf
new file mode 100644
index 00000000000..85d7a878155
--- /dev/null
+++ b/regression-test/pipeline/nonConcurrent/conf/fe.conf
@@ -0,0 +1,94 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+#####################################################################
+## The uppercase properties are read and exported by bin/start_fe.sh.
+## To see all Frontend configurations,
+## see fe/src/org/apache/doris/common/Config.java
+#####################################################################
+
+CUR_DATE=`date +%Y%m%d-%H%M%S`
+
+# the output dir of stderr and stdout 
+LOG_DIR = ${DORIS_HOME}/log
+
+# For jdk 8
+JAVA_OPTS="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx4096m -XX:+HeapDumpOnOutOfMemoryError -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintClassHistogramAfterFullGC -Xloggc:$DORIS_HOME/log/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true -Dcom.mysql.cj.disableAbandonedConnectionCleanup=true"
+
+# For jdk 17, this JAVA_OPTS will be used as default JVM options
+JAVA_OPTS_FOR_JDK_17="-Djavax.security.auth.useSubjectCredsOnly=false -Xmx8192m -Xms8192m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=$DORIS_HOME/log/ -Xlog:gc*,classhisto*=trace:$DORIS_HOME/log/fe.gc.log.$CUR_DATE:time,uptime:filecount=10,filesize=50M -Dcom.mysql.cj.disableAbandonedConnectionCleanup=true --add-opens=java.base/java.nio=ALL-UNNAMED --add-opens java.base/jdk.internal.ref=ALL-UNNAMED"
+
+sys_log_level = INFO
+sys_log_mode = NORMAL
+sys_log_verbose_modules = org.apache.doris.master.MasterImpl,org.apache.doris.common.profile,org.apache.doris.qe.QeProcessorImpl,org.apache.doris.load.ExportTaskExecutor,org.apache.doris.planner.OlapScanNode
+arrow_flight_sql_port = 8081
+catalog_trash_expire_second=1
+#enable ssl for test
+enable_ssl = true
+
+enable_outfile_to_local = true
+tablet_create_timeout_second=100
+remote_fragment_exec_timeout_ms=120000
+fuzzy_test_type=p0
+use_fuzzy_session_variable=true
+
+enable_feature_binlog=true
+
+enable_debug_points=true
+
+# enable mtmv
+enable_mtmv = true
+
+dynamic_partition_check_interval_seconds=3
+
+desired_max_waiting_jobs=200
+
+# make checkpoint more frequent
+edit_log_roll_num = 1000
+
+# make job/label clean more frequent
+history_job_keep_max_second = 300
+streaming_label_keep_max_second = 300
+label_keep_max_second = 300
+
+# job test configurations
+# allows the creation of jobs with an interval of seconds
+enable_job_schedule_second_for_test = true
+mtmv_task_queue_size = 4096
+
+enable_workload_group = true
+publish_topic_info_interval_ms = 1000
+workload_sched_policy_interval_ms = 1000
+
+disable_decimalv2 = false
+disable_datev1 = false
+
+master_sync_policy = WRITE_NO_SYNC
+replica_sync_policy = WRITE_NO_SYNC
+
+enable_advance_next_id = true
+# enable deadlock detection
+enable_deadlock_detection = true
+max_lock_hold_threshold_seconds = 10
+
+force_olap_table_replication_allocation=tag.location.default:1
+
+# profile related
+max_query_profile_num = 2000
+max_spilled_profile_num = 2000
+
+check_table_lock_leaky=true
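
Similarly hedged, for the FE side: assuming the stock ADMIN SHOW FRONTEND CONFIG statement and the MySQL-protocol endpoint used by this pipeline's regression conf (172.19.0.2:9131), the effective value can be checked with:

    mysql -h 172.19.0.2 -P 9131 -u root -e "ADMIN SHOW FRONTEND CONFIG LIKE 'enable_debug_points';"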
diff --git a/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy b/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy
new file mode 100644
index 00000000000..12832a9d763
--- /dev/null
+++ b/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy
@@ -0,0 +1,142 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+/* ******* Do not commit this file unless you know what you are doing ******* */
+
+// **Note**: the default db will be created if it does not exist
+defaultDb = "regression_test"
+
+jdbcUrl = "jdbc:mysql://172.19.0.2:9131/?useLocalSessionState=true&allowLoadLocalInfile=true&zeroDateTimeBehavior=round"
+targetJdbcUrl = "jdbc:mysql://172.19.0.2:9131/?useLocalSessionState=true&allowLoadLocalInfile=true&zeroDateTimeBehavior=round"
+jdbcUser = "root"
+jdbcPassword = ""
+
+ccrDownstreamUrl = "jdbc:mysql://172.19.0.2:9131/?useLocalSessionState=true&allowLoadLocalInfile=true"
+ccrDownstreamUser = "root"
+ccrDownstreamPassword = ""
+ccrDownstreamFeThriftAddress = "127.0.0.1:9020"
+
+feSourceThriftAddress = "127.0.0.1:9020"
+feTargetThriftAddress = "127.0.0.1:9020"
+feSyncerUser = "root"
+feSyncerPassword = ""
+
+feHttpAddress = "172.19.0.2:8131"
+feHttpUser = "root"
+feHttpPassword = ""
+
+// set DORIS_HOME by system properties
+// e.g. java -DDORIS_HOME=./
+suitePath = "${DORIS_HOME}/regression-test/suites"
+dataPath = "${DORIS_HOME}/regression-test/data"
+pluginPath = "${DORIS_HOME}/regression-test/plugins"
+realDataPath = "${DORIS_HOME}/regression-test/realdata"
+trinoPluginsPath = "/tmp/trino_connector"
+// sf1DataPath can be url like "https://doris-community-test-1308700295.cos.ap-hongkong.myqcloud.com" or local path like "/data"
+//sf1DataPath = "https://doris-community-test-1308700295.cos.ap-hongkong.myqcloud.com"
+
+// will test <group>/<suite>.groovy
+// an empty group will test all groups
+testGroups = "nonConcurrent"
+// an empty suite will test all suites
+testSuites = ""
+// empty directories will test all directories
+testDirectories = ""
+
+// these groups will not be executed
+excludeGroups = "p1,p2"
+
+// these suites will not be executed
+excludeSuites = "000_the_start_sentinel_do_not_touch," + // keep this line as the first line
+    "test_write_inverted_index_exception_fault_injection," + // causes a core dump
+    "zzz_the_end_sentinel_do_not_touch" // keep this line as the last line
+
+// these directories will not be executed
+excludeDirectories = "000_the_start_sentinel_do_not_touch," + // keep this line as the first line
+    "variant_github_events_nonConcurrent_p2," +
+    "variant_github_events_new_p2," +
+    "hdfs_vault_p2," +
+    "zzz_the_end_sentinel_do_not_touch"// keep this line as the last line
+
+// for testing csv with header
+enableHdfs=false // set to true if hdfs is ready
+hdfsFs = "hdfs://127.0.0.1:9000"
+hdfsUser = "doris-test"
+hdfsPasswd = ""
+brokerName = "broker_name"
+
+// broker load test config
+enableBrokerLoad=true
+
+// jdbc connector test config
+// To enable the jdbc test, you need to first start the mysql/pg containers.
+// See `docker/thirdparties/start-thirdparties-docker.sh`
+enableJdbcTest=false
+mysql_57_port=7111
+pg_14_port=7121
+mariadb_10_port=3326
+// hive catalog test config
+// To enable the hive test, you need to first start the hive container.
+// See `docker/thirdparties/start-thirdparties-docker.sh`
+enableHiveTest=false
+enablePaimonTest=false
+
+// port of hive2 docker
+hive2HmsPort=9083
+hive2HdfsPort=8020
+hive2ServerPort=10000
+hive2PgPort=5432
+
+// port of hive3 docker
+hive3HmsPort=9383
+hive3HdfsPort=8320
+hive3ServerPort=13000
+hive3PgPort=5732
+
+// kafka test config
+// To enable the kafka test, you need to first start the kafka container.
+// See `docker/thirdparties/start-thirdparties-docker.sh`
+enableKafkaTest=true
+kafka_port=19193
+
+// iceberg test config
+iceberg_rest_uri_port=18181
+iceberg_minio_port=19001
+
+enableEsTest=false
+es_6_port=19200
+es_7_port=29200
+es_8_port=39200
+
+cacheDataPath = "/data/regression/"
+
+s3Source = "aliyun"
+s3Endpoint = "oss-cn-hongkong-internal.aliyuncs.com"
+
+//arrow flight sql test config
+extArrowFlightSqlHost = "127.0.0.1"
+extArrowFlightSqlPort = 8081
+extArrowFlightSqlUser = "root"
+extArrowFlightSqlPassword= ""
+
+max_failure_num=50
+
+externalEnvIp="127.0.0.1"
+
+// trino-connector catalog test config
+enableTrinoConnectorTest = false
diff --git a/regression-test/suites/query_p0/sql_functions/window_functions/test_column_boundary.groovy b/regression-test/suites/query_p0/sql_functions/window_functions/test_column_boundary.groovy
index 5523c878429..05c541810a1 100644
--- a/regression-test/suites/query_p0/sql_functions/window_functions/test_column_boundary.groovy
+++ b/regression-test/suites/query_p0/sql_functions/window_functions/test_column_boundary.groovy
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-suite("test_column_boundary","nonConcurrent") {
+suite("test_column_boundary","nonConcurrent,p1") {
     sql """ DROP TABLE IF EXISTS test_column_boundary """
     sql """
         CREATE TABLE IF NOT EXISTS test_column_boundary (

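To exercise the same group selection locally, a sketch using the repo's regression runner (the flag names are assumptions taken from run-regression-test.sh; verify with its --help output):

    # Run only suites tagged nonConcurrent, mirroring testGroups in the new conf.
    sh run-regression-test.sh --run -g nonConcurrent

On a PR, the new pipeline is triggered by commenting "run nonConcurrent", or "run nonConcurrent 3" to repeat it three times.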
