This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch branch-4.1
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-4.1 by this push:
new a02367e6122 branch-4.1: [test](filecache) add some file cache ttl
system test #60990 (#61563)
a02367e6122 is described below
commit a02367e612200c8ccb1e618e19a82f198c240681
Author: github-actions[bot]
<41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Sat Mar 21 10:45:53 2026 +0800
branch-4.1: [test](filecache) add some file cache ttl system test #60990
(#61563)
Cherry-picked from #60990
Co-authored-by: chunping <[email protected]>
---
.../cloud_p0/ttl/st04_alter_ttl_n_to_0_runtime.out | 4 +
.../cloud_p0/ttl/st06_warmup_ttl_type_assert.out | 7 +
.../data/cloud_p0/ttl/st07_qcs_consistency.out | 7 +
.../cloud_p0/ttl/st10_drop_partition_cleanup.out | 4 +
.../data/cloud_p0/ttl/st10_drop_table_cleanup.out | 4 +
.../cache/ddl/st04_alter_ttl_n_to_0_runtime.sql | 30 +++
.../cache/ddl/st06_warmup_ttl_type_assert.sql | 30 +++
.../cloud_p0/cache/ddl/st07_qcs_consistency.sql | 30 +++
.../cache/ddl/st10_drop_partition_cleanup.sql | 34 ++++
.../cloud_p0/cache/ddl/st10_drop_table_cleanup.sql | 30 +++
.../cache/ttl/st04_alter_ttl_n_to_0_runtime.groovy | 145 +++++++++++++++
.../cache/ttl/st06_warmup_ttl_type_assert.groovy | 122 ++++++++++++
.../cloud_p0/cache/ttl/st07_qcs_consistency.groovy | 106 +++++++++++
.../cache/ttl/st10_drop_partition_cleanup.groovy | 199 ++++++++++++++++++++
.../cache/ttl/st10_drop_table_cleanup.groovy | 207 +++++++++++++++++++++
15 files changed, 959 insertions(+)
diff --git
a/regression-test/data/cloud_p0/ttl/st04_alter_ttl_n_to_0_runtime.out
b/regression-test/data/cloud_p0/ttl/st04_alter_ttl_n_to_0_runtime.out
new file mode 100644
index 00000000000..475b9ff01e9
--- /dev/null
+++ b/regression-test/data/cloud_p0/ttl/st04_alter_ttl_n_to_0_runtime.out
@@ -0,0 +1,4 @@
+-- This file is automatically generated. You should know what you did if you
want to edit this
+-- !sql --
+400
+
diff --git a/regression-test/data/cloud_p0/ttl/st06_warmup_ttl_type_assert.out
b/regression-test/data/cloud_p0/ttl/st06_warmup_ttl_type_assert.out
new file mode 100644
index 00000000000..a8419e02ed9
--- /dev/null
+++ b/regression-test/data/cloud_p0/ttl/st06_warmup_ttl_type_assert.out
@@ -0,0 +1,7 @@
+-- This file is automatically generated. You should know what you did if you
want to edit this
+-- !source_preheat --
+200
+
+-- !target_query --
+200
+
diff --git a/regression-test/data/cloud_p0/ttl/st07_qcs_consistency.out
b/regression-test/data/cloud_p0/ttl/st07_qcs_consistency.out
new file mode 100644
index 00000000000..2ad8eb24dd3
--- /dev/null
+++ b/regression-test/data/cloud_p0/ttl/st07_qcs_consistency.out
@@ -0,0 +1,7 @@
+-- This file is automatically generated. You should know what you did if you
want to edit this
+-- !q1 --
+500
+
+-- !q2 --
+500
+
diff --git a/regression-test/data/cloud_p0/ttl/st10_drop_partition_cleanup.out
b/regression-test/data/cloud_p0/ttl/st10_drop_partition_cleanup.out
new file mode 100644
index 00000000000..9ce3f9b3980
--- /dev/null
+++ b/regression-test/data/cloud_p0/ttl/st10_drop_partition_cleanup.out
@@ -0,0 +1,4 @@
+-- This file is automatically generated. You should know what you did if you
want to edit this
+-- !part_preheat --
+240
+
diff --git a/regression-test/data/cloud_p0/ttl/st10_drop_table_cleanup.out
b/regression-test/data/cloud_p0/ttl/st10_drop_table_cleanup.out
new file mode 100644
index 00000000000..c393af38a3d
--- /dev/null
+++ b/regression-test/data/cloud_p0/ttl/st10_drop_table_cleanup.out
@@ -0,0 +1,4 @@
+-- This file is automatically generated. You should know what you did if you
want to edit this
+-- !sql --
+300
+
diff --git
a/regression-test/suites/cloud_p0/cache/ddl/st04_alter_ttl_n_to_0_runtime.sql
b/regression-test/suites/cloud_p0/cache/ddl/st04_alter_ttl_n_to_0_runtime.sql
new file mode 100644
index 00000000000..fd5eb2e28db
--- /dev/null
+++
b/regression-test/suites/cloud_p0/cache/ddl/st04_alter_ttl_n_to_0_runtime.sql
@@ -0,0 +1,30 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+DROP TABLE IF EXISTS ${TABLE_NAME};
+
+CREATE TABLE IF NOT EXISTS ${TABLE_NAME} (
+ k1 BIGINT NOT NULL,
+ v1 VARCHAR(128) NOT NULL
+)
+DUPLICATE KEY(k1)
+DISTRIBUTED BY HASH(k1) BUCKETS 8
+PROPERTIES (
+ "file_cache_ttl_seconds" = "3600",
+ "disable_auto_compaction" = "true"
+);
+
diff --git
a/regression-test/suites/cloud_p0/cache/ddl/st06_warmup_ttl_type_assert.sql
b/regression-test/suites/cloud_p0/cache/ddl/st06_warmup_ttl_type_assert.sql
new file mode 100644
index 00000000000..740a9699177
--- /dev/null
+++ b/regression-test/suites/cloud_p0/cache/ddl/st06_warmup_ttl_type_assert.sql
@@ -0,0 +1,30 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+DROP TABLE IF EXISTS ${TABLE_NAME};
+
+CREATE TABLE IF NOT EXISTS ${TABLE_NAME} (
+ k1 BIGINT NOT NULL,
+ v1 VARCHAR(64) NOT NULL
+)
+DUPLICATE KEY(k1)
+DISTRIBUTED BY HASH(k1) BUCKETS 8
+PROPERTIES (
+ "file_cache_ttl_seconds" = "3600",
+ "disable_auto_compaction" = "true"
+);
+
diff --git a/regression-test/suites/cloud_p0/cache/ddl/st07_qcs_consistency.sql
b/regression-test/suites/cloud_p0/cache/ddl/st07_qcs_consistency.sql
new file mode 100644
index 00000000000..86737a4024d
--- /dev/null
+++ b/regression-test/suites/cloud_p0/cache/ddl/st07_qcs_consistency.sql
@@ -0,0 +1,30 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+DROP TABLE IF EXISTS ${TABLE_NAME};
+
+CREATE TABLE IF NOT EXISTS ${TABLE_NAME} (
+ k1 BIGINT NOT NULL,
+ c1 VARCHAR(128) NOT NULL
+)
+DUPLICATE KEY(k1)
+DISTRIBUTED BY HASH(k1) BUCKETS 8
+PROPERTIES (
+ "file_cache_ttl_seconds" = "300",
+ "disable_auto_compaction" = "true"
+);
+
diff --git
a/regression-test/suites/cloud_p0/cache/ddl/st10_drop_partition_cleanup.sql
b/regression-test/suites/cloud_p0/cache/ddl/st10_drop_partition_cleanup.sql
new file mode 100644
index 00000000000..82892263eff
--- /dev/null
+++ b/regression-test/suites/cloud_p0/cache/ddl/st10_drop_partition_cleanup.sql
@@ -0,0 +1,34 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+DROP TABLE IF EXISTS ${TABLE_NAME};
+
+CREATE TABLE IF NOT EXISTS ${TABLE_NAME} (
+ k1 BIGINT NOT NULL,
+ c1 VARCHAR(64) NOT NULL
+)
+DUPLICATE KEY(k1)
+PARTITION BY RANGE(k1) (
+ PARTITION p1 VALUES LESS THAN ("1000"),
+ PARTITION p2 VALUES LESS THAN ("2000")
+)
+DISTRIBUTED BY HASH(k1) BUCKETS 8
+PROPERTIES (
+ "file_cache_ttl_seconds" = "300",
+ "disable_auto_compaction" = "true"
+);
+
diff --git
a/regression-test/suites/cloud_p0/cache/ddl/st10_drop_table_cleanup.sql
b/regression-test/suites/cloud_p0/cache/ddl/st10_drop_table_cleanup.sql
new file mode 100644
index 00000000000..49a6a61529a
--- /dev/null
+++ b/regression-test/suites/cloud_p0/cache/ddl/st10_drop_table_cleanup.sql
@@ -0,0 +1,30 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+DROP TABLE IF EXISTS ${TABLE_NAME};
+
+CREATE TABLE IF NOT EXISTS ${TABLE_NAME} (
+ k1 BIGINT NOT NULL,
+ c1 VARCHAR(64) NOT NULL
+)
+DUPLICATE KEY(k1)
+DISTRIBUTED BY HASH(k1) BUCKETS 8
+PROPERTIES (
+ "file_cache_ttl_seconds" = "300",
+ "disable_auto_compaction" = "true"
+);
+
diff --git
a/regression-test/suites/cloud_p0/cache/ttl/st04_alter_ttl_n_to_0_runtime.groovy
b/regression-test/suites/cloud_p0/cache/ttl/st04_alter_ttl_n_to_0_runtime.groovy
new file mode 100644
index 00000000000..ffc864e0e53
--- /dev/null
+++
b/regression-test/suites/cloud_p0/cache/ttl/st04_alter_ttl_n_to_0_runtime.groovy
@@ -0,0 +1,145 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("st04_alter_ttl_n_to_0_runtime") {
+ def customBeConfig = [
+ enable_evict_file_cache_in_advance : false,
+ file_cache_enter_disk_resource_limit_mode_percent : 99,
+ file_cache_background_ttl_gc_interval_ms : 1000,
+ file_cache_background_ttl_info_update_interval_ms : 1000,
+ file_cache_background_tablet_id_flush_interval_ms : 1000
+ ]
+
+ setBeConfigTemporary(customBeConfig) {
+ def clusters = sql "SHOW CLUSTERS"
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """use @${validCluster};"""
+
+ String tableName = "st04_ttl_n_to_0_tpl"
+ def ddl = new
File("""${context.file.parent}/../ddl/st04_alter_ttl_n_to_0_runtime.sql""").text
+ .replace("\${TABLE_NAME}", tableName)
+ sql ddl
+
+ String[][] backends = sql """show backends"""
+ def backendIdToBackendIP = [:]
+ def backendIdToBackendHttpPort = [:]
+ def backendIdToBackendBrpcPort = [:]
+ for (String[] backend in backends) {
+ if (backend[9].equals("true") &&
backend[19].contains("${validCluster}")) {
+ backendIdToBackendIP.put(backend[0], backend[1])
+ backendIdToBackendHttpPort.put(backend[0], backend[4])
+ backendIdToBackendBrpcPort.put(backend[0], backend[5])
+ }
+ }
+ assertEquals(backendIdToBackendIP.size(), 1)
+
+ def backendId = backendIdToBackendIP.keySet()[0]
+ def clearUrl = backendIdToBackendIP.get(backendId) + ":" +
backendIdToBackendHttpPort.get(backendId) + "/api/file_cache?op=clear&sync=true"
+ httpTest {
+ endpoint ""
+ uri clearUrl
+ op "get"
+ body ""
+ check { respCode, body ->
+ assertEquals("${respCode}".toString(), "200")
+ }
+ }
+
+ def getTabletIds = { String tbl ->
+ def tablets = sql """show tablets from ${tbl}"""
+ assertTrue(tablets.size() > 0, "No tablets found for table ${tbl}")
+ tablets.collect { it[0] as Long }
+ }
+
+ def waitForFileCacheType = { List<Long> tabletIds, String
expectedType, long timeoutMs = 600000L, long intervalMs = 2000L ->
+ long start = System.currentTimeMillis()
+ while (System.currentTimeMillis() - start < timeoutMs) {
+ boolean allMatch = true
+ for (Long tabletId in tabletIds) {
+ def rows = sql """select type from
information_schema.file_cache_info where tablet_id = ${tabletId}"""
+ if (rows.isEmpty()) {
+ allMatch = false
+ break
+ }
+ def mismatch = rows.find { row ->
!row[0]?.toString()?.equalsIgnoreCase(expectedType) }
+ if (mismatch) {
+ allMatch = false
+ break
+ }
+ }
+ if (allMatch) {
+ return
+ }
+ sleep(intervalMs)
+ }
+ assertTrue(false, "Timeout waiting for ${expectedType},
tablets=${tabletIds}")
+ }
+
+ def waitTtlCacheSizeZero = { long timeoutMs = 120000L, long intervalMs
= 2000L ->
+ long start = System.currentTimeMillis()
+ while (System.currentTimeMillis() - start < timeoutMs) {
+ long ttlCacheSize = -1L
+ httpTest {
+ endpoint backendIdToBackendIP.get(backendId) + ":" +
backendIdToBackendBrpcPort.get(backendId)
+ uri "/brpc_metrics"
+ op "get"
+ check { respCode, body ->
+ assertEquals("${respCode}".toString(), "200")
+ String out = "${body}".toString()
+ def lines = out.split('\n')
+ for (String line in lines) {
+ if (line.startsWith("#")) {
+ continue
+ }
+ if (line.contains("ttl_cache_size")) {
+ def idx = line.indexOf(' ')
+ ttlCacheSize =
line.substring(idx).trim().toLong()
+ break
+ }
+ }
+ }
+ }
+ if (ttlCacheSize == 0L) {
+ return
+ }
+ sleep(intervalMs)
+ }
+ assertTrue(false, "Timeout waiting ttl_cache_size = 0")
+ }
+
+ def insertBatch = { int start, int end ->
+ def values = (start..<end).collect { i -> "(${i}, 'value_${i}')"
}.join(",")
+ sql """insert into ${tableName} values ${values}"""
+ }
+ insertBatch(0, 200)
+ insertBatch(200, 400)
+
+ qt_sql """select count(*) from ${tableName} where v1 like 'value_%'"""
+ sleep(5000)
+
+ def tabletIds = getTabletIds.call(tableName)
+ waitForFileCacheType.call(tabletIds, "ttl")
+
+ // ST-04 未覆盖点模板:运行期 ALTER N->0 后,缓存类型应从 ttl 转为 normal
+ sql """alter table ${tableName} set ("file_cache_ttl_seconds"="0")"""
+ waitForFileCacheType.call(tabletIds, "normal")
+ waitTtlCacheSizeZero.call()
+
+ sql """drop table if exists ${tableName}"""
+ }
+}
diff --git
a/regression-test/suites/cloud_p0/cache/ttl/st06_warmup_ttl_type_assert.groovy
b/regression-test/suites/cloud_p0/cache/ttl/st06_warmup_ttl_type_assert.groovy
new file mode 100644
index 00000000000..435fa6d28bb
--- /dev/null
+++
b/regression-test/suites/cloud_p0/cache/ttl/st06_warmup_ttl_type_assert.groovy
@@ -0,0 +1,122 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("st06_warmup_ttl_type_assert") {
+ def customBeConfig = [
+ enable_evict_file_cache_in_advance : false,
+ file_cache_enter_disk_resource_limit_mode_percent : 99
+ ]
+
+ setBeConfigTemporary(customBeConfig) {
+ def clusters = sql "SHOW CLUSTERS"
+ if (clusters.size() < 2) {
+ logger.info("skip st06_warmup_ttl_type_assert, need at least 2
clusters")
+ return
+ }
+
+ def sourceCluster = clusters[0][0]
+ def targetCluster = clusters[1][0]
+ String tableName = "st06_warmup_ttl_tpl"
+
+ sql """use @${sourceCluster};"""
+ def ddl = new
File("""${context.file.parent}/../ddl/st06_warmup_ttl_type_assert.sql""").text
+ .replace("\${TABLE_NAME}", tableName)
+ sql ddl
+
+ def values = (0..<200).collect { i -> "(${i}, 'warmup_tpl_${i}')"
}.join(",")
+ sql """insert into ${tableName} values ${values}"""
+ qt_source_preheat """select count(*) from ${tableName}"""
+
+ def sourceTablets = sql """show tablets from ${tableName}"""
+ assertTrue(sourceTablets.size() > 0, "No tablets found for table
${tableName} in source cluster ${sourceCluster}")
+ def sourceTabletIds = sourceTablets.collect { it[0] as Long }
+
+ // ST-06 部分覆盖点模板:显式断言 warmup 后目标集群缓存类型为 ttl
+ def jobIdRows = sql """warm up cluster ${targetCluster} with table
${tableName};"""
+ assertTrue(!jobIdRows.isEmpty())
+ def jobId = jobIdRows[0][0]
+
+ def waitWarmUpJobFinished = { Object id, long timeoutMs = 600000L,
long intervalMs = 5000L ->
+ long start = System.currentTimeMillis()
+ while (System.currentTimeMillis() - start < timeoutMs) {
+ def stateRows = sql """SHOW WARM UP JOB WHERE ID = ${id}"""
+ if (stateRows.isEmpty()) {
+ sleep(intervalMs)
+ continue
+ }
+ def state = stateRows[0][3].toString()
+ if ("FINISHED".equalsIgnoreCase(state)) {
+ return
+ }
+ if ("CANCELLED".equalsIgnoreCase(state) ||
"FAILED".equalsIgnoreCase(state)) {
+ assertTrue(false, "Warm up job failed, id=${id},
state=${state}")
+ }
+ sleep(intervalMs)
+ }
+ assertTrue(false, "Timeout waiting warm up job finished, id=${id}")
+ }
+ waitWarmUpJobFinished.call(jobId)
+
+ sql """use @${targetCluster};"""
+ qt_target_query """select count(*) from ${tableName}"""
+ def targetTablets = sql """show tablets from ${tableName}"""
+ assertTrue(targetTablets.size() > 0, "No tablets found for table
${tableName} in target cluster ${targetCluster}")
+ def targetTabletIds = targetTablets.collect { it[0] as Long }
+ assertTrue(sourceTabletIds.size() == targetTabletIds.size(),
+ "Tablet size mismatch between source and target,
source=${sourceTabletIds.size()}, target=${targetTabletIds.size()}")
+
+ def waitForFileCacheType = { List<Long> sourceIds, List<Long>
targetIds, String expectedType, long timeoutMs = 600000L, long intervalMs =
2000L ->
+ logger.info("waitForFileCacheType,
sourceIds=${sourceIds.toString()}, targetIds=${targetIds.toString()},
expectedType=${expectedType}")
+ assertTrue(sourceIds.size() == targetIds.size(),
+ "Tablet size mismatch before waiting file cache type,
source=${sourceIds.size()}, target=${targetIds.size()}")
+ long start = System.currentTimeMillis()
+ while (System.currentTimeMillis() - start < timeoutMs) {
+ int sourceMatched = 0
+ int targetMatched = 0
+ for (Long sourceTabletId in sourceIds) {
+ def sourceTabletIdStr = sql """select * from
information_schema.file_cache_info where tablet_id = ${sourceTabletId}"""
+ logger.info("[source tablet] tablet_id=${sourceTabletId},
tablet_cache_info=${sourceTabletIdStr.toString()}")
+ def rows = sql """select type from
information_schema.file_cache_info where tablet_id = ${sourceTabletId}"""
+ if (!rows.isEmpty()) {
+ def mismatch = rows.find { row ->
!row[0]?.toString()?.equalsIgnoreCase(expectedType) }
+ if (!mismatch) {
+ sourceMatched++
+ }
+ }
+ }
+ for (Long targetTabletId in targetIds) {
+ def targetTabletIdStr = sql """select * from
information_schema.file_cache_info where tablet_id = ${targetTabletId}"""
+ logger.info("[target tablet] tablet_id=${targetTabletId},
tablet_cache_info=${targetTabletIdStr.toString()}")
+ def rows = sql """select type from
information_schema.file_cache_info where tablet_id = ${targetTabletId}"""
+ if (!rows.isEmpty()) {
+ def mismatch = rows.find { row ->
!row[0]?.toString()?.equalsIgnoreCase(expectedType) }
+ if (!mismatch) {
+ targetMatched++
+ }
+ }
+ }
+ sleep(intervalMs)
+ }
+ assertTrue(false, "Timeout waiting for ${expectedType},
sourceTablets=${sourceIds}, targetTablets=${targetIds}")
+ }
+ waitForFileCacheType.call(sourceTabletIds, targetTabletIds, "ttl")
+
+ // cleanup
+ sql """use @${sourceCluster};"""
+ sql """drop table if exists ${tableName}"""
+ }
+}
diff --git
a/regression-test/suites/cloud_p0/cache/ttl/st07_qcs_consistency.groovy
b/regression-test/suites/cloud_p0/cache/ttl/st07_qcs_consistency.groovy
new file mode 100644
index 00000000000..cd5485cce61
--- /dev/null
+++ b/regression-test/suites/cloud_p0/cache/ttl/st07_qcs_consistency.groovy
@@ -0,0 +1,106 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("st07_qcs_consistency") {
+ def customBeConfig = [
+ enable_evict_file_cache_in_advance : false,
+ file_cache_enter_disk_resource_limit_mode_percent : 99,
+ file_cache_background_ttl_gc_interval_ms : 1000,
+ file_cache_background_ttl_info_update_interval_ms : 1000,
+ file_cache_background_tablet_id_flush_interval_ms : 1000
+ ]
+
+ setBeConfigTemporary(customBeConfig) {
+ def clusters = sql "SHOW CLUSTERS"
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """use @${validCluster};"""
+
+ String tableName = "st07_qcs_tpl"
+ def ddl = new
File("""${context.file.parent}/../ddl/st07_qcs_consistency.sql""").text
+ .replace("\${TABLE_NAME}", tableName)
+ sql ddl
+
+ (0..<500).each { i ->
+ sql """insert into ${tableName} values (${i}, 'qcs_tpl_${i}')"""
+ }
+ qt_q1 """select count(*) from ${tableName} where c1 like 'qcs_tpl_%'"""
+
+ def tablets = sql """show tablets from ${tableName}"""
+ assertTrue(tablets.size() > 0, "No tablets found for table
${tableName}")
+ def tabletIds = tablets.collect { it[0] as Long }
+
+ // ST-07 部分覆盖点模板:Query + Compaction + SchemaChange 混合后检查缓存类型一致性
+ try {
+ trigger_and_wait_compaction(tableName, "cumulative")
+ } catch (Throwable t) {
+ logger.warn("trigger_and_wait_compaction failed in template,
continue. err=${t.message}")
+ }
+
+ sql """alter table ${tableName} add column c2 BIGINT default "0" """
+
+ def waitSchemaChangeFinished = { String tbl, long timeoutMs = 300000L,
long intervalMs = 5000L ->
+ long start = System.currentTimeMillis()
+ while (System.currentTimeMillis() - start < timeoutMs) {
+ def rows = sql """SHOW ALTER TABLE COLUMN WHERE
TableName='${tbl}' ORDER BY CreateTime DESC LIMIT 1"""
+ if (rows.isEmpty()) {
+ sleep(intervalMs)
+ continue
+ }
+ def state = rows[0][9].toString()
+ if ("FINISHED".equalsIgnoreCase(state)) {
+ return
+ }
+ if ("CANCELLED".equalsIgnoreCase(state)) {
+ assertTrue(false, "schema change cancelled, table=${tbl}")
+ }
+ sleep(intervalMs)
+ }
+ assertTrue(false, "Timeout waiting schema change finished,
table=${tbl}")
+ }
+ waitSchemaChangeFinished.call(tableName)
+
+ qt_q2 """select count(*) from ${tableName} where c2 = 0"""
+
+ def waitNoMixedTypePerTablet = { List<Long> ids, long timeoutMs =
600000L, long intervalMs = 3000L ->
+ long start = System.currentTimeMillis()
+ while (System.currentTimeMillis() - start < timeoutMs) {
+ boolean allOk = true
+ for (Long tabletId in ids) {
+ def rows = sql """select type from
information_schema.file_cache_info where tablet_id=${tabletId}"""
+ if (rows.isEmpty()) {
+ allOk = false
+ break
+ }
+ def typeSet = rows.collect {
it[0]?.toString()?.toLowerCase() }.toSet()
+ if (typeSet.size() > 1) {
+ allOk = false
+ break
+ }
+ }
+ if (allOk) {
+ return
+ }
+ sleep(intervalMs)
+ }
+ assertTrue(false, "Timeout waiting no mixed cache types per
tablet")
+ }
+ waitNoMixedTypePerTablet.call(tabletIds)
+
+ sql """drop table if exists ${tableName}"""
+ }
+}
diff --git
a/regression-test/suites/cloud_p0/cache/ttl/st10_drop_partition_cleanup.groovy
b/regression-test/suites/cloud_p0/cache/ttl/st10_drop_partition_cleanup.groovy
new file mode 100644
index 00000000000..965ecaf35d2
--- /dev/null
+++
b/regression-test/suites/cloud_p0/cache/ttl/st10_drop_partition_cleanup.groovy
@@ -0,0 +1,199 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("st10_drop_partition_cleanup") {
+ def customBeConfig = [
+ enable_evict_file_cache_in_advance : false,
+ file_cache_enter_disk_resource_limit_mode_percent : 99,
+ file_cache_background_ttl_gc_interval_ms : 1000,
+ file_cache_background_ttl_info_update_interval_ms : 1000,
+ file_cache_background_tablet_id_flush_interval_ms : 1000
+ ]
+ def customFeConfig = [
+ rehash_tablet_after_be_dead_seconds : 5
+ ]
+
+ setBeConfigTemporary(customBeConfig) {
+ setFeConfigTemporary(customFeConfig) {
+ def clusters = sql "SHOW CLUSTERS"
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """use @${validCluster};"""
+
+ String tableName = "st10_drop_part_cleanup_tpl"
+ def ddl = new
File("""${context.file.parent}/../ddl/st10_drop_partition_cleanup.sql""").text
+ .replace("\${TABLE_NAME}", tableName)
+ sql ddl
+
+ String[][] backends = sql """show backends"""
+ def backendIdToBackendIP = [:]
+ def backendIdToBackendHttpPort = [:]
+ def backendIdToBackendBrpcPort = [:]
+ for (String[] backend in backends) {
+ if (backend[9].equals("true") &&
backend[19].contains("${validCluster}")) {
+ backendIdToBackendIP.put(backend[0], backend[1])
+ backendIdToBackendHttpPort.put(backend[0], backend[4])
+ backendIdToBackendBrpcPort.put(backend[0], backend[5])
+ }
+ }
+ assertEquals(backendIdToBackendIP.size(), 1)
+
+ def backendId = backendIdToBackendIP.keySet()[0]
+ def clearUrl = backendIdToBackendIP.get(backendId) + ":" +
backendIdToBackendHttpPort.get(backendId) + "/api/file_cache?op=clear&sync=true"
+ httpTest {
+ endpoint ""
+ uri clearUrl
+ op "get"
+ body ""
+ printResponse false
+ check { respCode, body ->
+ assertEquals("${respCode}".toString(), "200")
+ }
+ }
+
+ def waitForFileCacheType = { List<Long> tabletIds, String
expectedType, long timeoutMs = 120000L, long intervalMs = 2000L ->
+ long start = System.currentTimeMillis()
+ while (System.currentTimeMillis() - start < timeoutMs) {
+ boolean allMatch = true
+ for (Long tabletId in tabletIds) {
+ def rows = sql """select type from
information_schema.file_cache_info where tablet_id = ${tabletId}"""
+ if (rows.isEmpty()) {
+ allMatch = false
+ break
+ }
+ def mismatch = rows.find { row ->
!row[0]?.toString()?.equalsIgnoreCase(expectedType) }
+ if (mismatch) {
+ allMatch = false
+ break
+ }
+ }
+ if (allMatch) {
+ return
+ }
+ sleep(intervalMs)
+ }
+ assertTrue(false, "Timeout waiting for ${expectedType},
tablets=${tabletIds}")
+ }
+
+ def waitDroppedTabletCacheInfoEmpty = { List<Long> tabletIds, long
timeoutMs = 300000L, long intervalMs = 3000L ->
+ if (tabletIds.isEmpty()) {
+ return
+ }
+ String idList = tabletIds.join(",")
+ long start = System.currentTimeMillis()
+ while (System.currentTimeMillis() - start < timeoutMs) {
+ def rows = sql """select tablet_id from
information_schema.file_cache_info where tablet_id in (${idList}) limit 1"""
+ if (rows.isEmpty()) {
+ return
+ }
+ sleep(intervalMs)
+ }
+ assertTrue(false, "Timeout waiting dropped tablet cache entries
cleaned, tablets=${tabletIds}")
+ }
+
+ def waitTabletCacheInfoNonEmpty = { List<Long> tabletIds, long
timeoutMs = 120000L, long intervalMs = 2000L ->
+ if (tabletIds.isEmpty()) {
+ assertTrue(false, "tabletIds is empty")
+ }
+ String idList = tabletIds.join(",")
+ long start = System.currentTimeMillis()
+ while (System.currentTimeMillis() - start < timeoutMs) {
+ def rows = sql """select tablet_id from
information_schema.file_cache_info where tablet_id in (${idList}) limit 1"""
+ if (!rows.isEmpty()) {
+ return
+ }
+ sleep(intervalMs)
+ }
+ assertTrue(false, "Timeout waiting tablet cache entries exist,
tablets=${tabletIds}")
+ }
+
+ def getBrpcMetricSum = { String metricNameSubstr ->
+ long sumValue = 0L
+ httpTest {
+ endpoint backendIdToBackendIP.get(backendId) + ":" +
backendIdToBackendBrpcPort.get(backendId)
+ uri "/brpc_metrics"
+ op "get"
+ check { respCode, body ->
+ assertEquals("${respCode}".toString(), "200")
+ String out = "${body}".toString()
+ def lines = out.split('\n')
+ for (String line in lines) {
+ if (line.startsWith("#")) {
+ continue
+ }
+ if (!line.contains(metricNameSubstr)) {
+ continue
+ }
+ logger.info("metric line: ${line}")
+ def idx = line.indexOf(' ')
+ if (idx <= 0) {
+ continue
+ }
+ try {
+ sumValue += line.substring(idx).trim().toLong()
+ } catch (Exception e) {
+ logger.warn("ignore unparsable metric line:
${line}")
+ }
+ }
+ }
+ }
+ return sumValue
+ }
+
+ def waitBrpcMetricLE = { String metricNameSubstr, long upperBound,
long timeoutMs = 180000L, long intervalMs = 3000L ->
+ long start = System.currentTimeMillis()
+ while (System.currentTimeMillis() - start < timeoutMs) {
+ long cur = getBrpcMetricSum.call(metricNameSubstr)
+ if (cur <= upperBound) {
+ return
+ }
+ sleep(intervalMs)
+ }
+ long curFinal = getBrpcMetricSum.call(metricNameSubstr)
+ assertTrue(curFinal <= upperBound, "Metric ${metricNameSubstr}
should <= ${upperBound}, actual=${curFinal}")
+ }
+
+ def getPartitionTabletIds = { String tbl, String partitionName ->
+ def tablets = sql """show tablets from ${tbl} partition
${partitionName}"""
+ assertTrue(!tablets.isEmpty(), "No tablets found for partition
${partitionName}")
+ tablets.collect { it[0] as Long }
+ }
+
+ def p1Values = (0..<120).collect { i -> "(${i}, 'p1_${i}')"
}.join(",")
+ def p2Values = (1000..<1120).collect { i -> "(${i}, 'p2_${i}')"
}.join(",")
+ sql """insert into ${tableName} values ${p1Values}"""
+ sql """insert into ${tableName} values ${p2Values}"""
+ qt_part_preheat """select count(*) from ${tableName}"""
+ sleep(5000)
+
+ def p1Tablets = getPartitionTabletIds.call(tableName, "p1")
+ def p2Tablets = getPartitionTabletIds.call(tableName, "p2")
+ waitForFileCacheType.call((p1Tablets + p2Tablets).unique(), "ttl")
+
+ final String ttlMgrSetMetric =
"file_cache_ttl_mgr_tablet_id_set_size"
+ long ttlMgrSetSizeBeforeDropPartition =
getBrpcMetricSum.call(ttlMgrSetMetric)
+
+ sql """alter table ${tableName} drop partition p1 force"""
+ waitDroppedTabletCacheInfoEmpty.call(p1Tablets)
+ waitTabletCacheInfoNonEmpty.call(p2Tablets)
+ waitBrpcMetricLE.call(ttlMgrSetMetric,
ttlMgrSetSizeBeforeDropPartition)
+
+ qt_part_survivor """select count(*) from ${tableName} where k1 >=
1000"""
+ sql """drop table if exists ${tableName}"""
+ }
+ }
+}
diff --git
a/regression-test/suites/cloud_p0/cache/ttl/st10_drop_table_cleanup.groovy
b/regression-test/suites/cloud_p0/cache/ttl/st10_drop_table_cleanup.groovy
new file mode 100644
index 00000000000..56eca6ee54e
--- /dev/null
+++ b/regression-test/suites/cloud_p0/cache/ttl/st10_drop_table_cleanup.groovy
@@ -0,0 +1,207 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// ST-10: after `drop table ... force`, the dropped tablets' file-cache
+// entries must be cleaned up on the BEs.
+suite("st10_drop_table_cleanup") {
+ // Speed up file-cache TTL background tasks (1s intervals) and disable
+ // advance eviction so cache state changes are observable quickly.
+ def customBeConfig = [
+ enable_evict_file_cache_in_advance : false,
+ file_cache_enter_disk_resource_limit_mode_percent : 99,
+ file_cache_background_ttl_gc_interval_ms : 1000,
+ file_cache_background_ttl_info_update_interval_ms : 1000,
+ file_cache_background_tablet_id_flush_interval_ms : 1000
+ ]
+ def customFeConfig = [
+ rehash_tablet_after_be_dead_seconds : 5
+ ]
+
+ setBeConfigTemporary(customBeConfig) {
+ setFeConfigTemporary(customFeConfig) {
+ // Pick the compute cluster to run against: the row whose second column
+ // is "true" (presumably the is-current flag of SHOW CLUSTERS — verify
+ // against the FE version), falling back to the first listed cluster.
+ def clusters = sql "SHOW CLUSTERS"
+ assertTrue(!clusters.isEmpty())
+ def currentClusterRow = clusters.find { row ->
+ row.size() > 1 && row[1]?.toString()?.equalsIgnoreCase("true")
+ }
+ def validCluster = (currentClusterRow != null ?
currentClusterRow[0] : clusters[0][0]).toString()
+ sql """use @${validCluster};"""
+
+ // Create the test table from the DDL template, substituting the
+ // ${TABLE_NAME} placeholder.
+ String tableName = "st10_drop_cleanup_tpl"
+ def ddl = new
File("""${context.file.parent}/../ddl/st10_drop_table_cleanup.sql""").text
+ .replace("\${TABLE_NAME}", tableName)
+ sql ddl
+
+ // Collect host/http-port/brpc-port of every alive BE in the chosen
+ // cluster. Column positions ([0]=id, [1]=host, [4]=http port,
+ // [5]=brpc port, [9]=alive, [19]=cluster tag json) are assumed from
+ // usage here — TODO confirm against the `show backends` schema of this
+ // FE version.
+ String[][] backends = sql """show backends"""
+ def backendIdToBackendIP = [:]
+ def backendIdToBackendHttpPort = [:]
+ def backendIdToBackendBrpcPort = [:]
+ for (String[] backend in backends) {
+ def beClusterInfo = backend[19]?.toString() ?: ""
+ def isAlive = backend[9]?.toString()?.equalsIgnoreCase("true")
+ // Match the cluster tag with and without spaces around the colon, and
+ // finally by a plain substring, to tolerate formatting differences.
+ def belongToCurrentCluster =
beClusterInfo.contains("\"compute_group_name\" : \"${validCluster}\"")
+ ||
beClusterInfo.contains("\"compute_group_name\":\"${validCluster}\"")
+ || beClusterInfo.contains("${validCluster}")
+ if (isAlive && belongToCurrentCluster) {
+ backendIdToBackendIP.put(backend[0], backend[1])
+ backendIdToBackendHttpPort.put(backend[0], backend[4])
+ backendIdToBackendBrpcPort.put(backend[0], backend[5])
+ }
+ }
+ assertTrue(!backendIdToBackendIP.isEmpty(), "No alive backend
found in cluster ${validCluster}")
+ def backendIds = backendIdToBackendIP.keySet().toList()
+
+    // Synchronously drop every cached block on each BE so the test starts
+    // from an empty file cache.
+    backendIds.each { beId ->
+        def host = backendIdToBackendIP.get(beId)
+        def port = backendIdToBackendHttpPort.get(beId)
+        def url = host + ":" + port + "/api/file_cache?op=clear&sync=true"
+        httpTest {
+            endpoint ""
+            uri url
+            op "get"
+            body ""
+            check { respCode, body ->
+                assertEquals("${respCode}".toString(), "200")
+            }
+        }
+    }
+
+    // List the tablet ids of every tablet in the given table; fails if the
+    // table has no tablets.
+    def getTabletIds = { String tbl ->
+        def rows = sql """show tablets from ${tbl}"""
+        assertTrue(rows.size() > 0, "No tablets found for table ${tbl}")
+        def ids = []
+        rows.each { row -> ids << (row[0] as Long) }
+        ids
+    }
+
+    // Poll information_schema.file_cache_info until every listed tablet has
+    // at least one cache entry and all of its entries report the expected
+    // cache type; fail the test once timeoutMs elapses.
+    def waitForFileCacheType = { List<Long> tabletIds, String expectedType, long timeoutMs = 120000L, long intervalMs = 2000L ->
+        long deadline = System.currentTimeMillis() + timeoutMs
+        while (System.currentTimeMillis() < deadline) {
+            boolean allMatch = tabletIds.every { tabletId ->
+                def rows = sql """select type from information_schema.file_cache_info where tablet_id = ${tabletId}"""
+                // No rows means the cache info has not appeared yet.
+                !rows.isEmpty() && rows.every { row -> row[0]?.toString()?.equalsIgnoreCase(expectedType) }
+            }
+            if (allMatch) {
+                return
+            }
+            sleep(intervalMs)
+        }
+        assertTrue(false, "Timeout waiting for ${expectedType}, tablets=${tabletIds}")
+    }
+
+    // Poll until no file_cache_info row remains for any of the dropped
+    // tablets; fail the test once timeoutMs elapses.
+    def waitDroppedTabletCacheInfoEmpty = { List<Long> tabletIds, long timeoutMs = 180000L, long intervalMs = 3000L ->
+        if (tabletIds.isEmpty()) {
+            return
+        }
+        String idList = tabletIds.join(",")
+        long start = System.currentTimeMillis()
+        while (System.currentTimeMillis() - start < timeoutMs) {
+            // limit 1: we only care whether ANY leftover entry exists.
+            def leftover = sql """select tablet_id from information_schema.file_cache_info where tablet_id in (${idList}) limit 1"""
+            if (leftover.isEmpty()) {
+                return
+            }
+            sleep(intervalMs)
+        }
+        assertTrue(false, "Timeout waiting dropped tablet cache entries cleaned, tablets=${tabletIds}")
+    }
+
+    // Sum one brpc metric over all selected backends.
+    // Scrapes /brpc_metrics (Prometheus text format) on each BE and adds up
+    // the value of every non-comment line whose name contains
+    // metricNameSubstr. Unparsable lines are warned about and skipped.
+    // Fix: split on the LAST space instead of the first, because in the
+    // Prometheus text format the sample value is the final token; the old
+    // indexOf(' ') split mis-parsed labeled samples whose label values
+    // contain spaces (e.g. {"compute_group_name" : "x"}). For unlabeled
+    // `name value` lines the two splits are identical.
+    def getBrpcMetricSum = { String metricNameSubstr ->
+        long sumValue = 0L
+        for (def backendId in backendIds) {
+            httpTest {
+                endpoint backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendBrpcPort.get(backendId)
+                uri "/brpc_metrics"
+                op "get"
+                check { respCode, body ->
+                    assertEquals("${respCode}".toString(), "200")
+                    String out = "${body}".toString()
+                    def lines = out.split('\n')
+                    for (String line in lines) {
+                        if (line.startsWith("#")) {
+                            continue   // HELP/TYPE comment lines
+                        }
+                        if (!line.contains(metricNameSubstr)) {
+                            continue
+                        }
+                        def idx = line.lastIndexOf(' ')
+                        if (idx <= 0) {
+                            continue
+                        }
+                        try {
+                            sumValue += line.substring(idx).trim().toLong()
+                        } catch (Exception e) {
+                            logger.warn("ignore unparsable metric line: ${line}")
+                        }
+                    }
+                }
+            }
+        }
+        return sumValue
+    }
+
+    // Wait until the summed brpc metric drops to at most upperBound; after
+    // the timeout, take one final reading and assert on it.
+    def waitBrpcMetricLE = { String metricNameSubstr, long upperBound, long timeoutMs = 180000L, long intervalMs = 3000L ->
+        long deadline = System.currentTimeMillis() + timeoutMs
+        while (System.currentTimeMillis() < deadline) {
+            if (getBrpcMetricSum.call(metricNameSubstr) <= upperBound) {
+                return
+            }
+            sleep(intervalMs)
+        }
+        long curFinal = getBrpcMetricSum.call(metricNameSubstr)
+        assertTrue(curFinal <= upperBound, "Metric ${metricNameSubstr} should <= ${upperBound}, actual=${curFinal}")
+    }
+
+    // Wait until the summed brpc metric rises to at least lowerBound; after
+    // the timeout, take one final reading and assert on it.
+    def waitBrpcMetricGE = { String metricNameSubstr, long lowerBound, long timeoutMs = 120000L, long intervalMs = 2000L ->
+        long deadline = System.currentTimeMillis() + timeoutMs
+        while (System.currentTimeMillis() < deadline) {
+            if (getBrpcMetricSum.call(metricNameSubstr) >= lowerBound) {
+                return
+            }
+            sleep(intervalMs)
+        }
+        long curFinal = getBrpcMetricSum.call(metricNameSubstr)
+        assertTrue(curFinal >= lowerBound, "Metric ${metricNameSubstr} should >= ${lowerBound}, actual=${curFinal}")
+    }
+
+ // Baseline the TTL manager's tablet-set-size metric before any data is
+ // loaded.
+ final String ttlMgrSetMetric =
"file_cache_ttl_mgr_tablet_id_set_size"
+ long ttlMgrSetSizeBeforeAll =
getBrpcMetricSum.call(ttlMgrSetMetric)
+
+ // Load 300 rows and wait for the cache to settle.
+ def values = (0..<300).collect { i -> "(${i}, 'drop_tpl_${i}')"
}.join(",")
+ sql """insert into ${tableName} values ${values}"""
+ qt_sql """select count(*) from ${tableName} where c1 like
'drop_tpl_%'"""
+ sleep(5000)
+
+ // All tablets must be cached as "ttl", and the TTL manager set must have
+ // grown by at least one entry per distinct tablet.
+ def tabletIds = getTabletIds.call(tableName)
+ waitForFileCacheType.call(tabletIds, "ttl")
+ waitBrpcMetricGE.call(ttlMgrSetMetric, ttlMgrSetSizeBeforeAll +
tabletIds.toSet().size())
+ long ttlMgrSetSizeBeforeDropTable =
getBrpcMetricSum.call(ttlMgrSetMetric)
+
+ // ST-10 uncovered-case template: after the drop, no file_cache_info
+ // records for the deleted tablets may remain.
+ sql """drop table if exists ${tableName} force"""
+ waitDroppedTabletCacheInfoEmpty.call(tabletIds)
+ /*
+ // BE tablet meta cleanup takes a long time (it depends on how fast the
+ // recycler reclaims), so it is not checked here.
+ waitBrpcMetricLE.call(ttlMgrSetMetric, ttlMgrSetSizeBeforeAll)
+ // Optional enhancement 1: compare file_cache_ttl_mgr_tablet_id_set_size
+ // before and after the drop table.
+ assertTrue(ttlMgrSetSizeBeforeDropTable >= ttlMgrSetSizeBeforeAll)
+ */
+ }
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]