This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new f97b23cb778 [fix](p2) fix the failure of cold_heat_separation_p2 (#48199)
f97b23cb778 is described below

commit f97b23cb778e6d457d54ec24446d0f9c66fe1a93
Author: yagagagaga <zhangminkefromflyd...@gmail.com>
AuthorDate: Mon Mar 3 09:50:00 2025 +0800

    [fix](p2) fix the failure of cold_heat_separation_p2 (#48199)
---
 .../add_drop_partition.groovy                      | 18 ++++-----
 .../add_drop_partition_by_hdfs.groovy              | 16 ++++----
 .../create_table_use_dynamic_partition.groovy      | 16 ++++----
 ...eate_table_use_dynamic_partition_by_hdfs.groovy | 14 +++----
 .../create_table_use_partition_policy.groovy       | 28 +++++++-------
 ...reate_table_use_partition_policy_by_hdfs.groovy | 28 +++++++-------
 .../create_table_use_policy.groovy                 | 20 +++++-----
 .../create_table_use_policy_by_hdfs.groovy         | 24 ++++++------
 .../load_colddata_to_hdfs.groovy                   | 12 +++---
 .../modify_replica_use_partition.groovy            | 44 +++++++++++-----------
 .../modify_replica_use_partition_by_hdfs.groovy    | 44 +++++++++++-----------
 .../table_modify_resouce_and_policy.groovy         | 24 ++++++------
 .../table_modify_resouce_and_policy_by_hdfs.groovy | 24 ++++++------
 13 files changed, 156 insertions(+), 156 deletions(-)
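
Every hunk below applies the same two fixes: the suites previously read SHOW TABLETS rows by positional index (columns 0, 8, 17 and 18), which breaks whenever the column layout of the result set changes, so they now fetch rows with sql_return_maparray and address columns by name; and the storage-policy cleanup now goes through try_sql so a failing DROP does not abort the run. A minimal sketch of the pattern, using only helpers and column names that appear in the hunks (the local variable names are illustrative):

    def tablets = sql_return_maparray """
    SHOW TABLETS FROM ${tableName}
    """
    def firstTablet = tablets[0]
    // address columns by name rather than by position
    def tabletId = firstTablet.TabletId                  // previously row[0]
    String metaUrl = firstTablet.MetaUrl                 // previously row[17]
    def localDataSize = firstTablet.LocalDataSize        // previously row[8]
    def compactionUrl = firstTablet.CompactionStatus     // previously row[18]
    // cleanup must not fail the suite if the policy is already gone
    try_sql """
    drop storage policy add_policy;
    """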

diff --git a/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy b/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy
index a4bbe562f06..f53d5c27b36 100644
--- a/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy
@@ -33,9 +33,9 @@ suite("add_drop_partition") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -136,7 +136,7 @@ suite("add_drop_partition") {
     """
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    def tablets = sql """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -154,7 +154,7 @@ suite("add_drop_partition") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -162,7 +162,7 @@ suite("add_drop_partition") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -174,7 +174,7 @@ suite("add_drop_partition") {
     while (RemoteDataSize1 != originLocalDataSize1 && sleepTimes < 60) {
         log.info( "test remote size is same with origin size, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -271,7 +271,7 @@ suite("add_drop_partition") {
     DROP TABLE ${tableName}
     """
 
-    sql """
+    try_sql """
     drop storage policy add_policy;
     """
 
@@ -285,4 +285,4 @@ suite("add_drop_partition") {
 
 
 
-}
\ No newline at end of file
+}
diff --git a/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy
index 3437bcfc37b..f42ddc22503 100644
--- a/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy
@@ -33,9 +33,9 @@ suite("add_drop_partition_by_hdfs") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -134,7 +134,7 @@ suite("add_drop_partition_by_hdfs") {
     """
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    def tablets = sql """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -152,7 +152,7 @@ suite("add_drop_partition_by_hdfs") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -160,7 +160,7 @@ suite("add_drop_partition_by_hdfs") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -172,7 +172,7 @@ suite("add_drop_partition_by_hdfs") {
     while (RemoteDataSize1 != originLocalDataSize1 && sleepTimes < 60) {
         log.info( "test remote size is same with origin size, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -267,7 +267,7 @@ suite("add_drop_partition_by_hdfs") {
     DROP TABLE ${tableName}
     """
 
-    sql """
+    try_sql """
     drop storage policy add_policy;
     """
 
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy
index a9737b37abb..580946bd7a8 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy
@@ -33,9 +33,9 @@ suite("cold_heat_dynamic_partition") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -146,7 +146,7 @@ suite("cold_heat_dynamic_partition") {
     """
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    def tablets = sql """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -164,7 +164,7 @@ suite("cold_heat_dynamic_partition") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -172,7 +172,7 @@ suite("cold_heat_dynamic_partition") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -184,7 +184,7 @@ suite("cold_heat_dynamic_partition") {
     while (RemoteDataSize1 != originLocalDataSize1 && sleepTimes < 60) {
         log.info( "test remote size is same with origin size, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM
         """
         fetchDataSize(sizes, tablets[0])
@@ -280,4 +280,4 @@ suite("cold_heat_dynamic_partition") {
 
 
 
-}
\ No newline at end of file
+}
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy
index a007b26305d..d099e43d7be 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy
@@ -33,9 +33,9 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -144,7 +144,7 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
     """
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    def tablets = sql """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -162,7 +162,7 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -170,7 +170,7 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -182,7 +182,7 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
     while (RemoteDataSize1 != originLocalDataSize1 && sleepTimes < 60) {
         log.info( "test remote size is same with origin size, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM
         """
         fetchDataSize(sizes, tablets[0])
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy
index aa67130a9d1..97d83ec64e0 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy
@@ -32,9 +32,9 @@ suite("create_table_use_partition_policy") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = { List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             assertEquals("${respCode}".toString(), "200")
             String out = "${body}".toString()
@@ -194,7 +194,7 @@ suite("create_table_use_partition_policy") {
     // sleep(30000);
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    def tablets = sql """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
     """
     log.info( "test tablets not empty")
@@ -212,7 +212,7 @@ suite("create_table_use_partition_policy") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
     """
     log.info( "test tablets not empty")
@@ -220,7 +220,7 @@ suite("create_table_use_partition_policy") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
         fetchDataSize(sizes, tablets[0])
@@ -232,7 +232,7 @@ suite("create_table_use_partition_policy") {
     while (RemoteDataSize1 != originLocalDataSize1 && sleepTimes < 60) {
         log.info( "test remote size is same with origin size, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
         fetchDataSize(sizes, tablets[0])
@@ -245,7 +245,7 @@ suite("create_table_use_partition_policy") {
     log.info( "test remote size not zero")
     assertEquals(RemoteDataSize1, originLocalDataSize1)
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
     log.info( "test tablets not empty")
@@ -298,7 +298,7 @@ suite("create_table_use_partition_policy") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
     """
     log.info( "test tablets not empty")
@@ -316,7 +316,7 @@ suite("create_table_use_partition_policy") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
     """
     log.info( "test tablets not empty")
@@ -324,7 +324,7 @@ suite("create_table_use_partition_policy") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
         fetchDataSize(sizes, tablets[0])
@@ -336,7 +336,7 @@ suite("create_table_use_partition_policy") {
     while (RemoteDataSize1 != originLocalDataSize1 && sleepTimes < 60) {
         log.info( "test remote size is same with origin size, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
         fetchDataSize(sizes, tablets[0])
@@ -349,7 +349,7 @@ suite("create_table_use_partition_policy") {
     log.info( "test remote size not zero")
     assertEquals(RemoteDataSize1, originLocalDataSize1)
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
     log.info( "test tablets not empty")
@@ -369,4 +369,4 @@ suite("create_table_use_partition_policy") {
 
 
 
-}
\ No newline at end of file
+}
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy
index 723f50721ec..5c03aeba0c1 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy
@@ -32,9 +32,9 @@ suite("create_table_use_partition_policy_by_hdfs") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             assertEquals("${respCode}".toString(), "200")
             String out = "${body}".toString()
@@ -93,7 +93,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
         def tablets = sql """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
-        while (tablets[0][8] == "0") {
+        while (tablets[0].LocalDataSize == "0") {
             log.info( "test local size is zero, sleep 10s")
             sleep(10000)
             tablets = sql """
@@ -192,7 +192,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
     // sleep(30000);
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    def tablets = sql """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
     """
     log.info( "test tablets not empty")
@@ -210,7 +210,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
     """
     log.info( "test tablets not empty")
@@ -218,7 +218,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
         fetchDataSize(sizes, tablets[0])
@@ -230,7 +230,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
     while (RemoteDataSize1 != originLocalDataSize1 && sleepTimes < 60) {
         log.info( "test remote size is same with origin size, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
         fetchDataSize(sizes, tablets[0])
@@ -243,7 +243,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
     log.info( "test remote size not zero")
     assertEquals(RemoteDataSize1, originLocalDataSize1)
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
     log.info( "test tablets not empty")
@@ -296,7 +296,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
     """
     log.info( "test tablets not empty")
@@ -314,7 +314,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
     """
     log.info( "test tablets not empty")
@@ -322,7 +322,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
         fetchDataSize(sizes, tablets[0])
@@ -334,7 +334,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
     while (RemoteDataSize1 != originLocalDataSize1 && sleepTimes < 60) {
         log.info( "test remote size is same with origin size, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
         """
         fetchDataSize(sizes, tablets[0])
@@ -347,7 +347,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
     log.info( "test remote size not zero")
     assertEquals(RemoteDataSize1, originLocalDataSize1)
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
     log.info( "test tablets not empty")
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy
index fb7f115993d..4073a5c67b8 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy
@@ -32,9 +32,9 @@ suite("create_table_use_policy") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -189,7 +189,7 @@ suite("create_table_use_policy") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -201,7 +201,7 @@ suite("create_table_use_policy") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -209,7 +209,7 @@ suite("create_table_use_policy") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -256,7 +256,7 @@ suite("create_table_use_policy") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -267,7 +267,7 @@ suite("create_table_use_policy") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -275,7 +275,7 @@ suite("create_table_use_policy") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -293,4 +293,4 @@ suite("create_table_use_policy") {
 
 
 
-}
\ No newline at end of file
+}
diff --git a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy
index c80f82a167d..9ff61a35e04 100644
--- a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy
@@ -32,9 +32,9 @@ suite("create_table_use_policy_by_hdfs") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -90,13 +90,13 @@ suite("create_table_use_policy_by_hdfs") {
     def load_lineitem_table = {
         stream_load_one_part("00")
         stream_load_one_part("01")
-        def tablets = sql """
+        def tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        while (tablets[0][8] == "0") {
+        while (tablets[0].LocalDataSize == "0") {
             log.info( "test local size is zero, sleep 10s")
             sleep(10000)
-            tablets = sql """
+            tablets = sql_return_maparray """
             SHOW TABLETS FROM ${tableName}
             """
         }
@@ -187,7 +187,7 @@ suite("create_table_use_policy_by_hdfs") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -199,7 +199,7 @@ suite("create_table_use_policy_by_hdfs") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -207,7 +207,7 @@ suite("create_table_use_policy_by_hdfs") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -254,7 +254,7 @@ suite("create_table_use_policy_by_hdfs") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -265,7 +265,7 @@ suite("create_table_use_policy_by_hdfs") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -273,7 +273,7 @@ suite("create_table_use_policy_by_hdfs") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
diff --git a/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy
index b49306b3c72..673fa9d39c8 100644
--- a/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy
@@ -36,9 +36,9 @@ suite("load_colddata_to_hdfs") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -191,7 +191,7 @@ suite("load_colddata_to_hdfs") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -203,7 +203,7 @@ suite("load_colddata_to_hdfs") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -211,7 +211,7 @@ suite("load_colddata_to_hdfs") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
diff --git a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy
index 6538dafa649..744fef15037 100644
--- a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy
@@ -32,9 +32,9 @@ suite("modify_replica_use_partition") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -105,13 +105,13 @@ suite("modify_replica_use_partition") {
     def load_lineitem_table = {
         stream_load_one_part("00")
         stream_load_one_part("01")
-        def tablets = sql """
+        def tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        while (tablets[0][8] == "0") {
+        while (tablets[0].LocalDataSize == "0") {
             log.info( "test local size is zero, sleep 10s")
             sleep(10000)
-            tablets = sql """
+            tablets = sql_return_maparray """
             SHOW TABLETS FROM ${tableName}
             """
         }
@@ -212,7 +212,7 @@ suite("modify_replica_use_partition") {
     sleep(600000)
 
 
-    def tablets = sql """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -220,7 +220,7 @@ suite("modify_replica_use_partition") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -253,13 +253,13 @@ suite("modify_replica_use_partition") {
     sleep(60000)
 
     // compare whether the rowsets meta of all tablets' replicas are the same
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     while (tablets.size() != 3 * originSize) {
         log.info( "tablets clone not finished, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
     }
@@ -268,12 +268,12 @@ suite("modify_replica_use_partition") {
     def iterate_num = tablets.size() / 3;
     for (int i = 0; i < iterate_num; i++) {
         int idx = i * 3;
-        def dst = tablets[idx][18]
+        def dst = tablets[idx].CompactionStatus
         def text = get_meta(dst)
         def obj = new JsonSlurper().parseText(text)
         def rowsets = obj.rowsets
         for (x in [1,2]) {
-            dst = tablets[idx + x][18]
+            dst = tablets[idx + x].CompactionStatus
             text = get_meta(dst)
             obj = new JsonSlurper().parseText(text)
             log.info( "test rowset meta is the same")
@@ -322,7 +322,7 @@ suite("modify_replica_use_partition") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     fetchDataSize(sizes, tablets[0])
@@ -339,7 +339,7 @@ suite("modify_replica_use_partition") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -348,7 +348,7 @@ suite("modify_replica_use_partition") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -417,7 +417,7 @@ suite("modify_replica_use_partition") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     fetchDataSize(sizes, tablets[0])
@@ -434,7 +434,7 @@ suite("modify_replica_use_partition") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -443,7 +443,7 @@ suite("modify_replica_use_partition") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -479,12 +479,12 @@ suite("modify_replica_use_partition") {
     // wait one minute for migration to be completed
     sleep(60000)
     // compare whether the rowsets meta of all tablets' replicas in the 3-replica partition are the same
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
     // sleep to wait for the report
     sleep(15000)
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
     compactionStatusIdx = tablets[0].size() - 1
@@ -492,12 +492,12 @@ suite("modify_replica_use_partition") {
     iterate_num = tablets.size() / 3;
     for (int i = 0; i < iterate_num; i++) {
         int idx = i * 3;
-        def dst = tablets[idx][18]
+        def dst = tablets[idx].CompactionStatus
         def text = get_meta(dst)
         def obj = new JsonSlurper().parseText(text)
         def rowsets = obj.rowsets
         for (x in [1,2]) {
-            dst = tablets[idx + x][18]
+            dst = tablets[idx + x].CompactionStatus
             text = get_meta(dst)
             obj = new JsonSlurper().parseText(text)
             log.info( "test rowset meta is the same")
diff --git a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy
index 55557e02c56..9c9a9c0e1fa 100644
--- a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy
@@ -32,9 +32,9 @@ suite("modify_replica_use_partition_by_hdfs") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -105,13 +105,13 @@ suite("modify_replica_use_partition_by_hdfs") {
     def load_lineitem_table = {
         stream_load_one_part("00")
         stream_load_one_part("01")
-        def tablets = sql """
+        def tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        while (tablets[0][8] == "0") {
+        while (tablets[0].LocalDataSize == "0") {
             log.info( "test local size is zero, sleep 10s")
             sleep(10000)
-            tablets = sql """
+            tablets = sql_return_maparray """
             SHOW TABLETS FROM ${tableName}
             """
         }
@@ -210,7 +210,7 @@ suite("modify_replica_use_partition_by_hdfs") {
     sleep(600000)
 
 
-    def tablets = sql """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -218,7 +218,7 @@ suite("modify_replica_use_partition_by_hdfs") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -251,13 +251,13 @@ suite("modify_replica_use_partition_by_hdfs") {
     sleep(60000)
 
     // compare whether the rowsets meta of all tablets' replicas are the same
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     while (tablets.size() != 3 * originSize) {
         log.info( "tablets clone not finished, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
     }
@@ -266,12 +266,12 @@ suite("modify_replica_use_partition_by_hdfs") {
     def iterate_num = tablets.size() / 3;
     for (int i = 0; i < iterate_num; i++) {
         int idx = i * 3;
-        def dst = tablets[idx][18]
+        def dst = tablets[idx].CompactionStatus
         def text = get_meta(dst)
         def obj = new JsonSlurper().parseText(text)
         def rowsets = obj.rowsets
         for (x in [1,2]) {
-            dst = tablets[idx + x][18]
+            dst = tablets[idx + x].CompactionStatus
             text = get_meta(dst)
             obj = new JsonSlurper().parseText(text)
             log.info( "test rowset meta is the same")
@@ -320,7 +320,7 @@ suite("modify_replica_use_partition_by_hdfs") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     fetchDataSize(sizes, tablets[0])
@@ -337,7 +337,7 @@ suite("modify_replica_use_partition_by_hdfs") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -346,7 +346,7 @@ suite("modify_replica_use_partition_by_hdfs") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -415,7 +415,7 @@ suite("modify_replica_use_partition_by_hdfs") {
     load_lineitem_table()
 
     // show tablets from table, get the first tablet's LocalDataSize1
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     fetchDataSize(sizes, tablets[0])
@@ -432,7 +432,7 @@ suite("modify_replica_use_partition_by_hdfs") {
     sleep(600000)
 
 
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -441,7 +441,7 @@ suite("modify_replica_use_partition_by_hdfs") {
     while (sizes[1] == 0) {
         log.info( "test remote size is zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -477,12 +477,12 @@ suite("modify_replica_use_partition_by_hdfs") {
     // wait one minute for migration to be completed
     sleep(60000)
     // compare whether the rowsets meta of all tablets' replicas in the 3-replica partition are the same
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
     // sleep to wait for the report
     sleep(15000)
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName} PARTITIONS(p202302)
     """
     compactionStatusIdx = tablets[0].size() - 1
@@ -490,12 +490,12 @@ suite("modify_replica_use_partition_by_hdfs") {
     iterate_num = tablets.size() / 3;
     for (int i = 0; i < iterate_num; i++) {
         int idx = i * 3;
-        def dst = tablets[idx][18]
+        def dst = tablets[idx].CompactionStatus
         def text = get_meta(dst)
         def obj = new JsonSlurper().parseText(text)
         def rowsets = obj.rowsets
         for (x in [1,2]) {
-            dst = tablets[idx + x][18]
+            dst = tablets[idx + x].CompactionStatus
             text = get_meta(dst)
             obj = new JsonSlurper().parseText(text)
             log.info( "test rowset meta is the same")
diff --git a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy
index 79b78834f8c..c23a9eea6df 100644
--- a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy
@@ -32,9 +32,9 @@ suite("table_modify_resouce") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -90,13 +90,13 @@ suite("table_modify_resouce") {
     def load_lineitem_table = {
         stream_load_one_part("00")
         stream_load_one_part("01")
-        def tablets = sql """
+        def tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        while (tablets[0][8] == "0") {
+        while (tablets[0].LocalDataSize == "0") {
             log.info( "test local size is zero, sleep 10s")
             sleep(10000)
-            tablets = sql """
+            tablets = sql_return_maparray """
             SHOW TABLETS FROM ${tableName}
             """
         }
@@ -190,7 +190,7 @@ suite("table_modify_resouce") {
 
     // wait 10 minutes, then get the remote data size
     sleep(600000)
-    def tablets = sql """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -201,7 +201,7 @@ suite("table_modify_resouce") {
     while (sizes[0] != 0) {
         log.info( "test local size is not zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -274,7 +274,7 @@ suite("table_modify_resouce") {
 
     // wait 10 minutes, then get the remote data size
     sleep(600000)
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -284,7 +284,7 @@ suite("table_modify_resouce") {
     while (sizes[0] != 0) {
         log.info( "test local size is not zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -310,7 +310,7 @@ suite("table_modify_resouce") {
     """
 
 
-    tablets2 = sql """
+    tablets2 = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     // [8] local data size, [9] remote data size
@@ -325,4 +325,4 @@ suite("table_modify_resouce") {
     DROP TABLE ${tableName}
     """
 
-}
\ No newline at end of file
+}
diff --git a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy
index 2c3f2d18219..a2c6f32cd63 100644
--- a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy
@@ -32,9 +32,9 @@ suite("table_modify_resouce_by_hdfs") {
         }
     }
     // data_sizes is an ArrayList<Long>, t is a tablet row
-    def fetchDataSize = { data_sizes, t ->
-        def tabletId = t[0]
-        String meta_url = t[17]
+    def fetchDataSize = {List<Long> data_sizes, Map<String, Object> t ->
+        def tabletId = t.TabletId
+        String meta_url = t.MetaUrl
         def clos = {  respCode, body ->
             logger.info("test ttl expired resp Code {}", 
"${respCode}".toString())
             assertEquals("${respCode}".toString(), "200")
@@ -90,13 +90,13 @@ suite("table_modify_resouce_by_hdfs") {
     def load_lineitem_table = {
         stream_load_one_part("00")
         stream_load_one_part("01")
-        def tablets = sql """
+        def tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
-        while (tablets[0][8] == "0") {
+        while (tablets[0].LocalDataSize == "0") {
             log.info( "test local size is zero, sleep 10s")
             sleep(10000)
-            tablets = sql """
+            tablets = sql_return_maparray """
             SHOW TABLETS FROM ${tableName}
             """
         }
@@ -188,7 +188,7 @@ suite("table_modify_resouce_by_hdfs") {
 
     // wait 10 minutes, then get the remote data size
     sleep(600000)
-    def tablets = sql """
+    def tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -199,7 +199,7 @@ suite("table_modify_resouce_by_hdfs") {
     while (sizes[0] != 0) {
         log.info( "test local size is not zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -225,7 +225,7 @@ suite("table_modify_resouce_by_hdfs") {
     """
 
 
-    def tablets2 = sql """
+    def tablets2 = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     // [8] local data size, [9] remote data size
@@ -272,7 +272,7 @@ suite("table_modify_resouce_by_hdfs") {
 
     // wait 10 minutes, then get the remote data size
     sleep(600000)
-    tablets = sql """
+    tablets = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     log.info( "test tablets not empty")
@@ -282,7 +282,7 @@ suite("table_modify_resouce_by_hdfs") {
     while (sizes[0] != 0) {
         log.info( "test local size is not zero, sleep 10s")
         sleep(10000)
-        tablets = sql """
+        tablets = sql_return_maparray """
         SHOW TABLETS FROM ${tableName}
         """
         fetchDataSize(sizes, tablets[0])
@@ -308,7 +308,7 @@ suite("table_modify_resouce_by_hdfs") {
     """
 
 
-    tablets2 = sql """
+    tablets2 = sql_return_maparray """
     SHOW TABLETS FROM ${tableName}
     """
     // [8] local data size, [9] remote data size

