pingchunzhang commented on code in PR #45501:
URL: https://github.com/apache/doris/pull/45501#discussion_r1899253761


##########
regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy:
##########
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_follower_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def create_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+
+    def check = {String tableName -> 
+        List<String> tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map<String, List> sizeRecords = ["apiSize":[], "mysqlSize":[], 
"cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load 1 time, record each size
+            repeate_stream_load_same_data(tableName, i, 
"regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from 
${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // 加一下触发compaction的机制
+            trigger_compaction(tablets)
+
+            // 然后 sleep 1min, 等fe汇报完
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: 
${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, 
storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect load 1 times ==  load 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+
+        def result = sql """show frontends;"""
+        logger.info("result:" + result)
+        for (int i = 0; i < result.size(); i++) {
+            if (result[i][8] == "false" && result[i][11] == "true") {
+                def tokens = context.config.jdbcUrl.split('/')
+                url = tokens[0] + "//" + tokens[2] + "/" + 
"information_schema" + "?"
+                def new_jdbc_url = url.replaceAll(/\/\/[0-9.]+:/, 
"//${switch_ip}:")
+                logger.info("new_jdbc_url: " + new_jdbc_url)
+
+                connect('root', '', new_jdbc_url) {
+                    sql "select count(*) from ${tableName}"
+
+                    
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+                    
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+                    
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))

Review Comment:
   This case only needs to compare the MySQL-reported data size across the different FEs (leader vs. follower), but here the API size and storage size are being compared as well.



##########
regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_column_show_data.groovy:
##########
@@ -0,0 +1,196 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_schema_change_add_and_drop_column_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def create_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def create_index_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR,
+              index index_SHIPINSTRUCT (L_SHIPINSTRUCT) using inverted,
+              index index_SHIPMODE (L_SHIPMODE) using inverted,
+              index index_COMMENT (L_COMMENT) using inverted
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def schema_change_add_column = { String tableName ->
+        sql """
+        ALTER TABLE ${tableName} add column l_test int after L_COMMENT;
+        """
+
+        waitForSchemaChangeDone {
+            sql """ SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' 
ORDER BY createtime DESC LIMIT 1 """
+            time 600
+        }
+    }
+
+    def schema_change_drop_column = { String tableName ->
+        sql """
+        ALTER TABLE ${tableName} drop column L_COMMENT;
+        """
+
+        waitForSchemaChangeDone {
+            sql """ SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' 
ORDER BY createtime DESC LIMIT 1 """
+            time 600
+        }
+    }
+
+    def check = {String tableName -> 
+        List<String> tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map<String, List> sizeRecords = ["apiSize":[], "mysqlSize":[], 
"cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load 1 time, record each size
+            repeate_stream_load_same_data(tableName, i, 
"regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from 
${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // 加一下触发compaction的机制
+            trigger_compaction(tablets)
+
+            // 然后 sleep 1min, 等fe汇报完
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: 
${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, 
storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect load 1 times ==  load 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        schema_change_add_column(tableName)
+
+        // 加一下触发compaction的机制
+        trigger_compaction(tablets)

Review Comment:
   The tablets may have changed after adding or dropping a column; fetch the new tablet list before triggering compaction.



##########
regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_and_recover_table_show_data.groovy:
##########
@@ -0,0 +1,149 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_truncate_and_recover_table_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def create_normal_table = { String tableName ->
+        sql "drop TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def check = {String tableName, int op -> 
+        List<String> tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map<String, List> sizeRecords = ["apiSize":[], "mysqlSize":[], 
"cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load 1 time, record each size
+            repeate_stream_load_same_data(tableName, i, 
"regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from 
${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // 加一下触发compaction的机制
+            trigger_compaction(tablets)
+
+            // 然后 sleep 1min, 等fe汇报完
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: 
${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, 
storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect load 1 times ==  load 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        if(op == 1){
+
+        sql """truncate table ${tableName}"""
+
+        sleep(60 * 1000)
+
+        sql """recover table ${tableName}"""

Review Comment:
   After the recover operation the tablets have changed; fetch the new tablet list before using it again.



##########
regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_and_recover_table_show_data.groovy:
##########
@@ -0,0 +1,149 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_truncate_and_recover_table_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def create_normal_table = { String tableName ->
+        sql "drop TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def check = {String tableName, int op -> 
+        List<String> tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map<String, List> sizeRecords = ["apiSize":[], "mysqlSize":[], 
"cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load 1 time, record each size
+            repeate_stream_load_same_data(tableName, i, 
"regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from 
${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // 加一下触发compaction的机制
+            trigger_compaction(tablets)
+
+            // 然后 sleep 1min, 等fe汇报完
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: 
${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, 
storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect load 1 times ==  load 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        if(op == 1){
+
+        sql """truncate table ${tableName}"""

Review Comment:
   Use correct indentation: the statements inside the `if (op == 1)` block should be indented one level deeper than the `if`.



##########
regression-test/suites/show_data/test_show_mow_data.groovy:
##########
@@ -82,10 +82,10 @@ suite("test_mow_show_data_in_cloud","p2") {
         sb.append(triggerCompactionUrl)
         String command = sb.toString()
         logger.info(command)
-        process = command.execute()
-        code = process.waitFor()
-        err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
-        out = process.getText()
+        def process = command.execute()

Review Comment:
   `show_tablet_compaction` is already provided by the plugin, so this suite does not need to redefine it. Follow the same pattern as the other helper functions and suites.



##########
regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_reorder_column_show_data.groovy:
##########
@@ -0,0 +1,166 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_schema_change_reorder_column_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def create_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def create_index_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR,
+              index index_SHIPINSTRUCT (L_SHIPINSTRUCT) using inverted,
+              index index_SHIPMODE (L_SHIPMODE) using inverted,
+              index index_COMMENT (L_COMMENT) using inverted
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def schema_change_reorder_column= { String tableName ->
+        sql """
+        ALTER TABLE ${tableName} modify column L_SHIPMODE CHAR(10) NOT NULL 
after L_COMMENT;
+        """
+
+        waitForSchemaChangeDone {
+            sql """ SHOW ALTER TABLE column WHERE TableName='${tableName}' 
ORDER BY createtime DESC LIMIT 1 """
+            time 600
+        }
+    }
+
+    def check = {String tableName -> 
+        List<String> tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map<String, List> sizeRecords = ["apiSize":[], "mysqlSize":[], 
"cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load 1 time, record each size
+            repeate_stream_load_same_data(tableName, i, 
"regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from 
${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // 加一下触发compaction的机制
+            trigger_compaction(tablets)
+
+            // 然后 sleep 1min, 等fe汇报完
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: 
${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, 
storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect load 1 times ==  load 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        schema_change_reorder_column(tableName)
+
+        // 加一下触发compaction的机制
+        trigger_compaction(tablets)

Review Comment:
   Fetch the new tablet list here — the schema change (column reorder) may have replaced the tablets, so the old list is stale.



##########
regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_index_show_data.groovy:
##########
@@ -0,0 +1,196 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_schema_change_add_and_drop_index_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def create_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def create_index_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR,
+              index index_SHIPINSTRUCT (L_SHIPINSTRUCT) using inverted,
+              index index_SHIPMODE (L_SHIPMODE) using inverted,
+              index index_COMMENT (L_COMMENT) using inverted
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def schema_change_add_index = { String tableName ->
+        sql """
+        ALTER TABLE ${tableName} add index index1 (L_LINESTATUS) using 
inverted;
+        """
+
+        waitForSchemaChangeDone {
+            sql """ SHOW ALTER TABLE column WHERE TableName='${tableName}' 
ORDER BY createtime DESC LIMIT 1 """
+            time 600
+        }
+    }
+
+    def schema_change_drop_index = { String tableName ->
+        sql """
+        ALTER TABLE ${tableName} drop index index1;
+        """
+
+        waitForSchemaChangeDone {
+            sql """ SHOW ALTER TABLE column WHERE TableName='${tableName}' 
ORDER BY createtime DESC LIMIT 1 """
+            time 600
+        }
+    }
+
+    def check = {String tableName -> 
+        List<String> tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map<String, List> sizeRecords = ["apiSize":[], "mysqlSize":[], 
"cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load 1 time, record each size
+            repeate_stream_load_same_data(tableName, i, 
"regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from 
${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // 加一下触发compaction的机制
+            trigger_compaction(tablets)
+
+            // 然后 sleep 1min, 等fe汇报完
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: 
${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, 
storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect load 1 times ==  load 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        schema_change_add_index(tableName)
+
+        // 加一下触发compaction的机制
+        trigger_compaction(tablets)

Review Comment:
   after adding the index, the tablets will change; need to get the new tablet list



##########
regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy:
##########
@@ -0,0 +1,149 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_drop_and_recover_table_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def create_normal_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def check = {String tableName, int op -> 
+        List<String> tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map<String, List> sizeRecords = ["apiSize":[], "mysqlSize":[], 
"cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load 1 time, record each size
+            repeate_stream_load_same_data(tableName, i, 
"regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from 
${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // 加一下触发compaction的机制
+            trigger_compaction(tablets)
+
+            // 然后 sleep 1min, 等fe汇报完
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: 
${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, 
storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect load 1 times ==  load 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        if(op == 1){

Review Comment:
   use correct indentation for the body of this if-block



##########
regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_index_show_data.groovy:
##########
@@ -0,0 +1,196 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_schema_change_add_and_drop_index_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def create_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def create_index_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR,
+              index index_SHIPINSTRUCT (L_SHIPINSTRUCT) using inverted,
+              index index_SHIPMODE (L_SHIPMODE) using inverted,
+              index index_COMMENT (L_COMMENT) using inverted
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def schema_change_add_index = { String tableName ->
+        sql """
+        ALTER TABLE ${tableName} add index index1 (L_LINESTATUS) using 
inverted;
+        """
+
+        waitForSchemaChangeDone {
+            sql """ SHOW ALTER TABLE column WHERE TableName='${tableName}' 
ORDER BY createtime DESC LIMIT 1 """
+            time 600
+        }
+    }
+
+    def schema_change_drop_index = { String tableName ->
+        sql """
+        ALTER TABLE ${tableName} drop index index1;
+        """
+
+        waitForSchemaChangeDone {
+            sql """ SHOW ALTER TABLE column WHERE TableName='${tableName}' 
ORDER BY createtime DESC LIMIT 1 """
+            time 600
+        }
+    }
+
+    def check = {String tableName -> 
+        List<String> tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map<String, List> sizeRecords = ["apiSize":[], "mysqlSize":[], 
"cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load 1 time, record each size
+            repeate_stream_load_same_data(tableName, i, 
"regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from 
${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // 加一下触发compaction的机制
+            trigger_compaction(tablets)
+
+            // 然后 sleep 1min, 等fe汇报完
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: 
${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, 
storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect load 1 times ==  load 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        schema_change_add_index(tableName)
+
+        // 加一下触发compaction的机制
+        trigger_compaction(tablets)
+
+        // 然后 sleep 1min, 等fe汇报完
+        sleep(60 * 1000)
+
+        sql "select count(*) from ${tableName}"
+
+        
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+        
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+        
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
+        assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2])
+
+        schema_change_drop_index(tableName)
+
+        // 加一下触发compaction的机制
+        trigger_compaction(tablets)

Review Comment:
   after dropping the index, the tablets will change; need to get the new tablet list



##########
regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy:
##########
@@ -0,0 +1,93 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_disable_compaction_show_data","p2") {
+    //cloud-mode

Review Comment:
   this case does not disable compaction, which differs from the testing objective



##########
regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy:
##########
@@ -0,0 +1,149 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_drop_and_recover_table_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def create_normal_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )
+        """
+    }
+
+    def check = {String tableName, int op -> 
+        List<String> tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map<String, List> sizeRecords = ["apiSize":[], "mysqlSize":[], 
"cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load 1 time, record each size
+            repeate_stream_load_same_data(tableName, i, 
"regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from 
${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // 加一下触发compaction的机制
+            trigger_compaction(tablets)
+
+            // 然后 sleep 1min, 等fe汇报完
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: 
${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, 
storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect load 1 times ==  load 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        if(op == 1){
+
+        sql """drop table ${tableName}"""
+
+        sleep(60 * 1000)
+
+        sql """recover table ${tableName}"""

Review Comment:
   after recover, the tablets have changed; use the new tablet list



##########
regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v1_show_data.groovy:
##########
@@ -0,0 +1,95 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases is copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+ // loading one data 10 times, expect data size not rising
+suite("test_cloud_inverted_index_v1_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def main = {
+        def tableName="test_cloud_inverted_index_v1_show_data"
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+              L_ORDERKEY    INTEGER NOT NULL,
+              L_PARTKEY     INTEGER NOT NULL,
+              L_SUPPKEY     INTEGER NOT NULL,
+              L_LINENUMBER  INTEGER NOT NULL,
+              L_QUANTITY    DECIMAL(15,2) NOT NULL,
+              L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+              L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+              L_TAX         DECIMAL(15,2) NOT NULL,
+              L_RETURNFLAG  CHAR(1) NOT NULL,
+              L_LINESTATUS  CHAR(1) NOT NULL,
+              L_SHIPDATE    DATE NOT NULL,
+              L_COMMITDATE  DATE NOT NULL,
+              L_RECEIPTDATE DATE NOT NULL,
+              L_SHIPINSTRUCT CHAR(25) NOT NULL,
+              L_SHIPMODE     CHAR(10) NOT NULL,
+              L_COMMENT      VARCHAR(44) NOT NULL,
+              L_NULL         VARCHAR,
+              index index_SHIPINSTRUCT (L_SHIPINSTRUCT) using inverted,
+              index index_SHIPMODE (L_SHIPMODE) using inverted,
+              index index_COMMENT (L_COMMENT) using inverted
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+              "replication_num" = "1"
+            )

Review Comment:
   this is not an inverted index v1 table; cloud mode now uses the v2 storage format by default, so set "inverted_index_storage_format" = "V1" explicitly



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org

Reply via email to