This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 47be7c5e173 [test-framework](cloud) add necessary config and function for run cloud cases (#32127)
47be7c5e173 is described below

commit 47be7c5e1737c084f0919eeee2001583a78a7ccb
Author: chunping <cece_m...@163.com>
AuthorDate: Tue Mar 12 22:04:03 2024 +0800

    [test-framework](cloud) add necessary config and function for run cloud cases (#32127)
    
    * [test](cloud) add necessary config and function for run cloud cases
    
    * [test-framework](cloud) adjust format of config.groovy
---
 regression-test/framework/pom.xml                  | 153 ++++++++---
 .../org/apache/doris/regression/Config.groovy      | 298 ++++++++++++++++++++-
 .../apache/doris/regression/ConfigOptions.groovy   | 200 +++++++++++++-
 .../org/apache/doris/regression/suite/Suite.groovy | 249 +++++++++++++++++
 4 files changed, 857 insertions(+), 43 deletions(-)
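
The new settings can come from the command line or from regression-conf.groovy.
A rough sketch of the cloud-related entries, with placeholder values only (the
key names follow the fields read in Config.groovy and the keys validated by
checkCloudSmokeEnv below):

    feCloudHttpAddress = "127.0.0.1:8876"
    feCloudHttpUser = "root"
    feCloudHttpPassword = ""
    instanceId = "instance_xxx"
    cloudUniqueId = "cloud_unique_id_xxx"
    metaServiceHttpAddress = "127.0.0.1:5000"
    recycleServiceHttpAddress = "127.0.0.1:5001"
    metaServiceToken = "token_xxx"
    isSmokeTest = true

    // any other keys land in config.otherConfigs; smoke runs require these:
    s3Endpoint = "s3.ap-southeast-1.amazonaws.com"
    s3Region = "ap-southeast-1"
    s3BucketName = "bucket_xxx"
    ak = "ak_xxx"
    sk = "sk_xxx"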

diff --git a/regression-test/framework/pom.xml b/regression-test/framework/pom.xml
index 4c22fe22f13..41f7ae3ed50 100644
--- a/regression-test/framework/pom.xml
+++ b/regression-test/framework/pom.xml
@@ -71,9 +71,12 @@ under the License.
         <maven.compiler.target>1.8</maven.compiler.target>
         <revision>1.0-SNAPSHOT</revision>
         <project.scm.id>github</project.scm.id>
-        <groovy.version>4.0.19</groovy.version>
+        <groovy.version>3.0.7</groovy.version>
+        <groovy-eclipse-batch.version>3.0.7-01</groovy-eclipse-batch.version>
+        <groovy-eclipse-compiler.version>3.7.0</groovy-eclipse-compiler.version>
         <antlr.version>4.9.3</antlr.version>
         <hadoop.version>2.8.0</hadoop.version>
+        <aws-java-sdk-s3.version>1.11.95</aws-java-sdk-s3.version>
         <arrow.version>15.0.0</arrow.version>
     </properties>
     <build>
@@ -84,19 +87,33 @@ under the License.
                 <version>1.2.5</version>
             </plugin>
             <plugin>
-                <groupId>org.codehaus.gmavenplus</groupId>
-                <artifactId>gmavenplus-plugin</artifactId>
-                <version>3.0.2</version>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>addSources</goal>
-                            <goal>addTestSources</goal>
-                            <goal>compile</goal>
-                            <goal>compileTests</goal>
-                        </goals>
-                    </execution>
-                </executions>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.9.0</version>
+                <configuration>
+                    <compilerId>groovy-eclipse-compiler</compilerId>
+                    <source>${maven.compiler.source}</source>
+                    <target>${maven.compiler.target}</target>
+                    <fork>true</fork>
+                </configuration>
+                <dependencies>
+                    <dependency>
+                        <groupId>org.codehaus.groovy</groupId>
+                        <artifactId>groovy-eclipse-compiler</artifactId>
+                        <version>${groovy-eclipse-compiler.version}</version>
+                    </dependency>
+                    <dependency>
+                        <groupId>org.codehaus.groovy</groupId>
+                        <artifactId>groovy-eclipse-batch</artifactId>
+                        <version>${groovy-eclipse-batch.version}</version>
+                    </dependency>
+                </dependencies>
+            </plugin>
+            <plugin>
+                <groupId>org.codehaus.groovy</groupId>
+                <artifactId>groovy-eclipse-compiler</artifactId>
+                <version>${groovy-eclipse-compiler.version}</version>
+                <extensions>true</extensions>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
@@ -152,6 +169,35 @@ under the License.
                 </configuration>
             </plugin>
         </plugins>
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <groupId>org.codehaus.mojo</groupId>
+                    <artifactId>flatten-maven-plugin</artifactId>
+                    <version>1.2.5</version>
+                    <configuration>
+                        <updatePomFile>true</updatePomFile>
+                        <flattenMode>bom</flattenMode>
+                    </configuration>
+                    <executions>
+                        <execution>
+                            <id>flatten</id>
+                            <phase>process-resources</phase>
+                            <goals>
+                                <goal>flatten</goal>
+                            </goals>
+                        </execution>
+                        <execution>
+                            <id>flatten.clean</id>
+                            <phase>clean</phase>
+                            <goals>
+                                <goal>clean</goal>
+                            </goals>
+                        </execution>
+                    </executions>
+                </plugin>
+            </plugins>
+        </pluginManagement>
     </build>
 
     <dependencies>
@@ -167,7 +213,7 @@ under the License.
             <version>2.10.1</version>
         </dependency>
         <dependency>
-            <groupId>org.apache.groovy</groupId>
+            <groupId>org.codehaus.groovy</groupId>
             <artifactId>groovy-all</artifactId>
             <version>${groovy.version}</version>
             <type>pom</type>
@@ -197,11 +243,6 @@ under the License.
             <artifactId>jodd-core</artifactId>
             <version>5.3.0</version>
         </dependency>
-        <dependency>
-            <groupId>org.apache.kafka</groupId>
-            <artifactId>kafka-clients</artifactId>
-            <version>2.8.1</version>
-        </dependency>
         <dependency>
             <groupId>ch.qos.logback</groupId>
             <artifactId>logback-classic</artifactId>
@@ -256,25 +297,69 @@ under the License.
             <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-mapreduce-client-core</artifactId>
             <version>${hadoop.version}</version>
-            <exclusions>
-                <!-- exclude jdk7 jar -->
-                <exclusion>
-                    <groupId>jdk.tools</groupId>
-                    <artifactId>jdk.tools</artifactId>
-                </exclusion>
-            </exclusions>
         </dependency>
         <dependency>
             <groupId>org.apache.hive</groupId>
             <artifactId>hive-jdbc</artifactId>
             <version>2.3.7</version>
-            <exclusions>
-                <!-- exclude jdk7 jar -->
-                <exclusion>
-                    <groupId>jdk.tools</groupId>
-                    <artifactId>jdk.tools</artifactId>
-                </exclusion>
-            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-hdfs-client</artifactId>
+            <version>${hadoop.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-s3</artifactId>
+            <version>${aws-java-sdk-s3.version}</version>
+        </dependency>
+        <!-- aliyun ram -->
+        <dependency>
+            <groupId>com.aliyun</groupId>
+            <artifactId>aliyun-java-sdk-core</artifactId>
+            <version>4.5.10</version>
+        </dependency>
+        <dependency>
+            <groupId>com.aliyun</groupId>
+            <artifactId>aliyun-java-sdk-ram</artifactId>
+            <version>3.3.1</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+            <version>2.2.4</version>
+        </dependency>
+        <!---->
+        <!-- txcloud ram -->
+        <dependency>
+            <groupId>com.tencentcloudapi</groupId>
+            <artifactId>tencentcloud-sdk-java-cam</artifactId>
+            <version>3.1.694</version>
+        </dependency>
+        <!---->
+        <!-- hwcloud ram -->
+        <!--<dependency>
+            <groupId>com.huaweicloud.sdk</groupId>
+            <artifactId>huaweicloud-sdk-iam</artifactId>
+            <version>3.1.26</version>
+        </dependency>-->
+        <!---->
+        <!-- aws ram -->
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>iam</artifactId>
+            <version>2.20.8</version>
+        </dependency>
+        <!---->
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
+            <version>2.19.8</version>
+        </dependency>
+        <dependency>
+            <groupId>com.hierynomus</groupId>
+            <artifactId>sshj</artifactId>
+            <version>0.32.0</version>
         </dependency>
         <dependency>
             <groupId>org.apache.arrow</groupId>
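
The new object-storage and SSH dependencies back the cloud helpers added
further down. As a rough illustration (not part of this patch), the
software.amazon.awssdk:s3 artifact can be driven from a suite like this,
with the region and credentials as placeholders:

    import software.amazon.awssdk.auth.credentials.AwsBasicCredentials
    import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider
    import software.amazon.awssdk.regions.Region
    import software.amazon.awssdk.services.s3.S3Client

    // A smoke suite would read these from config.otherConfigs ("ak", "sk",
    // "s3Region") rather than hard-coding them.
    def s3 = S3Client.builder()
            .region(Region.of("ap-southeast-1"))
            .credentialsProvider(StaticCredentialsProvider.create(
                    AwsBasicCredentials.create("ak_xxx", "sk_xxx")))
            .build()
    try {
        s3.listBuckets().buckets().each { b -> println b.name() }
    } finally {
        s3.close()
    }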
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy
index e240264060f..f678d7e2bc8 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy
@@ -51,7 +51,14 @@ class Config {
     public String feHttpUser
     public String feHttpPassword
 
+    public String feCloudHttpAddress
+    public String feCloudHttpUser
+    public String feCloudHttpPassword
+
+    public String instanceId
+    public String cloudUniqueId
     public String metaServiceHttpAddress
+    public String recycleServiceHttpAddress
 
     public String suitePath
     public String dataPath
@@ -92,21 +99,91 @@ class Config {
     public TNetworkAddress feTargetThriftNetworkAddress
     public TNetworkAddress syncerNetworkAddress
     public InetSocketAddress feHttpInetSocketAddress
+    public InetSocketAddress feCloudHttpInetSocketAddress
     public InetSocketAddress metaServiceHttpInetSocketAddress
+    public InetSocketAddress recycleServiceHttpInetSocketAddress
     public Integer parallel
     public Integer suiteParallel
     public Integer actionParallel
     public Integer times
     public boolean withOutLoadData
+    public boolean isSmokeTest
+    public String multiClusterBes
+    public String metaServiceToken
+    public String multiClusterInstance
+    public String upgradeNewBeIp
+    public String upgradeNewBeHbPort
+    public String upgradeNewBeHttpPort
+    public String upgradeNewBeUniqueId
+
+    public String stageIamEndpoint
+    public String stageIamRegion
+    public String stageIamBucket
+    public String stageIamPolicy
+    public String stageIamRole
+    public String stageIamArn
+    public String stageIamAk
+    public String stageIamSk
+    public String stageIamUserId
+
+    public String clusterDir
+    public String kafkaBrokerList
+    public String cloudVersion
 
     Config() {}
 
-    Config(String defaultDb, String jdbcUrl, String jdbcUser, String jdbcPassword,
-           String feSourceThriftAddress, String feTargetThriftAddress, String feSyncerUser, String feSyncerPassword,
-           String syncerPassword, String feHttpAddress, String feHttpUser, String feHttpPassword, String metaServiceHttpAddress,
-           String suitePath, String dataPath, String realDataPath, String cacheDataPath, Boolean enableCacheData,
-           String testGroups, String excludeGroups, String testSuites, String excludeSuites,
-           String testDirectories, String excludeDirectories, String pluginPath, String sslCertificatePath) {
+    Config(
+            String defaultDb, 
+            String jdbcUrl, 
+            String jdbcUser,
+            String jdbcPassword,
+            String feSourceThriftAddress,
+            String feTargetThriftAddress,
+            String feSyncerUser,
+            String feSyncerPassword,
+            String syncerPassword,
+            String feHttpAddress,
+            String feHttpUser,
+            String feHttpPassword, 
+            String feCloudHttpAddress,
+            String feCloudHttpUser,
+            String feCloudHttpPassword,
+            String instanceId,
+            String cloudUniqueId,
+            String metaServiceHttpAddress,
+            String recycleServiceHttpAddress,
+            String suitePath,
+            String dataPath,
+            String realDataPath,
+            String cacheDataPath,
+            Boolean enableCacheData,
+            String testGroups,
+            String excludeGroups,
+            String testSuites, 
+            String excludeSuites,
+            String testDirectories,
+            String excludeDirectories, 
+            String pluginPath,
+            String sslCertificatePath,
+            String multiClusterBes,
+            String metaServiceToken,
+            String multiClusterInstance,
+            String upgradeNewBeIp, 
+            String upgradeNewBeHbPort,
+            String upgradeNewBeHttpPort,
+            String upgradeNewBeUniqueId,
+            String stageIamEndpoint,
+            String stageIamRegion,
+            String stageIamBucket,
+            String stageIamPolicy,
+            String stageIamRole,
+            String stageIamArn,
+            String stageIamAk,
+            String stageIamSk,
+            String stageIamUserId,
+            String clusterDir, 
+            String kafkaBrokerList, 
+            String cloudVersion) {
         this.defaultDb = defaultDb
         this.jdbcUrl = jdbcUrl
         this.jdbcUser = jdbcUser
@@ -119,7 +196,13 @@ class Config {
         this.feHttpAddress = feHttpAddress
         this.feHttpUser = feHttpUser
         this.feHttpPassword = feHttpPassword
+        this.feCloudHttpAddress = feCloudHttpAddress
+        this.feCloudHttpUser = feCloudHttpUser
+        this.feCloudHttpPassword = feCloudHttpPassword
+        this.instanceId = instanceId
+        this.cloudUniqueId = cloudUniqueId
         this.metaServiceHttpAddress = metaServiceHttpAddress
+        this.recycleServiceHttpAddress = recycleServiceHttpAddress
         this.suitePath = suitePath
         this.dataPath = dataPath
         this.realDataPath = realDataPath
@@ -133,6 +216,25 @@ class Config {
         this.excludeDirectories = excludeDirectories
         this.pluginPath = pluginPath
         this.sslCertificatePath = sslCertificatePath
+        this.multiClusterBes = multiClusterBes
+        this.metaServiceToken = metaServiceToken
+        this.multiClusterInstance = multiClusterInstance
+        this.upgradeNewBeIp = upgradeNewBeIp
+        this.upgradeNewBeHbPort = upgradeNewBeHbPort
+        this.upgradeNewBeHttpPort = upgradeNewBeHttpPort
+        this.upgradeNewBeUniqueId = upgradeNewBeUniqueId
+        this.stageIamEndpoint = stageIamEndpoint
+        this.stageIamRegion = stageIamRegion
+        this.stageIamBucket = stageIamBucket
+        this.stageIamPolicy = stageIamPolicy
+        this.stageIamRole = stageIamRole
+        this.stageIamArn = stageIamArn
+        this.stageIamAk = stageIamAk
+        this.stageIamSk = stageIamSk
+        this.stageIamUserId = stageIamUserId
+        this.clusterDir = clusterDir
+        this.kafkaBrokerList = kafkaBrokerList
+        this.cloudVersion = cloudVersion
     }
 
     static Config fromCommandLine(CommandLine cmd) {
@@ -237,6 +339,22 @@ class Config {
             throw new IllegalStateException("Can not parse stream load 
address: ${config.feHttpAddress}", t)
         }
 
+        config.feCloudHttpAddress = cmd.getOptionValue(feCloudHttpAddressOpt, config.feCloudHttpAddress)
+        try {
+            Inet4Address host = Inet4Address.getByName(config.feCloudHttpAddress.split(":")[0]) as Inet4Address
+            int port = Integer.valueOf(config.feCloudHttpAddress.split(":")[1])
+            config.feCloudHttpInetSocketAddress = new InetSocketAddress(host, port)
+        } catch (Throwable t) {
+            throw new IllegalStateException("Can not parse fe cloud http address: ${config.feCloudHttpAddress}", t)
+        }
+        log.info("feCloudHttpAddress : $config.feCloudHttpAddress, socketAddr : $config.feCloudHttpInetSocketAddress")
+
+        config.instanceId = cmd.getOptionValue(instanceIdOpt, config.instanceId)
+        log.info("instanceId : ${config.instanceId}")
+
+        config.cloudUniqueId = cmd.getOptionValue(cloudUniqueIdOpt, config.cloudUniqueId)
+        log.info("cloudUniqueId : ${config.cloudUniqueId}")
+
         config.metaServiceHttpAddress = cmd.getOptionValue(metaServiceHttpAddressOpt, config.metaServiceHttpAddress)
         try {
             Inet4Address host = Inet4Address.getByName(config.metaServiceHttpAddress.split(":")[0]) as Inet4Address
@@ -247,6 +365,61 @@ class Config {
         }
         log.info("msAddr : $config.metaServiceHttpAddress, socketAddr : 
$config.metaServiceHttpInetSocketAddress")
 
+        config.multiClusterBes = cmd.getOptionValue(multiClusterBesOpt, config.multiClusterBes)
+        log.info("multiClusterBes is ${config.multiClusterBes}".toString())
+
+        config.metaServiceToken = cmd.getOptionValue(metaServiceTokenOpt, config.metaServiceToken)
+        log.info("metaServiceToken is ${config.metaServiceToken}".toString())
+
+        config.multiClusterInstance = cmd.getOptionValue(multiClusterInstanceOpt, config.multiClusterInstance)
+        log.info("multiClusterInstance is ${config.multiClusterInstance}".toString())
+
+        config.upgradeNewBeIp = cmd.getOptionValue(upgradeNewBeIpOpt, config.upgradeNewBeIp)
+        log.info("upgradeNewBeIp is ${config.upgradeNewBeIp}".toString())
+
+        config.upgradeNewBeHbPort = cmd.getOptionValue(upgradeNewBeHbPortOpt, config.upgradeNewBeHbPort)
+        log.info("upgradeNewBeHbPort is ${config.upgradeNewBeHbPort}".toString())
+
+        config.upgradeNewBeHttpPort = cmd.getOptionValue(upgradeNewBeHttpPortOpt, config.upgradeNewBeHttpPort)
+        log.info("upgradeNewBeHttpPort is ${config.upgradeNewBeHttpPort}".toString())
+
+        config.upgradeNewBeUniqueId = cmd.getOptionValue(upgradeNewBeUniqueIdOpt, config.upgradeNewBeUniqueId)
+        log.info("upgradeNewBeUniqueId is ${config.upgradeNewBeUniqueId}".toString())
+
+        config.stageIamEndpoint = cmd.getOptionValue(stageIamEndpointOpt, config.stageIamEndpoint)
+        log.info("stageIamEndpoint is ${config.stageIamEndpoint}".toString())
+        config.stageIamRegion = cmd.getOptionValue(stageIamRegionOpt, config.stageIamRegion)
+        log.info("stageIamRegion is ${config.stageIamRegion}".toString())
+        config.stageIamBucket = cmd.getOptionValue(stageIamBucketOpt, config.stageIamBucket)
+        log.info("stageIamBucket is ${config.stageIamBucket}".toString())
+        config.stageIamPolicy = cmd.getOptionValue(stageIamPolicyOpt, config.stageIamPolicy)
+        log.info("stageIamPolicy is ${config.stageIamPolicy}".toString())
+        config.stageIamRole = cmd.getOptionValue(stageIamRoleOpt, config.stageIamRole)
+        log.info("stageIamRole is ${config.stageIamRole}".toString())
+        config.stageIamArn = cmd.getOptionValue(stageIamArnOpt, config.stageIamArn)
+        log.info("stageIamArn is ${config.stageIamArn}".toString())
+        config.stageIamAk = cmd.getOptionValue(stageIamAkOpt, config.stageIamAk)
+        log.info("stageIamAk is ${config.stageIamAk}".toString())
+        config.stageIamSk = cmd.getOptionValue(stageIamSkOpt, config.stageIamSk)
+        log.info("stageIamSk is ${config.stageIamSk}".toString())
+        config.stageIamUserId = cmd.getOptionValue(stageIamUserIdOpt, config.stageIamUserId)
+        log.info("stageIamUserId is ${config.stageIamUserId}".toString())
+        config.cloudVersion = cmd.getOptionValue(cloudVersionOpt, config.cloudVersion)
+        log.info("cloudVersion is ${config.cloudVersion}".toString())
+
+        config.kafkaBrokerList = cmd.getOptionValue(kafkaBrokerListOpt, config.kafkaBrokerList)
+
+        config.recycleServiceHttpAddress = cmd.getOptionValue(recycleServiceHttpAddressOpt, config.recycleServiceHttpAddress)
+        try {
+            Inet4Address host = Inet4Address.getByName(config.recycleServiceHttpAddress.split(":")[0]) as Inet4Address
+            int port = Integer.valueOf(config.recycleServiceHttpAddress.split(":")[1])
+            config.recycleServiceHttpInetSocketAddress = new InetSocketAddress(host, port)
+        } catch (Throwable t) {
+            throw new IllegalStateException("Can not parse recycle service address: ${config.recycleServiceHttpAddress}", t)
+        }
+        log.info("recycleAddr : $config.recycleServiceHttpAddress, socketAddr : $config.recycleServiceHttpInetSocketAddress")
+
+
         config.defaultDb = cmd.getOptionValue(defaultDbOpt, config.defaultDb)
         config.jdbcUrl = cmd.getOptionValue(jdbcOpt, config.jdbcUrl)
         config.jdbcUser = cmd.getOptionValue(userOpt, config.jdbcUser)
@@ -255,6 +428,8 @@ class Config {
         config.feSyncerPassword = cmd.getOptionValue(feSyncerPasswordOpt, config.feSyncerPassword)
         config.feHttpUser = cmd.getOptionValue(feHttpUserOpt, config.feHttpUser)
         config.feHttpPassword = cmd.getOptionValue(feHttpPasswordOpt, config.feHttpPassword)
+        config.feCloudHttpUser = cmd.getOptionValue(feCloudHttpUserOpt, config.feCloudHttpUser)
+        config.feCloudHttpPassword = cmd.getOptionValue(feCloudHttpPasswordOpt, config.feCloudHttpPassword)
         config.generateOutputFile = cmd.hasOption(genOutOpt)
         config.forceGenerateOutputFile = cmd.hasOption(forceGenOutOpt)
         config.parallel = Integer.parseInt(cmd.getOptionValue(parallelOpt, "10"))
@@ -265,6 +440,7 @@ class Config {
         config.stopWhenFail = cmd.hasOption(stopWhenFailOpt)
         config.withOutLoadData = cmd.hasOption(withOutLoadDataOpt)
         config.dryRun = cmd.hasOption(dryRunOpt)
+        config.isSmokeTest = cmd.hasOption(isSmokeTestOpt)
 
         log.info("randomOrder is ${config.randomOrder}".toString())
         log.info("stopWhenFail is ${config.stopWhenFail}".toString())
@@ -294,7 +470,13 @@ class Config {
             configToString(obj.feHttpAddress),
             configToString(obj.feHttpUser),
             configToString(obj.feHttpPassword),
+            configToString(obj.feCloudHttpAddress),
+            configToString(obj.feCloudHttpUser),
+            configToString(obj.feCloudHttpPassword),
+            configToString(obj.instanceId),
+            configToString(obj.cloudUniqueId),
             configToString(obj.metaServiceHttpAddress),
+            configToString(obj.recycleServiceHttpAddress),
             configToString(obj.suitePath),
             configToString(obj.dataPath),
             configToString(obj.realDataPath),
@@ -307,7 +489,26 @@ class Config {
             configToString(obj.testDirectories),
             configToString(obj.excludeDirectories),
             configToString(obj.pluginPath),
-            configToString(obj.sslCertificatePath)
+            configToString(obj.sslCertificatePath),
+            configToString(obj.multiClusterBes),
+            configToString(obj.metaServiceToken),
+            configToString(obj.multiClusterInstance),
+            configToString(obj.upgradeNewBeIp),
+            configToString(obj.upgradeNewBeHbPort),
+            configToString(obj.upgradeNewBeHttpPort),
+            configToString(obj.upgradeNewBeUniqueId),
+            configToString(obj.stageIamEndpoint),
+            configToString(obj.stageIamRegion),
+            configToString(obj.stageIamBucket),
+            configToString(obj.stageIamPolicy),
+            configToString(obj.stageIamRole),
+            configToString(obj.stageIamArn),
+            configToString(obj.stageIamAk),
+            configToString(obj.stageIamSk),
+            configToString(obj.stageIamUserId),
+            configToString(obj.clusterDir),
+            configToString(obj.kafkaBrokerList),
+            configToString(obj.cloudVersion)
         )
 
         config.image = configToString(obj.image)
@@ -325,9 +526,55 @@ class Config {
                 config.otherConfigs.put(key, kv.getValue())
             }
         }
+
+        // check smoke config
+        if (obj.isSmokeTest) {
+            config.isSmokeTest = true
+            String env = config.otherConfigs.getOrDefault("smokeEnv", 
"UNKNOWN")
+            log.info("Start to check $env config")
+            def c = config.otherConfigs
+            c.put("feCloudHttpAddress", obj.feCloudHttpAddress)
+            checkCloudSmokeEnv(c)
+        }
+
         return config
     }
 
+    static String getProvider(String endpoint) {
+        def providers = ["cos", "oss", "s3", "obs", "bos"]
+        for (final def provider in providers) {
+            if (endpoint.containsIgnoreCase(provider)) {
+                return provider
+            }
+        }
+        return ""
+    }
+
+    static void checkCloudSmokeEnv(Properties properties) {
+        // external stage obj info
+        String s3Endpoint = properties.getOrDefault("s3Endpoint", "")
+        String feCloudHttpAddress = properties.getOrDefault("feCloudHttpAddress", "")
+        String s3Region = properties.getOrDefault("s3Region", "")
+        String s3BucketName = properties.getOrDefault("s3BucketName", "")
+        String s3AK = properties.getOrDefault("ak", "")
+        String s3SK = properties.getOrDefault("sk", "")
+
+        def items = [
+                fecloudHttpAddrConf:feCloudHttpAddress,
+                s3RegionConf:s3Region,
+                s3EndpointConf:s3Endpoint,
+                s3BucketConf:s3BucketName,
+                s3AKConf:s3AK,
+                s3SKConf:s3SK,
+                s3ProviderConf:getProvider(s3Endpoint)
+        ]
+        for (final def item in items) {
+            if (item.value == null || item.value.isEmpty()) {
+                throw new IllegalStateException("cloud smoke conf err, plz 
check " + item.key)
+            }
+        }
+    }
+
     static void fillDefaultConfig(Config config) {
         if (config.defaultDb == null) {
             config.defaultDb = "regression_test"
@@ -365,11 +612,26 @@ class Config {
             log.info("Set feHttpAddress to '${config.feHttpAddress}' because 
not specify.".toString())
         }
 
+        if (config.instanceId == null) {
+            config.instanceId = "instance_xxx"
+            log.info("Set instanceId to '${config.instanceId}' because not 
specify.".toString())
+        }
+
+        if (config.cloudUniqueId == null) {
+            config.cloudUniqueId = "cloud_unique_id_xxx"
+            log.info("Set cloudUniqueId to '${config.cloudUniqueId}' because 
not specify.".toString())
+        }
+
         if (config.metaServiceHttpAddress == null) {
             config.metaServiceHttpAddress = "127.0.0.1:5000"
             log.info("Set metaServiceHttpAddress to 
'${config.metaServiceHttpAddress}' because not specify.".toString())
         }
 
+        if (config.recycleServiceHttpAddress == null) {
+            config.recycleServiceHttpAddress = "127.0.0.1:5001"
+            log.info("Set recycleServiceHttpAddress to 
'${config.recycleServiceHttpAddress}' because not specify.".toString())
+        }
+
         if (config.feSyncerUser == null) {
             config.feSyncerUser = "root"
             log.info("Set feSyncerUser to '${config.feSyncerUser}' because not 
specify.".toString())
@@ -395,6 +657,22 @@ class Config {
             log.info("Set feHttpPassword to empty because not 
specify.".toString())
         }
 
+
+        if (config.feCloudHttpAddress == null) {
+            config.feCloudHttpAddress = "127.0.0.1:8876"
+            log.info("Set feCloudHttpAddress to '${config.feCloudHttpAddress}' 
because not specify.".toString())
+        }
+
+        if (config.feCloudHttpUser == null) {
+            config.feCloudHttpUser = "root"
+            log.info("Set feCloudHttpUser to '${config.feCloudHttpUser}' 
because not specify.".toString())
+        }
+
+        if (config.feCloudHttpPassword == null) {
+            config.feCloudHttpPassword = ""
+            log.info("Set feCloudHttpPassword to empty because not 
specify.".toString())
+        }
+
         if (config.suitePath == null) {
             config.suitePath = "regression-test/suites"
             log.info("Set suitePath to '${config.suitePath}' because not 
specify.".toString())
@@ -446,7 +724,11 @@ class Config {
         }
 
         if (config.testGroups == null) {
-            config.testGroups = "default"
+            if (config.isSmokeTest) {
+                config.testGroups = "smoke"
+            } else {
+                config.testGroups = "default"
+            }
             log.info("Set testGroups to '${config.testGroups}' because not specify.".toString())
         }
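
A quick sanity sketch of the validation added above (values are placeholders;
checkCloudSmokeEnv throws IllegalStateException on any missing or empty key):

    import org.apache.doris.regression.Config

    def props = new Properties()
    props.putAll([
            feCloudHttpAddress: "127.0.0.1:8876",
            s3Endpoint        : "oss-cn-hangzhou.aliyuncs.com",
            s3Region          : "cn-hangzhou",
            s3BucketName      : "bucket_xxx",
            ak                : "ak_xxx",
            sk                : "sk_xxx",
    ])
    // the endpoint contains "oss", so getProvider resolves to "oss"
    assert Config.getProvider(props.getProperty("s3Endpoint")) == "oss"
    Config.checkCloudSmokeEnv(props)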
 
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy
index 36648dbe61d..3c455e9ecf1 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy
@@ -40,7 +40,13 @@ class ConfigOptions {
     static Option feHttpAddressOpt
     static Option feHttpUserOpt
     static Option feHttpPasswordOpt
+    static Option feCloudHttpAddressOpt
+    static Option feCloudHttpUserOpt
+    static Option feCloudHttpPasswordOpt
+    static Option instanceIdOpt
+    static Option cloudUniqueIdOpt
     static Option metaServiceHttpAddressOpt
+    static Option recycleServiceHttpAddressOpt
     static Option pathOpt
     static Option dataOpt
     static Option realDataOpt
@@ -66,6 +72,26 @@ class ConfigOptions {
     static Option timesOpt
     static Option withOutLoadDataOpt
     static Option dryRunOpt
+    static Option isSmokeTestOpt
+    static Option multiClusterBesOpt
+    static Option metaServiceTokenOpt
+    static Option multiClusterInstanceOpt
+    static Option upgradeNewBeIpOpt
+    static Option upgradeNewBeHbPortOpt
+    static Option upgradeNewBeHttpPortOpt
+    static Option upgradeNewBeUniqueIdOpt
+    static Option stageIamEndpointOpt
+    static Option stageIamRegionOpt
+    static Option stageIamBucketOpt
+    static Option stageIamPolicyOpt
+    static Option stageIamRoleOpt
+    static Option stageIamArnOpt
+    static Option stageIamAkOpt
+    static Option stageIamSkOpt
+    static Option stageIamUserIdOpt
+    static Option clusterDirOpt
+    static Option kafkaBrokerListOpt
+    static Option cloudVersionOpt
 
     static CommandLine initCommands(String[] args) {
         helpOption = Option.builder("h")
@@ -300,14 +326,62 @@ class ConfigOptions {
                 .longOpt("feHttpPassword")
                 .desc("the password of fe http server")
                 .build()
-        metaServiceHttpAddressOpt = Option.builder("hm")
+        feCloudHttpAddressOpt = Option.builder("cha")
                 .argName("address")
                 .required(false)
                 .hasArg(true)
                 .type(String.class)
+                .longOpt("feCloudHttpAddress")
+                .desc("the fe cloud http address, format is ip:port")
+                .build()
+        feCloudHttpUserOpt = Option.builder("chu")
+                .argName("userName")
+                .required(false)
+                .hasArg(true)
+                .type(String.class)
+                .longOpt("feCloudHttpUser")
+                .desc("the user of fe cloud http server")
+                .build()
+        feCloudHttpPasswordOpt = Option.builder("chp")
+                .argName("password")
+                .required(false)
+                .hasArg(true)
+                .type(String.class)
+                .longOpt("feCloudHttpPassword")
+                .desc("the password of fe cloud http server")
+                .build()
+        instanceIdOpt = Option.builder("ii")
+                .argName("instanceId")
+                .required(false)
+                .hasArg(true)
+                .type(String.class)
+                .longOpt("instanceId")
+                .desc("the instance id")
+                .build()
+        cloudUniqueIdOpt = Option.builder("cui")
+                .argName("cloudUniqueId")
+                .required(false)
+                .hasArg(true)
+                .type(String.class)
+                .longOpt("cloudUniqueId")
+                .desc("the cloudUniqueId")
+                .build()
+        metaServiceHttpAddressOpt = Option.builder("hm")
+                .argName("metaServiceHttpAddress")
+                .required(false)
+                .hasArg(true)
+                .type(String.class)
                 .longOpt("metaServiceHttpAddress")
                 .desc("the meta service http address, format is ip:port")
                 .build()
+        recycleServiceHttpAddressOpt = Option.builder("hr")
+                .argName("recycleServiceHttpAddress")
+                .required(false)
+                .hasArg(true)
+                .type(String.class)
+                .longOpt("recycleServiceHttpAddress")
+                .desc("the recycle service http address, format is ip:port")
+                .build()
         genOutOpt = Option.builder("genOut")
                 .required(false)
                 .hasArg(false)
@@ -381,6 +455,106 @@ class ConfigOptions {
                 .hasArg(false)
                 .desc("just print cases and does not run")
                 .build()
+        isSmokeTestOpt = Option.builder("isSmokeTest")
+                .required(false)
+                .hasArg(false)
+                .desc("is smoke test")
+                .build()
+        multiClusterBesOpt = Option.builder("multiClusterBes")
+                .required(false)
+                .hasArg(true)
+                .desc("multi cluster backend info")
+                .build()
+        metaServiceTokenOpt = Option.builder("metaServiceToken")
+                .required(false)
+                .hasArg(true)
+                .desc("meta service token")
+                .build()
+        multiClusterInstanceOpt = Option.builder("multiClusterInstance")
+                .required(false)
+                .hasArg(true)
+                .desc("multi cluster instance")
+                .build()
+        upgradeNewBeIpOpt = Option.builder("upgradeNewBeIp")
+                .required(false)
+                .hasArg(true)
+                .desc("new BE ip")
+                .build()
+        upgradeNewBeHbPortOpt = Option.builder("upgradeNewBeHbPort")
+                .required(false)
+                .hasArg(true)
+                .desc("new BE heartbeat port")
+                .build()
+        upgradeNewBeHttpPortOpt = Option.builder("upgradeNewBeHttpPort")
+                .required(false)
+                .hasArg(true)
+                .desc("new BE http port")
+                .build()
+        upgradeNewBeUniqueIdOpt = Option.builder("upgradeNewBeUniqueId")
+                .required(false)
+                .hasArg(true)
+                .desc("new BE cloud unique id")
+                .build()
+        stageIamEndpointOpt = Option.builder("stageIamEndpoint")
+                .required(false)
+                .hasArg(true)
+                .desc("stage iam endpoint")
+                .build()
+        stageIamRegionOpt = Option.builder("stageIamRegion")
+                .required(false)
+                .hasArg(true)
+                .desc("stage iam region")
+                .build()
+        stageIamBucketOpt = Option.builder("stageIamBucket")
+                .required(false)
+                .hasArg(true)
+                .desc("stage iam bucket")
+                .build()
+        stageIamPolicyOpt = Option.builder("stageIamPolicy")
+                .required(false)
+                .hasArg(true)
+                .desc("stage iam policy")
+                .build()
+        stageIamRoleOpt = Option.builder("stageIamRole")
+                .required(false)
+                .hasArg(true)
+                .desc("stage iam role")
+                .build()
+        stageIamArnOpt = Option.builder("stageIamArn")
+                .required(false)
+                .hasArg(true)
+                .desc("stage iam arn")
+                .build()
+        stageIamAkOpt = Option.builder("stageIamAk")
+                .required(false)
+                .hasArg(true)
+                .desc("stage iam ak")
+                .build()
+        stageIamSkOpt = Option.builder("stageIamSk")
+                .required(false)
+                .hasArg(true)
+                .desc("stage iam sk")
+                .build()
+        stageIamUserIdOpt = Option.builder("stageIamUserId")
+                .required(false)
+                .hasArg(true)
+                .desc("stage iam user id")
+                .build()
+        clusterDirOpt = Option.builder("clusterDir")
+                .required(false)
+                .hasArg(true)
+                .desc("cloud cluster deploy dir")
+                .build()
+        kafkaBrokerListOpt = Option.builder("kafkaBrokerList")
+                .required(false)
+                .hasArg(true)
+                .desc("kafka broker list")
+                .build()
+        cloudVersionOpt = Option.builder("cloudVersion")
+                .required(false)
+                .hasArg(true)
+                .desc("selectdb cloud version")
+                .build()
 
         Options options = new Options()
                 .addOption(helpOption)
@@ -407,7 +581,11 @@ class ConfigOptions {
                 .addOption(feHttpAddressOpt)
                 .addOption(feHttpUserOpt)
                 .addOption(feHttpPasswordOpt)
+                .addOption(feCloudHttpAddressOpt)
+                .addOption(feCloudHttpUserOpt)
+                .addOption(feCloudHttpPasswordOpt)
                 .addOption(metaServiceHttpAddressOpt)
+                .addOption(recycleServiceHttpAddressOpt)
                 .addOption(genOutOpt)
                 .addOption(confFileOpt)
                 .addOption(forceGenOutOpt)
@@ -419,6 +597,26 @@ class ConfigOptions {
                 .addOption(timesOpt)
                 .addOption(withOutLoadDataOpt)
                 .addOption(dryRunOpt)
+                .addOption(isSmokeTestOpt)
+                .addOption(multiClusterBesOpt)
+                .addOption(metaServiceTokenOpt)
+                .addOption(multiClusterInstanceOpt)
+                .addOption(upgradeNewBeIpOpt)
+                .addOption(upgradeNewBeHbPortOpt)
+                .addOption(upgradeNewBeHttpPortOpt)
+                .addOption(upgradeNewBeUniqueIdOpt)
+                .addOption(stageIamEndpointOpt)
+                .addOption(stageIamRegionOpt)
+                .addOption(stageIamBucketOpt)
+                .addOption(stageIamPolicyOpt)
+                .addOption(stageIamRoleOpt)
+                .addOption(stageIamArnOpt)
+                .addOption(stageIamAkOpt)
+                .addOption(stageIamSkOpt)
+                .addOption(stageIamUserIdOpt)
+                .addOption(clusterDirOpt)
+                .addOption(kafkaBrokerListOpt)
+                .addOption(cloudVersionOpt)
 
         CommandLine cmd = new DefaultParser().parse(options, args, true)
         if (cmd.hasOption(helpOption)) {
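
With the options registered, a cloud run can pass the new flags straight
through the parser; a rough sketch with placeholder values:

    import org.apache.doris.regression.ConfigOptions

    String[] args = [
            "-cha", "127.0.0.1:8876",    // feCloudHttpAddress
            "-hm", "127.0.0.1:5000",     // metaServiceHttpAddress
            "-hr", "127.0.0.1:5001",     // recycleServiceHttpAddress
            "-isSmokeTest",
    ]
    def cmd = ConfigOptions.initCommands(args)
    assert cmd.hasOption("isSmokeTest")
    assert cmd.getOptionValue("cha") == "127.0.0.1:8876"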
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
index c3bd25ad994..0b7a1792ac2 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
@@ -17,6 +17,7 @@
 
 package org.apache.doris.regression.suite
 
+import groovy.json.JsonOutput
 import com.google.common.collect.Maps
 import com.google.common.util.concurrent.Futures
 import com.google.common.util.concurrent.ListenableFuture
@@ -37,6 +38,7 @@ import org.apache.doris.regression.action.StreamLoadAction
 import org.apache.doris.regression.action.SuiteAction
 import org.apache.doris.regression.action.TestAction
 import org.apache.doris.regression.action.HttpCliAction
+import org.apache.doris.regression.util.DataUtils
 import org.apache.doris.regression.util.JdbcUtils
 import org.apache.doris.regression.util.Hdfs
 import org.apache.doris.regression.util.SuiteUtils
@@ -47,6 +49,11 @@ import org.slf4j.LoggerFactory
 import groovy.util.logging.Slf4j
 
 import java.sql.Connection
+import java.io.File
+import java.math.BigDecimal;
+import java.sql.PreparedStatement
+import java.sql.ResultSetMetaData
+import java.util.Map;
 import java.util.concurrent.Callable
 import java.util.concurrent.Future
 import java.util.concurrent.atomic.AtomicBoolean
@@ -1118,4 +1125,243 @@ class Suite implements GroovyInterceptable {
             notContains("${mv_name}(${mv_name})")
         }
     }
+
+    def token = context.config.metaServiceToken
+    def instance_id = context.config.multiClusterInstance
+    def get_be_metric = { ip, port, field ->
+        def metric_api = { request_body, check_func ->
+            httpTest {
+                endpoint ip + ":" + port
+                uri "/metrics?type=json"
+                body request_body
+                op "get"
+                check check_func
+            }
+        }
+
+        def jsonOutput = new JsonOutput()
+        def map = []
+        def js = jsonOutput.toJson(map)
+        log.info("get be metric req: ${js} ".toString())
+
+        def ret = 0;
+        metric_api.call(js) {
+            respCode, body ->
+                log.info("get be metric resp: ${respCode}".toString())
+                def json = parseJson(body)
+                for (item : json) {
+                    if (item.tags.metric == field) {
+                        ret = item.value
+                    }
+                }
+        }
+        ret
+    }
+
+    def add_cluster = { be_unique_id, ip, port, cluster_name, cluster_id ->
+        def jsonOutput = new JsonOutput()
+        def clusterInfo = [
+                     type: "COMPUTE",
+                     cluster_name : cluster_name,
+                     cluster_id : cluster_id,
+                     nodes: [
+                         [
+                             cloud_unique_id: be_unique_id,
+                             ip: ip,
+                             heartbeat_port: port
+                         ],
+                     ]
+                 ]
+        def map = [instance_id: "${instance_id}", cluster: clusterInfo]
+        def js = jsonOutput.toJson(map)
+        log.info("add cluster req: ${js} ".toString())
+
+        def add_cluster_api = { request_body, check_func ->
+            httpTest {
+                endpoint context.config.metaServiceHttpAddress
+                uri "/MetaService/http/add_cluster?token=${token}"
+                body request_body
+                check check_func
+            }
+        }
+
+        add_cluster_api.call(js) {
+            respCode, body ->
+                log.info("add cluster resp: ${body} ${respCode}".toString())
+                def json = parseJson(body)
+                assertTrue(json.code.equalsIgnoreCase("OK") || 
json.code.equalsIgnoreCase("ALREADY_EXISTED"))
+        }
+    }
+
+    def get_cluster = { be_unique_id ->
+        def jsonOutput = new JsonOutput()
+        def map = [instance_id: "${instance_id}", cloud_unique_id: "${be_unique_id}" ]
+        def js = jsonOutput.toJson(map)
+        log.info("get cluster req: ${js} ".toString())
+
+        def get_cluster_api = { request_body, check_func ->
+            httpTest {
+                endpoint context.config.metaServiceHttpAddress
+                uri "/MetaService/http/get_cluster?token=${token}"
+                body request_body
+                check check_func
+            }
+        }
+
+        def json
+        get_cluster_api.call(js) {
+            respCode, body ->
+                log.info("get cluster resp: ${body} ${respCode}".toString())
+                json = parseJson(body)
+                assertTrue(json.code.equalsIgnoreCase("OK") || json.code.equalsIgnoreCase("ALREADY_EXISTED"))
+        }
+        json.result.cluster
+    }
+
+    def drop_cluster = { cluster_name, cluster_id ->
+        def jsonOutput = new JsonOutput()
+        def reqBody = [
+                     type: "COMPUTE",
+                     cluster_name : cluster_name,
+                     cluster_id : cluster_id,
+                     nodes: [
+                     ]
+                 ]
+        def map = [instance_id: "${instance_id}", cluster: reqBody]
+        def js = jsonOutput.toJson(map)
+        log.info("drop cluster req: ${js} ".toString())
+
+        def drop_cluster_api = { request_body, check_func ->
+            httpTest {
+                endpoint context.config.metaServiceHttpAddress
+                uri "/MetaService/http/drop_cluster?token=${token}"
+                body request_body
+                check check_func
+            }
+        }
+
+        drop_cluster_api.call(js) {
+            respCode, body ->
+                log.info("dorp cluster resp: ${body} ${respCode}".toString())
+                def json = parseJson(body)
+                assertTrue(json.code.equalsIgnoreCase("OK") || json.code.equalsIgnoreCase("ALREADY_EXISTED"))
+        }
+    }
+
+    def add_node = { be_unique_id, ip, port, cluster_name, cluster_id ->
+        def jsonOutput = new JsonOutput()
+        def clusterInfo = [
+                     type: "COMPUTE",
+                     cluster_name : cluster_name,
+                     cluster_id : cluster_id,
+                     nodes: [
+                         [
+                             cloud_unique_id: be_unique_id,
+                             ip: ip,
+                             heartbeat_port: port
+                         ],
+                     ]
+                 ]
+        def map = [instance_id: "${instance_id}", cluster: clusterInfo]
+        def js = jsonOutput.toJson(map)
+        log.info("add node req: ${js} ".toString())
+
+        def add_node_api = { request_body, check_func ->
+            httpTest {
+                endpoint context.config.metaServiceHttpAddress
+                uri "/MetaService/http/add_node?token=${token}"
+                body request_body
+                check check_func
+            }
+        }
+
+        add_node_api.call(js) {
+            respCode, body ->
+                log.info("add node resp: ${body} ${respCode}".toString())
+                def json = parseJson(body)
+                assertTrue(json.code.equalsIgnoreCase("OK") || json.code.equalsIgnoreCase("ALREADY_EXISTED"))
+        }
+    }
+
+    def d_node = { be_unique_id, ip, port, cluster_name, cluster_id ->
+        def jsonOutput = new JsonOutput()
+        def clusterInfo = [
+                     type: "COMPUTE",
+                     cluster_name : cluster_name,
+                     cluster_id : cluster_id,
+                     nodes: [
+                         [
+                             cloud_unique_id: be_unique_id,
+                             ip: ip,
+                             heartbeat_port: port
+                         ],
+                     ]
+                 ]
+        def map = [instance_id: "${instance_id}", cluster: clusterInfo]
+        def js = jsonOutput.toJson(map)
+        log.info("decommission node req: ${js} ".toString())
+
+        def d_cluster_api = { request_body, check_func ->
+            httpTest {
+                endpoint context.config.metaServiceHttpAddress
+                uri "/MetaService/http/decommission_node?token=${token}"
+                body request_body
+                check check_func
+            }
+        }
+
+        d_cluster_api.call(js) {
+            respCode, body ->
+                log.info("decommission node resp: ${body} 
${respCode}".toString())
+                def json = parseJson(body)
+                assertTrue(json.code.equalsIgnoreCase("OK") || json.code.equalsIgnoreCase("ALREADY_EXISTED"))
+        }
+    }
+
+    def checkProfile = { addrSet, fragNum ->
+        List<List<Object>> profileRes = sql " show query profile '/' "
+
+        for (int i = 0; i < fragNum; ++i) {
+            String exec_sql = "show query profile '/" + profileRes[0][0] + "/" 
+ i.toString() + "'"
+            List<List<Object>> result = sql exec_sql
+            for (row : result) {
+                println row
+            }
+
+            println result[0][1]
+            println addrSet
+            assertTrue(addrSet.contains(result[0][1]));
+        }
+    }
+
+    def rename_cloud_cluster = { cluster_name, cluster_id ->
+        def jsonOutput = new JsonOutput()
+        def reqBody = [
+                          cluster_name : cluster_name,
+                          cluster_id : cluster_id
+                      ]
+        def map = [instance_id: "${instance_id}", cluster: reqBody]
+        def js = jsonOutput.toJson(map)
+        log.info("rename cluster req: ${js} ".toString())
+
+        def rename_cluster_api = { request_body, check_func ->
+            httpTest {
+                endpoint context.config.metaServiceHttpAddress
+                uri "/MetaService/http/rename_cluster?token=${token}"
+                body request_body
+                check check_func
+            }
+        }
+
+        rename_cluster_api.call(js) {
+            respCode, body ->
+                log.info("rename cluster resp: ${body} ${respCode}".toString())
+                def json = parseJson(body)
+                assertTrue(json.code.equalsIgnoreCase("OK"))
+        }
+    }
+
+    public void resetConnection() {
+        context.resetConnection()
+    }
 }
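
For context, a cloud case could drive the helpers above roughly as follows
(suite name, group, unique id, address, ports, and cluster names are
placeholders):

    suite("smoke_add_cluster", "smoke") {
        def beUniqueId = "cloud_unique_id_xxx"

        // Register a one-BE compute cluster, then fetch it back via MetaService.
        add_cluster(beUniqueId, "127.0.0.1", "9050", "cluster_name0", "cluster_id0")
        def cluster = get_cluster(beUniqueId)
        logger.info("got cluster: ${cluster}")

        // Rename it, then drop it again.
        rename_cloud_cluster("cluster_name1", "cluster_id0")
        drop_cluster("cluster_name1", "cluster_id0")
    }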


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
