This is an automated email from the ASF dual-hosted git repository.

gavinchou pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new e108101e046 [fix](storage vault) Fix some bugs and add more regression tests (#46402) (#46520)
e108101e046 is described below

commit e108101e046be5143b4b5fa7ce0ff530bce49796
Author: Lei Zhang <zhang...@selectdb.com>
AuthorDate: Tue Jan 7 22:58:59 2025 +0800

    [fix](storage vault) Fix some bugs and add more regression tests (#46402) (#46520)
    
    pick #46402
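
    The main FE-side change consolidates StorageVaultMgr's cache updates:
    the vault name-to-id map and the default-vault pair are now swapped
    under a single write lock, so a reader can no longer observe one
    without the other. A minimal sketch of the pattern, mirroring the
    patch below (field and type names as in StorageVaultMgr.java):

        // Both fields are guarded by rwLock, a monitored reentrant
        // read-write lock; the default pair is <VaultName, VaultId>.
        private Map<String, String> vaultNameToVaultId = new HashMap<>();
        private Pair<String, String> defaultVaultInfo;

        public void refreshVaultMap(Map<String, String> vaultMap,
                                    Pair<String, String> defaultVault) {
            try {
                rwLock.writeLock().lock();
                // One critical section replaces both pieces of cached
                // state; unlocking in finally keeps a failed update
                // from leaving the lock held.
                vaultNameToVaultId = vaultMap;
                defaultVaultInfo = defaultVault;
            } finally {
                rwLock.writeLock().unlock();
            }
        }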
---
 .../org/apache/doris/catalog/StorageVaultMgr.java  |  88 +++---
 .../cloud/catalog/CloudInstanceStatusChecker.java  |   5 +-
 .../apache/doris/datasource/InternalCatalog.java   |   2 +-
 .../plans/commands/AlterStorageVaultCommand.java   |   2 +-
 .../doris/cloud/catalog/HdfsStorageVaultTest.java  |   6 +-
 .../org/apache/doris/regression/suite/Suite.groovy |   4 +
 .../vault_p0/alter/test_alter_hdfs_vault.groovy    | 105 +++++--
 .../vault_p0/alter/test_alter_s3_vault.groovy      | 322 ++++++++++++---------
 .../alter/test_alter_use_path_style.groovy         | 124 ++++----
 .../vault_p0/alter/test_alter_vault_name.groovy    |   8 +-
 .../vault_p0/alter/test_alter_vault_type.groovy    |  12 +-
 regression-test/suites/vault_p0/create/load.groovy | 252 ----------------
 .../vault_p0/create/test_create_vault.groovy       | 188 ++++++++++--
 .../vault_p0/default/test_default_vault.groovy     | 257 ++++++++++------
 .../test_s3_vault_path_start_with_slash.groovy     |  79 +++--
 .../vault_p0/forbid/test_forbid_vault.groovy       |   7 +-
 .../vault_p0/privilege/test_vault_privilege.groovy | 190 ------------
 .../privilege/test_vault_privilege_restart.groovy  | 278 +++++++++---------
 .../test_vault_privilege_with_role.groovy          | 107 +++----
 .../test_vault_privilege_with_user.groovy          | 253 ++++++++++++++++
 20 files changed, 1211 insertions(+), 1078 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
index e48adad47ff..5ad0417d7dd 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
@@ -53,9 +53,7 @@ public class StorageVaultMgr {
     private final SystemInfoService systemInfoService;
     // <VaultName, VaultId>
     private Pair<String, String> defaultVaultInfo;
-
     private Map<String, String> vaultNameToVaultId = new HashMap<>();
-
     private MonitoredReentrantReadWriteLock rwLock = new MonitoredReentrantReadWriteLock();
 
     public StorageVaultMgr(SystemInfoService systemInfoService) {
@@ -78,10 +76,14 @@ public class StorageVaultMgr {
         ALTER_BE_SYNC_THREAD_POOL.execute(() -> alterSyncVaultTask());
     }
 
-    public void refreshVaultMap(Map<String, String> vaultMap) {
-        rwLock.writeLock().lock();
-        vaultNameToVaultId = vaultMap;
-        rwLock.writeLock().unlock();
+    public void refreshVaultMap(Map<String, String> vaultMap, Pair<String, String> defaultVault) {
+        try {
+            rwLock.writeLock().lock();
+            vaultNameToVaultId = vaultMap;
+            defaultVaultInfo = defaultVault;
+        } finally {
+            rwLock.writeLock().unlock();
+        }
     }
 
     public String getVaultIdByName(String vaultName) {
@@ -107,7 +109,19 @@ public class StorageVaultMgr {
         }
     }
 
-    private void updateVaultNameToIdCache(String oldVaultName, String newVaultName, String vaultId) {
+    private void addStorageVaultToCache(String vaultName, String vaultId, boolean defaultVault) {
+        try {
+            rwLock.writeLock().lock();
+            vaultNameToVaultId.put(vaultName, vaultId);
+            if (defaultVault) {
+                defaultVaultInfo = Pair.of(vaultName, vaultId);
+            }
+        } finally {
+            rwLock.writeLock().unlock();
+        }
+    }
+
+    private void updateStorageVaultCache(String oldVaultName, String newVaultName, String vaultId) {
         try {
             rwLock.writeLock().lock();
             String cachedVaultId = vaultNameToVaultId.get(oldVaultName);
@@ -122,6 +136,15 @@ public class StorageVaultMgr {
         }
     }
 
+    private void updateDefaultStorageVaultCache(Pair<String, String> newDefaultVaultInfo) {
+        try {
+            rwLock.writeLock().lock();
+            defaultVaultInfo = newDefaultVaultInfo;
+        } finally {
+            rwLock.writeLock().unlock();
+        }
+    }
+
     private Cloud.StorageVaultPB.Builder buildAlterS3VaultRequest(Map<String, String> properties, String name)
             throws Exception {
         Cloud.ObjectStoreInfoPB.Builder objBuilder = S3Properties.getObjStoreInfoPB(properties);
@@ -185,7 +208,9 @@ public class StorageVaultMgr {
                 request.setOp(Operation.ALTER_S3_VAULT);
             } else if (type == StorageVaultType.HDFS) {
                 properties.keySet().stream()
-                        .filter(HdfsStorageVault.FORBID_CHECK_PROPERTIES::contains)
+                        .filter(key -> HdfsStorageVault.FORBID_CHECK_PROPERTIES.contains(key)
+                                || key.toLowerCase().contains(S3Properties.S3_PREFIX)
+                                || key.toLowerCase().contains(S3Properties.PROVIDER))
                         .findAny()
                         .ifPresent(key -> {
                             throw new IllegalArgumentException("Alter property " + key + " is not allowed.");
@@ -202,7 +227,7 @@ public class StorageVaultMgr {
             }
 
             if (request.hasVault() && request.getVault().hasAlterName()) {
-                updateVaultNameToIdCache(name, request.getVault().getAlterName(), response.getStorageVaultId());
+                updateStorageVaultCache(name, request.getVault().getAlterName(), response.getStorageVaultId());
                 LOG.info("Succeed to alter storage vault, old name:{} new name: {} id:{}", name,
                         request.getVault().getAlterName(), response.getStorageVaultId());
             }
@@ -217,29 +242,33 @@ public class StorageVaultMgr {
 
     @VisibleForTesting
     public void setDefaultStorageVault(SetDefaultStorageVaultStmt stmt) throws DdlException {
+        setDefaultStorageVault(stmt.getStorageVaultName());
+    }
+
+    public void setDefaultStorageVault(String vaultName) throws DdlException {
         Cloud.AlterObjStoreInfoRequest.Builder builder = Cloud.AlterObjStoreInfoRequest.newBuilder();
         Cloud.StorageVaultPB.Builder vaultBuilder = Cloud.StorageVaultPB.newBuilder();
-        vaultBuilder.setName(stmt.getStorageVaultName());
+        vaultBuilder.setName(vaultName);
         builder.setVault(vaultBuilder.build());
         builder.setOp(Operation.SET_DEFAULT_VAULT);
         String vaultId;
-        LOG.info("try to set vault {} as default vault", stmt.getStorageVaultName());
+        LOG.info("try to set vault {} as default vault", vaultName);
         try {
             Cloud.AlterObjStoreInfoResponse resp =
                     MetaServiceProxy.getInstance().alterStorageVault(builder.build());
             if (resp.getStatus().getCode() != Cloud.MetaServiceCode.OK) {
                 LOG.warn("failed to set default storage vault response: {}, vault name {}",
-                        resp, stmt.getStorageVaultName());
+                        resp, vaultName);
                 throw new DdlException(resp.getStatus().getMsg());
             }
             vaultId = resp.getStorageVaultId();
         } catch (RpcException e) {
             LOG.warn("failed to set default storage vault due to RpcException: {}, vault name {}",
-                    e, stmt.getStorageVaultName());
+                    e, vaultName);
             throw new DdlException(e.getMessage());
         }
-        LOG.info("succeed to set {} as default vault, vault id {}", stmt.getStorageVaultName(), vaultId);
-        setDefaultStorageVault(Pair.of(stmt.getStorageVaultName(), vaultId));
+        LOG.info("succeed to set {} as default vault, vault id {}", vaultName, vaultId);
+        updateDefaultStorageVaultCache(Pair.of(vaultName, vaultId));
     }
 
     public void unsetDefaultStorageVault() throws DdlException {
@@ -256,29 +285,16 @@ public class StorageVaultMgr {
             LOG.warn("failed to unset default storage vault");
             throw new DdlException(e.getMessage());
         }
-        defaultVaultInfo = null;
+        updateDefaultStorageVaultCache(null);
     }
 
-    public void setDefaultStorageVault(Pair<String, String> vaultInfo) {
-        try {
-            rwLock.writeLock().lock();
-            defaultVaultInfo = vaultInfo;
-        } finally {
-            rwLock.writeLock().unlock();
-        }
-    }
-
-    public Pair getDefaultStorageVaultInfo() {
-        Pair vault = null;
+    public Pair<String, String> getDefaultStorageVault() {
         try {
             rwLock.readLock().lock();
-            if (defaultVaultInfo != null) {
-                vault = defaultVaultInfo;
-            }
+            return defaultVaultInfo;
         } finally {
             rwLock.readLock().unlock();
         }
-        return vault;
     }
 
     @VisibleForTesting
@@ -302,12 +318,11 @@ public class StorageVaultMgr {
                         vault.getName(), response);
                 throw new DdlException(response.getStatus().getMsg());
             }
-            rwLock.writeLock().lock();
-            vaultNameToVaultId.put(vault.getName(), response.getStorageVaultId());
-            rwLock.writeLock().unlock();
+
             LOG.info("Succeed to create hdfs vault {}, id {}, origin default vault replaced {}",
                     vault.getName(), response.getStorageVaultId(),
                     response.getDefaultStorageVaultReplaced());
+            addStorageVaultToCache(vault.getName(), response.getStorageVaultId(), vault.setAsDefault());
         } catch (RpcException e) {
             LOG.warn("failed to alter storage vault due to RpcException: {}", e);
             throw new DdlException(e.getMessage());
@@ -352,11 +367,10 @@ public class StorageVaultMgr {
                 LOG.warn("failed to alter storage vault response: {} ", response);
                 throw new DdlException(response.getStatus().getMsg());
             }
-            rwLock.writeLock().lock();
-            vaultNameToVaultId.put(vault.getName(), response.getStorageVaultId());
-            rwLock.writeLock().unlock();
+
             LOG.info("Succeed to create s3 vault {}, id {}, origin default vault replaced {}",
                     vault.getName(), response.getStorageVaultId(), response.getDefaultStorageVaultReplaced());
+            addStorageVaultToCache(vault.getName(), response.getStorageVaultId(), vault.setAsDefault());
         } catch (RpcException e) {
             LOG.warn("failed to alter storage vault due to RpcException: {}", e);
             throw new DdlException(e.getMessage());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudInstanceStatusChecker.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudInstanceStatusChecker.java
index ec63e5b4d49..45369f3a8bb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudInstanceStatusChecker.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudInstanceStatusChecker.java
@@ -59,10 +59,9 @@ public class CloudInstanceStatusChecker extends MasterDaemon {
                     String id = response.getInstance().getResourceIds(i);
                     vaultMap.put(name, id);
                 }
-                Env.getCurrentEnv().getStorageVaultMgr().refreshVaultMap(vaultMap);
-                Env.getCurrentEnv().getStorageVaultMgr().setDefaultStorageVault(
+                Env.getCurrentEnv().getStorageVaultMgr().refreshVaultMap(vaultMap,
                         Pair.of(response.getInstance().getDefaultStorageVaultName(),
-                                response.getInstance().getDefaultStorageVaultId()));
+                        response.getInstance().getDefaultStorageVaultId()));
             }
         } catch (Exception e) {
             LOG.warn("get instance from ms exception", e);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
index 58ee2615a36..dbae2993416 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
@@ -2733,7 +2733,7 @@ public class InternalCatalog implements CatalogIf<Database> {
             String storageVaultId = null;
             // If user does not specify one storage vault then FE would use the default vault
             if (Strings.isNullOrEmpty(storageVaultName)) {
-                Pair<String, String> info = env.getStorageVaultMgr().getDefaultStorageVaultInfo();
+                Pair<String, String> info = env.getStorageVaultMgr().getDefaultStorageVault();
                 if (info != null) {
                     storageVaultName = info.first;
                     storageVaultId = info.second;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterStorageVaultCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterStorageVaultCommand.java
index 0766d236439..aafba1fa39b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterStorageVaultCommand.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterStorageVaultCommand.java
@@ -56,7 +56,7 @@ public class AlterStorageVaultCommand extends Command implements ForwardWithSync
         if (properties.containsKey(StorageVault.VAULT_NAME)) {
             String newName = properties.get(StorageVault.VAULT_NAME);
             FeNameFormat.checkStorageVaultName(newName);
-            Preconditions.checkArgument(!name.equalsIgnoreCase(newName), "vault name no change");
+            Preconditions.checkArgument(!name.equalsIgnoreCase(newName), "Vault name has not been changed");
         }
         Env.getCurrentEnv().getStorageVaultMgr().alterStorageVault(vaultType, properties, name);
     }
diff --git a/fe/fe-core/src/test/java/org/apache/doris/cloud/catalog/HdfsStorageVaultTest.java b/fe/fe-core/src/test/java/org/apache/doris/cloud/catalog/HdfsStorageVaultTest.java
index 0f34b322b05..09c7c3ba17d 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/cloud/catalog/HdfsStorageVaultTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/cloud/catalog/HdfsStorageVaultTest.java
@@ -172,7 +172,7 @@ public class HdfsStorageVaultTest {
             private HashSet<String> existed = new HashSet<>();
 
             @Mock
-            public Pair getDefaultStorageVaultInfo() {
+            public Pair getDefaultStorageVault() {
                 return defaultVaultInfo;
             }
 
@@ -210,8 +210,8 @@ public class HdfsStorageVaultTest {
                 "type", "hdfs",
                 "path", "abs/"));
         mgr.createHdfsVault(vault);
-        Assertions.assertTrue(mgr.getDefaultStorageVaultInfo() == null);
+        Assertions.assertTrue(mgr.getDefaultStorageVault() == null);
         mgr.setDefaultStorageVault(new SetDefaultStorageVaultStmt(vault.getName()));
-        Assertions.assertTrue(mgr.getDefaultStorageVaultInfo().first.equals(vault.getName()));
+        Assertions.assertTrue(mgr.getDefaultStorageVault().first.equals(vault.getName()));
     }
 }
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
index ae9348191cd..e37a38b49ce 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
@@ -820,6 +820,10 @@ class Suite implements GroovyInterceptable {
         return "hdfs://" + host + ":" + port;
     }
 
+    String getHmsUser() {
+        return context.config.otherConfigs.get("extHiveHmsUser")
+    }
+
     String getHdfsUser() {
         String hdfsUser = context.config.otherConfigs.get("hdfsUser")
         return hdfsUser
diff --git a/regression-test/suites/vault_p0/alter/test_alter_hdfs_vault.groovy b/regression-test/suites/vault_p0/alter/test_alter_hdfs_vault.groovy
index 3893d43c02a..00265ad5244 100644
--- a/regression-test/suites/vault_p0/alter/test_alter_hdfs_vault.groovy
+++ b/regression-test/suites/vault_p0/alter/test_alter_hdfs_vault.groovy
@@ -27,72 +27,121 @@ suite("test_alter_hdfs_vault", "nonConcurrent") {
         return
     }
 
+
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def hdfsVaultName = "hdfs_" + randomStr
+
     sql """
-        CREATE STORAGE VAULT IF NOT EXISTS ${suiteName}
+        CREATE STORAGE VAULT IF NOT EXISTS ${hdfsVaultName}
         PROPERTIES (
             "type"="HDFS",
             "fs.defaultFS"="${getHmsHdfsFs()}",
-            "path_prefix" = "${suiteName}",
-            "hadoop.username" = "hadoop"
+            "path_prefix" = "${hdfsVaultName}",
+            "hadoop.username" = "${getHmsUser()}"
         );
     """
 
     expectExceptionLike({
         sql """
-            ALTER STORAGE VAULT ${suiteName}
+            ALTER STORAGE VAULT ${hdfsVaultName}
             PROPERTIES (
                 "type"="hdfs",
-                "path_prefix" = "${suiteName}"
+                "path_prefix" = "error_path"
             );
         """
     }, "Alter property")
 
     expectExceptionLike({
         sql """
-            ALTER STORAGE VAULT ${suiteName}
+            ALTER STORAGE VAULT ${hdfsVaultName}
             PROPERTIES (
                 "type"="hdfs",
-                "fs.defaultFS" = "not_exist_vault"
+                "fs.defaultFS" = "error_fs"
             );
         """
     }, "Alter property")
 
-    def vaultName = suiteName
-    String properties;
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type"="hdfs",
+                "s3.endpoint" = "error_endpoint"
+            );
+        """
+    }, "Alter property")
 
-    def vaultInfos = try_sql """show storage vault"""
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type"="hdfs",
+                "s3.region" = "error_region"
+            );
+        """
+    }, "Alter property")
 
-    for (int i = 0; i < vaultInfos.size(); i++) {
-        def name = vaultInfos[i][0]
-        if (name.equals(vaultName)) {
-            properties = vaultInfos[i][2]
-        }
-    }
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type"="hdfs",
+                "s3.access_key" = "error_access_key"
+            );
+        """
+    }, "Alter property")
+
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type"="hdfs",
+                "provider" = "error_provider"
+            );
+        """
+    }, "Alter property")
+
+    sql """
+        CREATE TABLE ${hdfsVaultName} (
+            C_CUSTKEY     INTEGER NOT NULL,
+            C_NAME        INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = ${hdfsVaultName}
+        )
+    """
+    sql """ insert into ${hdfsVaultName} values(1, 1); """
+    sql """ sync;"""
+    def result = sql """ select * from ${hdfsVaultName}; """
+    assertEquals(result.size(), 1);
 
-    def newVaultName = suiteName + "_new";
+    def newHdfsVaultName = hdfsVaultName + "_new";
     sql """
-        ALTER STORAGE VAULT ${vaultName}
+        ALTER STORAGE VAULT ${hdfsVaultName}
         PROPERTIES (
             "type"="hdfs",
-            "VAULT_NAME" = "${newVaultName}",
+            "VAULT_NAME" = "${newHdfsVaultName}",
             "hadoop.username" = "hdfs"
         );
     """
 
-    vaultInfos = sql """ SHOW STORAGE VAULT; """
-    boolean exist = false
-
+    def vaultInfos = sql """ SHOW STORAGE VAULT; """
+    boolean found = false
     for (int i = 0; i < vaultInfos.size(); i++) {
         def name = vaultInfos[i][0]
-        logger.info("name is ${name}, info ${vaultInfos[i]}")
-        if (name.equals(vaultName)) {
+        logger.info("info ${vaultInfos[i]}")
+        if (name.equals(hdfsVaultName)) {
             assertTrue(false);
         }
-        if (name.equals(newVaultName)) {
+        if (name.equals(newHdfsVaultName)) {
             assertTrue(vaultInfos[i][2].contains("""user: "hdfs" """))
-            exist = true
+            found = true
         }
     }
-    assertTrue(exist)
-    expectExceptionLike({sql """insert into ${suiteName} values("2", "2");"""}, "")
+    assertTrue(found)
+
+    expectExceptionLike({sql """insert into ${hdfsVaultName} values("2", "2");"""}, "open file failed")
 }
diff --git a/regression-test/suites/vault_p0/alter/test_alter_s3_vault.groovy b/regression-test/suites/vault_p0/alter/test_alter_s3_vault.groovy
index ffe67c77bc1..b9d6d1975cf 100644
--- a/regression-test/suites/vault_p0/alter/test_alter_s3_vault.groovy
+++ b/regression-test/suites/vault_p0/alter/test_alter_s3_vault.groovy
@@ -27,104 +27,170 @@ suite("test_alter_s3_vault", "nonConcurrent") {
         return
     }
 
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS ${suiteName}
-        PROPERTIES (
-            "type"="S3",
-            "s3.endpoint"="${getS3Endpoint()}",
-            "s3.region" = "${getS3Region()}",
-            "s3.access_key" = "${getS3AK()}",
-            "s3.secret_key" = "${getS3SK()}",
-            "s3.root.path" = "${suiteName}",
-            "s3.bucket" = "${getS3BucketName()}",
-            "s3.external_endpoint" = "",
-            "provider" = "${getS3Provider()}"
-        );
-    """
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def s3VaultName = "s3_" + randomStr
 
-    def dupVaultName = "${suiteName}" + "_dup"
     sql """
-        CREATE STORAGE VAULT IF NOT EXISTS ${dupVaultName}
+        CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
         PROPERTIES (
             "type"="S3",
             "s3.endpoint"="${getS3Endpoint()}",
             "s3.region" = "${getS3Region()}",
             "s3.access_key" = "${getS3AK()}",
             "s3.secret_key" = "${getS3SK()}",
-            "s3.root.path" = "${suiteName}",
+            "s3.root.path" = "${s3VaultName}",
             "s3.bucket" = "${getS3BucketName()}",
             "s3.external_endpoint" = "",
-            "provider" = "${getS3Provider()}"
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false"
         );
     """
 
-    sql """
-        DROP TABLE IF EXISTS alter_s3_vault_tbl
+    // case1
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "fs.defaultFS"="error_fs"
+            );
         """
+    }, "Alter property")
 
-    sql """
-        CREATE TABLE IF NOT EXISTS alter_s3_vault_tbl
-        (
-        `k1` INT NULL,
-        `v1` INT NULL
-        )
-        UNIQUE KEY (k1)
-        DISTRIBUTED BY HASH(`k1`) BUCKETS 1
-        PROPERTIES (
-        "replication_num" = "1",
-        "disable_auto_compaction" = "true",
-        "storage_vault_name" = "${suiteName}"
-        );
-    """
+    // case2
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "path_prefix"="error_path"
+            );
+        """
+    }, "Alter property")
 
-    sql """insert into alter_s3_vault_tbl values(2, 2); """
+    // case3
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "hadoop.username"="error_user"
+            );
+        """
+    }, "Alter property")
 
+    // case4
     expectExceptionLike({
         sql """
-            ALTER STORAGE VAULT ${suiteName}
+            ALTER STORAGE VAULT ${s3VaultName}
             PROPERTIES (
-            "type"="S3",
-            "s3.bucket" = "error_bucket"
+                "type"="S3",
+                "s3.bucket" = "error_bucket"
             );
         """
     }, "Alter property")
 
+    // case5
     expectExceptionLike({
         sql """
-            ALTER STORAGE VAULT ${suiteName}
+            ALTER STORAGE VAULT ${s3VaultName}
             PROPERTIES (
-            "type"="S3",
-            "provider" = "${getS3Provider()}"
+                "type"="S3",
+                "s3.region" = "error_region"
             );
         """
     }, "Alter property")
 
+    // case6
     expectExceptionLike({
         sql """
-            ALTER STORAGE VAULT ${suiteName}
+            ALTER STORAGE VAULT ${s3VaultName}
             PROPERTIES (
-            "type"="S3",
-            "s3.access_key" = "new_ak"
+                "type"="S3",
+                "s3.endpoint" = "error_endpoint"
+            );
+        """
+    }, "Alter property")
+
+    // case7
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "provider" = "error_provider"
+            );
+        """
+    }, "Alter property")
+
+    // case8
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "provider" = "error_provider"
+            );
+        """
+    }, "Alter property")
+
+    // case9
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "s3.root.path" = "error_root_path"
+            );
+        """
+    }, "Alter property")
+
+    // case10
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "s3.external_endpoint" = "error_external_endpoint"
+            );
+        """
+    }, "Alter property")
+
+    // case11
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "s3.access_key" = "new_ak"
             );
         """
     }, "Accesskey and secretkey must be alter together")
 
-    def vaultName = suiteName
-    def String properties;
+    // case12
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "s3.access_key" = "new_ak"
+            );
+        """
+    }, "Accesskey and secretkey must be alter together")
 
-    def vaultInfos = try_sql """show storage vaults"""
+    // case13
+    def String properties;
+    def vaultInfos = try_sql """SHOW STORAGE VAULTS"""
 
     for (int i = 0; i < vaultInfos.size(); i++) {
-        def name = vaultInfos[i][0]
-        logger.info("name is ${name}, info ${vaultInfos[i]}")
-        if (name.equals(vaultName)) {
+        logger.info("vault info: ${vaultInfos[i]}")
+        if (vaultInfos[i][0].equals(s3VaultName)) {
             properties = vaultInfos[i][2]
         }
     }
 
-    // alter ak sk
     sql """
-        ALTER STORAGE VAULT ${vaultName}
+        ALTER STORAGE VAULT ${s3VaultName}
         PROPERTIES (
             "type"="S3",
             "s3.access_key" = "${getS3AK()}",
@@ -132,122 +198,92 @@ suite("test_alter_s3_vault", "nonConcurrent") {
         );
     """
 
-    vaultInfos = sql """SHOW STORAGE VAULT;"""
+    vaultInfos = sql """SHOW STORAGE VAULTS;"""
 
     for (int i = 0; i < vaultInfos.size(); i++) {
-        def name = vaultInfos[i][0]
-        logger.info("name is ${name}, info ${vaultInfos[i]}")
-        if (name.equals(vaultName)) {
+        logger.info("vault info: ${vaultInfos[i]}")
+        if (vaultInfos[i][0].equals(s3VaultName)) {
             def newProperties = vaultInfos[i][2]
-            assert properties == newProperties, "Properties are not the same"
+            assertTrue(properties.equals(newProperties), "Properties are not the same")
         }
     }
 
-    sql """insert into alter_s3_vault_tbl values("2", "2"); """
-
-
-    // rename
-    newVaultName = vaultName + "_new";
 
+    // case14 rename + aksk
     sql """
-        ALTER STORAGE VAULT ${vaultName}
+        CREATE TABLE ${s3VaultName} (
+            C_CUSTKEY     INTEGER NOT NULL,
+            C_NAME        INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
         PROPERTIES (
-            "type"="S3",
-            "VAULT_NAME" = "${newVaultName}"
-        );
+            "replication_num" = "1",
+            "storage_vault_name" = ${s3VaultName}
+        )
     """
+    sql """ insert into ${s3VaultName} values(1, 1); """
+    sql """ sync;"""
+    def result = sql """ select * from ${s3VaultName}; """
+    assertEquals(result.size(), 1);
 
-    vaultInfos = sql """SHOW STORAGE VAULT;"""
-    for (int i = 0; i < vaultInfos.size(); i++) {
-        def name = vaultInfos[i][0]
-        logger.info("name is ${name}, info ${vaultInfos[i]}")
-        if (name.equals(newVaultName)) {
-            def newProperties = vaultInfos[i][2]
-            assert properties == newProperties, "Properties are not the same"
-        }
-        if (name.equals(vaultName)) {
-            assertTrue(false);
-        }
-    }
-
-    sql """insert into alter_s3_vault_tbl values("2", "2"); """
-
-    // rename + aksk
-    vaultName = newVaultName
-    newVaultName = vaultName + "_new";
-
+    def newS3VaultName = s3VaultName + "_new";
     sql """
-        ALTER STORAGE VAULT ${vaultName}
+        ALTER STORAGE VAULT ${s3VaultName}
         PROPERTIES (
             "type"="S3",
-            "VAULT_NAME" = "${newVaultName}",
+            "VAULT_NAME" = "${newS3VaultName}",
             "s3.access_key" = "${getS3AK()}",
             "s3.secret_key" = "${getS3SK()}"
         );
-    """
-
-    vaultInfos = sql """SHOW STORAGE VAULT;"""
-    for (int i = 0; i < vaultInfos.size(); i++) {
-        def name = vaultInfos[i][0]
-        logger.info("name is ${name}, info ${vaultInfos[i]}")
-        if (name.equals(newVaultName)) {
-            def newProperties = vaultInfos[i][2]
-            assert properties == newProperties, "Properties are not the same"
-        }
-        if (name.equals(vaultName)) {
-            assertTrue(false);
-        }
-    }
-    sql """insert into alter_s3_vault_tbl values("2", "2"); """
+        """
 
+    sql """ insert into ${s3VaultName} values(2, 2); """
+    sql """ sync;"""
+    result = sql """ select * from ${s3VaultName}; """
+    assertEquals(result.size(), 2);
 
-    vaultName = newVaultName;
+    expectExceptionLike({
+        sql """
+            CREATE TABLE ${newS3VaultName} (
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = ${s3VaultName}
+            )
+        """
+    }, "does not exis")
 
-    newVaultName = vaultName + "_new";
+    sql """
+        CREATE TABLE ${newS3VaultName} (
+            C_CUSTKEY     INTEGER NOT NULL,
+            C_NAME        INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = ${newS3VaultName}
+        )
+    """
 
-    vaultInfos = sql """SHOW STORAGE VAULT;"""
-    boolean exist = false
+    sql """ insert into ${newS3VaultName} values(1, 1); """
+    sql """ sync;"""
+    result = sql """ select * from ${newS3VaultName}; """
+    assertEquals(result.size(), 1);
 
+    // case15
     sql """
-        ALTER STORAGE VAULT ${vaultName}
+        ALTER STORAGE VAULT ${newS3VaultName}
         PROPERTIES (
             "type"="S3",
-            "VAULT_NAME" = "${newVaultName}",
-            "s3.access_key" = "new_ak_ak",
-            "s3.secret_key" = "sk"
+            "s3.access_key" = "error_ak",
+            "s3.secret_key" = "error_sk"
         );
     """
-
-    for (int i = 0; i < vaultInfos.size(); i++) {
-        def name = vaultInfos[i][0]
-        logger.info("name is ${name}, info ${vaultInfos[i]}")
-        if (name.equals(vaultName)) {
-            assertTrue(false);
-        }
-        if (name.equals(newVaultName)) {
-            assertTrue(vaultInfos[i][2].contains("new_ak_ak"))
-            exist = true
-        }
-    }
-    assertTrue(exist)
-
-    vaultName = newVaultName;
-
-    expectExceptionLike({
-        sql """
-            ALTER STORAGE VAULT ${vaultName}
-            PROPERTIES (
-                "type"="S3",
-                "VAULT_NAME" = "${dupVaultName}",
-                "s3.access_key" = "new_ak_ak",
-                "s3.secret_key" = "sk"
-            );
-        """
-    }, "already exists")
-
-    def count = sql """ select count() from alter_s3_vault_tbl; """
-    assertTrue(res[0][0] == 4)
-
-    // failed to insert due to the wrong ak
-    expectExceptionLike({ sql """insert into alter_s3_vault_tbl values("2", "2");""" }, "")
+    expectExceptionLike({ sql """insert into ${newS3VaultName} values("2", "2");""" }, "failed to put object")
 }
diff --git a/regression-test/suites/vault_p0/alter/test_alter_use_path_style.groovy b/regression-test/suites/vault_p0/alter/test_alter_use_path_style.groovy
index 4aaeb7ec472..86e8ba57f49 100644
--- a/regression-test/suites/vault_p0/alter/test_alter_use_path_style.groovy
+++ b/regression-test/suites/vault_p0/alter/test_alter_use_path_style.groovy
@@ -27,15 +27,18 @@ suite("test_alter_use_path_style", "nonConcurrent") {
         return
     }
 
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def s3VaultName = "s3_" + randomStr
+
     sql """
-        CREATE STORAGE VAULT IF NOT EXISTS ${suiteName}
+        CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
         PROPERTIES (
             "type"="S3",
             "s3.endpoint"="${getS3Endpoint()}",
             "s3.region" = "${getS3Region()}",
             "s3.access_key" = "${getS3AK()}",
             "s3.secret_key" = "${getS3SK()}",
-            "s3.root.path" = "${suiteName}",
+            "s3.root.path" = "${s3VaultName}",
             "s3.bucket" = "${getS3BucketName()}",
             "s3.external_endpoint" = "",
             "provider" = "${getS3Provider()}",
@@ -43,90 +46,85 @@ suite("test_alter_use_path_style", "nonConcurrent") {
         );
     """
 
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+            "type"="S3",
+            "use_path_style" = ""
+            );
+        """
+    }, "use_path_style cannot be empty")
+
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+            "type"="S3",
+            "use_path_style" = "abc"
+            );
+        """
+    }, "Invalid use_path_style value")
+
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+            "type"="S3",
+            "use_path_style" = "@#¥%*&-+=null."
+            );
+        """
+    }, "Invalid use_path_style value")
+
     sql """
-        CREATE TABLE IF NOT EXISTS alter_use_path_style_tbl
+        CREATE TABLE IF NOT EXISTS ${s3VaultName}
         (
-        `k1` INT NULL,
-        `v1` INT NULL
+            `k1` INT NULL,
+            `v1` INT NULL
         )
         UNIQUE KEY (k1)
         DISTRIBUTED BY HASH(`k1`) BUCKETS 1
         PROPERTIES (
-        "replication_num" = "1",
-        "disable_auto_compaction" = "true",
-        "storage_vault_name" = "${suiteName}"
+            "replication_num" = "1",
+            "disable_auto_compaction" = "true",
+            "storage_vault_name" = "${s3VaultName}"
         );
     """
 
-    sql """ insert into alter_use_path_style_tbl values(2, 2); """
+    sql """ insert into ${s3VaultName} values(1, 1); """
+    sql """ sync;"""
+    def result = sql """ select * from ${s3VaultName}; """
+    assertEquals(result.size(), 1);
 
     sql """
-        ALTER STORAGE VAULT ${suiteName}
+        ALTER STORAGE VAULT ${s3VaultName}
         PROPERTIES (
             "type"="S3",
             "use_path_style" = "true"
         );
     """
 
-    sql """ insert into alter_use_path_style_tbl values(2, 2); """
-
     def vaultInfos = sql """ SHOW STORAGE VAULT; """
-    boolean exist = false
+    boolean found = false
 
     for (int i = 0; i < vaultInfos.size(); i++) {
         def name = vaultInfos[i][0]
-        logger.info("name is ${name}, info ${vaultInfos[i]}")
-        if (name.equals(suiteName)) {
+        logger.info("info ${vaultInfos[i]}")
+        if (name.equals(s3VaultName)) {
             assertTrue(vaultInfos[i][2].contains("""use_path_style: true"""))
-            exist = true
+            found = true
+            break
         }
     }
-    assertTrue(exist)
-
-
-    sql """
-        ALTER STORAGE VAULT ${suiteName}
-        PROPERTIES (
-            "type"="S3",
-            "use_path_style" = "false"
-        );
-    """
-
-    sql """ insert into alter_use_path_style_tbl values(2, 2); """
-
-    vaultInfos = sql """ SHOW STORAGE VAULT; """
-    exist = false
-
-    for (int i = 0; i < vaultInfos.size(); i++) {
-        def name = vaultInfos[i][0]
-        logger.info("name is ${name}, info ${vaultInfos[i]}")
-        if (name.equals(suiteName)) {
-            assertTrue(vaultInfos[i][2].contains("""use_path_style: false"""))
-            exist = true
-        }
+    assertTrue(found)
+
+    if ("OSS".equalsIgnoreCase(getS3Provider().trim())) {
+        // OSS public cloud does not allow URL path style access
+        expectExceptionLike({ sql """insert into ${s3VaultName} values("2", "2");""" }, "failed to put object")
+    } else {
+        sql """ insert into ${s3VaultName} values(2, 2); """
+        sql """ sync;"""
+        result = sql """ select * from ${s3VaultName}; """
+        assertEquals(result.size(), 2);
     }
-    assertTrue(exist)
-
-    expectExceptionLike({
-        sql """
-            ALTER STORAGE VAULT ${suiteName}
-            PROPERTIES (
-            "type"="S3",
-            "use_path_style" = ""
-            );
-        """
-    }, "use_path_style cannot be empty")
-
-    expectExceptionLike({
-        sql """
-            ALTER STORAGE VAULT ${suiteName}
-            PROPERTIES (
-            "type"="S3",
-            "use_path_style" = "abc"
-            );
-        """
-    }, "Invalid use_path_style value")
-
-    def count = sql """ select count() from alter_use_path_style_tbl; """
-    assertTrue(res[0][0] == 3)
 }
\ No newline at end of file
diff --git a/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy b/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
index 4592e72292a..e094e12056d 100644
--- a/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
+++ b/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
@@ -33,9 +33,9 @@ suite("test_alter_vault_name", "nonConcurrent") {
         CREATE STORAGE VAULT ${hdfsVaultName}
         PROPERTIES (
             "type" = "HDFS",
-            "fs.defaultFS" = "${getHdfsFs()}",
+            "fs.defaultFS" = "${getHmsHdfsFs()}",
             "path_prefix" = "${hdfsVaultName}",
-            "hadoop.username" = "${getHdfsUser()}"
+            "hadoop.username" = "${getHmsUser()}"
         );
     """
 
@@ -65,7 +65,7 @@ suite("test_alter_vault_name", "nonConcurrent") {
                 "VAULT_NAME" = "${hdfsVaultName}"
             );
         """
-    }, "vault name no change")
+    }, "Vault name has not been changed")
 
     // case2
     expectExceptionLike({
@@ -87,7 +87,7 @@ suite("test_alter_vault_name", "nonConcurrent") {
                 "VAULT_NAME" = "${s3VaultName}"
             );
         """
-    }, "vault name no change")
+    }, "Vault name has not been changed")
 
     // case4
     expectExceptionLike({
diff --git a/regression-test/suites/vault_p0/alter/test_alter_vault_type.groovy b/regression-test/suites/vault_p0/alter/test_alter_vault_type.groovy
index 1a5181d6dcf..1a4c4c19193 100644
--- a/regression-test/suites/vault_p0/alter/test_alter_vault_type.groovy
+++ b/regression-test/suites/vault_p0/alter/test_alter_vault_type.groovy
@@ -27,7 +27,8 @@ suite("test_alter_vault_type", "nonConcurrent") {
         return
     }
 
-    def hdfsVaultName = suiteName + "_HDFS"
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def hdfsVaultName = "hdfs_" + randomStr
 
     sql """
         CREATE STORAGE VAULT IF NOT EXISTS ${hdfsVaultName}
@@ -35,7 +36,7 @@ suite("test_alter_vault_type", "nonConcurrent") {
             "type"="HDFS",
             "fs.defaultFS"="${getHmsHdfsFs()}",
             "path_prefix" = "${hdfsVaultName}",
-            "hadoop.username" = "hadoop"
+            "hadoop.username" = "${getHmsUser()}"
         );
     """
 
@@ -44,13 +45,13 @@ suite("test_alter_vault_type", "nonConcurrent") {
             ALTER STORAGE VAULT ${hdfsVaultName}
             PROPERTIES (
                 "type"="s3",
-                "s3.access_key" = "new_ak"
+                "s3.access_key" = "new_ak",
+                "s3.secret_key" = "new_sk"
             );
         """
     }, "is not s3 storage vault")
 
-
-    def s3VaultName = suiteName + "_S3"
+    def s3VaultName = "s3_" + randomStr
     sql """
         CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
         PROPERTIES (
@@ -62,6 +63,7 @@ suite("test_alter_vault_type", "nonConcurrent") {
             "s3.root.path" = "${s3VaultName}",
             "s3.bucket" = "${getS3BucketName()}",
             "s3.external_endpoint" = "",
+            "use_path_style" = "false",
             "provider" = "${getS3Provider()}"
         );
     """
diff --git a/regression-test/suites/vault_p0/create/load.groovy b/regression-test/suites/vault_p0/create/load.groovy
deleted file mode 100644
index 321a58a2170..00000000000
--- a/regression-test/suites/vault_p0/create/load.groovy
+++ /dev/null
@@ -1,252 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-suite("create_storage_vault", "nonConcurrent") {
-    if (!isCloudMode()) {
-        logger.info("skip test_create_vault case because not cloud mode")
-        return
-    }
-
-    if (!enableStoragevault()) {
-        logger.info("skip test_create_vault case")
-        return
-    }
-
-    expectExceptionLike({
-        sql """
-            CREATE STORAGE VAULT IF NOT EXISTS failed_vault
-            PROPERTIES (
-                "type"="S3",
-                "fs.defaultFS"="${getHmsHdfsFs()}",
-                "path_prefix" = "ssb_sf1_p2",
-                "hadoop.username" = "hadoop"
-            );
-           """
-    }, "Missing [s3.endpoint] in properties")
-
-    expectExceptionLike({
-        sql """
-            CREATE STORAGE VAULT IF NOT EXISTS failed_vault
-            PROPERTIES (
-                "type"="hdfs",
-                "s3.bucket"="${getHmsHdfsFs()}",
-                "path_prefix" = "ssb_sf1_p2",
-                "hadoop.username" = "hadoop"
-            );
-            """
-    }, "invalid fs_name")
-
-    expectExceptionLike({
-        sql """ CREATE STORAGE VAULT IF NOT EXISTS failed_vault PROPERTIES (); """
-    }, "mismatched input ')'")
-
-
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS create_hdfs_vault
-        PROPERTIES (
-            "type"="hdfs",
-            "fs.defaultFS"="${getHmsHdfsFs()}",
-            "path_prefix" = "default_vault_ssb_hdfs_vault",
-            "hadoop.username" = "hadoop"
-        );
-        """
-
-    try_sql """ DROP TABLE IF EXISTS create_table_use_vault FORCE; """
-
-    sql """
-        CREATE TABLE IF NOT EXISTS create_table_use_vault (
-                C_CUSTKEY     INTEGER NOT NULL,
-                C_NAME        INTEGER NOT NULL
-        )
-        DUPLICATE KEY(C_CUSTKEY, C_NAME)
-        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-        PROPERTIES (
-            "replication_num" = "1",
-            "storage_vault_name" = "create_hdfs_vault"
-        )
-        """
-
-    String create_stmt = sql """ SHOW CREATE TABLE create_table_use_vault """
-
-    logger.info("the create table stmt is ${create_stmt}")
-    assertTrue(create_stmt.contains("create_hdfs_vault"))
-
-    expectExceptionLike({
-        sql """
-            CREATE STORAGE VAULT create_hdfs_vault
-            PROPERTIES (
-                "type"="hdfs",
-                "fs.defaultFS"="${getHmsHdfsFs()}",
-                "path_prefix" = "default_vault_ssb_hdfs_vault"
-            );
-        """
-    }, "already created")
-
-
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS create_s3_vault
-        PROPERTIES (
-            "type"="S3",
-            "s3.endpoint"="${getS3Endpoint()}",
-            "s3.region" = "${getS3Region()}",
-            "s3.access_key" = "${getS3AK()}",
-            "s3.secret_key" = "${getS3SK()}",
-            "s3.root.path" = "test_create_s3_vault",
-            "s3.bucket" = "${getS3BucketName()}",
-            "s3.external_endpoint" = "",
-            "provider" = "${getS3Provider()}"
-        );
-    """
-
-    // test path style
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS create_s3_vault1
-        PROPERTIES (
-            "type"="S3",
-            "s3.endpoint"="${getS3Endpoint()}",
-            "s3.region" = "${getS3Region()}",
-            "s3.access_key" = "${getS3AK()}",
-            "s3.secret_key" = "${getS3SK()}",
-            "s3.root.path" = "test_create_s3_vault1",
-            "s3.bucket" = "${getS3BucketName()}",
-            "s3.external_endpoint" = "",
-            "use_path_style" = "true",
-            "provider" = "${getS3Provider()}"
-        );
-    """
-
-    // test path style
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS create_s3_vault2
-        PROPERTIES (
-            "type"="S3",
-            "s3.endpoint"="${getS3Endpoint()}",
-            "s3.region" = "${getS3Region()}",
-            "s3.access_key" = "${getS3AK()}",
-            "s3.secret_key" = "${getS3SK()}",
-            "s3.root.path" = "test_create_s3_vault2",
-            "s3.bucket" = "${getS3BucketName()}",
-            "s3.external_endpoint" = "",
-            "use_path_style" = "false",
-            "provider" = "${getS3Provider()}"
-        );
-    """
-
-    var result = """show storage vault"""
-    assertTrue(result.size() >= 3);
-    for (int i = 0; i < result.size(); ++i) {
-        if (result[i][0].equals("create_s3_vault")) {
-            assertTrue(result[i][2].contains("use_path_style: true"));
-        }
-        if (result[i][0].equals("create_s3_vault1")) {
-            assertTrue(result[i][2].contains("use_path_style: true"));
-        }
-        if (result[i][0].equals("create_s3_vault2")) {
-            assertTrue(result[i][2].contains("use_path_style: false"));
-        }
-    }
-
-    expectExceptionLike({
-        sql """
-            CREATE STORAGE VAULT create_s3_vault
-            PROPERTIES (
-                "type"="S3",
-                "s3.endpoint"="${getS3Endpoint()}",
-                "s3.region" = "${getS3Region()}",
-                "s3.access_key" = "${getS3AK()}",
-                "s3.secret_key" = "${getS3SK()}",
-                "s3.root.path" = "test_create_s3_vault",
-                "s3.bucket" = "${getS3BucketName()}",
-                "s3.external_endpoint" = "",
-                "provider" = "${getS3Provider()}"
-            );
-        """
-    }, "already created")
-
-    // sql """
-    //     CREATE TABLE IF NOT EXISTS create_table_use_s3_vault (
-    //         C_CUSTKEY     INTEGER NOT NULL,
-    //         C_NAME        INTEGER NOT NULL
-    //     )
-    //     DUPLICATE KEY(C_CUSTKEY, C_NAME)
-    //     DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-    //     PROPERTIES (
-    //         "replication_num" = "1",
-    //         "storage_vault_name" = "create_s3_vault"
-    //     )
-    // """
-
-    // sql """ insert into create_table_use_s3_vault values(1,1); """
-
-    // sql """ select * from create_table_use_s3_vault; """
-
-
-    // def vaults_info = try_sql """ show storage vault """
-
-    
-    // boolean create_hdfs_vault_exist = false;
-    // boolean create_s3_vault_exist = false;
-    // boolean built_in_storage_vault_exist = false;
-    // for (int i = 0; i < vaults_info.size(); i++) {
-    //     def name = vaults_info[i][0]
-    //     if (name.equals("create_hdfs_vault")) {
-    //         create_hdfs_vault_exist = true;
-    //     }
-    //     if (name.equals("create_s3_vault")) {
-    //         create_s3_vault_exist = true;
-    //     }
-    //     if (name.equals("built_in_storage_vault")) {
-    //         built_in_storage_vault_exist = true
-    //     }
-    // }
-    // assertTrue(create_hdfs_vault_exist)
-    // assertTrue(create_s3_vault_exist)
-    // assertTrue(built_in_storage_vault_exist)
-
-    expectExceptionLike({
-        sql """
-            CREATE STORAGE VAULT built_in_storage_vault
-            PROPERTIES (
-                "type"="S3",
-                "s3.endpoint"="${getS3Endpoint()}",
-                "s3.region" = "${getS3Region()}",
-                "s3.access_key" = "${getS3AK()}",
-                "s3.secret_key" = "${getS3SK()}",
-                "s3.root.path" = "test_built_in_storage_vault",
-                "s3.bucket" = "${getS3BucketName()}",
-                "s3.external_endpoint" = "",
-                "provider" = "${getS3Provider()}"
-            );
-        """
-    }, "already created")
-
-
-    // expectExceptionLike({
-    //     sql """
-    //         CREATE TABLE IF NOT EXISTS create_table_with_not_exist_vault (
-    //             C_CUSTKEY     INTEGER NOT NULL,
-    //             C_NAME        INTEGER NOT NULL
-    //         )
-    //         DUPLICATE KEY(C_CUSTKEY, C_NAME)
-    //         DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-    //         PROPERTIES (
-    //             "replication_num" = "1",
-    //             "storage_vault_name" = "not_exist_vault"
-    //         )
-    //     """
-    // }, "Storage vault 'not_exist_vault' does not exist")
-}
diff --git a/regression-test/suites/vault_p0/create/test_create_vault.groovy b/regression-test/suites/vault_p0/create/test_create_vault.groovy
index 6bacf27fa5a..b545b3eff79 100644
--- a/regression-test/suites/vault_p0/create/test_create_vault.groovy
+++ b/regression-test/suites/vault_p0/create/test_create_vault.groovy
@@ -16,18 +16,107 @@
 // under the License.
 
 suite("test_create_vault", "nonConcurrent") {
+    def suiteName = name;
     if (!isCloudMode()) {
-        logger.info("skip test_create_vault case because not cloud mode")
+        logger.info("skip ${name} case, because not cloud mode")
         return
     }
 
     if (!enableStoragevault()) {
-        logger.info("skip test_create_vault case")
+        logger.info("skip ${name} case, because storage vault not enabled")
         return
     }
 
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def s3VaultName = "s3_" + randomStr
+    def hdfsVaultName = "hdfs_" + randomStr
+
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "${s3VaultName}",
+                "hadoop.username" = "${getHmsUser()}"
+            );
+           """
+    }, "Missing [s3.endpoint] in properties")
+
+    expectExceptionLike({
+        sql """ CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName} PROPERTIES (); """
+    }, "mismatched input ')'")
+
+
+    expectExceptionLike({
+        sql """
+            CREATE TABLE ${s3VaultName} (
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = "not_exist_vault"
+            )
+        """
+    }, "Storage vault 'not_exist_vault' does not exist")
+
+
+    // test `if not exist` and dup name s3 vault
+    sql """
+        CREATE STORAGE VAULT ${s3VaultName}
+        PROPERTIES (
+            "type"="S3",
+            "s3.endpoint"="${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "${s3VaultName}",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false"
+        );
+    """
+
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "s3.endpoint"="${getS3Endpoint()}",
+                "s3.region" = "${getS3Region()}",
+                "s3.access_key" = "${getS3AK()}",
+                "s3.secret_key" = "${getS3SK()}",
+                "s3.root.path" = "${s3VaultName}",
+                "s3.bucket" = "${getS3BucketName()}",
+                "s3.external_endpoint" = "",
+                "provider" = "${getS3Provider()}",
+                "use_path_style" = "false"
+            );
+        """
+    }, "already created")
+
     sql """
-        CREATE TABLE IF NOT EXISTS create_table_use_s3_vault (
+        CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
+        PROPERTIES (
+            "type"="S3",
+            "s3.endpoint"="${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "${s3VaultName}",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false"
+        );
+    """
+
+    sql """
+        CREATE TABLE ${s3VaultName} (
             C_CUSTKEY     INTEGER NOT NULL,
             C_NAME        INTEGER NOT NULL
         )
@@ -35,35 +124,90 @@ suite("test_create_vault", "nonConcurrent") {
         DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
         PROPERTIES (
             "replication_num" = "1",
-            "storage_vault_name" = "create_s3_vault"
+            "storage_vault_name" = ${s3VaultName}
         )
     """
+    sql """ insert into ${s3VaultName} values(1, 1); """
+    sql """ sync;"""
+    def result = sql """ select * from ${s3VaultName}; """
+    assertEquals(result.size(), 1);
 
-    sql """ insert into create_table_use_s3_vault values(1,1); """
+    // hdfs vault case
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type"="hdfs",
+                "s3.bucket"="${getHmsHdfsFs()}",
+                "path_prefix" = "${hdfsVaultName}",
+                "hadoop.username" = "${getHmsUser()}"
+            );
+            """
+    }, "invalid fs_name")
 
-    sql """ select * from create_table_use_s3_vault; """
+    // test `IF NOT EXISTS` and duplicate-name HDFS vault creation
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName}
+        PROPERTIES (
+            "type"="HDFS",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName}",
+            "hadoop.username" = "${getHmsUser()}"
+        );
+    """
 
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type"="HDFS",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "${hdfsVaultName}",
+                "hadoop.username" = "${getHmsUser()}"
+            );
+        """
+    }, "already created")
 
-    def vaults_info = try_sql """ show storage vault """
+    sql """
+        CREATE STORAGE VAULT IF NOT EXISTS ${hdfsVaultName}
+        PROPERTIES (
+            "type"="HDFS",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName}",
+            "hadoop.username" = "${getHmsUser()}"
+        );
+    """
+
+    sql """
+        CREATE TABLE ${hdfsVaultName} (
+            C_CUSTKEY     INTEGER NOT NULL,
+            C_NAME        INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = ${hdfsVaultName}
+        )
+    """
+    sql """ insert into ${hdfsVaultName} values(1, 1); """
+    sql """ sync;"""
+    result = sql """ select * from ${hdfsVaultName}; """
+    assertEquals(result.size(), 1);
+
+    boolean hdfsVaultExisted = false;
+    boolean s3VaultExisted = false;
+    def vaults_info = try_sql """ SHOW STORAGE VAULTS """
 
-    
-    boolean create_hdfs_vault_exist = false;
-    boolean create_s3_vault_exist = false;
-    boolean built_in_storage_vault_exist = false;
     for (int i = 0; i < vaults_info.size(); i++) {
         def name = vaults_info[i][0]
-        if (name.equals("create_hdfs_vault")) {
-            create_hdfs_vault_exist = true;
+        if (name.equals(hdfsVaultName)) {
+            hdfsVaultExisted = true;
         }
-        if (name.equals("create_s3_vault")) {
-            create_s3_vault_exist = true;
-        }
-        if (name.equals("built_in_storage_vault")) {
-            built_in_storage_vault_exist = true
+        if (name.equals(s3VaultName)) {
+            s3VaultExisted = true;
         }
     }
-    assertTrue(create_hdfs_vault_exist)
-    assertTrue(create_s3_vault_exist)
-    assertTrue(built_in_storage_vault_exist)
-
+    assertTrue(hdfsVaultExisted)
+    assertTrue(s3VaultExisted)
 }
diff --git a/regression-test/suites/vault_p0/default/test_default_vault.groovy b/regression-test/suites/vault_p0/default/test_default_vault.groovy
index 2fa445bdd57..0660b47cbf8 100644
--- a/regression-test/suites/vault_p0/default/test_default_vault.groovy
+++ b/regression-test/suites/vault_p0/default/test_default_vault.groovy
@@ -16,123 +16,198 @@
 // under the License.
 
 suite("test_default_vault", "nonConcurrent") {
+    def suiteName = name;
     if (!isCloudMode()) {
-        logger.info("skip ${name} case, because not cloud mode")
+        logger.info("skip ${suiteName} case, because not cloud mode")
         return
     }
 
     if (!enableStoragevault()) {
-        logger.info("skip ${name} case")
+        logger.info("skip ${suiteName} case, because storage vault not 
enabled")
         return
     }
 
-    try {
-        sql """ UNSET DEFAULT STORAGE VAULT; """
+    expectExceptionLike({
+        sql """ set not_exist as default storage vault"""
+    }, "invalid storage vault name")
 
-        expectExceptionLike({
-            sql """ set not_exist as default storage vault """
-        }, "invalid storage vault name")
+    expectExceptionLike({
+        sql """ set null as default storage vault"""
+    }, "no viable alternative at input")
 
-        def tableName = "table_use_vault"
-        sql "DROP TABLE IF EXISTS ${tableName}"
-
-        expectExceptionLike({
-            sql """
-                CREATE TABLE ${tableName} (
-                    `key` INT,
-                    value INT
-                ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1
-                PROPERTIES ('replication_num' = '1')
-            """
-        }, "No default storage vault")
+    sql """ UNSET DEFAULT STORAGE VAULT; """
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def s3VaultName = "s3_" + randomStr
+    def s3TableName = "s3_tbl_" + randomStr
+    def hdfsVaultName = "hdfs_" + randomStr
+    def hdfsTableName = "hdfs_tbl_" + randomStr
 
+    expectExceptionLike({
         sql """
-            CREATE STORAGE VAULT IF NOT EXISTS create_s3_vault_for_default
-            PROPERTIES (
-                "type"="S3",
-                "s3.endpoint"="${getS3Endpoint()}",
-                "s3.region" = "${getS3Region()}",
-                "s3.access_key" = "${getS3AK()}",
-                "s3.secret_key" = "${getS3SK()}",
-                "s3.root.path" = "create_s3_vault_for_default",
-                "s3.bucket" = "${getS3BucketName()}",
-                "s3.external_endpoint" = "",
-                "provider" = "${getS3Provider()}",
-                "set_as_default" = "true"
-            );
-        """
-
-        sql """ set create_s3_vault_for_default as default storage vault """
-        def vaultInfos = sql """ SHOW STORAGE VAULT """
-        // check if create_s3_vault_for_default is set as default
-        for (int i = 0; i < vaultInfos.size(); i++) {
-            def name = vaultInfos[i][0]
-            if (name.equals("create_s3_vault_for_default")) {
-                // isDefault is true
-                assertEquals(vaultInfos[i][3], "true")
-            }
-        }
-
-        sql """ UNSET DEFAULT STORAGE VAULT; """
-        vaultInfos = sql """ SHOW STORAGE VAULT """
-        for (int i = 0; i < vaultInfos.size(); i++) {
-            assertEquals(vaultInfos[i][3], "false")
-        }
-
-
-        sql """ set built_in_storage_vault as default storage vault """
-
-        sql "DROP TABLE IF EXISTS ${tableName} FORCE;"
-        sql """
-            CREATE TABLE ${tableName} (
+            CREATE TABLE ${s3VaultName} (
                 `key` INT,
                 value INT
             ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1
             PROPERTIES ('replication_num' = '1')
         """
-
-        sql """ insert into ${tableName} values(1, 1); """
-        sql """ sync;"""
-        def result = sql """ select * from ${tableName}; """
-        logger.info("result:${result}");
-        assertTrue(result.size() == 1)
-        assertTrue(result[0][0].toInteger() == 1)
-
-        def create_table_stmt = sql """ show create table ${tableName} """
-        assertTrue(create_table_stmt[0][1].contains("built_in_storage_vault"))
-
+    }, "No default storage vault")
+
+    sql """
+        CREATE STORAGE VAULT ${s3VaultName}
+        PROPERTIES (
+            "type"="S3",
+            "s3.endpoint"="${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "${s3VaultName}",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false",
+            "set_as_default" = "true"
+        );
+    """
+
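+    // column index 3 of the SHOW STORAGE VAULTS output is assumed to be the IsDefault flag in the checks below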
+    boolean found = false
+    def vaultInfos = sql """ SHOW STORAGE VAULTS """
+    for (int i = 0; i < vaultInfos.size(); i++) {
+        def name = vaultInfos[i][0]
+        if (name.equals(s3VaultName)) {
+            // isDefault is true
+            assertEquals(vaultInfos[i][3], "true")
+            found = true
+            break;
+        }
+    }
+    assertTrue(found)
+
+    sql """
+        CREATE TABLE ${s3TableName} (
+            C_CUSTKEY     INTEGER NOT NULL,
+            C_NAME        INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1"
+        )
+    """
+    sql """ insert into ${s3TableName} values(1, 1); """
+    sql """ sync;"""
+    def result = sql """ select * from ${s3TableName}; """
+    assertEquals(result.size(), 1);
+    def createTableStmt = sql """ show create table ${s3TableName} """
+    assertTrue(createTableStmt[0][1].contains(s3VaultName))
+
+    expectExceptionLike({
         sql """
-            CREATE STORAGE VAULT IF NOT EXISTS create_default_hdfs_vault
-            PROPERTIES (
-                "type"="hdfs",
-                "fs.defaultFS"="${getHmsHdfsFs()}",
-                "path_prefix" = "default_vault_ssb_hdfs_vault",
-                "hadoop.username" = "hadoop"
-            );
+            alter table ${s3TableName} set("storage_vault_name" = "not_exist_vault");
         """
+    }, "You can not modify")
 
-        sql """ set create_default_hdfs_vault as default storage vault """
+    sql """ UNSET DEFAULT STORAGE VAULT; """
+    vaultInfos = sql """ SHOW STORAGE VAULTS """
+    for (int i = 0; i < vaultInfos.size(); i++) {
+        assertEquals(vaultInfos[i][3], "false")
+    }
 
-        sql "DROP TABLE IF EXISTS ${tableName} FORCE;"
+    expectExceptionLike({
         sql """
-            CREATE TABLE ${tableName} (
+            CREATE TABLE ${s3TableName}_2 (
                 `key` INT,
                 value INT
             ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1
             PROPERTIES ('replication_num' = '1')
         """
-
-        create_table_stmt = sql """ show create table ${tableName} """
-        assertTrue(create_table_stmt[0][1].contains("create_default_hdfs_vault"))
-
-        expectExceptionLike({
-            sql """
-                alter table ${tableName} set("storage_vault_name" = "built_in_storage_vault");
-            """
-        }, "You can not modify")
-
-    } finally {
-        sql """ set built_in_storage_vault as default storage vault """
-        sql """ set built_in_storage_vault as default storage vault """
+    }, "No default storage vault")
+
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName}
+        PROPERTIES (
+            "type"="HDFS",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName}",
+            "hadoop.username" = "${getHmsUser()}",
+            "set_as_default" = "true"
+        );
+    """
+
+    found = false
+    vaultInfos = sql """ SHOW STORAGE VAULTS """
+    for (int i = 0; i < vaultInfos.size(); i++) {
+        def name = vaultInfos[i][0]
+        if (name.equals(hdfsVaultName)) {
+            // isDefault is true
+            assertEquals(vaultInfos[i][3], "true")
+            found = true
+            break;
+        }
+    }
+    assertTrue(found)
+
+    sql """
+        CREATE TABLE ${hdfsTableName} (
+            C_CUSTKEY     INTEGER NOT NULL,
+            C_NAME        INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1"
+        )
+    """
+    sql """ insert into ${hdfsTableName} values(1, 1); """
+    sql """ sync;"""
+    result = sql """ select * from ${hdfsTableName}; """
+    assertEquals(result.size(), 1);
+    createTableStmt = sql """ show create table ${hdfsTableName} """
+    assertTrue(createTableStmt[0][1].contains(hdfsVaultName))
+
+    expectExceptionLike({
+        sql """
+            alter table ${hdfsTableName} set("storage_vault_name" = "${hdfsVaultName}");
+        """
+    }, "You can not modify")
+
+    // test the SET ... AS DEFAULT STORAGE VAULT statement
+    sql """SET ${s3VaultName} AS DEFAULT STORAGE VAULT;"""
+    found = false
+    vaultInfos = sql """ SHOW STORAGE VAULTS """
+    for (int i = 0; i < vaultInfos.size(); i++) {
+        def name = vaultInfos[i][0]
+        if (name.equals(s3VaultName)) {
+            // isDefault is true
+            assertEquals(vaultInfos[i][3], "true")
+            found = true
+            break;
+        }
+    }
+    assertTrue(found)
+
+    sql """
+        CREATE TABLE ${s3TableName}_2 (
+            C_CUSTKEY     INTEGER NOT NULL,
+            C_NAME        INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1"
+        )
+    """
+    sql """ insert into ${s3TableName}_2 values(1, 1); """
+    sql """ sync;"""
+    result = sql """ select * from ${s3TableName}_2; """
+    assertEquals(result.size(), 1);
+    createTableStmt = sql """ show create table ${s3TableName}_2 """
+    assertTrue(createTableStmt[0][1].contains(s3VaultName))
+
+    sql """ UNSET DEFAULT STORAGE VAULT; """
+    vaultInfos = sql """ SHOW STORAGE VAULTS """
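+    // after UNSET, no vault should remain flagged as default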
+    for (int i = 0; i < vaultInfos.size(); i++) {
+        if (vaultInfos[i][3].equalsIgnoreCase("true")) {
+            assertFalse(true)
+        }
     }
 }
diff --git a/regression-test/suites/vault_p0/default/test_s3_vault_path_start_with_slash.groovy b/regression-test/suites/vault_p0/default/test_s3_vault_path_start_with_slash.groovy
index 704a4b67836..b69e98adba5 100644
--- a/regression-test/suites/vault_p0/default/test_s3_vault_path_start_with_slash.groovy
+++ b/regression-test/suites/vault_p0/default/test_s3_vault_path_start_with_slash.groovy
@@ -16,55 +16,52 @@
 // under the License.
 
 suite("test_s3_vault_path_start_with_slash", "nonConcurrent") {
+    def suiteName = name;
     if (!isCloudMode()) {
-        logger.info("skip ${name} case, because not cloud mode")
+        logger.info("skip ${suiteName} case, because not cloud mode")
         return
     }
 
     if (!enableStoragevault()) {
-        logger.info("skip ${name} case")
+        logger.info("skip ${suiteName} case, because storage vault not enabled")
         return
     }
 
-    def tableName = "table_test_s3_vault_path_start_with_slash"
-    try {
-        def vault_name = "test_s3_vault_path_start_with_slash_vault"
-        sql """
-            CREATE STORAGE VAULT IF NOT EXISTS ${vault_name}
-            PROPERTIES (
-                "type"="S3",
-                "s3.endpoint"="${getS3Endpoint()}",
-                "s3.region" = "${getS3Region()}",
-                "s3.access_key" = "${getS3AK()}",
-                "s3.secret_key" = "${getS3SK()}",
-                "s3.root.path" = "/test_s3_vault_path_start_with_slash_vault",
-                "s3.bucket" = "${getS3BucketName()}",
-                "s3.external_endpoint" = "",
-                "provider" = "${getS3Provider()}",
-                "set_as_default" = "true"
-            );
-        """
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def s3VaultName = "s3_" + randomStr
 
-        sql "DROP TABLE IF EXISTS ${tableName}"
-        sql """
-                CREATE TABLE ${tableName} (
-                    `key` INT,
-                    value INT
-                ) DUPLICATE KEY (`key`) 
-                DISTRIBUTED BY HASH (`key`) BUCKETS 1
-                PROPERTIES (
-                    "replication_num" = "1",
-                    "storage_vault_name" = "${vault_name}"
-                )
-            """
+    sql """
+        CREATE STORAGE VAULT ${s3VaultName}
+        PROPERTIES (
+            "type"="S3",
+            "s3.endpoint"="${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "/test_s3_vault_path_start_with_slash_vault",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false"
+        );
+    """
 
-        sql """ insert into ${tableName} values(1, 1); """
-        sql """ sync;"""
-        def result = sql """ select * from ${tableName}; """
-        logger.info("result:${result}");
-        assertTrue(result.size() == 1)
-        assertTrue(result[0][0].toInteger() == 1)
-    } finally {
-        sql "DROP TABLE IF EXISTS ${tableName}"
-    }
+    sql """
+        CREATE TABLE ${s3VaultName} (
+            `key` INT,
+            value INT
+        ) DUPLICATE KEY (`key`) 
+        DISTRIBUTED BY HASH (`key`) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = "${s3VaultName}"
+        )
+    """
+
+    sql """ insert into ${s3VaultName} values(1, 1); """
+    sql """ sync;"""
+    def result = sql """ select * from ${s3VaultName}; """
+    logger.info("result:${result}");
+    assertTrue(result.size() == 1)
+    assertTrue(result[0][0].toInteger() == 1)
 }
diff --git a/regression-test/suites/vault_p0/forbid/test_forbid_vault.groovy b/regression-test/suites/vault_p0/forbid/test_forbid_vault.groovy
index da31ae532af..2a7a3c15f64 100644
--- a/regression-test/suites/vault_p0/forbid/test_forbid_vault.groovy
+++ b/regression-test/suites/vault_p0/forbid/test_forbid_vault.groovy
@@ -15,14 +15,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-suite("test_forbid_vault") {
+suite("test_forbid_vault", "nonConcurrent") {
+    def suiteName = name;
     if (!isCloudMode()) {
-        logger.info("skip ${name} case, because not cloud mode")
+        logger.info("skip ${suiteName} case, because not cloud mode")
         return
     }
 
     if (enableStoragevault()) {
-        logger.info("skip ${name} case, because storage vault enabled")
+        logger.info("skip ${suiteName} case, because storage vault enabled")
         return
     }
 
diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege.groovy
deleted file mode 100644
index 3238a4d4857..00000000000
--- a/regression-test/suites/vault_p0/privilege/test_vault_privilege.groovy
+++ /dev/null
@@ -1,190 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-import java.util.stream.Collectors;
-
-suite("test_vault_privilege", "nonConcurrent") {
-    if (!isCloudMode()) {
-        logger.info("skip ${name} case, because not cloud mode")
-        return
-    }
-
-    if (!enableStoragevault()) {
-        logger.info("skip ${name} case, because storage vault not enabled")
-        return
-    }
-
-    try {
-        def vault1 = "test_privilege_vault1"
-        def table1 = "test_privilege_vault_t1"
-        def table2 = "test_privilege_vault_t2"
-        def table3 = "test_privilege_vault_t3"
-
-        sql """
-            CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
-            PROPERTIES (
-                "type"="hdfs",
-                "fs.defaultFS"="${getHmsHdfsFs()}",
-                "path_prefix" = "test_vault_privilege"
-            );
-        """
-
-        def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> row[0]).collect(Collectors.toSet())
-        assertTrue(storageVaults.contains(vault1))
-
-        sql """
-            SET ${vault1} AS DEFAULT STORAGE VAULT
-        """
-        sql """
-            UNSET DEFAULT STORAGE VAULT
-        """
-
-        sql """
-            DROP TABLE IF EXISTS ${table1};
-        """
-
-        sql """
-            CREATE TABLE IF NOT EXISTS ${table1} (
-                C_CUSTKEY     INTEGER NOT NULL,
-                C_NAME        INTEGER NOT NULL
-            )
-            DUPLICATE KEY(C_CUSTKEY, C_NAME)
-            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-            PROPERTIES (
-                "replication_num" = "1",
-                "storage_vault_name" = ${vault1}
-            )
-        """
-
-        def user1 = "test_privilege_vault_user1"
-        sql """drop user if exists ${user1}"""
-        sql """create user ${user1} identified by 'Cloud12345'"""
-        sql """ GRANT create_priv ON *.*.* TO '${user1}'; """
-
-        def vault2 = "test_privilege_vault2"
-        // Only users with admin role can create storage vault
-        connect(user1, 'Cloud12345', context.config.jdbcUrl) {
-            expectExceptionLike({
-                sql """
-                    CREATE STORAGE VAULT IF NOT EXISTS ${vault2}
-                    PROPERTIES (
-                    "type"="hdfs",
-                    "fs.defaultFS"="${getHmsHdfsFs()}",
-                    "path_prefix" = "test_vault_privilege"
-                    );
-                """
-            }, "denied")
-        }
-
-        // Only users with admin role can set/unset default storage vault
-        connect(user1, 'Cloud12345', context.config.jdbcUrl) {
-            expectExceptionLike({
-                sql """
-                    SET ${vault1} AS DEFAULT STORAGE VAULT
-                """
-            }, "denied")
-        }
-        connect(user1, 'Cloud12345', context.config.jdbcUrl) {
-            expectExceptionLike({
-                sql """
-                    UNSET DEFAULT STORAGE VAULT
-                """
-            }, "denied")
-        }
-
-        def result = connect(user1, 'Cloud12345', context.config.jdbcUrl) {
-                sql " SHOW STORAGE VAULT; "
-        }
-        assertTrue(result.isEmpty())
-
-        sql """
-            DROP TABLE IF EXISTS ${table2};
-        """
-        connect(user1, 'Cloud12345', context.config.jdbcUrl) {
-            expectExceptionLike({
-                sql """
-                    CREATE TABLE IF NOT EXISTS ${table2} (
-                            C_CUSTKEY     INTEGER NOT NULL,
-                            C_NAME        INTEGER NOT NULL
-                            )
-                            DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                            PROPERTIES (
-                            "replication_num" = "1",
-                            "storage_vault_name" = ${vault1}
-                            )
-                """
-            }, "USAGE denied")
-        }
-
-        sql """
-            GRANT usage_priv ON STORAGE VAULT '${vault1}' TO '${user1}';
-        """
-
-        result = connect(user1, 'Cloud12345', context.config.jdbcUrl) {
-                sql " SHOW STORAGE VAULT; "
-        }
-        storageVaults = result.stream().map(row -> row[0]).collect(Collectors.toSet())
-        assertTrue(storageVaults.contains(vault1))
-
-        connect(user1, 'Cloud12345', context.config.jdbcUrl) {
-            sql """
-                CREATE TABLE IF NOT EXISTS ${table2} (
-                        C_CUSTKEY     INTEGER NOT NULL,
-                        C_NAME        INTEGER NOT NULL
-                        )
-                        DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                        PROPERTIES (
-                        "replication_num" = "1",
-                        "storage_vault_name" = ${vault1}
-                        )
-            """
-        }
-
-        sql """
-            REVOKE usage_priv ON STORAGE VAULT '${vault1}' FROM '${user1}';
-        """
-
-        result = connect(user1, 'Cloud12345', context.config.jdbcUrl) {
-                sql " SHOW STORAGE VAULT; "
-        }
-        assertTrue(result.isEmpty())
-
-        sql """
-            DROP TABLE IF EXISTS ${table3};
-        """
-        connect(user1, 'Cloud12345', context.config.jdbcUrl) {
-            expectExceptionLike({
-                sql """
-                    CREATE TABLE IF NOT EXISTS ${table3} (
-                            C_CUSTKEY     INTEGER NOT NULL,
-                            C_NAME        INTEGER NOT NULL
-                            )
-                            DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                            PROPERTIES (
-                            "replication_num" = "1",
-                            "storage_vault_name" = ${vault1}
-                            )
-                """
-            }, "USAGE denied")
-        }
-    } finally {
-        sql """ set built_in_storage_vault as default storage vault """
-    }
-}
\ No newline at end of file
diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege_restart.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege_restart.groovy
index fa5475c1762..230904d76a6 100644
--- a/regression-test/suites/vault_p0/privilege/test_vault_privilege_restart.groovy
+++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege_restart.groovy
@@ -21,167 +21,163 @@ import java.util.stream.Collectors;
 // This test suite is intended to test that the granted privilege for a specific user will
 // not disappear
 suite("test_vault_privilege_restart", "nonConcurrent") {
+    def suiteName = name;
     if (!isCloudMode()) {
-        logger.info("skip ${name} case, because not cloud mode")
+        logger.info("skip ${suiteName} case, because not cloud mode")
         return
     }
 
     if (!enableStoragevault()) {
-        logger.info("skip ${name} case, because storage vault not enabled")
+        logger.info("skip ${suiteName} case, because storage vault not 
enabled")
         return
     }
 
-    try {
-        // user1 will be kept before and after running this test in order to check
-        // the granted vault privilege is persisted well eventhough FE restarts many times
-        def user1 = "test_privilege_vault_restart_user1"
-        def passwd = "Cloud12345"
-
-        def vault1 = "test_privilege_vault_restart_vault1"
-        // this vaule is derived from current file location: regression-test/vaults
-        def db = context.dbName
-
-        def table1 = "test_privilege_vault_restart_t1"
-        def table2 = "test_privilege_vault_restart_t2"
-        def hdfsLinkWeDontReallyCare = "127.0.0.1:10086" // a dummy link, it doesn't need to work
-
-        //==========================================================================
-        // prepare the basic vault and tables for further check
-        //==========================================================================
-        sql """
-            CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
-            PROPERTIES (
+    // user1 will be kept before and after running this test in order to check
+    // the granted vault privilege is persisted well even though FE restarts many times
+    def user1 = "test_privilege_vault_restart_user1"
+    def passwd = "Cloud12345"
+
+    def vault1 = "test_privilege_vault_restart_vault1"
+    // this value is derived from the current file location: regression-test/vaults
+    def db = context.dbName
+
+    def table1 = "test_privilege_vault_restart_t1"
+    def table2 = "test_privilege_vault_restart_t2"
+    def dummyHdfsEndpoint = "127.0.0.1:10086" // a dummy link, it doesn't need to work
+
+    //==========================================================================
+    // prepare the basic vault and tables for further check
+    //==========================================================================
+    sql """
+        CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
+        PROPERTIES (
             "type"="hdfs",
-            "fs.defaultFS"="${hdfsLinkWeDontReallyCare}",
+            "fs.defaultFS"="${dummyHdfsEndpoint}",
             "path_prefix" = "test_vault_privilege_restart"
-            );
+        );
+    """
+
+    def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> row[0]).collect(Collectors.toSet())
+    logger.info("All vaults: ${storageVaults}")
+    org.junit.Assert.assertTrue("${vault1} is not present after creating, all vaults: ${storageVaults}", storageVaults.contains(vault1))
+
+    def allTables = (sql " SHOW tables").stream().map(row -> row[0]).collect(Collectors.toSet())
+    logger.info("All tables: ${allTables}")
+
+    // Table1 is the marker used to check whether user1 has already been created and granted
+    def targetTableExist = allTables.contains(table1) 
+
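+    // on the first pass this suite creates user1/vault1/table1; after an FE restart it runs again and takes the first branch to verify the grant survived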
+    if (targetTableExist) { 
+        // The grant procedure has run at least once before; user1 has been granted vault1
+        logger.info("${user1} has been granted with usage_priv to ${vault1} before")
+    } else {
+        logger.info("this is the first run, or there was a crash during the very first run, ${user1} has not been granted with usage_priv to ${vault1} before")
+        // create the user, grant the storage vault, and create a table with that vault
+        sql """drop user if exists ${user1}"""
+        sql """create user ${user1} identified by '${passwd}'"""
+        sql """
+            GRANT usage_priv ON storage vault ${vault1} TO '${user1}';
+        """
+        sql """
+            GRANT create_priv ON *.*.* TO '${user1}';
         """
 
-        def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> row[0]).collect(Collectors.toSet())
-        logger.info("all vaults: ${storageVaults}")
-        org.junit.Assert.assertTrue("${vault1} is not present after creating, all vaults: ${storageVaults}", storageVaults.contains(vault1))
-
-        def allTables = (sql " SHOW tables").stream().map(row -> row[0]).collect(Collectors.toSet())
-        logger.info("all tables ${allTables}")
-
-        // table1 is the sign to check if the user1 has been created and granted well
-        def targetTableExist = allTables.contains(table1) 
+        // ATTN: create table1; if successful, the marker has been set and
+        //       there won't be any excuse for user1 missing the privilege to vault1 from now on
+        sql """
+            CREATE TABLE IF NOT EXISTS ${table1} (
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = ${vault1}
+            )
+        """
+    }
 
-        if (targetTableExist) { 
-            // the grant procedure at least run once before, user1 has been granted vault1
-            logger.info("${user1} has been granted with usage_priv to ${vault1} before")
-        } else {
-            logger.info("this is the frist run, or there was a crash during the very first run, ${user1} has not been granted with usage_priv to ${vault1} before")
-            // create user and grant storage vault and create a table with that vault
-            sql """drop user if exists ${user1}"""
-            sql """create user ${user1} identified by '${passwd}'"""
+    //==========================================================================
+    // check the prepared users and tables
+    //==========================================================================
+    def allUsers = (sql " SHOW all grants ").stream().map(row -> row[0]).collect(Collectors.toSet())
+    logger.info("All users: ${allUsers}")
+    def userPresent = !(allUsers.stream().filter(i -> i.contains(user1)).collect(Collectors.toSet()).isEmpty())
+    org.junit.Assert.assertTrue("${user1} is not in the priv table ${allUsers}", userPresent)
+
+    allTables = (sql " SHOW tables").stream().map(row -> row[0]).collect(Collectors.toSet())
+    logger.info("all tables: ${allTables}")
+    org.junit.Assert.assertTrue("${table1} is not present, all tables: ${allTables}", allTables.contains(table1))
+
+    // Test user privilege: the newly created user cannot create a storage vault or set the default vault
+    // Only users with admin role can create storage vault
+    connect(user1, passwd, context.config.jdbcUrl) {
+        sql """use ${db}"""
+        expectExceptionLike({
             sql """
-                GRANT usage_priv ON storage vault ${vault1} TO '${user1}';
+                CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
+                PROPERTIES (
+                    "type"="hdfs",
+                    "fs.defaultFS"="${dummyHdfsEndpoint}",
+                    "path_prefix" = "test_vault_privilege"
+                );
             """
+        }, "denied")
+    }
+    // Only users with admin role can set/unset default storage vault
+    connect(user1, passwd, context.config.jdbcUrl) {
+        sql """use ${db}"""
+        expectExceptionLike({
             sql """
-                GRANT create_priv ON *.*.* TO '${user1}';
+                SET ${vault1} AS DEFAULT STORAGE VAULT
             """
-
-            // ATTN: create table1, if successful, the sign has been set
-            //       there wont be any execuse that user1 misses the privilege to vault1 from now on
+        }, "denied")
+    }
+    connect(user1, passwd, context.config.jdbcUrl) {
+        sql """use ${db}"""
+        expectExceptionLike({
             sql """
-                CREATE TABLE IF NOT EXISTS ${table1} (
-                        C_CUSTKEY     INTEGER NOT NULL,
-                        C_NAME        INTEGER NOT NULL
-                        )
-                        DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                        PROPERTIES (
-                        "replication_num" = "1",
-                        "storage_vault_name" = ${vault1}
-                        )
+                UNSET DEFAULT STORAGE VAULT
             """
-        }
-
-        //==========================================================================
-        // check the prepared users and tables
-        //==========================================================================
-        def allUsers = (sql " SHOW all grants ").stream().map(row -> row[0]).collect(Collectors.toSet())
-        logger.info("all users: ${allUsers}")
-        def userPresent = !(allUsers.stream().filter(i -> i.contains(user1)).collect(Collectors.toSet()).isEmpty())
-        org.junit.Assert.assertTrue("${user1} is not in the priv table ${allUsers}", userPresent)
-
-        allTables = (sql " SHOW tables").stream().map(row -> row[0]).collect(Collectors.toSet())
-        logger.info("all tables: ${allTables}")
-        org.junit.Assert.assertTrue("${table1} is not present, all tables: ${allUsers}", allTables.contains(table1))
-
-        // Test user privilege, the newly created user cannot create or set default vault
-        // Only users with admin role can create storage vault
-        connect(user1, passwd, context.config.jdbcUrl) {
-            sql """use ${db}"""
-            expectExceptionLike({
-                sql """
-                    CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
-                    PROPERTIES (
-                    "type"="hdfs",
-                    "fs.defaultFS"="${hdfsLinkWeDontReallyCare}",
-                    "path_prefix" = "test_vault_privilege"
-                    );
-                """
-            }, "denied")
-        }
-        // Only users with admin role can set/unset default storage vault
-        connect(user1, passwd, context.config.jdbcUrl) {
-            sql """use ${db}"""
-            expectExceptionLike({
-                sql """
-                    SET ${vault1} AS DEFAULT STORAGE VAULT
-                """
-            }, "denied")
-        }
-        connect(user1, passwd, context.config.jdbcUrl) {
-            sql """use ${db}"""
-            expectExceptionLike({
-                sql """
-                    UNSET DEFAULT STORAGE VAULT
-                """
-            }, "denied")
-        }
-
-        // user1 should see vault1
-        def result = connect(user1, passwd, context.config.jdbcUrl) {
-            sql """use ${db}"""
-            sql " SHOW STORAGE VAULT; "
-        }
-        storageVaults = result.stream().map(row -> row[0]).collect(Collectors.toSet())
-        org.junit.Assert.assertTrue("${user1} cannot see granted vault ${vault1} in result ${result}", storageVaults.contains(vault1))
-
-
-        //==========================================================================
-        // to test that user1 has the privilege of vault1 to create new tables
-        // this is the main test for granted vault privilege after restarting FE
-        //==========================================================================
+        }, "denied")
+    }
+
+    // user1 should see vault1
+    def result = connect(user1, passwd, context.config.jdbcUrl) {
+        sql """use ${db}"""
+        sql " SHOW STORAGE VAULT; "
+    }
+    storageVaults = result.stream().map(row -> row[0]).collect(Collectors.toSet())
+    org.junit.Assert.assertTrue("${user1} cannot see granted vault ${vault1} in result ${result}", storageVaults.contains(vault1))
+
+    //==========================================================================
+    // to test that user1 has the privilege of vault1 to create new tables
+    // this is the main test for granted vault privilege after restarting FE
+    //==========================================================================
+    sql """
+        DROP TABLE IF EXISTS ${table2} force;
+    """
+    connect(user1, passwd, context.config.jdbcUrl) {
+        sql """use ${db}"""
         sql """
-            DROP TABLE IF EXISTS ${table2} force;
+            CREATE TABLE ${table2} (
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = ${vault1}
+            )
         """
-        connect(user1, passwd, context.config.jdbcUrl) {
-            sql """use ${db}"""
-            sql """
-                CREATE TABLE ${table2} (
-                        C_CUSTKEY     INTEGER NOT NULL,
-                        C_NAME        INTEGER NOT NULL
-                        )
-                        DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                        PROPERTIES (
-                        "replication_num" = "1",
-                        "storage_vault_name" = ${vault1}
-                        )
-            """
-        }
-
-        result = connect(user1, passwd, context.config.jdbcUrl) {
-            sql """use ${db}"""
-            sql " SHOW create table ${table2}; "
-        }
-        logger.info("show create table ${table2}, result ${result}")
-        org.junit.Assert.assertTrue("missing storage vault properties 
${vault1} in table ${table2}", result.toString().contains(vault1))
-    } finally {
-        sql """ set built_in_storage_vault as default storage vault """
     }
+
+    result = connect(user1, passwd, context.config.jdbcUrl) {
+        sql """use ${db}"""
+        sql " SHOW create table ${table2}; "
+    }
+    logger.info("show create table ${table2}, result ${result}")
+    org.junit.Assert.assertTrue("missing storage vault properties ${vault1} in 
table ${table2}", result.toString().contains(vault1))
 }
diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
index 7d406cf471a..54cace642d4 100644
--- a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
+++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
@@ -18,36 +18,27 @@
 import java.util.stream.Collectors;
 
 suite("test_vault_privilege_with_role", "nonConcurrent") {
+    def suiteName = name;
     if (!isCloudMode()) {
-        logger.info("skip ${name} case, because not cloud mode")
+        logger.info("skip ${suiteName} case, because not cloud mode")
         return
     }
 
     if (!enableStoragevault()) {
-        logger.info("skip ${name} case, because storage vault not enabled")
+        logger.info("skip ${suiteName} case, because storage vault not 
enabled")
         return
     }
 
-    def vaultName = "test_vault_privilege_with_role_vault";
-
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS ${vaultName}
-        PROPERTIES (
-            "type"="hdfs",
-            "fs.defaultFS"="${getHmsHdfsFs()}",
-            "path_prefix" = "${vaultName}"
-        );
-    """
+    def dbName = context.config.getDbNameByFile(context.file)
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def hdfsVaultName = "hdfs_" + randomStr
 
-    def tableName = "test_vault_privilege_with_role_table"
-    def userName = "test_vault_privilege_with_role_user"
+    def userName = "user_${randomStr}"
     def userPassword = "Cloud12345"
-    def roleName = "test_vault_privilege_with_role_role"
-    def dbName = context.config.getDbNameByFile(context.file)
+    def roleName = "role_${randomStr}"
+    def tableName = "tbl_${randomStr}"
 
     sql """DROP TABLE IF EXISTS ${dbName}.${tableName}"""
-    sql """DROP TABLE IF EXISTS ${dbName}.${tableName}_2"""
-
     sql """DROP USER IF EXISTS ${userName}"""
     sql """DROP ROLE IF EXISTS ${roleName}"""
 
@@ -59,53 +50,69 @@ suite("test_vault_privilege_with_role", "nonConcurrent") {
         expectExceptionLike({
             sql """
                 CREATE TABLE IF NOT EXISTS ${dbName}.${tableName} (
-                        C_CUSTKEY     INTEGER NOT NULL,
-                        C_NAME        INTEGER NOT NULL
-                        )
-                        DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                        PROPERTIES (
-                        "replication_num" = "1",
-                        "storage_vault_name" = ${vaultName}
-                        )
+                    C_CUSTKEY     INTEGER NOT NULL,
+                    C_NAME        INTEGER NOT NULL
+                )
+                DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                PROPERTIES (
+                    "replication_num" = "1",
+                    "storage_vault_name" = ${hdfsVaultName}
+                )
             """
         }, "denied")
     }
 
-    sql """ GRANT usage_priv ON STORAGE VAULT '${vaultName}' TO ROLE 
'${roleName}';"""
+    sql """ GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName}' TO ROLE 
'${roleName}';"""
+
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName}
+        PROPERTIES (
+            "type"="HDFS",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName}",
+            "hadoop.username" = "${getHmsUser()}"
+        );
+        """
 
     connect(userName, userPassword, context.config.jdbcUrl) {
         sql """
             CREATE TABLE IF NOT EXISTS ${dbName}.${tableName} (
-                    C_CUSTKEY     INTEGER NOT NULL,
-                    C_NAME        INTEGER NOT NULL
-                    )
-                    DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                    DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                    PROPERTIES (
-                    "replication_num" = "1",
-                    "storage_vault_name" = ${vaultName}
-                    )
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = ${hdfsVaultName}
+            )
         """
     }
 
-    sql """
-        REVOKE usage_priv ON STORAGE VAULT '${vaultName}' FROM ROLE 
'${roleName}';
-    """
+    sql """ GRANT load_priv,select_priv ON  *.*.* TO '${userName}';"""
+    sql """ GRANT USAGE_PRIV ON COMPUTE GROUP '%' TO '${userName}';"""
+    connect(userName, userPassword, context.config.jdbcUrl) {
+        sql """
+            insert into ${dbName}.${tableName} values(1, 1);
+            select * from ${dbName}.${tableName};
+        """
+    }
 
+    sql """REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName}' FROM ROLE 
'${roleName}';"""
     connect(userName, userPassword, context.config.jdbcUrl) {
         expectExceptionLike({
             sql """
-                CREATE TABLE IF NOT EXISTS ${dbName}.${tableName}_2 (
-                        C_CUSTKEY     INTEGER NOT NULL,
-                        C_NAME        INTEGER NOT NULL
-                        )
-                        DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                        PROPERTIES (
-                        "replication_num" = "1",
-                        "storage_vault_name" = ${vaultName}
-                        )
+                CREATE TABLE ${dbName}.${tableName}_2 (
+                    C_CUSTKEY     INTEGER NOT NULL,
+                    C_NAME        INTEGER NOT NULL
+                )
+                DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                PROPERTIES (
+                    "replication_num" = "1",
+                    "storage_vault_name" = ${hdfsVaultName}
+                )
             """
         }, "denied")
     }
diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy
new file mode 100644
index 00000000000..89a158323be
--- /dev/null
+++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy
@@ -0,0 +1,253 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.stream.Collectors;
+
+suite("test_vault_privilege_with_user", "nonConcurrent") {
+    def suiteName = name;
+    if (!isCloudMode()) {
+        logger.info("skip ${suiteName} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${suiteName} case, because storage vault not 
enabled")
+        return
+    }
+
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def hdfsVaultName = "hdfs_" + randomStr
+    def hdfsUser = "user_" + randomStr
+    def hdfsUser2 = "user2_" + randomStr
+
+    sql """DROP USER IF EXISTS ${hdfsUser}"""
+    sql """DROP USER IF EXISTS ${hdfsUser2}"""
+
+    sql """create user ${hdfsUser} identified by 'Cloud12345'"""
+    sql """create user ${hdfsUser2} identified by 'Cloud12345'"""
+    sql """ GRANT create_priv ON *.*.* TO '${hdfsUser}'; """
+    sql """ GRANT create_priv ON *.*.* TO '${hdfsUser2}'; """
+
+
+    // Only users with admin role can create storage vault
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                CREATE STORAGE VAULT ${hdfsVaultName}
+                PROPERTIES (
+                    "type"="hdfs",
+                    "fs.defaultFS"="${getHmsHdfsFs()}",
+                    "path_prefix" = "${hdfsVaultName}",
+                    "hadoop.username" = "${getHmsUser()}"
+                );
+            """
+        }, "denied")
+    }
+
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName}
+        PROPERTIES (
+            "type"="HDFS",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName}",
+            "hadoop.username" = "${getHmsUser()}"
+        );
+        """
+
+    // Only users with admin role can set/unset default storage vault
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                SET ${hdfsVaultName} AS DEFAULT STORAGE VAULT
+            """
+        }, "denied")
+    }
+
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                SET not_exist_vault AS DEFAULT STORAGE VAULT
+            """
+        }, "denied")
+    }
+
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                UNSET DEFAULT STORAGE VAULT
+            """
+        }, "denied")
+    }
+
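+    // a user without usage_priv on any vault should see an empty SHOW STORAGE VAULT result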
+    def result = connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+            sql " SHOW STORAGE VAULT; "
+    }
+    assertTrue(result.isEmpty())
+
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                CREATE TABLE IF NOT EXISTS tbl_${hdfsVaultName} (
+                    C_CUSTKEY     INTEGER NOT NULL,
+                    C_NAME        INTEGER NOT NULL
+                )
+                DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                PROPERTIES (
+                    "replication_num" = "1",
+                    "storage_vault_name" = ${hdfsVaultName}
+                )
+            """
+        }, "USAGE denied")
+    }
+
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                CREATE TABLE IF NOT EXISTS tbl_${hdfsVaultName} (
+                    C_CUSTKEY     INTEGER NOT NULL,
+                    C_NAME        INTEGER NOT NULL
+                )
+                DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                PROPERTIES (
+                    "replication_num" = "1",
+                    "storage_vault_name" = ${hdfsVaultName}
+                )
+            """
+        }, "USAGE denied")
+    }
+
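+    // granting on a non-existent vault, or to a non-existent user, is apparently expected to succeed without error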
+    sql """
+        GRANT usage_priv ON STORAGE VAULT 'not_exist_vault' TO '${hdfsUser}';
+    """
+
+    sql """
+        GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName}' TO 'not_exit_user';
+    """
+
+    sql """
+        GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName}' TO '${hdfsUser}';
+        """
+
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        sql """
+            CREATE TABLE tbl_${hdfsVaultName} (
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = ${hdfsVaultName}
+            );
+        """
+    }
+
+    result = connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+            sql " SHOW STORAGE VAULT; "
+    }
+
+    def storageVaults = result.stream().map(row -> row[0]).collect(Collectors.toSet())
+    assertTrue(storageVaults.contains(hdfsVaultName))
+
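+    // besides the vault privilege, insert/select also need load/select privileges and compute group usage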
+    sql """ GRANT load_priv,select_priv ON  *.*.* TO '${hdfsUser}';"""
+    sql """ GRANT USAGE_PRIV ON COMPUTE GROUP '%' TO '${hdfsUser}';"""
+
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        sql """
+            insert into tbl_${hdfsVaultName} values(1, 1);
+            select * from tbl_${hdfsVaultName};
+        """
+    }
+
+    // Grant `usage_priv` to hdfsUser2
+    sql """
+        GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName}' TO '${hdfsUser2}';
+        """
+
+    connect(hdfsUser2, 'Cloud12345', context.config.jdbcUrl) {
+        sql """
+            CREATE TABLE tbl2_${hdfsVaultName} (
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = ${hdfsVaultName}
+            );
+        """
+    }
+
+    result = connect(hdfsUser2, 'Cloud12345', context.config.jdbcUrl) {
+            sql " SHOW STORAGE VAULT; "
+    }
+
+    storageVaults = result.stream().map(row -> row[0]).collect(Collectors.toSet())
+    assertTrue(storageVaults.contains(hdfsVaultName))
+
+    sql """
+        GRANT load_priv,select_priv ON  *.*.* TO '${hdfsUser2}';
+        """
+    sql """
+        GRANT USAGE_PRIV ON COMPUTE GROUP '%' TO '${hdfsUser2}';
+        """
+
+    connect(hdfsUser2, 'Cloud12345', context.config.jdbcUrl) {
+        sql """
+            insert into tbl2_${hdfsVaultName} values(1, 1);
+            select * from tbl2_${hdfsVaultName};
+        """
+    }
+
+    // hdfsUser still has the privilege
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        sql """
+            insert into tbl_${hdfsVaultName} values(1, 1);
+            select * from tbl_${hdfsVaultName};
+        """
+    }
+
+    sql """
+        REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName}' FROM '${hdfsUser}';
+        """
+
+    result = connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+            sql " SHOW STORAGE VAULT; "
+    }
+    assertTrue(result.isEmpty())
+
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                CREATE TABLE tbl3_${hdfsVaultName} (
+                    C_CUSTKEY     INTEGER NOT NULL,
+                    C_NAME        INTEGER NOT NULL
+                )
+                DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                PROPERTIES (
+                    "replication_num" = "1",
+                    "storage_vault_name" = ${hdfsVaultName}
+                )
+            """
+        }, "USAGE denied")
+    }
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
