This is an automated email from the ASF dual-hosted git repository.

gavinchou pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 5e4f83a91dd800461897552bf65a397011097239
Author: AlexYue <yj976240...@gmail.com>
AuthorDate: Tue Aug 13 22:39:10 2024 +0800

    [feature](Vault) Support alter hdfs storage vault and alter vault's name (#38685)
    
    Previously, only s3 vaults could be altered. This PR adds support for
    altering hdfs storage vaults, and it also supports renaming a vault.
    
    Usage example:
    ```
    ALTER STORAGE VAULT alter_s3_vault
    PROPERTIES (
            "type"="S3",
            "s3.access_key" = "new_ak", -- change ak
            "s3.secret_key" = "new_sk"  -- change sk
    );
    
    ALTER STORAGE VAULT alter_hdfs_vault
    PROPERTIES (
            "type"="hdfs",
            "VAULT_NAME" = "alter_hdfs_vault_new_name", -- change vault name
            "hadoop.username" = "hdfs" -- change user name
    );
    ```
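    
    Note: the meta service validates the new name against the pattern
    `^[a-zA-Z][0-9a-zA-Z_]*$` (see `is_valid_storage_vault_name` below), so a
    rename is rejected when the name starts with a digit or contains special
    characters. As a minimal sketch (the new vault name here is illustrative),
    a rename can be combined with a credential rotation in one statement, since
    `VAULT_NAME`, `s3.access_key`, and `s3.secret_key` are all alterable for s3
    vaults:
    ```
    ALTER STORAGE VAULT alter_s3_vault
    PROPERTIES (
            "type"="S3",
            "VAULT_NAME" = "alter_s3_vault_renamed", -- must match the pattern above
            "s3.access_key" = "new_ak",
            "s3.secret_key" = "new_sk"
    );
    ```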
---
 cloud/src/meta-service/meta_service_resource.cpp   | 186 ++++++++++++++----
 cloud/test/meta_service_test.cpp                   | 214 ++++++++++++++++++++-
 .../doris/analysis/CreateStorageVaultStmt.java     |  25 +++
 .../org/apache/doris/catalog/HdfsStorageVault.java |   9 +
 .../org/apache/doris/catalog/S3StorageVault.java   |   5 +-
 .../org/apache/doris/catalog/StorageVault.java     |  12 ++
 .../org/apache/doris/catalog/StorageVaultMgr.java  | 139 ++++++++-----
 gensrc/proto/cloud.proto                           |   3 +
 .../suites/vaults/alter/alter_hdfs.groovy          | 119 ++++++++++++
 .../suites/vaults/alter/alter_s3.groovy            | 124 ++++++++++++
 10 files changed, 754 insertions(+), 82 deletions(-)

diff --git a/cloud/src/meta-service/meta_service_resource.cpp b/cloud/src/meta-service/meta_service_resource.cpp
index a73301205dc..90a88f86006 100644
--- a/cloud/src/meta-service/meta_service_resource.cpp
+++ b/cloud/src/meta-service/meta_service_resource.cpp
@@ -24,6 +24,7 @@
 #include <charconv>
 #include <chrono>
 #include <numeric>
+#include <regex>
 #include <string>
 #include <tuple>
 
@@ -40,6 +41,14 @@
 
 using namespace std::chrono;
 
+namespace {
+constexpr char pattern_str[] = "^[a-zA-Z][0-9a-zA-Z_]*$";
+bool is_valid_storage_vault_name(const std::string& str) {
+    const std::regex pattern(pattern_str);
+    return std::regex_match(str, pattern);
+}
+} // namespace
+
 namespace doris::cloud {
 
 static void* run_bthread_work(void* arg) {
@@ -510,13 +519,114 @@ static void set_default_vault_log_helper(const InstanceInfoPB& instance,
     LOG(INFO) << vault_msg;
 }
 
+static int alter_hdfs_storage_vault(InstanceInfoPB& instance, std::unique_ptr<Transaction> txn,
+                                    const StorageVaultPB& vault, MetaServiceCode& code,
+                                    std::string& msg) {
+    if (!vault.has_hdfs_info()) {
+        code = MetaServiceCode::INVALID_ARGUMENT;
+        std::stringstream ss;
+        ss << "There is no hdfs vault provided";
+        msg = ss.str();
+        return -1;
+    }
+    const auto& hdfs_info = vault.hdfs_info();
+    if (hdfs_info.has_prefix() || !hdfs_info.has_build_conf() ||
+        hdfs_info.build_conf().has_fs_name()) {
+        code = MetaServiceCode::INVALID_ARGUMENT;
+        std::stringstream ss;
+        ss << "You cannot alter prefix or fs name because it might lose previously written data";
+        msg = ss.str();
+        return -1;
+    }
+    const auto& name = vault.name();
+    // Here we try to get mutable iter since we might need to alter the vault name
+    auto name_itr = std::find_if(instance.mutable_storage_vault_names()->begin(),
+                                 instance.mutable_storage_vault_names()->end(),
+                                 [&](const auto& vault_name) { return name == vault_name; });
+    if (name_itr == instance.storage_vault_names().end()) {
+        code = MetaServiceCode::INVALID_ARGUMENT;
+        std::stringstream ss;
+        ss << "invalid storage vault name, not found, name =" << name;
+        msg = ss.str();
+        return -1;
+    }
+    auto pos = name_itr - instance.storage_vault_names().begin();
+    std::string vault_id = instance.resource_ids().begin()[pos];
+    auto vault_key = storage_vault_key({instance.instance_id(), vault_id});
+    std::string val;
+
+    auto err = txn->get(vault_key, &val);
+    LOG(INFO) << "get vault_key=" << hex(vault_key);
+
+    if (err != TxnErrorCode::TXN_OK) {
+        code = cast_as<ErrCategory::READ>(err);
+        std::stringstream ss;
+        ss << "failed to get storage vault, vault_id=" << vault_id << ", vault_name="
+           << "" << name << " err=" << err;
+        msg = ss.str();
+        return -1;
+    }
+    StorageVaultPB new_vault;
+    new_vault.ParseFromString(val);
+    auto origin_vault_info = new_vault.DebugString();
+    if (vault.has_alter_name()) {
+        if (!is_valid_storage_vault_name(vault.alter_name())) {
+            code = MetaServiceCode::INVALID_ARGUMENT;
+            std::stringstream ss;
+            ss << "invalid storage vault name =" << vault.alter_name() << " the name must satisfy "
+               << pattern_str;
+            msg = ss.str();
+            return -1;
+        }
+        new_vault.set_name(vault.alter_name());
+        *name_itr = vault.alter_name();
+    }
+    auto* alter_hdfs_info = new_vault.mutable_hdfs_info();
+    if (hdfs_info.build_conf().has_hdfs_kerberos_keytab()) {
+        alter_hdfs_info->mutable_build_conf()->set_hdfs_kerberos_keytab(
+                hdfs_info.build_conf().hdfs_kerberos_keytab());
+    }
+    if (hdfs_info.build_conf().has_hdfs_kerberos_principal()) {
+        alter_hdfs_info->mutable_build_conf()->set_hdfs_kerberos_principal(
+                hdfs_info.build_conf().hdfs_kerberos_principal());
+    }
+    if (hdfs_info.build_conf().has_user()) {
+        alter_hdfs_info->mutable_build_conf()->set_user(hdfs_info.build_conf().user());
+    }
+    if (0 != hdfs_info.build_conf().hdfs_confs_size()) {
+        alter_hdfs_info->mutable_build_conf()->mutable_hdfs_confs()->Add(
+                hdfs_info.build_conf().hdfs_confs().begin(),
+                hdfs_info.build_conf().hdfs_confs().end());
+    }
+    auto new_vault_info = new_vault.DebugString();
+
+    val = new_vault.SerializeAsString();
+    if (val.empty()) {
+        msg = "failed to serialize";
+        code = MetaServiceCode::PROTOBUF_SERIALIZE_ERR;
+        return -1;
+    }
+
+    txn->put(vault_key, val);
+    LOG(INFO) << "put vault_id=" << vault_id << ", vault_key=" << hex(vault_key)
+              << ", origin vault=" << origin_vault_info << ", new_vault=" << new_vault_info;
+    err = txn->commit();
+    if (err != TxnErrorCode::TXN_OK) {
+        code = cast_as<ErrCategory::COMMIT>(err);
+        msg = fmt::format("failed to commit kv txn, err={}", err);
+        LOG(WARNING) << msg;
+    }
+
+    return 0;
+}
+
 static int alter_s3_storage_vault(InstanceInfoPB& instance, std::unique_ptr<Transaction> txn,
                                   const StorageVaultPB& vault, MetaServiceCode& code,
                                   std::string& msg) {
     if (!vault.has_obj_info()) {
         code = MetaServiceCode::INVALID_ARGUMENT;
         std::stringstream ss;
-        ss << "Only s3 vault can be altered";
+        ss << "There is no s3 vault provided";
         msg = ss.str();
         return -1;
     }
@@ -530,8 +640,9 @@ static int alter_s3_storage_vault(InstanceInfoPB& instance, std::unique_ptr<Tran
         return -1;
     }
     const auto& name = vault.name();
-    auto name_itr = std::find_if(instance.storage_vault_names().begin(),
-                                 instance.storage_vault_names().end(),
+    // Here we try to get mutable iter since we might need to alter the vault name
+    auto name_itr = std::find_if(instance.mutable_storage_vault_names()->begin(),
+                                 instance.mutable_storage_vault_names()->end(),
                                  [&](const auto& vault_name) { return name == vault_name; });
     if (name_itr == instance.storage_vault_names().end()) {
         code = MetaServiceCode::INVALID_ARGUMENT;
@@ -541,35 +652,39 @@ static int alter_s3_storage_vault(InstanceInfoPB& instance, std::unique_ptr<Tran
         return -1;
     }
     auto pos = name_itr - instance.storage_vault_names().begin();
-    auto id_itr = instance.resource_ids().begin() + pos;
-    auto vault_key = storage_vault_key({instance.instance_id(), *id_itr});
+    std::string vault_id = instance.resource_ids().begin()[pos];
+    auto vault_key = storage_vault_key({instance.instance_id(), vault_id});
     std::string val;
 
     auto err = txn->get(vault_key, &val);
-    LOG(INFO) << "get instance_key=" << hex(vault_key);
+    LOG(INFO) << "get vault_key=" << hex(vault_key);
 
     if (err != TxnErrorCode::TXN_OK) {
         code = cast_as<ErrCategory::READ>(err);
         std::stringstream ss;
-        ss << "failed to get storage vault, vault_id=" << *name_itr << ", vault_name="
+        ss << "failed to get storage vault, vault_id=" << vault_id << ", vault_name="
           << "" << name << " err=" << err;
         msg = ss.str();
         return -1;
     }
-    StorageVaultPB alter;
-    alter.ParseFromString(val);
-    AkSkPair pre {alter.obj_info().ak(), alter.obj_info().sk()};
-    const auto& plain_ak = obj_info.has_ak() ? obj_info.ak() : alter.obj_info().ak();
-    const auto& plain_sk = obj_info.has_ak() ? obj_info.sk() : alter.obj_info().sk();
-    auto obfuscating_sk = [](const auto& sk) -> std::string {
-        if (sk.empty()) {
-            return "";
-        }
-        std::string result(sk.length(), '*');
-        result.replace(0, 2, sk, 0, 2);
-        result.replace(result.length() - 2, 2, sk, sk.length() - 2, 2);
-        return result;
-    };
+    StorageVaultPB new_vault;
+    new_vault.ParseFromString(val);
+    if (vault.has_alter_name()) {
+        if (!is_valid_storage_vault_name(vault.alter_name())) {
+            code = MetaServiceCode::INVALID_ARGUMENT;
+            std::stringstream ss;
+            ss << "invalid storage vault name =" << vault.alter_name() << " the name must satisfy "
+               << pattern_str;
+            msg = ss.str();
+            return -1;
+        }
+        new_vault.set_name(vault.alter_name());
+        *name_itr = vault.alter_name();
+    }
+    auto origin_vault_info = new_vault.DebugString();
+    AkSkPair pre {new_vault.obj_info().ak(), new_vault.obj_info().sk()};
+    const auto& plain_ak = obj_info.has_ak() ? obj_info.ak() : new_vault.obj_info().ak();
+    const auto& plain_sk = obj_info.has_ak() ? obj_info.sk() : new_vault.obj_info().sk();
     AkSkPair plain_ak_sk_pair {plain_ak, plain_sk};
     AkSkPair cipher_ak_sk_pair;
     EncryptionInfoPB encryption_info;
@@ -581,11 +696,12 @@ static int alter_s3_storage_vault(InstanceInfoPB& instance, std::unique_ptr<Tran
         LOG(WARNING) << msg;
         return -1;
     }
-    alter.mutable_obj_info()->set_ak(cipher_ak_sk_pair.first);
-    alter.mutable_obj_info()->set_sk(cipher_ak_sk_pair.second);
-    alter.mutable_obj_info()->mutable_encryption_info()->CopyFrom(encryption_info);
+    new_vault.mutable_obj_info()->set_ak(cipher_ak_sk_pair.first);
+    new_vault.mutable_obj_info()->set_sk(cipher_ak_sk_pair.second);
+    new_vault.mutable_obj_info()->mutable_encryption_info()->CopyFrom(encryption_info);
 
-    val = alter.SerializeAsString();
+    auto new_vault_info = new_vault.DebugString();
+    val = new_vault.SerializeAsString();
     if (val.empty()) {
         msg = "failed to serialize";
         code = MetaServiceCode::PROTOBUF_SERIALIZE_ERR;
@@ -593,10 +709,8 @@ static int alter_s3_storage_vault(InstanceInfoPB& instance, std::unique_ptr<Tran
     }
 
     txn->put(vault_key, val);
-    LOG(INFO) << "put vault_id=" << *id_itr << ", instance_key=" << hex(vault_key)
-              << ", previous ak=" << pre.first << ", previous sk=" << obfuscating_sk(pre.second)
-              << ", new ak=" << cipher_ak_sk_pair.first
-              << ", new sk=" << obfuscating_sk(cipher_ak_sk_pair.second);
+    LOG(INFO) << "put vault_id=" << vault_id << ", vault_key=" << hex(vault_key)
+              << ", origin vault=" << origin_vault_info << ", new vault=" << new_vault_info;
     err = txn->commit();
     if (err != TxnErrorCode::TXN_OK) {
         code = cast_as<ErrCategory::COMMIT>(err);
@@ -738,6 +852,8 @@ void MetaServiceImpl::alter_storage_vault(google::protobuf::RpcController* contr
     }
     case AlterObjStoreInfoRequest::ALTER_S3_VAULT:
         break;
+    case AlterObjStoreInfoRequest::ALTER_HDFS_VAULT:
+        break;
     case AlterObjStoreInfoRequest::UNSET_DEFAULT_VAULT:
         break;
     case AlterObjStoreInfoRequest::UNKNOWN: {
@@ -925,12 +1041,12 @@ void MetaServiceImpl::alter_storage_vault(google::protobuf::RpcController* contr
             return;
         }
         auto pos = name_itr - instance.storage_vault_names().begin();
-        auto id_itr = instance.resource_ids().begin() + pos;
+        std::string vault_id = instance.resource_ids().begin()[pos];
         response->set_default_storage_vault_replaced(instance.has_default_storage_vault_id());
-        set_default_vault_log_helper(instance, name, *id_itr);
-        instance.set_default_storage_vault_id(*id_itr);
+        set_default_vault_log_helper(instance, name, vault_id);
+        instance.set_default_storage_vault_id(vault_id);
         instance.set_default_storage_vault_name(name);
-        response->set_storage_vault_id(*id_itr);
+        response->set_storage_vault_id(vault_id);
         break;
     }
     case AlterObjStoreInfoRequest::UNSET_DEFAULT_VAULT: {
@@ -945,6 +1061,10 @@ void MetaServiceImpl::alter_storage_vault(google::protobuf::RpcController* contr
         alter_s3_storage_vault(instance, std::move(txn), request->vault(), code, msg);
         return;
     }
+    case AlterObjStoreInfoRequest::ALTER_HDFS_VAULT: {
+        alter_hdfs_storage_vault(instance, std::move(txn), request->vault(), code, msg);
+        return;
+    }
     case AlterObjStoreInfoRequest::DROP_S3_VAULT:
         [[fallthrough]];
     default: {
diff --git a/cloud/test/meta_service_test.cpp b/cloud/test/meta_service_test.cpp
index 5a7ab21103f..3baec482710 100644
--- a/cloud/test/meta_service_test.cpp
+++ b/cloud/test/meta_service_test.cpp
@@ -459,8 +459,9 @@ TEST(MetaServiceTest, AlterS3StorageVaultTest) {
     obj_info.set_ak("ak");
     obj_info.set_sk("sk");
     StorageVaultPB vault;
+    constexpr char vault_name[] = "test_alter_s3_vault";
     vault.mutable_obj_info()->MergeFrom(obj_info);
-    vault.set_name("test_alter_s3_vault");
+    vault.set_name(vault_name);
     vault.set_id("2");
     InstanceInfoPB instance;
     instance.add_storage_vault_names(vault.name());
@@ -489,7 +490,7 @@ TEST(MetaServiceTest, AlterS3StorageVaultTest) {
         req.set_op(AlterObjStoreInfoRequest::ALTER_S3_VAULT);
         StorageVaultPB vault;
         vault.mutable_obj_info()->set_ak("new_ak");
-        vault.set_name("test_alter_s3_vault");
+        vault.set_name(vault_name);
         req.mutable_vault()->CopyFrom(vault);
 
         brpc::Controller cntl;
@@ -528,6 +529,215 @@ TEST(MetaServiceTest, AlterS3StorageVaultTest) {
         ASSERT_NE(res.status().code(), MetaServiceCode::OK) << res.status().msg();
     }
 
+    {
+        AlterObjStoreInfoRequest req;
+        constexpr char new_vault_name[] = "@!#vault_name";
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::ALTER_S3_VAULT);
+        StorageVaultPB vault;
+        vault.set_alter_name(new_vault_name);
+        ObjectStoreInfoPB obj;
+        obj.set_ak("new_ak");
+        vault.mutable_obj_info()->MergeFrom(obj);
+        vault.set_name(vault_name);
+        req.mutable_vault()->CopyFrom(vault);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_storage_vault(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_NE(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        ASSERT_TRUE(res.status().msg().find("invalid storage vault name") != std::string::npos)
+                << res.status().msg();
+    }
+
+    {
+        AlterObjStoreInfoRequest req;
+        constexpr char new_vault_name[] = "new_test_alter_s3_vault";
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::ALTER_S3_VAULT);
+        StorageVaultPB vault;
+        vault.set_alter_name(new_vault_name);
+        ObjectStoreInfoPB obj;
+        obj.set_ak("new_ak");
+        vault.mutable_obj_info()->MergeFrom(obj);
+        vault.set_name(vault_name);
+        req.mutable_vault()->CopyFrom(vault);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_storage_vault(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        InstanceInfoPB instance;
+        get_test_instance(instance);
+
+        std::unique_ptr<Transaction> txn;
+        ASSERT_EQ(meta_service->txn_kv()->create_txn(&txn), TxnErrorCode::TXN_OK);
+        std::string val;
+        ASSERT_EQ(txn->get(storage_vault_key({instance.instance_id(), "2"}), &val),
+                  TxnErrorCode::TXN_OK);
+        StorageVaultPB get_obj;
+        get_obj.ParseFromString(val);
+        ASSERT_EQ(get_obj.name(), new_vault_name) << get_obj.obj_info().ak();
+    }
+
+    SyncPoint::get_instance()->disable_processing();
+    SyncPoint::get_instance()->clear_all_call_backs();
+}
+
+TEST(MetaServiceTest, AlterHdfsStorageVaultTest) {
+    auto meta_service = get_meta_service();
+
+    auto sp = SyncPoint::get_instance();
+    sp->enable_processing();
+    sp->set_call_back("encrypt_ak_sk:get_encryption_key", [](auto&& args) {
+        auto* ret = try_any_cast<int*>(args[0]);
+        *ret = 0;
+        auto* key = try_any_cast<std::string*>(args[1]);
+        *key = "selectdbselectdbselectdbselectdb";
+        auto* key_id = try_any_cast<int64_t*>(args[2]);
+        *key_id = 1;
+    });
+    std::pair<std::string, std::string> pair;
+    sp->set_call_back("extract_object_storage_info:get_aksk_pair", [&](auto&& args) {
+        auto* ret = try_any_cast<std::pair<std::string, std::string>*>(args[0]);
+        pair = *ret;
+    });
+
+    std::unique_ptr<Transaction> txn;
+    ASSERT_EQ(meta_service->txn_kv()->create_txn(&txn), TxnErrorCode::TXN_OK);
+    std::string key;
+    std::string val;
+    InstanceKeyInfo key_info {"test_instance"};
+    instance_key(key_info, &key);
+
+    HdfsBuildConf hdfs_build_conf;
+    hdfs_build_conf.set_fs_name("fs_name");
+    hdfs_build_conf.set_user("root");
+    HdfsVaultInfo hdfs_info;
+    hdfs_info.set_prefix("root_path");
+    hdfs_info.mutable_build_conf()->MergeFrom(hdfs_build_conf);
+    StorageVaultPB vault;
+    constexpr char vault_name[] = "test_alter_hdfs_vault";
+    vault.mutable_hdfs_info()->MergeFrom(hdfs_info);
+    vault.set_name(vault_name);
+    vault.set_id("2");
+    InstanceInfoPB instance;
+    instance.add_storage_vault_names(vault.name());
+    instance.add_resource_ids(vault.id());
+    instance.set_instance_id("GetObjStoreInfoTestInstance");
+    val = instance.SerializeAsString();
+    txn->put(key, val);
+    txn->put(storage_vault_key({instance.instance_id(), "2"}), vault.SerializeAsString());
+    ASSERT_EQ(txn->commit(), TxnErrorCode::TXN_OK);
+    txn = nullptr;
+
+    auto get_test_instance = [&](InstanceInfoPB& i) {
+        std::string key;
+        std::string val;
+        std::unique_ptr<Transaction> txn;
+        ASSERT_EQ(meta_service->txn_kv()->create_txn(&txn), TxnErrorCode::TXN_OK);
+        InstanceKeyInfo key_info {"test_instance"};
+        instance_key(key_info, &key);
+        ASSERT_EQ(txn->get(key, &val), TxnErrorCode::TXN_OK);
+        i.ParseFromString(val);
+    };
+
+    {
+        AlterObjStoreInfoRequest req;
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::ALTER_HDFS_VAULT);
+        StorageVaultPB vault;
+        vault.mutable_hdfs_info()->mutable_build_conf()->set_user("hadoop");
+        vault.set_name(vault_name);
+        req.mutable_vault()->CopyFrom(vault);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_storage_vault(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        InstanceInfoPB instance;
+        get_test_instance(instance);
+
+        std::unique_ptr<Transaction> txn;
+        ASSERT_EQ(meta_service->txn_kv()->create_txn(&txn), TxnErrorCode::TXN_OK);
+        std::string val;
+        ASSERT_EQ(txn->get(storage_vault_key({instance.instance_id(), "2"}), &val),
+                  TxnErrorCode::TXN_OK);
+        StorageVaultPB get_obj;
+        get_obj.ParseFromString(val);
+        ASSERT_EQ(get_obj.hdfs_info().build_conf().user(), "hadoop")
+                << get_obj.hdfs_info().build_conf().fs_name();
+    }
+
+    {
+        AlterObjStoreInfoRequest req;
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::ALTER_HDFS_VAULT);
+        StorageVaultPB vault;
+        auto* hdfs = vault.mutable_hdfs_info();
+        hdfs->set_prefix("fake_one");
+        vault.set_name("test_alter_hdfs_vault_non_exist");
+        req.mutable_vault()->CopyFrom(vault);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_storage_vault(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_NE(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+    }
+
+    {
+        AlterObjStoreInfoRequest req;
+        constexpr char new_vault_name[] = "Thi213***@fakeVault";
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::ALTER_HDFS_VAULT);
+        StorageVaultPB vault;
+        vault.mutable_hdfs_info()->mutable_build_conf()->set_user("hadoop");
+        vault.set_name(vault_name);
+        vault.set_alter_name(new_vault_name);
+        req.mutable_vault()->CopyFrom(vault);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_storage_vault(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_NE(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        ASSERT_TRUE(res.status().msg().find("invalid storage vault name") != std::string::npos)
+                << res.status().msg();
+    }
+
+    {
+        AlterObjStoreInfoRequest req;
+        constexpr char new_vault_name[] = "new_test_alter_hdfs_vault";
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::ALTER_HDFS_VAULT);
+        StorageVaultPB vault;
+        vault.mutable_hdfs_info()->mutable_build_conf()->set_user("hadoop");
+        vault.set_name(vault_name);
+        vault.set_alter_name(new_vault_name);
+        req.mutable_vault()->CopyFrom(vault);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_storage_vault(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        InstanceInfoPB instance;
+        get_test_instance(instance);
+
+        std::unique_ptr<Transaction> txn;
+        ASSERT_EQ(meta_service->txn_kv()->create_txn(&txn), TxnErrorCode::TXN_OK);
+        std::string val;
+        ASSERT_EQ(txn->get(storage_vault_key({instance.instance_id(), "2"}), &val),
+                  TxnErrorCode::TXN_OK);
+        StorageVaultPB get_obj;
+        get_obj.ParseFromString(val);
+        ASSERT_EQ(get_obj.name(), new_vault_name) << get_obj.obj_info().ak();
+    }
+
     SyncPoint::get_instance()->disable_processing();
     SyncPoint::get_instance()->clear_all_call_backs();
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java
index 78ba855ea4a..f1aff6b9ab6 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java
@@ -37,12 +37,19 @@ import java.util.Map;
 // PROPERTIES (key1 = value1, ...)
 public class CreateStorageVaultStmt extends DdlStmt implements NotFallbackInParser {
     private static final String TYPE = "type";
+
+    private static final String PATH_VERSION = "path_version";
+
+    private static final String SHARD_NUM = "shard_num";
+
     private static final String SET_AS_DEFAULT = "set_as_default";
 
     private final boolean ifNotExists;
     private final String vaultName;
     private final Map<String, String> properties;
     private boolean setAsDefault;
+    private int pathVersion = 0;
+    private int numShard = 0;
     private StorageVault.StorageVaultType vaultType;
 
     public CreateStorageVaultStmt(boolean ifNotExists, String vaultName, Map<String, String> properties) {
@@ -64,6 +71,14 @@ public class CreateStorageVaultStmt extends DdlStmt implements NotFallbackInPars
         return vaultName;
     }
 
+    public int getNumShard() {
+        return numShard;
+    }
+
+    public int getPathVersion() {
+        return pathVersion;
+    }
+
     public Map<String, String> getProperties() {
         return properties;
     }
@@ -108,6 +123,16 @@ public class CreateStorageVaultStmt extends DdlStmt implements NotFallbackInPars
         if (type == null) {
             throw new AnalysisException("Storage Vault type can't be null");
         }
+        final String pathVersionString = properties.get(PATH_VERSION);
+        if (pathVersionString != null) {
+            this.pathVersion = Integer.parseInt(pathVersionString);
+            properties.remove(PATH_VERSION);
+        }
+        final String numShardString = properties.get(SHARD_NUM);
+        if (numShardString != null) {
+            this.numShard = Integer.parseInt(numShardString);
+            properties.remove(SHARD_NUM);
+        }
         setAsDefault = Boolean.parseBoolean(properties.getOrDefault(SET_AS_DEFAULT, "false"));
         setStorageVaultType(StorageVault.StorageVaultType.fromString(type));
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java
index 4614beef828..9be463ee3a1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java
@@ -27,6 +27,8 @@ import com.google.gson.annotations.SerializedName;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
@@ -56,6 +58,11 @@ public class HdfsStorageVault extends StorageVault {
     public static final String HDFS_PREFIX = "hdfs:";
     public static final String HDFS_FILE_PREFIX = "hdfs://";
 
+    public static final HashSet<String> FORBID_CHECK_PROPERTIES = new HashSet<>(Arrays.asList(
+            VAULT_PATH_PREFIX,
+            HADOOP_FS_NAME
+    ));
+
     /**
      * Property keys used by Doris, and should not be put in HDFS client configs,
      * such as `type`, `path_prefix`, etc.
@@ -99,6 +106,8 @@ public class HdfsStorageVault extends StorageVault {
                 hdfsConfBuilder.setHdfsKerberosPrincipal(property.getValue());
             } else if (property.getKey().equalsIgnoreCase(AuthenticationConfig.HADOOP_KERBEROS_KEYTAB)) {
                 hdfsConfBuilder.setHdfsKerberosKeytab(property.getValue());
+            } else if (property.getKey().equalsIgnoreCase(VAULT_NAME)) {
+                continue;
             } else {
                 if (!nonHdfsConfPropertyKeys.contains(property.getKey().toLowerCase())) {
                     Cloud.HdfsBuildConf.HdfsConfKVPair.Builder conf = Cloud.HdfsBuildConf.HdfsConfKVPair.newBuilder();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/S3StorageVault.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/S3StorageVault.java
index 710354b0092..67cc351cdb3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/S3StorageVault.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/S3StorageVault.java
@@ -58,10 +58,11 @@ public class S3StorageVault extends StorageVault {
 
     private static final String TYPE = "type";
 
-    public static final HashSet<String> ALTER_CHECK_PROPERTIES = new HashSet<>(Arrays.asList(
+    public static final HashSet<String> ALLOW_ALTER_PROPERTIES = new HashSet<>(Arrays.asList(
             TYPE,
             S3Properties.ACCESS_KEY,
-            S3Properties.SECRET_KEY
+            S3Properties.SECRET_KEY,
+            VAULT_NAME
     ));
 
     @SerializedName(value = "properties")
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java
index fb68581a00e..fe285b6919d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java
@@ -59,11 +59,14 @@ public abstract class StorageVault {
         }
     }
 
+    protected static final String VAULT_NAME = "VAULT_NAME";
     protected String name;
     protected StorageVaultType type;
     protected String id;
     private boolean ifNotExists;
     private boolean setAsDefault;
+    private int pathVersion = 0;
+    private int numShard = 0;
 
     private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
 
@@ -105,6 +108,13 @@ public abstract class StorageVault {
         return this.setAsDefault;
     }
 
+    public int getPathVersion() {
+        return pathVersion;
+    }
+
+    public int getNumShard() {
+        return numShard;
+    }
 
     public String getId() {
         return this.id;
@@ -142,6 +152,8 @@ public abstract class StorageVault {
             default:
                 throw new DdlException("Unknown StorageVault type: " + type);
         }
+        vault.pathVersion = stmt.getPathVersion();
+        vault.numShard = stmt.getNumShard();
         return vault;
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
index ee5356eeb2a..c0942d916d1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
@@ -38,23 +38,21 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 public class StorageVaultMgr {
     private static final Logger LOG = LogManager.getLogger(StorageVaultMgr.class);
-
-    // <VaultName, VaultId>
-    private Pair<String, String> defaultVaultInfo;
-
-    private ReadWriteLock rwLock = new ReentrantReadWriteLock();
-
     private static final ExecutorService ALTER_BE_SYNC_THREAD_POOL = Executors.newFixedThreadPool(1);
-
     private final SystemInfoService systemInfoService;
+    // <VaultName, VaultId>
+    private Pair<String, String> defaultVaultInfo;
+    private Map<String, String> vaultNameToVaultId = new HashMap<>();
+    private ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
 
     public StorageVaultMgr(SystemInfoService systemInfoService) {
         this.systemInfoService = systemInfoService;
@@ -93,32 +91,85 @@ public class StorageVaultMgr {
         return vaultId;
     }
 
-    public void alterStorageVault(StorageVaultType type, Map<String, String> properties, String name) throws Exception {
-        if (type != StorageVaultType.S3) {
-            throw new DdlException("Only support alter s3 storage vault");
-        }
-        properties.keySet().stream()
-                .filter(key -> !S3StorageVault.ALTER_CHECK_PROPERTIES.contains(key))
-                .findAny()
-                .ifPresent(key -> {
-                    throw new IllegalArgumentException("Alter property " + key + " is not allowed.");
-                });
-        Cloud.AlterObjStoreInfoRequest.Builder requestBuilder
-                = Cloud.AlterObjStoreInfoRequest.newBuilder();
-        requestBuilder.setOp(Cloud.AlterObjStoreInfoRequest.Operation.ALTER_S3_VAULT);
+    private Cloud.StorageVaultPB.Builder buildAlterS3VaultRequest(Map<String, String> properties, String name)
+            throws Exception {
         Cloud.ObjectStoreInfoPB.Builder objBuilder = S3Properties.getObjStoreInfoPB(properties);
         Cloud.StorageVaultPB.Builder alterObjVaultBuilder = Cloud.StorageVaultPB.newBuilder();
         alterObjVaultBuilder.setName(name);
         alterObjVaultBuilder.setObjInfo(objBuilder.build());
-        requestBuilder.setVault(alterObjVaultBuilder.build());
+        if (properties.containsKey(StorageVault.VAULT_NAME)) {
+            alterObjVaultBuilder.setAlterName(properties.get(StorageVault.VAULT_NAME));
+        }
+        return alterObjVaultBuilder;
+    }
+
+    private Cloud.StorageVaultPB.Builder buildAlterHdfsVaultRequest(Map<String, String> properties, String name)
+            throws Exception {
+        Cloud.HdfsVaultInfo hdfsInfos = HdfsStorageVault.generateHdfsParam(properties);
+        Cloud.StorageVaultPB.Builder alterHdfsInfoBuilder = Cloud.StorageVaultPB.newBuilder();
+        alterHdfsInfoBuilder.setName(name);
+        alterHdfsInfoBuilder.setHdfsInfo(hdfsInfos);
+        if (properties.containsKey(StorageVault.VAULT_NAME)) {
+            alterHdfsInfoBuilder.setAlterName(properties.get(StorageVault.VAULT_NAME));
+        }
+        return alterHdfsInfoBuilder;
+    }
+
+    private Cloud.StorageVaultPB.Builder buildAlterStorageVaultRequest(StorageVaultType type,
+            Map<String, String> properties, String name) throws Exception {
+        Cloud.StorageVaultPB.Builder builder;
+        if (type == StorageVaultType.S3) {
+            builder = buildAlterS3VaultRequest(properties, name);
+        } else if (type == StorageVaultType.HDFS) {
+            builder = buildAlterHdfsVaultRequest(properties, name);
+        } else {
+            throw new DdlException("Unknown storage vault type");
+        }
+        return builder;
+    }
+
+    private Cloud.StorageVaultPB.Builder buildAlterStorageVaultRequest(StorageVault vault) throws Exception {
+        Cloud.StorageVaultPB.Builder builder = buildAlterStorageVaultRequest(vault.getType(),
+                vault.getCopiedProperties(), vault.getName());
+        Cloud.StorageVaultPB.PathFormat.Builder pathBuilder = Cloud.StorageVaultPB.PathFormat.newBuilder();
+        pathBuilder.setShardNum(vault.getNumShard());
+        pathBuilder.setPathVersion(vault.getPathVersion());
+        builder.setPathFormat(pathBuilder);
+        return builder;
+    }
+
+    public void alterStorageVault(StorageVaultType type, Map<String, String> properties, String name) throws Exception {
+        if (type == StorageVaultType.UNKNOWN) {
+            throw new DdlException("Unknown storage vault type");
+        }
         try {
+            Cloud.AlterObjStoreInfoRequest.Builder request = Cloud.AlterObjStoreInfoRequest.newBuilder();
+            if (type == StorageVaultType.S3) {
+                properties.keySet().stream()
+                        .filter(key -> !S3StorageVault.ALLOW_ALTER_PROPERTIES.contains(key))
+                        .findAny()
+                        .ifPresent(key -> {
+                            throw new IllegalArgumentException("Alter property " + key + " is not allowed.");
+                        });
+                request.setOp(Operation.ALTER_S3_VAULT);
+            } else if (type == StorageVaultType.HDFS) {
+                properties.keySet().stream()
+                        .filter(HdfsStorageVault.FORBID_CHECK_PROPERTIES::contains)
+                        .findAny()
+                        .ifPresent(key -> {
+                            throw new IllegalArgumentException("Alter property " + key + " is not allowed.");
+                        });
+                request.setOp(Operation.ALTER_HDFS_VAULT);
+            }
+            Cloud.StorageVaultPB.Builder vaultBuilder = buildAlterStorageVaultRequest(type, properties, name);
+            request.setVault(vaultBuilder);
             Cloud.AlterObjStoreInfoResponse response =
-                    MetaServiceProxy.getInstance().alterStorageVault(requestBuilder.build());
+                    MetaServiceProxy.getInstance().alterStorageVault(request.build());
             if (response.getStatus().getCode() != Cloud.MetaServiceCode.OK) {
                 LOG.warn("failed to alter storage vault response: {} ", response);
                 throw new DdlException(response.getStatus().getMsg());
             }
-            LOG.info("Succeed to alter s3 vault {}, id {}, origin default vault replaced {}",
+            LOG.info("Succeed to alter storage vault {}, id {}, origin default vault replaced {}",
                     name, response.getStorageVaultId(), response.getDefaultStorageVaultReplaced());
         } catch (RpcException e) {
             LOG.warn("failed to alter storage vault due to RpcException: {}", e);
@@ -193,12 +244,8 @@ public class StorageVaultMgr {
     }
 
     @VisibleForTesting
-    public void createHdfsVault(StorageVault vault) throws DdlException {
-        HdfsStorageVault hdfsStorageVault = (HdfsStorageVault) vault;
-        Cloud.HdfsVaultInfo hdfsInfos = HdfsStorageVault.generateHdfsParam(hdfsStorageVault.getCopiedProperties());
-        Cloud.StorageVaultPB.Builder alterHdfsInfoBuilder = Cloud.StorageVaultPB.newBuilder();
-        alterHdfsInfoBuilder.setName(hdfsStorageVault.getName());
-        alterHdfsInfoBuilder.setHdfsInfo(hdfsInfos);
+    public void createHdfsVault(StorageVault vault) throws Exception {
+        Cloud.StorageVaultPB.Builder alterHdfsInfoBuilder = buildAlterStorageVaultRequest(vault);
         Cloud.AlterObjStoreInfoRequest.Builder requestBuilder
                 = Cloud.AlterObjStoreInfoRequest.newBuilder();
         requestBuilder.setOp(Cloud.AlterObjStoreInfoRequest.Operation.ADD_HDFS_INFO);
@@ -208,18 +255,21 @@ public class StorageVaultMgr {
             Cloud.AlterObjStoreInfoResponse response =
                     MetaServiceProxy.getInstance().alterStorageVault(requestBuilder.build());
             if (response.getStatus().getCode() == Cloud.MetaServiceCode.ALREADY_EXISTED
-                    && hdfsStorageVault.ifNotExists()) {
-                LOG.info("Hdfs vault {} already existed", hdfsStorageVault.getName());
+                    && vault.ifNotExists()) {
+                LOG.info("Hdfs vault {} already existed", vault.getName());
                 return;
             }
             if (response.getStatus().getCode() != Cloud.MetaServiceCode.OK) {
                 LOG.warn("failed to create hdfs storage vault, vault name {}, response: {} ",
-                        hdfsStorageVault.getName(), response);
+                        vault.getName(), response);
                 throw new DdlException(response.getStatus().getMsg());
             }
+            rwLock.writeLock().lock();
+            vaultNameToVaultId.put(vault.getName(), response.getStorageVaultId());
+            rwLock.writeLock().unlock();
             LOG.info("Succeed to create hdfs vault {}, id {}, origin default vault replaced {}",
-                    hdfsStorageVault.getName(), response.getStorageVaultId(),
-                            response.getDefaultStorageVaultReplaced());
+                    vault.getName(), response.getStorageVaultId(),
+                    response.getDefaultStorageVaultReplaced());
         } catch (RpcException e) {
             LOG.warn("failed to alter storage vault due to RpcException: {}", e);
             throw new DdlException(e.getMessage());
@@ -245,31 +295,30 @@ public class StorageVaultMgr {
         });
     }
 
-    public void createS3Vault(StorageVault vault) throws DdlException {
-        S3StorageVault s3StorageVault = (S3StorageVault) vault;
+    public void createS3Vault(StorageVault vault) throws Exception {
+        Cloud.StorageVaultPB.Builder s3StorageVaultBuilder = buildAlterStorageVaultRequest(vault);
         Cloud.AlterObjStoreInfoRequest.Builder requestBuilder
                 = Cloud.AlterObjStoreInfoRequest.newBuilder();
         requestBuilder.setOp(Cloud.AlterObjStoreInfoRequest.Operation.ADD_S3_VAULT);
-        Cloud.ObjectStoreInfoPB.Builder objBuilder = S3Properties.getObjStoreInfoPB(vault.getCopiedProperties());
-        Cloud.StorageVaultPB.Builder alterObjVaultBuilder = Cloud.StorageVaultPB.newBuilder();
-        alterObjVaultBuilder.setName(s3StorageVault.getName());
-        alterObjVaultBuilder.setObjInfo(objBuilder.build());
-        requestBuilder.setVault(alterObjVaultBuilder.build());
+        requestBuilder.setVault(s3StorageVaultBuilder);
         requestBuilder.setSetAsDefaultStorageVault(vault.setAsDefault());
         try {
             Cloud.AlterObjStoreInfoResponse response =
                     MetaServiceProxy.getInstance().alterStorageVault(requestBuilder.build());
             if (response.getStatus().getCode() == Cloud.MetaServiceCode.ALREADY_EXISTED
-                    && s3StorageVault.ifNotExists()) {
-                LOG.info("S3 vault {} already existed", s3StorageVault.getName());
+                    && vault.ifNotExists()) {
+                LOG.info("S3 vault {} already existed", vault.getName());
                 return;
             }
             if (response.getStatus().getCode() != Cloud.MetaServiceCode.OK) {
                 LOG.warn("failed to alter storage vault response: {} ", response);
                 throw new DdlException(response.getStatus().getMsg());
             }
+            rwLock.writeLock().lock();
+            vaultNameToVaultId.put(vault.getName(), response.getStorageVaultId());
+            rwLock.writeLock().unlock();
             LOG.info("Succeed to create s3 vault {}, id {}, origin default vault replaced {}",
-                    s3StorageVault.getName(), response.getStorageVaultId(), response.getDefaultStorageVaultReplaced());
+                    vault.getName(), response.getStorageVaultId(), response.getDefaultStorageVaultReplaced());
         } catch (RpcException e) {
             LOG.warn("failed to alter storage vault due to RpcException: {}", e);
             throw new DdlException(e.getMessage());
diff --git a/gensrc/proto/cloud.proto b/gensrc/proto/cloud.proto
index b113debae64..cb07ea1ec07 100644
--- a/gensrc/proto/cloud.proto
+++ b/gensrc/proto/cloud.proto
@@ -212,6 +212,8 @@ message StorageVaultPB {
     }
 
     optional PathFormat path_format = 5;
+    // Set this field when the user tries to alter the name to alter_name
+    optional string alter_name = 6;
 }
 
 message HdfsBuildConf {
@@ -858,6 +860,7 @@ message AlterObjStoreInfoRequest {
         ADD_S3_VAULT = 103;
         DROP_S3_VAULT = 104;
         ALTER_S3_VAULT = 105;
+        ALTER_HDFS_VAULT = 106;
 
         SET_DEFAULT_VAULT = 200;
         UNSET_DEFAULT_VAULT = 201;
diff --git a/regression-test/suites/vaults/alter/alter_hdfs.groovy b/regression-test/suites/vaults/alter/alter_hdfs.groovy
new file mode 100644
index 00000000000..1a1299a93cc
--- /dev/null
+++ b/regression-test/suites/vaults/alter/alter_hdfs.groovy
@@ -0,0 +1,119 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("alter_hdfs_vault", "nonConcurrent") {
+    if (!enableStoragevault()) {
+        logger.info("skip alter hdfs storage vault case")
+        return
+    }
+
+    sql """
+        CREATE STORAGE VAULT IF NOT EXISTS alter_hdfs_vault
+        PROPERTIES (
+        "type"="HDFS",
+        "fs.defaultFS"="${getHmsHdfsFs()}",
+        "path_prefix" = "ssb_sf1_p2",
+        "hadoop.username" = "hadoop"
+        );
+    """
+
+    sql """
+        CREATE TABLE IF NOT EXISTS alter_hdfs_vault_tbl (
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+                )
+                DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = "alter_hdfs_vault"
+                )
+    """
+
+    sql """
+        insert into alter_hdfs_vault_tbl values("1", "1");
+    """
+
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT alter_hdfs_vault
+            PROPERTIES (
+            "type"="hdfs",
+            "path_prefix" = "ssb_sf1_p3"
+            );
+        """
+    }, "Alter property")
+
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT alter_hdfs_vault
+            PROPERTIES (
+            "type"="hdfs",
+            "fs.defaultFS" = "ssb_sf1_p3"
+            );
+        """
+    }, "Alter property")
+
+    def vault_name = "alter_hdfs_vault"
+    String properties;
+
+    def vaults_info = try_sql """
+        show storage vault
+    """
+
+    for (int i = 0; i < vaults_info.size(); i++) {
+        def name = vaults_info[i][0]
+        if (name.equals(vault_name)) {
+            properties = vaults_info[i][2]
+        }
+    }
+    
+    sql """
+        ALTER STORAGE VAULT alter_hdfs_vault
+        PROPERTIES (
+        "type"="hdfs",
+        "VAULT_NAME" = "alter_hdfs_vault_new_name",
+        "hadoop.username" = "hdfs"
+        );
+    """
+
+    def new_vault_name = "alter_hdfs_vault_new_name"
+
+    vaults_info = sql """
+        SHOW STORAGE VAULT;
+    """
+    boolean exist = false
+
+    for (int i = 0; i < vaults_info.size(); i++) {
+        def name = vaults_info[i][0]
+        logger.info("name is ${name}, info ${vaults_info[i]}")
+        if (name.equals(vault_name)) {
+            exist = true
+        }
+        if (name.equals(new_vault_name)) {
+            assertTrue(vaults_info[i][2].contains(""""hadoop.username" = "hdfs""""))
+        }
+    }
+    assertFalse(exist)
+
+    // the insert should fail because the hadoop username was changed
+    expectExceptionLike({
+        sql """
+            insert into alter_hdfs_vault_tbl values("2", "2");
+        """
+    }, "")
+}
diff --git a/regression-test/suites/vaults/alter/alter_s3.groovy b/regression-test/suites/vaults/alter/alter_s3.groovy
new file mode 100644
index 00000000000..37f9edd0415
--- /dev/null
+++ b/regression-test/suites/vaults/alter/alter_s3.groovy
@@ -0,0 +1,124 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("alter_s3_vault", "nonConcurrent") {
+    if (!enableStoragevault()) {
+        logger.info("skip alter s3 storage vault case")
+        return
+    }
+
+    sql """
+        CREATE STORAGE VAULT IF NOT EXISTS alter_s3_vault
+        PROPERTIES (
+        "type"="S3",
+        "s3.endpoint"="${getS3Endpoint()}",
+        "s3.region" = "${getS3Region()}",
+        "s3.access_key" = "${getS3AK()}",
+        "s3.secret_key" = "${getS3SK()}",
+        "s3.root.path" = "ssb_sf1_p2_s3",
+        "s3.bucket" = "${getS3BucketName()}",
+        "s3.external_endpoint" = "",
+        "provider" = "${getS3Provider()}"
+        );
+    """
+
+    sql """
+        CREATE TABLE IF NOT EXISTS alter_s3_vault_tbl (
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+                )
+                DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = "alter_s3_vault"
+                )
+    """
+
+    sql """
+        insert into alter_s3_vault_tbl values("1", "1");
+    """
+
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT alter_s3_vault
+            PROPERTIES (
+            "type"="S3",
+            "s3.bucket" = "error_bucket"
+            );
+        """
+    }, "Alter property")
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT alter_s3_vault
+            PROPERTIES (
+            "type"="S3",
+            "provider" = "${getS3Provider()}"
+            );
+        """
+    }, "Alter property")
+
+    def vault_name = "alter_s3_vault"
+    String properties;
+
+    def vaults_info = try_sql """
+        show storage vault
+    """
+
+    for (int i = 0; i < vaults_info.size(); i++) {
+        def name = vaults_info[i][0]
+        if (name.equals(vault_name)) {
+            properties = vaults_info[i][2]
+        }
+    }
+    
+    sql """
+        ALTER STORAGE VAULT alter_s3_vault
+        PROPERTIES (
+        "type"="S3",
+        "VAULT_NAME" = "alter_s3_vault_new",
+        "s3.access_key" = "new_ak"
+        );
+    """
+
+    def new_vault_name = "alter_s3_vault_new"
+
+    vaults_info = sql """
+        SHOW STORAGE VAULT;
+    """
+    boolean exist = false
+
+    for (int i = 0; i < vaults_info.size(); i++) {
+        def name = vaults_info[i][0]
+        logger.info("name is ${name}, info ${vaults_info[i]}")
+        if (name.equals(vault_name)) {
+            exist = true
+        }
+        if (name.equals(new_vault_name)) {
+            assertTrue(vaults_info[i][2].contains(""""s3.access_key" = "new_ak""""))
+        }
+    }
+    assertFalse(exist)
+
+    // failed to insert due to the wrong ak
+    expectExceptionLike({
+        sql """
+            insert into alter_s3_vault_tbl values("2", "2");
+        """
+    }, "")
+
+}

