This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 53bbbe6d782 [feature](Cloud) Support drop storage vault in Meta Service (#32228)
53bbbe6d782 is described below

commit 53bbbe6d782f8368a29cdbbdae374843fec1be67
Author: AlexYue <yj976240...@gmail.com>
AuthorDate: Sun Mar 17 14:58:43 2024 +0800

    [feature](Cloud) Support drop storage vault in Meta Service (#32228)
---
 be/src/cloud/cloud_storage_engine.cpp              |   8 +-
 cloud/src/meta-service/meta_service.cpp            |  31 +++
 cloud/src/meta-service/meta_service_resource.cpp   |  53 ++++-
 cloud/test/meta_service_test.cpp                   | 248 ++++++++++++++++++++-
 .../apache/doris/common/util/PropertyAnalyzer.java |   1 +
 fe/fe-core/src/main/jflex/sql_scanner.flex         |   1 +
 gensrc/proto/cloud.proto                           |   3 +
 7 files changed, 325 insertions(+), 20 deletions(-)

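For readers skimming the patch: the drop operation rides on the existing alter_obj_store_info RPC rather than a new endpoint. A minimal caller-side sketch, mirroring the style of the new unit test below (the vault name and cloud_unique_id values are illustrative, not part of this commit):

    AlterObjStoreInfoRequest req;
    req.set_cloud_unique_id("test_cloud_unique_id");       // identifies the instance
    req.set_op(AlterObjStoreInfoRequest::DROP_HDFS_INFO);   // new op added by this patch
    req.mutable_hdfs()->set_name("my_hdfs_vault");          // vault to drop, looked up by name
    // The response status is MetaServiceCode::OK on success, or the new
    // STORAGE_VAULT_NOT_FOUND code when no vault with that name exists.
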
diff --git a/be/src/cloud/cloud_storage_engine.cpp b/be/src/cloud/cloud_storage_engine.cpp
index 957abb87805..0dd108b2d62 100644
--- a/be/src/cloud/cloud_storage_engine.cpp
+++ b/be/src/cloud/cloud_storage_engine.cpp
@@ -88,23 +88,26 @@ struct VaultCreateFSVisitor {
         }
 
         put_storage_resource(std::atol(id.data()), {s3_fs, 0});
+        LOG_INFO("successfully create s3 vault, vault id {}", id);
     }
 
+    // TODO(ByteYue): Make sure enable_java_support is on
     void operator()(const THdfsParams& hdfs_params) const {
         std::shared_ptr<io::HdfsFileSystem> hdfs_fs;
         auto st = io::HdfsFileSystem::create(hdfs_params, id, "", nullptr, &hdfs_fs);
         if (!st.ok()) {
-            LOG(WARNING) << "failed to create s3 fs. id=" << id;
+            LOG(WARNING) << "failed to create hdfs fs. id=" << id;
             return;
         }
 
         st = hdfs_fs->connect();
         if (!st.ok()) {
-            LOG(WARNING) << "failed to connect s3 fs. id=" << id;
+            LOG(WARNING) << "failed to connect hdfs fs. id=" << id;
             return;
         }
 
         put_storage_resource(std::atol(id.data()), {hdfs_fs, 0});
+        LOG_INFO("successfully create hdfs vault, vault id {}", id);
     }
     const std::string& id;
 };
@@ -256,6 +259,7 @@ Status CloudStorageEngine::start_bg_threads() {
     return Status::OK();
 }
 
+// We should enable_java_support if we want to use hdfs vault
 void CloudStorageEngine::_refresh_storage_vault_info_thread_callback() {
     while (!_stop_background_threads_latch.wait_for(
             std::chrono::seconds(config::refresh_s3_info_interval_s))) {
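The visitor above is presumably dispatched over a per-vault parameter variant when the refresh thread pulls vault info from Meta Service; the call site is outside this hunk, so the variant alias, the S3 parameter type, and the helper below are only an illustrative sketch of how the two operator() overloads get selected:

    #include <variant>

    // Hypothetical parameter variant; S3Conf is an assumption for illustration,
    // the real S3 parameter type is not shown in this hunk.
    using VaultParams = std::variant<S3Conf, THdfsParams>;

    void create_fs_for_vault(const std::string& id, const VaultParams& params) {
        // std::visit picks the S3 or HDFS overload of VaultCreateFSVisitor.
        std::visit(VaultCreateFSVisitor {id}, params);
    }
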
diff --git a/cloud/src/meta-service/meta_service.cpp b/cloud/src/meta-service/meta_service.cpp
index 7e8ac19b7af..23633281772 100644
--- a/cloud/src/meta-service/meta_service.cpp
+++ b/cloud/src/meta-service/meta_service.cpp
@@ -21,6 +21,7 @@
 #include <brpc/closure_guard.h>
 #include <brpc/controller.h>
 #include <bthread/bthread.h>
+#include <fmt/core.h>
 #include <gen_cpp/cloud.pb.h>
 #include <gen_cpp/olap_file.pb.h>
 #include <google/protobuf/util/json_util.h>
@@ -497,6 +498,36 @@ void MetaServiceImpl::create_tablets(::google::protobuf::RpcController* controll
         return;
     }
     RPC_RATE_LIMIT(create_tablets)
+    if (request->has_storage_vault_name()) {
+        InstanceInfoPB instance;
+        std::unique_ptr<Transaction> txn0;
+        TxnErrorCode err = txn_kv_->create_txn(&txn0);
+        if (err != TxnErrorCode::TXN_OK) {
+            code = cast_as<ErrCategory::READ>(err);
+            msg = fmt::format("failed to create txn");
+            return;
+        }
+
+        std::shared_ptr<Transaction> txn(txn0.release());
+        auto [c0, m0] = resource_mgr_->get_instance(txn, instance_id, &instance);
+        if (c0 != TxnErrorCode::TXN_OK) {
+            code = cast_as<ErrCategory::READ>(err);
+            msg = fmt::format("failed to get instance, info={}", m0);
+        }
+
+        auto vault_name = std::find_if(
+                instance.storage_vault_names().begin(), instance.storage_vault_names().end(),
+                [&](const auto& name) { return name == request->storage_vault_name(); });
+        if (vault_name != instance.storage_vault_names().end()) {
+            auto idx = vault_name - instance.storage_vault_names().begin();
+            response->set_storage_vault_id(instance.resource_ids().at(idx));
+        } else {
+            code = cast_as<ErrCategory::READ>(err);
+            msg = fmt::format("failed to get vault id, vault name={}",
+                              request->storage_vault_name());
+            return;
+        }
+    }
     // [index_id, schema_version]
     std::set<std::pair<int64_t, int32_t>> saved_schema;
     for (auto& tablet_meta : request->tablet_metas()) {
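The new create_tablets block resolves the requested vault name to a vault id through the two parallel repeated fields on InstanceInfoPB (storage_vault_names and resource_ids share indices). The same lookup, written as a standalone sketch with a hypothetical helper name:

    #include <gen_cpp/cloud.pb.h>

    #include <algorithm>
    #include <optional>
    #include <string>

    // Illustrative helper, not part of this patch: return the resource id at
    // the same index as the matching vault name, or nullopt if the name is absent.
    std::optional<std::string> find_vault_id(const InstanceInfoPB& instance,
                                             const std::string& vault_name) {
        const auto& names = instance.storage_vault_names();
        auto it = std::find(names.begin(), names.end(), vault_name);
        if (it == names.end()) {
            return std::nullopt;
        }
        return instance.resource_ids().at(it - names.begin());
    }
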
diff --git a/cloud/src/meta-service/meta_service_resource.cpp b/cloud/src/meta-service/meta_service_resource.cpp
index 4242649c6cd..c02b8f5f62f 100644
--- a/cloud/src/meta-service/meta_service_resource.cpp
+++ b/cloud/src/meta-service/meta_service_resource.cpp
@@ -19,6 +19,7 @@
 #include <fmt/core.h>
 #include <gen_cpp/cloud.pb.h>
 
+#include <algorithm>
 #include <charconv>
 #include <chrono>
 #include <numeric>
@@ -291,8 +292,8 @@ static std::string next_available_vault_id(const InstanceInfoPB& instance) {
         } else if constexpr (std::is_same_v<std::decay_t<decltype(last)>, std::string>) {
             value = last;
         }
-        auto [_, ec] = std::from_chars(value.data(), value.data() + value.size(), last_id);
-        if (ec == std::errc {}) {
+        if (auto [_, ec] = std::from_chars(value.data(), value.data() + value.size(), last_id);
+            ec != std::errc {}) [[unlikely]] {
             LOG_WARNING("Invalid resource id format: {}", value);
             last_id = 0;
             DCHECK(false);
@@ -306,9 +307,14 @@ static std::string next_available_vault_id(const InstanceInfoPB& instance) {
     return std::to_string(prev + 1);
 }
 
-static int add_hdfs_storage_valut(InstanceInfoPB& instance, Transaction* txn,
+static int add_hdfs_storage_vault(InstanceInfoPB& instance, Transaction* txn,
                                   StorageVaultPB hdfs_param, MetaServiceCode& code,
                                   std::string& msg) {
+    if (!hdfs_param.has_hdfs_info()) {
+        code = MetaServiceCode::INVALID_ARGUMENT;
+        msg = fmt::format("vault_name={} passed invalid argument", 
hdfs_param.name());
+        return -1;
+    }
     if (std::find_if(instance.storage_vault_names().begin(), instance.storage_vault_names().end(),
                      [&hdfs_param](const auto& name) { return name == hdfs_param.name(); }) !=
         instance.storage_vault_names().end()) {
@@ -322,16 +328,32 @@ static int add_hdfs_storage_valut(InstanceInfoPB& instance, Transaction* txn,
     hdfs_param.set_id(vault_id);
     std::string val = hdfs_param.SerializeAsString();
     txn->put(key, val);
-    LOG_INFO("try to put storage vault_id={}, vault_name={}, err={}", 
vault_id, hdfs_param.name());
+    LOG_INFO("try to put storage vault_id={}, vault_name={}", vault_id, 
hdfs_param.name());
     instance.mutable_resource_ids()->Add(std::move(vault_id));
     *instance.mutable_storage_vault_names()->Add() = hdfs_param.name();
     return 0;
 }
 
-// TODO(ByteYue): Implement drop storage vault.
-[[maybe_unused]] static int remove_hdfs_storage_valut(std::string_view vault_key, Transaction* txn,
-                                                      MetaServiceCode& code, std::string& msg) {
+static int remove_hdfs_storage_vault(InstanceInfoPB& instance, Transaction* txn,
+                                     const StorageVaultPB& hdfs_info, MetaServiceCode& code,
+                                     std::string& msg) {
+    std::string_view vault_name = hdfs_info.name();
+    auto name_iter = std::find_if(instance.storage_vault_names().begin(),
+                                  instance.storage_vault_names().end(),
+                                  [&](const auto& name) { return vault_name == name; });
+    if (name_iter == instance.storage_vault_names().end()) {
+        code = MetaServiceCode::STORAGE_VAULT_NOT_FOUND;
+        msg = fmt::format("vault_name={} not found", vault_name);
+        return -1;
+    }
+    auto vault_idx = name_iter - instance.storage_vault_names().begin();
+    auto vault_id_iter = instance.resource_ids().begin() + vault_idx;
+    std::string_view vault_id = *vault_id_iter;
+    std::string vault_key = storage_vault_key({instance.instance_id(), vault_id});
+
     txn->remove(vault_key);
+    instance.mutable_storage_vault_names()->DeleteSubrange(vault_idx, 1);
+    instance.mutable_resource_ids()->DeleteSubrange(vault_idx, 1);
     LOG(INFO) << "remove storage_vault_key=" << hex(vault_key);
 
     return 0;
@@ -379,8 +401,9 @@ void MetaServiceImpl::alter_obj_store_info(google::protobuf::RpcController* cont
             return;
         };
     } break;
-    case AlterObjStoreInfoRequest::ADD_HDFS_INFO: {
-        if (!request->has_hdfs()) {
+    case AlterObjStoreInfoRequest::ADD_HDFS_INFO:
+    case AlterObjStoreInfoRequest::DROP_HDFS_INFO: {
+        if (!request->has_hdfs() || !request->hdfs().has_name()) {
             code = MetaServiceCode::INVALID_ARGUMENT;
             msg = "hdfs info is not found " + proto_to_json(*request);
             return;
@@ -533,7 +556,14 @@ void MetaServiceImpl::alter_obj_store_info(google::protobuf::RpcController* cont
         instance.add_obj_info()->CopyFrom(last_item);
     } break;
     case AlterObjStoreInfoRequest::ADD_HDFS_INFO: {
-        if (auto ret = add_hdfs_storage_valut(instance, txn.get(), request->hdfs(), code, msg);
+        if (auto ret = add_hdfs_storage_vault(instance, txn.get(), request->hdfs(), code, msg);
+            ret != 0) {
+            return;
+        }
+        break;
+    }
+    case AlterObjStoreInfoRequest::DROP_HDFS_INFO: {
+        if (auto ret = remove_hdfs_storage_vault(instance, txn.get(), request->hdfs(), code, msg);
             ret != 0) {
             return;
         }
@@ -889,12 +919,11 @@ void MetaServiceImpl::create_instance(google::protobuf::RpcController* controlle
         LOG(WARNING) << msg << " err=" << err;
         return;
     }
-    // TODO(ByteYue): Reclaim the vault if the following procedure failed
     if (request->has_hdfs_info()) {
         StorageVaultPB hdfs_param;
         hdfs_param.set_name("Default");
         hdfs_param.mutable_hdfs_info()->MergeFrom(request->hdfs_info());
-        if (0 != add_hdfs_storage_valut(instance, txn.get(), std::move(hdfs_param), code, msg)) {
+        if (0 != add_hdfs_storage_vault(instance, txn.get(), std::move(hdfs_param), code, msg)) {
             return;
         }
     }
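A note on the drop path: remove_hdfs_storage_vault only mutates the in-memory InstanceInfoPB and removes the vault key; the updated instance record still has to be written back and committed by the surrounding alter_obj_store_info code, which is outside this hunk. A hedged sketch of that presumed continuation, so the atomicity argument is explicit (the COMMIT error category and message are assumptions for illustration):

    // Presumed follow-up outside this hunk: persist the shrunken
    // storage_vault_names / resource_ids lists in the same transaction, so the
    // vault key removal and the instance update become visible atomically.
    val = instance.SerializeAsString();
    txn->put(key, val);  // key = instance_key for this instance
    if (TxnErrorCode err = txn->commit(); err != TxnErrorCode::TXN_OK) {
        code = cast_as<ErrCategory::COMMIT>(err);  // assumed error category
        msg = "failed to commit kv txn when dropping storage vault";
        return;
    }
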
diff --git a/cloud/test/meta_service_test.cpp b/cloud/test/meta_service_test.cpp
index b7d36e87904..f4e0d6ac483 100644
--- a/cloud/test/meta_service_test.cpp
+++ b/cloud/test/meta_service_test.cpp
@@ -1195,12 +1195,6 @@ TEST(MetaServiceTest, CommitTxnTest) {
             BeginTxnResponse res;
             meta_service->begin_txn(reinterpret_cast<::google::protobuf::RpcController*>(&cntl),
                                     &req, &res, nullptr);
-            for (const auto& tstats : res.table_stats()) {
-                LOG(INFO) << "table_id=" << tstats.table_id()
-                          << " updatedRowCount=" << tstats.updated_row_count();
-                ASSERT_EQ(tstats.table_id(), 1234);
-                ASSERT_EQ(tstats.updated_row_count(), 500);
-            }
             ASSERT_EQ(res.status().code(), MetaServiceCode::OK);
             txn_id = res.txn_id();
         }
@@ -5088,6 +5082,248 @@ TEST(MetaServiceTest, AddHdfsInfoTest) {
     SyncPoint::get_instance()->clear_all_call_backs();
 }
 
+TEST(MetaServiceTest, DropHdfsInfoTest) {
+    auto meta_service = get_meta_service();
+
+    auto sp = cloud::SyncPoint::get_instance();
+    sp->enable_processing();
+    sp->set_call_back("encrypt_ak_sk:get_encryption_key_ret",
+                      [](void* p) { *reinterpret_cast<int*>(p) = 0; });
+    sp->set_call_back("encrypt_ak_sk:get_encryption_key", [](void* p) {
+        *reinterpret_cast<std::string*>(p) = "selectdbselectdbselectdbselectdb";
+    });
+    sp->set_call_back("encrypt_ak_sk:get_encryption_key_id",
+                      [](void* p) { *reinterpret_cast<int*>(p) = 1; });
+
+    std::unique_ptr<Transaction> txn;
+    ASSERT_EQ(meta_service->txn_kv()->create_txn(&txn), TxnErrorCode::TXN_OK);
+    std::string key;
+    std::string val;
+    InstanceKeyInfo key_info {"test_instance"};
+    instance_key(key_info, &key);
+
+    ObjectStoreInfoPB obj_info;
+    obj_info.set_id("1");
+    obj_info.set_ak("ak");
+    obj_info.set_sk("sk");
+    StorageVaultPB vault;
+    vault.set_name("test_hdfs_vault");
+    vault.set_id("2");
+    InstanceInfoPB instance;
+    instance.add_obj_info()->CopyFrom(obj_info);
+    instance.add_storage_vault_names(vault.name());
+    instance.add_resource_ids(vault.id());
+    val = instance.SerializeAsString();
+    txn->put(key, val);
+    txn->put(storage_vault_key({instance.instance_id(), "2"}), vault.SerializeAsString());
+    ASSERT_EQ(txn->commit(), TxnErrorCode::TXN_OK);
+    txn = nullptr;
+
+    auto get_test_instance = [&](InstanceInfoPB& i) {
+        std::string key;
+        std::string val;
+        std::unique_ptr<Transaction> txn;
+        ASSERT_EQ(meta_service->txn_kv()->create_txn(&txn), TxnErrorCode::TXN_OK);
+        InstanceKeyInfo key_info {"test_instance"};
+        instance_key(key_info, &key);
+        ASSERT_EQ(txn->get(key, &val), TxnErrorCode::TXN_OK);
+        i.ParseFromString(val);
+    };
+
+    // update failed because has no storage vault set
+    {
+        AlterObjStoreInfoRequest req;
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::DROP_HDFS_INFO);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_obj_store_info(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::INVALID_ARGUMENT) << res.status().msg();
+    }
+
+    // update failed because vault name does not exist
+    {
+        AlterObjStoreInfoRequest req;
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::DROP_HDFS_INFO);
+        StorageVaultPB hdfs;
+        hdfs.set_name("test_hdfs_vault_not_found");
+        HdfsVaultInfo params;
+
+        hdfs.mutable_hdfs_info()->CopyFrom(params);
+        req.mutable_hdfs()->CopyFrom(hdfs);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_obj_store_info(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::STORAGE_VAULT_NOT_FOUND)
+                << res.status().msg();
+    }
+
+    // update successfully
+    {
+        AlterObjStoreInfoRequest req;
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::DROP_HDFS_INFO);
+        StorageVaultPB hdfs;
+        hdfs.set_name("test_hdfs_vault");
+        HdfsVaultInfo params;
+
+        hdfs.mutable_hdfs_info()->CopyFrom(params);
+        req.mutable_hdfs()->CopyFrom(hdfs);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_obj_store_info(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        InstanceInfoPB instance;
+        get_test_instance(instance);
+        ASSERT_EQ(instance.resource_ids().size(), 0);
+        ASSERT_EQ(instance.storage_vault_names().size(), 0);
+        ASSERT_EQ(meta_service->txn_kv()->create_txn(&txn), TxnErrorCode::TXN_OK);
+        // To test we can not read the storage vault anymore
+        std::string vault_key = storage_vault_key({instance.instance_id(), "2"});
+        std::string vault_value;
+        auto code = txn->get(vault_key, &vault_value);
+        ASSERT_TRUE(code != TxnErrorCode::TXN_OK);
+    }
+
+    {
+        // Try to add one new hdfs info and then check the vault id is expected
+        AlterObjStoreInfoRequest req;
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::ADD_HDFS_INFO);
+        StorageVaultPB hdfs;
+        hdfs.set_name("test_alter_add_hdfs_info");
+        HdfsVaultInfo params;
+
+        hdfs.mutable_hdfs_info()->CopyFrom(params);
+        req.mutable_hdfs()->CopyFrom(hdfs);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_obj_store_info(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        InstanceInfoPB instance;
+        get_test_instance(instance);
+        ASSERT_EQ(*(instance.resource_ids().begin()), "2");
+        ASSERT_EQ(*(instance.storage_vault_names().begin()), "test_alter_add_hdfs_info");
+    }
+
+    // Add two more vaults
+    {
+        // Try to add one new hdfs info and then check the vault id is expected
+        AlterObjStoreInfoRequest req;
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::ADD_HDFS_INFO);
+        StorageVaultPB hdfs;
+        hdfs.set_name("test_alter_add_hdfs_info_1");
+        HdfsVaultInfo params;
+
+        hdfs.mutable_hdfs_info()->CopyFrom(params);
+        req.mutable_hdfs()->CopyFrom(hdfs);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_obj_store_info(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        InstanceInfoPB instance;
+        get_test_instance(instance);
+        ASSERT_EQ(instance.resource_ids().at(1), "3");
+        ASSERT_EQ(instance.storage_vault_names().at(1), "test_alter_add_hdfs_info_1");
+    }
+
+    {
+        // Try to add one new hdfs info and then check the vault id is expected
+        AlterObjStoreInfoRequest req;
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::ADD_HDFS_INFO);
+        StorageVaultPB hdfs;
+        hdfs.set_name("test_alter_add_hdfs_info_2");
+        HdfsVaultInfo params;
+
+        hdfs.mutable_hdfs_info()->CopyFrom(params);
+        req.mutable_hdfs()->CopyFrom(hdfs);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_obj_store_info(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        InstanceInfoPB instance;
+        get_test_instance(instance);
+        ASSERT_EQ(instance.resource_ids().at(2), "4");
+        ASSERT_EQ(instance.storage_vault_names().at(2), "test_alter_add_hdfs_info_2");
+    }
+
+    // Remove one vault among three vaults
+    {
+        AlterObjStoreInfoRequest req;
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::DROP_HDFS_INFO);
+        StorageVaultPB hdfs;
+        hdfs.set_name("test_alter_add_hdfs_info_1");
+        HdfsVaultInfo params;
+
+        hdfs.mutable_hdfs_info()->CopyFrom(params);
+        req.mutable_hdfs()->CopyFrom(hdfs);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_obj_store_info(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        InstanceInfoPB instance;
+        get_test_instance(instance);
+        ASSERT_EQ(instance.resource_ids().size(), 2);
+        ASSERT_EQ(instance.storage_vault_names().size(), 2);
+        ASSERT_EQ(meta_service->txn_kv()->create_txn(&txn), TxnErrorCode::TXN_OK);
+        // To test we can not read the storage vault anymore
+        std::string vault_key = storage_vault_key({instance.instance_id(), "3"});
+        std::string vault_value;
+        auto code = txn->get(vault_key, &vault_value);
+        ASSERT_TRUE(code != TxnErrorCode::TXN_OK);
+        ASSERT_EQ(2, instance.resource_ids().size());
+        ASSERT_EQ(2, instance.storage_vault_names().size());
+        ASSERT_EQ(instance.resource_ids().at(0), "2");
+        ASSERT_EQ(instance.storage_vault_names().at(0), "test_alter_add_hdfs_info");
+        ASSERT_EQ(instance.resource_ids().at(1), "4");
+        ASSERT_EQ(instance.storage_vault_names().at(1), "test_alter_add_hdfs_info_2");
+    }
+
+    {
+        // Try to add one new hdfs info and then check the vault id is expected
+        AlterObjStoreInfoRequest req;
+        req.set_cloud_unique_id("test_cloud_unique_id");
+        req.set_op(AlterObjStoreInfoRequest::ADD_HDFS_INFO);
+        StorageVaultPB hdfs;
+        hdfs.set_name("test_alter_add_hdfs_info_3");
+        HdfsVaultInfo params;
+
+        hdfs.mutable_hdfs_info()->CopyFrom(params);
+        req.mutable_hdfs()->CopyFrom(hdfs);
+
+        brpc::Controller cntl;
+        AlterObjStoreInfoResponse res;
+        meta_service->alter_obj_store_info(
+                reinterpret_cast<::google::protobuf::RpcController*>(&cntl), &req, &res, nullptr);
+        ASSERT_EQ(res.status().code(), MetaServiceCode::OK) << res.status().msg();
+        InstanceInfoPB instance;
+        get_test_instance(instance);
+        ASSERT_EQ(instance.resource_ids().at(2), "5");
+        ASSERT_EQ(instance.storage_vault_names().at(2), "test_alter_add_hdfs_info_3");
+    }
+
+    SyncPoint::get_instance()->disable_processing();
+    SyncPoint::get_instance()->clear_all_call_backs();
+}
+
 TEST(MetaServiceTest, UpdateAkSkTest) {
     auto meta_service = get_meta_service();
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
index b9167bec6bc..3b4a8472fc1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
@@ -1058,6 +1058,7 @@ public class PropertyAnalyzer {
         String storageVault = null;
         if (properties != null && properties.containsKey(PROPERTIES_STORAGE_VAULT)) {
             storageVault = properties.get(PROPERTIES_STORAGE_VAULT);
+            properties.remove(PROPERTIES_STORAGE_VAULT);
         }
 
         return storageVault;
diff --git a/fe/fe-core/src/main/jflex/sql_scanner.flex b/fe/fe-core/src/main/jflex/sql_scanner.flex
index 43dd60a4ce1..cd496a9fff3 100644
--- a/fe/fe-core/src/main/jflex/sql_scanner.flex
+++ b/fe/fe-core/src/main/jflex/sql_scanner.flex
@@ -454,6 +454,7 @@ import org.apache.doris.qe.SqlModeHelper;
         keywordMap.put("status", new Integer(SqlParserSymbols.KW_STATUS));
         keywordMap.put("stop", new Integer(SqlParserSymbols.KW_STOP));
         keywordMap.put("storage", new Integer(SqlParserSymbols.KW_STORAGE));
+        keywordMap.put("vault", new Integer(SqlParserSymbols.KW_VAULT));
         keywordMap.put("stream", new Integer(SqlParserSymbols.KW_STREAM));
         keywordMap.put("streaming", new 
Integer(SqlParserSymbols.KW_STREAMING));
         keywordMap.put("string", new Integer(SqlParserSymbols.KW_STRING));
diff --git a/gensrc/proto/cloud.proto b/gensrc/proto/cloud.proto
index 2342f135d45..e91bc5ec4a5 100644
--- a/gensrc/proto/cloud.proto
+++ b/gensrc/proto/cloud.proto
@@ -735,6 +735,7 @@ message AlterObjStoreInfoRequest {
         LEGACY_UPDATE_AK_SK = 3;
 
         ADD_HDFS_INFO = 100;
+        DROP_HDFS_INFO = 101;
     }
     optional string cloud_unique_id = 1; // For auth
     optional ObjectStoreInfoPB obj = 2;
@@ -1191,6 +1192,8 @@ enum MetaServiceCode {
     ALREADY_EXISTED = 3002;
     CLUSTER_ENDPOINT_MISSING = 3003;
 
+    STORAGE_VAULT_NOT_FOUND = 3004;
+
     // Stage
     STAGE_NOT_FOUND = 4001;
     STAGE_GET_ERR = 4002;
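On the generated C++ side (gen_cpp/cloud.pb.h), both proto additions become plain enum values, so callers can branch on them symbolically. A hedged example of how a client of the meta service might treat the new error code (the idempotent-drop policy shown is a suggestion, not something this commit enforces; res is an AlterObjStoreInfoResponse as in the test above):

    switch (res.status().code()) {
    case MetaServiceCode::OK:
        break;  // vault key removed and instance record updated
    case MetaServiceCode::STORAGE_VAULT_NOT_FOUND:
        // The named vault is already gone or never existed; a caller may
        // choose to treat DROP as idempotent or surface the message.
        LOG(WARNING) << "drop storage vault: " << res.status().msg();
        break;
    default:
        LOG(WARNING) << "drop storage vault failed: " << res.status().msg();
        break;
    }
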


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
