This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
     new c505e473585 [fix](kerberos) Back to use principal and keytab to login kerberos instead of using kerberos ticket cache. (#48655)
c505e473585 is described below

commit c505e4735855e68d789b2fc3fdadc62929ddcb52
Author: Mingyu Chen (Rayner) <morning...@163.com>
AuthorDate: Wed Mar 19 23:19:18 2025 +0800

    [fix](kerberos) Back to use principal and keytab to login kerberos instead of using kerberos ticket cache. (#48655)

    ### What problem does this PR solve?

    Related PR: #47299, #49181

    This PR mainly changes:

    1. Go back to using principal and keytab to log in to Kerberos instead of using the Kerberos
       ticket cache, discarding what was done in #47299. It looks like using a ticket cache in a
       multi-Kerberos environment causes a lot of issues, so that logic has been abandoned.
    2. Change the default values of the configs related to the HDFS file handle cache:
       1. `max_hdfs_file_handle_cache_num`: from 1000 to 20000
       2. `max_hdfs_file_handle_cache_time_sec`: from 3600 to 28800
    3. Fix a bug where the cleanup thread of `FileHandleCache` was not working.
---
 be/src/common/config.cpp                          |   7 +-
 be/src/common/kerberos/kerberos_ticket_cache.cpp  |   6 +-
 be/src/common/kerberos/kerberos_ticket_cache.h    |   6 +-
 be/src/io/fs/file_handle_cache.cpp                |   4 +
 be/src/io/fs/hdfs/hdfs_mgr.cpp                    |  17 +-
 be/src/io/fs/hdfs_file_reader.cpp                 |   2 +-
 be/src/io/hdfs_builder.cpp                        |  37 +--
 be/src/io/hdfs_builder.h                          |   9 -
 be/src/io/hdfs_util.h                             |  14 +-
 be/src/util/lru_multi_cache.inline.h              |   2 +
 be/test/io/fs/hdfs/hdfs_mgr_test.cpp              | 259 ++++++++++-----------
 .../kerberos/test_single_hive_kerberos.groovy     |  14 +-
 .../kerberos/test_two_hive_kerberos.groovy        |  26 +--
 13 files changed, 212 insertions(+), 191 deletions(-)

diff --git a/be/src/common/config.cpp b/be/src/common/config.cpp
index 7552086dcde..30f546f9562 100644
--- a/be/src/common/config.cpp
+++ b/be/src/common/config.cpp
@@ -1143,8 +1143,8 @@ DEFINE_Bool(enable_feature_binlog, "false");
 // enable set in BitmapValue
 DEFINE_Bool(enable_set_in_bitmap_value, "true");
 
-DEFINE_Int64(max_hdfs_file_handle_cache_num, "1000");
-DEFINE_Int32(max_hdfs_file_handle_cache_time_sec, "3600");
+DEFINE_Int64(max_hdfs_file_handle_cache_num, "20000");
+DEFINE_Int32(max_hdfs_file_handle_cache_time_sec, "28800");
 DEFINE_Int64(max_external_file_meta_cache_num, "1000");
 DEFINE_mInt32(common_obj_lru_cache_stale_sweep_time_sec, "900");
 // Apply delete pred in cumu compaction
@@ -1157,7 +1157,8 @@ DEFINE_mBool(allow_zero_date, "false");
 DEFINE_Bool(allow_invalid_decimalv2_literal, "false");
 DEFINE_mString(kerberos_ccache_path, "/tmp/");
 DEFINE_mString(kerberos_krb5_conf_path, "/etc/krb5.conf");
-DEFINE_mInt32(kerberos_refresh_interval_second, "3600");
+// Deprecated
+DEFINE_mInt32(kerberos_refresh_interval_second, "43200");
 
 DEFINE_mString(get_stack_trace_tool, "libunwind");
 DEFINE_mString(dwarf_location_info_mode, "FAST");
diff --git a/be/src/common/kerberos/kerberos_ticket_cache.cpp b/be/src/common/kerberos/kerberos_ticket_cache.cpp
index 982dee22a5b..eb87ff368ff 100644
--- a/be/src/common/kerberos/kerberos_ticket_cache.cpp
+++ b/be/src/common/kerberos/kerberos_ticket_cache.cpp
@@ -23,6 +23,7 @@
 #include <thread>
 
 #include "common/config.h"
+#include "util/time.h"
 
 namespace doris::kerberos {
 
@@ -119,6 +120,8 @@ Status KerberosTicketCache::login() {
         }
         return status;
     }
+    _ticket_lifetime_sec = static_cast<int64_t>(creds.times.endtime) -
+                           static_cast<int64_t>(creds.times.starttime);
 
     // init ccache file
    status = _krb5_interface->cc_initialize(_context, temp_ccache, _principal);
@@ -214,7
+217,8 @@ void KerberosTicketCache::start_periodic_refresh() { // ignore and continue LOG(WARNING) << st.to_string(); } else { - LOG(INFO) << "refresh kerberos ticket cache: " << _ticket_cache_path; + LOG(INFO) << "refresh kerberos ticket cache: " << _ticket_cache_path + << ", lifetime sec: " << _ticket_lifetime_sec; } } } diff --git a/be/src/common/kerberos/kerberos_ticket_cache.h b/be/src/common/kerberos/kerberos_ticket_cache.h index 2c2c4f94fe1..9ca595bd5fa 100644 --- a/be/src/common/kerberos/kerberos_ticket_cache.h +++ b/be/src/common/kerberos/kerberos_ticket_cache.h @@ -104,6 +104,8 @@ public: // Get detailed information about all credentials in the current ticket cache virtual std::vector<KerberosTicketInfo> get_ticket_info(); + int64_t get_ticket_lifetime_sec() const { return _ticket_lifetime_sec; } + private: // Initialize the ticket cache file path using principal and keytab information Status _init_ticket_cache_path(); @@ -125,6 +127,8 @@ private: krb5_ccache _ccache {nullptr}; // Principal handle krb5_principal _principal {nullptr}; + // Ticket lifetime in second + int64_t _ticket_lifetime_sec; // Thread for periodic ticket refresh std::unique_ptr<std::thread> _refresh_thread; @@ -133,7 +137,7 @@ private: // Flag to control refresh thread execution std::atomic<bool> _should_stop_refresh {false}; // Sleep time between refresh checks (in milliseconds) - std::chrono::milliseconds _refresh_thread_sleep_time {10}; + std::chrono::milliseconds _refresh_thread_sleep_time {5000}; // Interface for KRB5 operations std::unique_ptr<Krb5Interface> _krb5_interface; diff --git a/be/src/io/fs/file_handle_cache.cpp b/be/src/io/fs/file_handle_cache.cpp index 8104b9d572e..e2268982d2f 100644 --- a/be/src/io/fs/file_handle_cache.cpp +++ b/be/src/io/fs/file_handle_cache.cpp @@ -125,6 +125,10 @@ FileHandleCache::FileHandleCache(size_t capacity, size_t num_partitions, for (FileHandleCachePartition& p : _cache_partitions) { p.cache.set_capacity(partition_capacity); } + Status st = init(); + if (!st) { + LOG(FATAL) << "failed to start file handle cache thread: " << st.to_string(); + } } FileHandleCache::~FileHandleCache() { diff --git a/be/src/io/fs/hdfs/hdfs_mgr.cpp b/be/src/io/fs/hdfs/hdfs_mgr.cpp index d5cde499a5d..e9b4315507c 100644 --- a/be/src/io/fs/hdfs/hdfs_mgr.cpp +++ b/be/src/io/fs/hdfs/hdfs_mgr.cpp @@ -74,13 +74,20 @@ void HdfsMgr::_cleanup_loop() { // Find expired handlers for (const auto& entry : _fs_handlers) { - if (current_time - entry.second->last_access_time >= - _instance_timeout_seconds) { + bool is_expired = current_time - entry.second->last_access_time >= + _instance_timeout_seconds; + // bool is_krb_expired = + // entry.second->is_kerberos_auth && + // (current_time - entry.second->create_time >= + // entry.second->ticket_cache->get_ticket_lifetime_sec() / 2); + if (is_expired) { LOG(INFO) << "Found expired HDFS handler, hash_code=" << entry.first << ", last_access_time=" << entry.second->last_access_time << ", is_kerberos=" << entry.second->is_kerberos_auth << ", principal=" << entry.second->principal - << ", fs_name=" << entry.second->fs_name; + << ", fs_name=" << entry.second->fs_name + << ", is_expired=" << is_expired; + // << ", is_krb_expire=" << is_krb_expired; to_remove.push_back(entry.first); handlers_to_cleanup.push_back(entry.second); } @@ -180,8 +187,8 @@ Status HdfsMgr::_create_hdfs_fs_impl(const THdfsParams& hdfs_params, const std:: bool is_kerberos = builder.is_kerberos(); *fs_handler = std::make_shared<HdfsHandler>( hdfs_fs, is_kerberos, is_kerberos ? 
hdfs_params.hdfs_kerberos_principal : "", - is_kerberos ? hdfs_params.hdfs_kerberos_keytab : "", fs_name, - builder.get_ticket_cache()); + is_kerberos ? hdfs_params.hdfs_kerberos_keytab : "", fs_name); + // builder.get_ticket_cache()); return Status::OK(); } diff --git a/be/src/io/fs/hdfs_file_reader.cpp b/be/src/io/fs/hdfs_file_reader.cpp index d43cfae1c28..977ed3d51e3 100644 --- a/be/src/io/fs/hdfs_file_reader.cpp +++ b/be/src/io/fs/hdfs_file_reader.cpp @@ -49,7 +49,7 @@ namespace { Result<FileHandleCache::Accessor> get_file(const hdfsFS& fs, const Path& file, int64_t mtime, int64_t file_size) { static FileHandleCache cache(config::max_hdfs_file_handle_cache_num, 16, - config::max_hdfs_file_handle_cache_time_sec * 1000L); + config::max_hdfs_file_handle_cache_time_sec); bool cache_hit; FileHandleCache::Accessor accessor; RETURN_IF_ERROR_RESULT(cache.get_file_handle(fs, file.native(), mtime, file_size, false, diff --git a/be/src/io/hdfs_builder.cpp b/be/src/io/hdfs_builder.cpp index cced8627546..ed7fb0a34b4 100644 --- a/be/src/io/hdfs_builder.cpp +++ b/be/src/io/hdfs_builder.cpp @@ -145,21 +145,23 @@ void HDFSCommonBuilder::set_hdfs_conf_to_hdfs_builder() { } } +// This method is deprecated, will be removed later Status HDFSCommonBuilder::set_kerberos_ticket_cache() { - kerberos::KerberosConfig config; - config.set_principal_and_keytab(hdfs_kerberos_principal, hdfs_kerberos_keytab); - config.set_krb5_conf_path(config::kerberos_krb5_conf_path); - config.set_refresh_interval(config::kerberos_refresh_interval_second); - config.set_min_time_before_refresh(600); - kerberos::KerberosTicketMgr* ticket_mgr = ExecEnv::GetInstance()->kerberos_ticket_mgr(); - RETURN_IF_ERROR(ticket_mgr->get_or_set_ticket_cache(config, &ticket_cache)); - // ATTN, can't use ticket_cache->get_ticket_cache_path() directly, - // it may cause the kerberos ticket cache path in libhdfs is empty, - kerberos_ticket_path = ticket_cache->get_ticket_cache_path(); - hdfsBuilderSetKerbTicketCachePath(hdfs_builder, kerberos_ticket_path.c_str()); - hdfsBuilderSetForceNewInstance(hdfs_builder); - LOG(INFO) << "get kerberos ticket path: " << kerberos_ticket_path - << " with principal: " << hdfs_kerberos_principal; + // kerberos::KerberosConfig config; + // config.set_principal_and_keytab(hdfs_kerberos_principal, hdfs_kerberos_keytab); + // config.set_krb5_conf_path(config::kerberos_krb5_conf_path); + // config.set_refresh_interval(config::kerberos_refresh_interval_second); + // config.set_min_time_before_refresh(600); + // kerberos::KerberosTicketMgr* ticket_mgr = ExecEnv::GetInstance()->kerberos_ticket_mgr(); + // RETURN_IF_ERROR(ticket_mgr->get_or_set_ticket_cache(config, &ticket_cache)); + // // ATTN, can't use ticket_cache->get_ticket_cache_path() directly, + // // it may cause the kerberos ticket cache path in libhdfs is empty, + // kerberos_ticket_path = ticket_cache->get_ticket_cache_path(); + // hdfsBuilderSetUserName(hdfs_builder, hdfs_kerberos_principal.c_str()); + // hdfsBuilderSetKerbTicketCachePath(hdfs_builder, kerberos_ticket_path.c_str()); + // hdfsBuilderSetForceNewInstance(hdfs_builder); + // LOG(INFO) << "get kerberos ticket path: " << kerberos_ticket_path + // << " with principal: " << hdfs_kerberos_principal; return Status::OK(); } @@ -225,7 +227,12 @@ Status create_hdfs_builder(const THdfsParams& hdfsParams, const std::string& fs_ builder->kerberos_login = true; builder->hdfs_kerberos_principal = hdfsParams.hdfs_kerberos_principal; builder->hdfs_kerberos_keytab = hdfsParams.hdfs_kerberos_keytab; - 
RETURN_IF_ERROR(builder->set_kerberos_ticket_cache()); + hdfsBuilderSetKerb5Conf(builder->get(), doris::config::kerberos_krb5_conf_path.c_str()); + hdfsBuilderSetPrincipal(builder->get(), builder->hdfs_kerberos_principal.c_str()); + hdfsBuilderSetKeyTabFile(builder->get(), builder->hdfs_kerberos_keytab.c_str()); + hdfsBuilderConfSetStr(builder->get(), "hadoop.kerberos.keytab.login.autorenewal.enabled", + "true"); + // RETURN_IF_ERROR(builder->set_kerberos_ticket_cache()); } else { if (hdfsParams.__isset.user) { builder->hadoop_user = hdfsParams.user; diff --git a/be/src/io/hdfs_builder.h b/be/src/io/hdfs_builder.h index 320275981ad..37e2f99b8c4 100644 --- a/be/src/io/hdfs_builder.h +++ b/be/src/io/hdfs_builder.h @@ -37,11 +37,6 @@ const std::string HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authenticati const std::string FALLBACK_TO_SIMPLE_AUTH_ALLOWED = "ipc.client.fallback-to-simple-auth-allowed"; const std::string TRUE_VALUE = "true"; -namespace kerberos { -class KerberosTicketCache; -class KerberosConfig; -}; // namespace kerberos - class HDFSCommonBuilder { friend Status create_hdfs_builder(const THdfsParams& hdfsParams, const std::string& fs_name, HDFSCommonBuilder* builder); @@ -71,8 +66,6 @@ public: std::string get_hdfs_conf_value(const std::string& key, const std::string& default_val) const; void set_hdfs_conf_to_hdfs_builder(); - std::shared_ptr<kerberos::KerberosTicketCache> get_ticket_cache() { return ticket_cache; } - private: hdfsBuilder* hdfs_builder = nullptr; bool kerberos_login {false}; @@ -83,8 +76,6 @@ private: std::string hadoop_user; std::string hdfs_kerberos_keytab; std::string hdfs_kerberos_principal; - std::string kerberos_ticket_path; - std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache; std::unordered_map<std::string, std::string> hdfs_conf; }; diff --git a/be/src/io/hdfs_util.h b/be/src/io/hdfs_util.h index 2994a26bb49..8d63a19ec92 100644 --- a/be/src/io/hdfs_util.h +++ b/be/src/io/hdfs_util.h @@ -25,7 +25,7 @@ #include <string> #include <unordered_map> -#include "common/kerberos/kerberos_ticket_cache.h" +// #include "common/kerberos/kerberos_ticket_cache.h" #include "common/status.h" #include "io/fs/hdfs.h" #include "io/fs/path.h" @@ -47,19 +47,21 @@ public: std::string principal; std::string keytab_path; std::string fs_name; + uint64_t create_time; std::atomic<uint64_t> last_access_time; - std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache; + // std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache; HdfsHandler(hdfsFS fs, bool is_kerberos, const std::string& principal_, - const std::string& keytab_path_, const std::string& fs_name_, - std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache_) + const std::string& keytab_path_, const std::string& fs_name_) + // std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache_) : hdfs_fs(fs), is_kerberos_auth(is_kerberos), principal(principal_), keytab_path(keytab_path_), fs_name(fs_name_), - last_access_time(std::time(nullptr)), - ticket_cache(ticket_cache_) {} + create_time(std::time(nullptr)), + last_access_time(std::time(nullptr)) {} + // ticket_cache(ticket_cache_) {} ~HdfsHandler() { // The ticket_cache will be automatically released when the last reference is gone diff --git a/be/src/util/lru_multi_cache.inline.h b/be/src/util/lru_multi_cache.inline.h index 87d09891342..5fe85399aac 100644 --- a/be/src/util/lru_multi_cache.inline.h +++ b/be/src/util/lru_multi_cache.inline.h @@ -194,6 +194,8 @@ void LruMultiCache<KeyType, ValueType>::release(ValueType_internal* p_value_inte // 
Has to be currently not available DCHECK(!p_value_internal->is_available()); + // DO NOT update timestamp_seconds when release. + // Because we are about to evict cache value after a certain period. p_value_internal->timestamp_seconds = MonotonicSeconds(); Container& container = p_value_internal->container; diff --git a/be/test/io/fs/hdfs/hdfs_mgr_test.cpp b/be/test/io/fs/hdfs/hdfs_mgr_test.cpp index 7f05428acf0..0ecc9dd030f 100644 --- a/be/test/io/fs/hdfs/hdfs_mgr_test.cpp +++ b/be/test/io/fs/hdfs/hdfs_mgr_test.cpp @@ -99,8 +99,7 @@ protected: ON_CALL(*_hdfs_mgr, _create_hdfs_fs_impl(_, _, _)) .WillByDefault([](const THdfsParams& params, const std::string& fs_name, std::shared_ptr<HdfsHandler>* fs_handler) { - *fs_handler = - std::make_shared<HdfsHandler>(nullptr, false, "", "", fs_name, nullptr); + *fs_handler = std::make_shared<HdfsHandler>(nullptr, false, "", "", fs_name); return Status::OK(); }); } @@ -230,133 +229,133 @@ TEST_F(HdfsMgrTest, ConcurrentAccess) { } // Test sharing of KerberosTicketCache between handlers -TEST_F(HdfsMgrTest, SharedKerberosTicketCache) { - // Create handlers with same Kerberos credentials - THdfsParams params = create_test_params("user1", "principal1", "keytab1"); - std::shared_ptr<HdfsHandler> handler1; - std::shared_ptr<HdfsHandler> handler2; - - // Create a shared ticket cache that will be used by both handlers - auto shared_ticket_mgr = std::make_shared<NiceMock<MockKerberosTicketMgr>>("/tmp/kerberos"); - // Set cleanup interval to 1 second for testing - shared_ticket_mgr->set_cleanup_interval(std::chrono::seconds(1)); - - // Setup mock to create handlers with Kerberos - ON_CALL(*_hdfs_mgr, _create_hdfs_fs_impl(_, _, _)) - .WillByDefault([shared_ticket_mgr](const THdfsParams& params, - const std::string& fs_name, - std::shared_ptr<HdfsHandler>* fs_handler) { - kerberos::KerberosConfig config; - config.set_principal_and_keytab(params.hdfs_kerberos_principal, - params.hdfs_kerberos_keytab); - std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache; - RETURN_IF_ERROR(shared_ticket_mgr->get_or_set_ticket_cache(config, &ticket_cache)); - *fs_handler = std::make_shared<HdfsHandler>( - nullptr, true, params.hdfs_kerberos_principal, params.hdfs_kerberos_keytab, - fs_name, ticket_cache); - return Status::OK(); - }); - - // Create first handler - ASSERT_TRUE(_hdfs_mgr->get_or_create_fs(params, "test_fs1", &handler1).ok()); - ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 1); - - // Create second handler with same credentials - ASSERT_TRUE(_hdfs_mgr->get_or_create_fs(params, "test_fs2", &handler2).ok()); - ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 1); - - // Verify both handlers share the same ticket cache - ASSERT_EQ(handler1->ticket_cache, handler2->ticket_cache); - // ASSERT_EQ(handler1->ticket_cache, shared_ticket_cache); -} - -// Test cleanup of KerberosTicketCache when handlers are destroyed -TEST_F(HdfsMgrTest, KerberosTicketCacheCleanup) { - THdfsParams params = create_test_params("user1", "principal1", "keytab1"); - - // Create a ticket manager that will be used by the handler - auto ticket_mgr = std::make_shared<NiceMock<MockKerberosTicketMgr>>("/tmp/kerberos"); - // Set cleanup interval to 1 second for testing - ticket_mgr->set_cleanup_interval(std::chrono::seconds(1)); - - // Setup mock to create handler with Kerberos - ON_CALL(*_hdfs_mgr, _create_hdfs_fs_impl(_, _, _)) - .WillByDefault([ticket_mgr](const THdfsParams& params, const std::string& fs_name, - std::shared_ptr<HdfsHandler>* fs_handler) { - 
kerberos::KerberosConfig config; - config.set_principal_and_keytab(params.hdfs_kerberos_principal, - params.hdfs_kerberos_keytab); - std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache; - - RETURN_IF_ERROR(ticket_mgr->get_or_set_ticket_cache(config, &ticket_cache)); - *fs_handler = std::make_shared<HdfsHandler>( - nullptr, true, params.hdfs_kerberos_principal, params.hdfs_kerberos_keytab, - fs_name, ticket_cache); - return Status::OK(); - }); - - // Create handler - std::shared_ptr<HdfsHandler> handler; - ASSERT_TRUE(_hdfs_mgr->get_or_create_fs(params, "test_fs", &handler).ok()); - std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache_holder = handler->ticket_cache; - handler.reset(); - ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 1); - - // Wait for cleanup - ticket_cache_holder.reset(); - std::this_thread::sleep_for(std::chrono::seconds(6)); - - // Verify handler and ticket cache are cleaned up - ASSERT_EQ(_hdfs_mgr->get_fs_handlers_size(), 0); - ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 0); -} - -// Test multiple handlers with different Kerberos credentials -TEST_F(HdfsMgrTest, DifferentKerberosCredentials) { - // Create a ticket manager that will be used by both handlers - auto ticket_mgr = std::make_shared<NiceMock<MockKerberosTicketMgr>>("/tmp/kerberos"); - // Set cleanup interval to 1 second for testing - ticket_mgr->set_cleanup_interval(std::chrono::seconds(1)); - - // Setup mock to create handlers with Kerberos - ON_CALL(*_hdfs_mgr, _create_hdfs_fs_impl(_, _, _)) - .WillByDefault([ticket_mgr](const THdfsParams& params, const std::string& fs_name, - std::shared_ptr<HdfsHandler>* fs_handler) { - kerberos::KerberosConfig config; - config.set_principal_and_keytab(params.hdfs_kerberos_principal, - params.hdfs_kerberos_keytab); - std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache; - - RETURN_IF_ERROR(ticket_mgr->get_or_set_ticket_cache(config, &ticket_cache)); - *fs_handler = std::make_shared<HdfsHandler>( - nullptr, true, params.hdfs_kerberos_principal, params.hdfs_kerberos_keytab, - fs_name, ticket_cache); - return Status::OK(); - }); - - // Create handlers with different credentials - // std::cout << "xxx 6 MockKerberosTicketCache::get_instance_count(): " << MockKerberosTicketCache::get_instance_count() << std::endl; - THdfsParams params1 = create_test_params("user1", "principal1", "keytab1"); - THdfsParams params2 = create_test_params("user2", "principal2", "keytab2"); - std::shared_ptr<HdfsHandler> handler1; - std::shared_ptr<HdfsHandler> handler2; - ASSERT_TRUE(_hdfs_mgr->get_or_create_fs(params1, "test_fs1", &handler1).ok()); - ASSERT_TRUE(_hdfs_mgr->get_or_create_fs(params2, "test_fs2", &handler2).ok()); - - // Verify each handler has its own ticket cache - ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 2); - ASSERT_NE(handler1->ticket_cache, handler2->ticket_cache); - - // Wait for cleanup - // Also need to reset this 2 temp references - handler1.reset(); - handler2.reset(); - std::this_thread::sleep_for(std::chrono::seconds(6)); - - // Verify all handlers and ticket caches are cleaned up - ASSERT_EQ(_hdfs_mgr->get_fs_handlers_size(), 0); - - ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 0); -} +// TEST_F(HdfsMgrTest, SharedKerberosTicketCache) { +// // Create handlers with same Kerberos credentials +// THdfsParams params = create_test_params("user1", "principal1", "keytab1"); +// std::shared_ptr<HdfsHandler> handler1; +// std::shared_ptr<HdfsHandler> handler2; +// +// // Create a shared ticket cache that 
will be used by both handlers +// auto shared_ticket_mgr = std::make_shared<NiceMock<MockKerberosTicketMgr>>("/tmp/kerberos"); +// // Set cleanup interval to 1 second for testing +// shared_ticket_mgr->set_cleanup_interval(std::chrono::seconds(1)); +// +// // Setup mock to create handlers with Kerberos +// ON_CALL(*_hdfs_mgr, _create_hdfs_fs_impl(_, _, _)) +// .WillByDefault([shared_ticket_mgr](const THdfsParams& params, +// const std::string& fs_name, +// std::shared_ptr<HdfsHandler>* fs_handler) { +// kerberos::KerberosConfig config; +// config.set_principal_and_keytab(params.hdfs_kerberos_principal, +// params.hdfs_kerberos_keytab); +// std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache; +// RETURN_IF_ERROR(shared_ticket_mgr->get_or_set_ticket_cache(config, &ticket_cache)); +// *fs_handler = std::make_shared<HdfsHandler>( +// nullptr, true, params.hdfs_kerberos_principal, params.hdfs_kerberos_keytab, +// fs_name, ticket_cache); +// return Status::OK(); +// }); +// +// // Create first handler +// ASSERT_TRUE(_hdfs_mgr->get_or_create_fs(params, "test_fs1", &handler1).ok()); +// ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 1); +// +// // Create second handler with same credentials +// ASSERT_TRUE(_hdfs_mgr->get_or_create_fs(params, "test_fs2", &handler2).ok()); +// ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 1); +// +// // Verify both handlers share the same ticket cache +// ASSERT_EQ(handler1->ticket_cache, handler2->ticket_cache); +// // ASSERT_EQ(handler1->ticket_cache, shared_ticket_cache); +// } +// +// // Test cleanup of KerberosTicketCache when handlers are destroyed +// TEST_F(HdfsMgrTest, KerberosTicketCacheCleanup) { +// THdfsParams params = create_test_params("user1", "principal1", "keytab1"); +// +// // Create a ticket manager that will be used by the handler +// auto ticket_mgr = std::make_shared<NiceMock<MockKerberosTicketMgr>>("/tmp/kerberos"); +// // Set cleanup interval to 1 second for testing +// ticket_mgr->set_cleanup_interval(std::chrono::seconds(1)); +// +// // Setup mock to create handler with Kerberos +// ON_CALL(*_hdfs_mgr, _create_hdfs_fs_impl(_, _, _)) +// .WillByDefault([ticket_mgr](const THdfsParams& params, const std::string& fs_name, +// std::shared_ptr<HdfsHandler>* fs_handler) { +// kerberos::KerberosConfig config; +// config.set_principal_and_keytab(params.hdfs_kerberos_principal, +// params.hdfs_kerberos_keytab); +// std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache; +// +// RETURN_IF_ERROR(ticket_mgr->get_or_set_ticket_cache(config, &ticket_cache)); +// *fs_handler = std::make_shared<HdfsHandler>( +// nullptr, true, params.hdfs_kerberos_principal, params.hdfs_kerberos_keytab, +// fs_name, ticket_cache); +// return Status::OK(); +// }); +// +// // Create handler +// std::shared_ptr<HdfsHandler> handler; +// ASSERT_TRUE(_hdfs_mgr->get_or_create_fs(params, "test_fs", &handler).ok()); +// std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache_holder = handler->ticket_cache; +// handler.reset(); +// ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 1); +// +// // Wait for cleanup +// ticket_cache_holder.reset(); +// std::this_thread::sleep_for(std::chrono::seconds(6)); +// +// // Verify handler and ticket cache are cleaned up +// ASSERT_EQ(_hdfs_mgr->get_fs_handlers_size(), 0); +// ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 0); +// } +// +// // Test multiple handlers with different Kerberos credentials +// TEST_F(HdfsMgrTest, DifferentKerberosCredentials) { +// // Create a ticket manager 
that will be used by both handlers +// auto ticket_mgr = std::make_shared<NiceMock<MockKerberosTicketMgr>>("/tmp/kerberos"); +// // Set cleanup interval to 1 second for testing +// ticket_mgr->set_cleanup_interval(std::chrono::seconds(1)); +// +// // Setup mock to create handlers with Kerberos +// ON_CALL(*_hdfs_mgr, _create_hdfs_fs_impl(_, _, _)) +// .WillByDefault([ticket_mgr](const THdfsParams& params, const std::string& fs_name, +// std::shared_ptr<HdfsHandler>* fs_handler) { +// kerberos::KerberosConfig config; +// config.set_principal_and_keytab(params.hdfs_kerberos_principal, +// params.hdfs_kerberos_keytab); +// std::shared_ptr<kerberos::KerberosTicketCache> ticket_cache; +// +// RETURN_IF_ERROR(ticket_mgr->get_or_set_ticket_cache(config, &ticket_cache)); +// *fs_handler = std::make_shared<HdfsHandler>( +// nullptr, true, params.hdfs_kerberos_principal, params.hdfs_kerberos_keytab, +// fs_name, ticket_cache); +// return Status::OK(); +// }); +// +// // Create handlers with different credentials +// // std::cout << "xxx 6 MockKerberosTicketCache::get_instance_count(): " << MockKerberosTicketCache::get_instance_count() << std::endl; +// THdfsParams params1 = create_test_params("user1", "principal1", "keytab1"); +// THdfsParams params2 = create_test_params("user2", "principal2", "keytab2"); +// std::shared_ptr<HdfsHandler> handler1; +// std::shared_ptr<HdfsHandler> handler2; +// ASSERT_TRUE(_hdfs_mgr->get_or_create_fs(params1, "test_fs1", &handler1).ok()); +// ASSERT_TRUE(_hdfs_mgr->get_or_create_fs(params2, "test_fs2", &handler2).ok()); +// +// // Verify each handler has its own ticket cache +// ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 2); +// ASSERT_NE(handler1->ticket_cache, handler2->ticket_cache); +// +// // Wait for cleanup +// // Also need to reset this 2 temp references +// handler1.reset(); +// handler2.reset(); +// std::this_thread::sleep_for(std::chrono::seconds(6)); +// +// // Verify all handlers and ticket caches are cleaned up +// ASSERT_EQ(_hdfs_mgr->get_fs_handlers_size(), 0); +// +// ASSERT_EQ(MockKerberosTicketCache::get_instance_count(), 0); +// } } // namespace doris::io diff --git a/regression-test/suites/external_table_p0/kerberos/test_single_hive_kerberos.groovy b/regression-test/suites/external_table_p0/kerberos/test_single_hive_kerberos.groovy index 382d0a7b337..c3c97f0ca0c 100644 --- a/regression-test/suites/external_table_p0/kerberos/test_single_hive_kerberos.groovy +++ b/regression-test/suites/external_table_p0/kerberos/test_single_hive_kerberos.groovy @@ -115,12 +115,12 @@ suite("test_single_hive_kerberos", "p0,external,kerberos,external_docker,externa // test information_schema.backend_kerberos_ticket_cache // switch to a normal catalog - sql "switch internal"; - List<List<Object>> backends = sql "show backends" - int beNum = backends.size(); - test { - sql """select * from information_schema.backend_kerberos_ticket_cache where PRINCIPAL="presto-server/presto-master.docker.clus...@labs.teradata.com" and KEYTAB = "${keytab_root_dir}/presto-server.keytab";""" - rowNum beNum - } + // sql "switch internal"; + // List<List<Object>> backends = sql "show backends" + // int beNum = backends.size(); + // test { + // sql """select * from information_schema.backend_kerberos_ticket_cache where PRINCIPAL="presto-server/presto-master.docker.clus...@labs.teradata.com" and KEYTAB = "${keytab_root_dir}/presto-server.keytab";""" + // rowNum beNum + // } } } diff --git a/regression-test/suites/external_table_p0/kerberos/test_two_hive_kerberos.groovy 
b/regression-test/suites/external_table_p0/kerberos/test_two_hive_kerberos.groovy index 1d0fa2478ce..4053102b145 100644 --- a/regression-test/suites/external_table_p0/kerberos/test_two_hive_kerberos.groovy +++ b/regression-test/suites/external_table_p0/kerberos/test_two_hive_kerberos.groovy @@ -136,19 +136,19 @@ suite("test_two_hive_kerberos", "p0,external,kerberos,external_docker,external_d thread1.join() thread2.join() - // test information_schema.backend_kerberos_ticket_cache - sql """switch internal""" - List<List<Object>> backends = sql "show backends" - int beNum = backends.size(); - test { - sql """select * from information_schema.backend_kerberos_ticket_cache where PRINCIPAL="hive/presto-master.docker.clus...@labs.teradata.com" and KEYTAB = "${keytab_root_dir}/hive-presto-master.keytab";""" - rowNum beNum - } - - test { - sql """select * from information_schema.backend_kerberos_ticket_cache where PRINCIPAL="hive/presto-master.docker.clus...@otherrealm.com" and KEYTAB = "${keytab_root_dir}/other-hive-presto-master.keytab";""" - rowNum beNum - } + // // test information_schema.backend_kerberos_ticket_cache + // sql """switch internal""" + // List<List<Object>> backends = sql "show backends" + // int beNum = backends.size(); + // test { + // sql """select * from information_schema.backend_kerberos_ticket_cache where PRINCIPAL="hive/presto-master.docker.clus...@labs.teradata.com" and KEYTAB = "${keytab_root_dir}/hive-presto-master.keytab";""" + // rowNum beNum + // } + + // test { + // sql """select * from information_schema.backend_kerberos_ticket_cache where PRINCIPAL="hive/presto-master.docker.clus...@otherrealm.com" and KEYTAB = "${keytab_root_dir}/other-hive-presto-master.keytab";""" + // rowNum beNum + // } // sql """drop catalog ${hms_catalog_name};""" // sql """drop catalog other_${hms_catalog_name};""" --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org For additional commands, e-mail: commits-h...@doris.apache.org
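For reference, below is a minimal sketch (not Doris code) of the keytab-based login flow that create_hdfs_builder() switches to in this patch. It assumes the libhdfs builder API bundled with Doris: hdfsBuilderSetKerb5Conf, hdfsBuilderSetPrincipal and hdfsBuilderSetKeyTabFile are setters used in the diff above and provided by that bundled library, not by stock Apache libhdfs; the fs name, principal and keytab path are placeholders.

```cpp
// Sketch: connect to a Kerberos-secured HDFS with a principal + keytab,
// mirroring the calls added to be/src/io/hdfs_builder.cpp in this patch.
#include <hadoop_hdfs/hdfs.h> // header path depends on the libhdfs distribution

#include <cstdio>

int main() {
    hdfsBuilder* builder = hdfsNewBuilder();
    hdfsBuilderSetNameNode(builder, "hdfs://my-nameservice");        // placeholder fs name
    hdfsBuilderSetKerb5Conf(builder, "/etc/krb5.conf");              // krb5 config used by the BE
    hdfsBuilderSetPrincipal(builder, "doris/host@EXAMPLE.COM");      // placeholder principal
    hdfsBuilderSetKeyTabFile(builder, "/opt/keytabs/doris.keytab");  // placeholder keytab path
    // Let the Hadoop client renew the keytab-based login by itself, as the patch does.
    hdfsBuilderConfSetStr(builder, "hadoop.kerberos.keytab.login.autorenewal.enabled", "true");

    hdfsFS fs = hdfsBuilderConnect(builder); // the builder is consumed by this call
    if (fs == nullptr) {
        std::fprintf(stderr, "kerberos login / connect failed\n");
        return 1;
    }
    hdfsDisconnect(fs);
    return 0;
}
```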
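The new `_ticket_lifetime_sec` field is derived from the credential times returned by the KDC. A self-contained sketch with the plain MIT Kerberos API shows where those values come from when logging in with a principal and keytab; the principal and keytab path are placeholders, and error handling is trimmed down.

```cpp
// Sketch: obtain initial credentials from a keytab and compute the ticket
// lifetime the same way KerberosTicketCache::login() does in this patch
// (creds.times.endtime - creds.times.starttime).
#include <krb5.h>

#include <cstdint>
#include <cstdio>

int main() {
    krb5_context ctx;
    if (krb5_init_context(&ctx) != 0) return 1;

    krb5_principal principal;
    krb5_parse_name(ctx, "doris/host@EXAMPLE.COM", &principal);    // placeholder principal

    krb5_keytab keytab;
    krb5_kt_resolve(ctx, "/opt/keytabs/doris.keytab", &keytab);    // placeholder keytab path

    krb5_creds creds;
    krb5_error_code err = krb5_get_init_creds_keytab(
            ctx, &creds, principal, keytab, 0 /* start now */, nullptr /* TGT */, nullptr);
    if (err == 0) {
        int64_t lifetime_sec = static_cast<int64_t>(creds.times.endtime) -
                               static_cast<int64_t>(creds.times.starttime);
        std::printf("ticket lifetime: %lld sec\n", static_cast<long long>(lifetime_sec));
        krb5_free_cred_contents(ctx, &creds);
    }

    krb5_kt_close(ctx, keytab);
    krb5_free_principal(ctx, principal);
    krb5_free_context(ctx);
    return err == 0 ? 0 : 1;
}
```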
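The new file handle cache defaults (20000 handles, 28800 seconds) are only defaults; a deployment that needs the previous behaviour can override them. A sketch, assuming the usual key = value format of be.conf:

```
# be.conf: restore the pre-#48655 HDFS file handle cache defaults (optional)
max_hdfs_file_handle_cache_num = 1000
max_hdfs_file_handle_cache_time_sec = 3600
```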