This is an automated email from the ASF dual-hosted git repository.

gavinchou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new ab7d7a841d7 (cloud-merge) Supports online capacity expansion and contraction (#37484)
ab7d7a841d7 is described below

commit ab7d7a841d79adc65f03262a6096111f8450574c
Author: Lightman <31928846+lchangli...@users.noreply.github.com>
AuthorDate: Mon Jul 29 12:03:34 2024 +0800

    (cloud-merge) Supports online capacity expansion and contraction (#37484)
    
    1. Support resetting the cache capacity online via a BE HTTP request
    2. Support parsing the cache conf when only the path name is given:
    with `file_cache_path=[{"path":"xxxx"}]`, we can now specify just the
    path for a file cache instance if we don't care about the detailed
    capacity limit
    
    ```
    # clear file cache
    curl "ip:http_port/api/file_cache?op=clear"
    # reset capacity of a certain instance of file cache
    curl "ip:http_port/api/file_cache?op=reset&capacity=${capacity}&path=${cache_path}"

    curl "127.0.0.1:8060/api/file_cache?op=clear"
    curl "127.0.0.1:8060/api/file_cache?op=reset&capacity=102400000&path=/user/file_cache"
    ```
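
    With change 2, a path-only entry in be.conf is accepted and the
    capacity is derived from the disk size. A minimal sketch of such an
    entry (the path value here is a made-up example):

    ```
    file_cache_path=[{"path":"/mnt/disk1/doris_cloud/file_cache"}]
    ```

    If "total_size" is missing or not an int64, it is treated as 0, and
    create_file_cache() then recalculates the capacity from the disk size.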
---
 be/src/http/action/clear_file_cache_action.cpp     |  40 -----
 be/src/http/action/clear_file_cache_action.h       |  32 ----
 be/src/http/action/file_cache_action.cpp           |  54 ++++--
 be/src/io/cache/block_file_cache.cpp               |  69 +++++++-
 be/src/io/cache/block_file_cache.h                 |  10 +-
 be/src/io/cache/block_file_cache_factory.cpp       |  19 ++-
 be/src/io/cache/block_file_cache_factory.h         |   9 +
 be/src/io/cache/file_cache_common.cpp              |   1 +
 be/src/olap/options.cpp                            |   9 +-
 be/src/service/http_service.cpp                    |  14 +-
 be/test/io/cache/block_file_cache_test.cpp         |  88 +++++++++-
 .../cache/compaction/test_stale_rowset.groovy      |   6 +-
 .../cloud_p0/cache/http/test_clear_cache.groovy    |   5 +-
 .../cache/http/test_clear_cache_async.groovy       |   5 +-
 .../test_reset_capacity.groovy}                    | 183 +++++++++++++++++----
 .../multi_cluster/read_write/sync_insert.groovy    |   6 +-
 .../read_write/test_multi_stale_rowset.groovy      |   4 +-
 .../warm_up/cluster/test_warm_up_cluster.groovy    |   4 +-
 .../cluster/test_warm_up_cluster_batch.groovy      |   6 +-
 .../cluster/test_warm_up_cluster_bigsize.groovy    |   6 +-
 .../cluster/test_warm_up_cluster_empty.groovy      |   6 +-
 .../warm_up/table/test_warm_up_partition.groovy    |   6 +-
 .../warm_up/table/test_warm_up_table.groovy        |   4 +-
 .../warm_up/table/test_warm_up_tables.groovy       |   6 +-
 .../suites/cloud_p0/cache/ttl/alter_ttl_1.groovy   |   6 +-
 .../suites/cloud_p0/cache/ttl/alter_ttl_2.groovy   |   6 +-
 .../suites/cloud_p0/cache/ttl/alter_ttl_3.groovy   |   6 +-
 .../suites/cloud_p0/cache/ttl/alter_ttl_4.groovy   |   6 +-
 .../cloud_p0/cache/ttl/alter_ttl_max_int64.groovy  |   6 +-
 .../cloud_p0/cache/ttl/alter_ttl_random.groovy     |   6 +-
 .../cloud_p0/cache/ttl/alter_ttl_seconds.groovy    |   6 +-
 .../cache/ttl/create_table_as_select.groovy        |   6 +-
 .../cloud_p0/cache/ttl/create_table_like.groovy    |   6 +-
 .../suites/cloud_p0/cache/ttl/test_ttl.groovy      |   6 +-
 .../cloud_p0/cache/ttl/test_ttl_preempt.groovy     |   6 +-
 35 files changed, 449 insertions(+), 209 deletions(-)

diff --git a/be/src/http/action/clear_file_cache_action.cpp b/be/src/http/action/clear_file_cache_action.cpp
deleted file mode 100644
index 6a4a2517508..00000000000
--- a/be/src/http/action/clear_file_cache_action.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-#include "http/action/clear_file_cache_action.h"
-
-#include <fmt/core.h>
-
-#include "common/logging.h"
-#include "http/http_channel.h"
-#include "http/http_headers.h"
-#include "http/http_request.h"
-#include "io/cache/block_file_cache_factory.h"
-
-namespace doris {
-
-const std::string SYNC = "sync";
-
-void ClearFileCacheAction::handle(HttpRequest* req) {
-    req->add_output_header(HttpHeaders::CONTENT_TYPE, "application/json");
-    std::string sync = req->param(SYNC);
-    auto ret =
-            io::FileCacheFactory::instance()->clear_file_caches(sync == "TRUE" || sync == "true");
-    HttpChannel::send_reply(req, HttpStatus::OK, ret);
-}
-
-} // namespace doris
diff --git a/be/src/http/action/clear_file_cache_action.h b/be/src/http/action/clear_file_cache_action.h
deleted file mode 100644
index 25ebdd7cb5e..00000000000
--- a/be/src/http/action/clear_file_cache_action.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-#pragma once
-
-#include "http/http_handler.h"
-
-namespace doris {
-class ExecEnv;
-class ClearFileCacheAction : public HttpHandler {
-public:
-    ClearFileCacheAction() = default;
-
-    ~ClearFileCacheAction() override = default;
-
-    void handle(HttpRequest* req) override;
-};
-} // namespace doris
diff --git a/be/src/http/action/file_cache_action.cpp b/be/src/http/action/file_cache_action.cpp
index cee37f2115d..acad2b3b7bf 100644
--- a/be/src/http/action/file_cache_action.cpp
+++ b/be/src/http/action/file_cache_action.cpp
@@ -33,25 +33,59 @@
 
 namespace doris {
 
-const static std::string HEADER_JSON = "application/json";
-const static std::string OP = "op";
+constexpr static std::string_view HEADER_JSON = "application/json";
+constexpr static std::string_view OP = "op";
+constexpr static std::string_view SYNC = "sync";
+constexpr static std::string_view PATH = "path";
+constexpr static std::string_view CLEAR = "clear";
+constexpr static std::string_view RESET = "reset";
+constexpr static std::string_view CAPACITY = "capacity";
+constexpr static std::string_view RELEASE = "release";
+constexpr static std::string_view BASE_PATH = "base_path";
+constexpr static std::string_view RELEASED_ELEMENTS = "released_elements";
 
 Status FileCacheAction::_handle_header(HttpRequest* req, std::string* json_metrics) {
-    req->add_output_header(HttpHeaders::CONTENT_TYPE, HEADER_JSON.c_str());
-    std::string operation = req->param(OP);
-    if (operation == "release") {
+    req->add_output_header(HttpHeaders::CONTENT_TYPE, HEADER_JSON.data());
+    std::string operation = req->param(OP.data());
+    Status st = Status::OK();
+    if (operation == RELEASE) {
         size_t released = 0;
-        if (req->param("base_path") != "") {
-            released = io::FileCacheFactory::instance()->try_release(req->param("base_path"));
+        const std::string& base_path = req->param(BASE_PATH.data());
+        if (!base_path.empty()) {
+            released = io::FileCacheFactory::instance()->try_release(base_path);
         } else {
             released = io::FileCacheFactory::instance()->try_release();
         }
         EasyJson json;
-        json["released_elements"] = released;
+        json[RELEASED_ELEMENTS.data()] = released;
         *json_metrics = json.ToString();
-        return Status::OK();
+    } else if (operation == CLEAR) {
+        const std::string& sync = req->param(SYNC.data());
+        auto ret = io::FileCacheFactory::instance()->clear_file_caches(to_lower(sync) == "true");
+    } else if (operation == RESET) {
+        Status st;
+        std::string capacity = req->param(CAPACITY.data());
+        int64_t new_capacity = 0;
+        bool parse = true;
+        try {
+            new_capacity = std::stoll(capacity);
+        } catch (...) {
+            parse = false;
+        }
+        if (!parse || new_capacity <= 0) {
+            st = Status::InvalidArgument(
+                    "The capacity {} failed to be parsed, the capacity needs 
to be in "
+                    "the interval (0, INT64_MAX]",
+                    capacity);
+        } else {
+            const std::string& path = req->param(PATH.data());
+            auto ret = io::FileCacheFactory::instance()->reset_capacity(path, new_capacity);
+            LOG(INFO) << ret;
+        }
+    } else {
+        st = Status::InternalError("invalid operation: {}", operation);
     }
-    return Status::InternalError("invalid operation: {}", operation);
+    return st;
 }
 
 void FileCacheAction::handle(HttpRequest* req) {
diff --git a/be/src/io/cache/block_file_cache.cpp b/be/src/io/cache/block_file_cache.cpp
index a2ec9f82c86..33858e9ac53 100644
--- a/be/src/io/cache/block_file_cache.cpp
+++ b/be/src/io/cache/block_file_cache.cpp
@@ -1408,11 +1408,72 @@ int disk_used_percentage(const std::string& path, std::pair<int, int>* percent)
     return 0;
 }
 
-void BlockFileCache::check_disk_resource_limit(const std::string& path) {
+std::string BlockFileCache::reset_capacity(size_t new_capacity) {
+    using namespace std::chrono;
+    int64_t space_released = 0;
+    size_t old_capacity = 0;
+    std::stringstream ss;
+    ss << "finish reset_capacity, path=" << _cache_base_path;
+    auto start_time = steady_clock::time_point();
+    {
+        std::lock_guard cache_lock(_mutex);
+        if (new_capacity < _capacity && new_capacity < _cur_cache_size) {
+            int64_t need_remove_size = _cur_cache_size - new_capacity;
+            auto remove_blocks = [&](LRUQueue& queue) -> int64_t {
+                int64_t queue_released = 0;
+                for (const auto& [entry_key, entry_offset, entry_size] : queue) {
+                    if (need_remove_size <= 0) return queue_released;
+                    auto* cell = get_cell(entry_key, entry_offset, cache_lock);
+                    if (!cell->releasable()) continue;
+                    cell->is_deleted = true;
+                    need_remove_size -= entry_size;
+                    space_released += entry_size;
+                    queue_released += entry_size;
+                }
+                return queue_released;
+            };
+            int64_t queue_released = remove_blocks(_disposable_queue);
+            ss << " disposable_queue released " << queue_released;
+            queue_released = remove_blocks(_normal_queue);
+            ss << " normal_queue released " << queue_released;
+            queue_released = remove_blocks(_index_queue);
+            ss << " index_queue released " << queue_released;
+            if (need_remove_size >= 0) {
+                queue_released = 0;
+                for (auto& [_, key] : _time_to_key) {
+                    for (auto& [_, cell] : _files[key]) {
+                        if (need_remove_size <= 0) break;
+                        cell.is_deleted = true;
+                        need_remove_size -= cell.file_block->range().size();
+                        space_released += cell.file_block->range().size();
+                        queue_released += cell.file_block->range().size();
+                    }
+                }
+                ss << " ttl_queue released " << queue_released;
+            }
+            _disk_resource_limit_mode = true;
+            _async_clear_file_cache = true;
+            ss << " total_space_released=" << space_released;
+        }
+        old_capacity = _capacity;
+        _capacity = new_capacity;
+    }
+    auto use_time = duration_cast<milliseconds>(steady_clock::time_point() - start_time);
+    LOG(INFO) << "Finish tag deleted block. path=" << _cache_base_path
+              << " use_time=" << static_cast<int64_t>(use_time.count());
+    ss << " old_capacity=" << old_capacity << " new_capacity=" << new_capacity;
+    LOG(INFO) << ss.str();
+    return ss.str();
+}
+
+void BlockFileCache::check_disk_resource_limit() {
+    if (_capacity > _cur_cache_size) {
+        _disk_resource_limit_mode = false;
+    }
     std::pair<int, int> percent;
-    int ret = disk_used_percentage(path, &percent);
+    int ret = disk_used_percentage(_cache_base_path, &percent);
     if (ret != 0) {
-        LOG_ERROR("").tag("file cache path", path).tag("error", strerror(errno));
+        LOG_ERROR("").tag("file cache path", _cache_base_path).tag("error", strerror(errno));
         return;
     }
     auto [capacity_percentage, inode_percentage] = percent;
@@ -1452,7 +1513,7 @@ void BlockFileCache::run_background_operation() {
     int64_t interval_time_seconds = 20;
     while (!_close) {
         TEST_SYNC_POINT_CALLBACK("BlockFileCache::set_sleep_time", &interval_time_seconds);
-        check_disk_resource_limit(_cache_base_path);
+        check_disk_resource_limit();
         {
             std::unique_lock close_lock(_close_mtx);
             _close_cv.wait_for(close_lock, std::chrono::seconds(interval_time_seconds));
diff --git a/be/src/io/cache/block_file_cache.h b/be/src/io/cache/block_file_cache.h
index c9668b236c8..cafb57f9a1e 100644
--- a/be/src/io/cache/block_file_cache.h
+++ b/be/src/io/cache/block_file_cache.h
@@ -95,6 +95,14 @@ public:
      */
     std::string clear_file_cache_async();
     std::string clear_file_cache_directly();
+
+    /**
+     * Reset the cache capacity. If new_capacity is smaller than _capacity, the redundant data will be removed asynchronously.
+     *
+     * @returns summary message
+     */
+    std::string reset_capacity(size_t new_capacity);
+
     std::map<size_t, FileBlockSPtr> get_blocks_by_key(const UInt128Wrapper& hash);
     /// For debug.
     std::string dump_structure(const UInt128Wrapper& hash);
@@ -358,7 +366,7 @@ private:
     size_t get_used_cache_size_unlocked(FileCacheType type,
                                        std::lock_guard<std::mutex>& cache_lock) const;
 
-    void check_disk_resource_limit(const std::string& path);
+    void check_disk_resource_limit();
 
     size_t get_available_cache_size_unlocked(FileCacheType type,
                                             std::lock_guard<std::mutex>& cache_lock) const;
diff --git a/be/src/io/cache/block_file_cache_factory.cpp b/be/src/io/cache/block_file_cache_factory.cpp
index a6df98c686d..2c15d440be1 100644
--- a/be/src/io/cache/block_file_cache_factory.cpp
+++ b/be/src/io/cache/block_file_cache_factory.cpp
@@ -83,8 +83,8 @@ Status FileCacheFactory::create_file_cache(const std::string& cache_base_path,
     size_t disk_capacity = static_cast<size_t>(
             static_cast<size_t>(stat.f_blocks) * static_cast<size_t>(stat.f_bsize) *
             (static_cast<double>(config::file_cache_enter_disk_resource_limit_mode_percent) / 100));
-    if (disk_capacity < file_cache_settings.capacity) {
-        LOG_INFO("The cache {} config size {} is larger than {}% disk size {}, 
recalc it.",
+    if (file_cache_settings.capacity == 0 || disk_capacity < file_cache_settings.capacity) {
+        LOG_INFO("The cache {} config size {} is larger than {}% disk size {} or zero, recalc it.",
                  cache_base_path, file_cache_settings.capacity,
                  config::file_cache_enter_disk_resource_limit_mode_percent, disk_capacity);
         file_cache_settings =
@@ -143,5 +143,20 @@ std::vector<std::string> FileCacheFactory::get_base_paths() {
     return paths;
 }
 
+std::string FileCacheFactory::reset_capacity(const std::string& path, int64_t new_capacity) {
+    if (path.empty()) {
+        std::stringstream ss;
+        for (auto& [_, cache] : _path_to_cache) {
+            ss << cache->reset_capacity(new_capacity);
+        }
+        return ss.str();
+    } else {
+        if (auto iter = _path_to_cache.find(path); iter != _path_to_cache.end()) {
+            return iter->second->reset_capacity(new_capacity);
+        }
+    }
+    return "Unknown the cache path " + path;
+}
+
 } // namespace io
 } // namespace doris
diff --git a/be/src/io/cache/block_file_cache_factory.h b/be/src/io/cache/block_file_cache_factory.h
index 696dae6fdc5..6365fab3105 100644
--- a/be/src/io/cache/block_file_cache_factory.h
+++ b/be/src/io/cache/block_file_cache_factory.h
@@ -70,6 +70,15 @@ public:
 
     std::vector<std::string> get_base_paths();
 
+    /**
+     * Resets the capacity of one file cache instance, or of all instances if path is empty
+     *
+     * @param path file cache absolute path; empty means all instances
+     * @param new_capacity new capacity in bytes
+     * @return summary message
+     */
+    std::string reset_capacity(const std::string& path, int64_t new_capacity);
+
     FileCacheFactory() = default;
     FileCacheFactory& operator=(const FileCacheFactory&) = delete;
     FileCacheFactory(const FileCacheFactory&) = delete;
diff --git a/be/src/io/cache/file_cache_common.cpp b/be/src/io/cache/file_cache_common.cpp
index 3ce647b4a0d..61e873e04c6 100644
--- a/be/src/io/cache/file_cache_common.cpp
+++ b/be/src/io/cache/file_cache_common.cpp
@@ -30,6 +30,7 @@ FileCacheSettings get_file_cache_settings(size_t capacity, size_t max_query_cach
                                           size_t normal_percent, size_t disposable_percent,
                                           size_t index_percent) {
     io::FileCacheSettings settings;
+    if (capacity == 0) return settings;
     settings.capacity = capacity;
     settings.max_file_block_size = config::file_cache_each_block_size;
     settings.max_query_cache_size = max_query_cache_size;
diff --git a/be/src/olap/options.cpp b/be/src/olap/options.cpp
index bf472b6ef52..cd53e6c0b1f 100644
--- a/be/src/olap/options.cpp
+++ b/be/src/olap/options.cpp
@@ -221,7 +221,7 @@ Status parse_conf_cache_paths(const std::string& config_path, std::vector<CacheP
             if (value.IsInt64()) {
                 total_size = value.GetInt64();
             } else {
-                return Status::InvalidArgument("total_size should be int64");
+                total_size = 0;
             }
         }
         if (config::enable_file_cache_query_limit) {
@@ -230,13 +230,12 @@ Status parse_conf_cache_paths(const std::string& config_path, std::vector<CacheP
                 if (value.IsInt64()) {
                     query_limit_bytes = value.GetInt64();
                 } else {
-                    return Status::InvalidArgument("query_limit should be 
int64");
+                    query_limit_bytes = 0;
                 }
             }
         }
-        if (total_size <= 0 || (config::enable_file_cache_query_limit && query_limit_bytes <= 0)) {
-            return Status::InvalidArgument(
-                    "total_size or query_limit should not less than or equal 
to zero");
+        if (total_size < 0 || (config::enable_file_cache_query_limit && query_limit_bytes < 0)) {
+            return Status::InvalidArgument("total_size or query_limit should 
not less than zero");
         }
 
         // percent
diff --git a/be/src/service/http_service.cpp b/be/src/service/http_service.cpp
index 5cea6cb67ac..b851302aaa8 100644
--- a/be/src/service/http_service.cpp
+++ b/be/src/service/http_service.cpp
@@ -36,7 +36,6 @@
 #include "http/action/check_tablet_segment_action.h"
 #include "http/action/checksum_action.h"
 #include "http/action/clear_cache_action.h"
-#include "http/action/clear_file_cache_action.h"
 #include "http/action/compaction_action.h"
 #include "http/action/config_action.h"
 #include "http/action/debug_point_action.h"
@@ -209,9 +208,6 @@ Status HttpService::start() {
             _pool.add(new MetaAction(_env, TPrivilegeHier::GLOBAL, TPrivilegeType::ADMIN));
     _ev_http_server->register_handler(HttpMethod::GET, "/api/meta/{op}/{tablet_id}", meta_action);
 
-    FileCacheAction* file_cache_action = _pool.add(new FileCacheAction());
-    _ev_http_server->register_handler(HttpMethod::GET, "/api/file_cache", file_cache_action);
-
     ConfigAction* update_config_action =
             _pool.add(new ConfigAction(ConfigActionType::UPDATE_CONFIG));
     _ev_http_server->register_handler(HttpMethod::POST, "/api/update_config", update_config_action);
@@ -304,9 +300,8 @@ void HttpService::register_local_handler(StorageEngine& engine) {
     _ev_http_server->register_handler(HttpMethod::HEAD, "/api/_binlog/_download",
                                       download_binlog_action);
 
-    ClearFileCacheAction* clear_file_cache_action = _pool.add(new ClearFileCacheAction());
-    _ev_http_server->register_handler(HttpMethod::POST, "/api/clear_file_cache",
-                                      clear_file_cache_action);
+    FileCacheAction* file_cache_action = _pool.add(new FileCacheAction());
+    _ev_http_server->register_handler(HttpMethod::POST, "/api/file_cache", file_cache_action);
 
     TabletsDistributionAction* tablets_distribution_action =
             _pool.add(new TabletsDistributionAction(_env, engine, TPrivilegeHier::GLOBAL,
@@ -406,9 +401,8 @@ void HttpService::register_cloud_handler(CloudStorageEngine& engine) {
     _ev_http_server->register_handler(HttpMethod::GET, "/api/injection_point/{op}",
                                       injection_point_action);
 #endif
-    ClearFileCacheAction* clear_file_cache_action = _pool.add(new ClearFileCacheAction());
-    _ev_http_server->register_handler(HttpMethod::POST, "/api/clear_file_cache",
-                                      clear_file_cache_action);
+    FileCacheAction* file_cache_action = _pool.add(new FileCacheAction());
+    _ev_http_server->register_handler(HttpMethod::GET, "/api/file_cache", file_cache_action);
     auto* show_hotspot_action = _pool.add(new ShowHotspotAction(engine));
     _ev_http_server->register_handler(HttpMethod::GET, "/api/hotspot/tablet", 
show_hotspot_action);
 
diff --git a/be/test/io/cache/block_file_cache_test.cpp b/be/test/io/cache/block_file_cache_test.cpp
index 7cbc54d095a..e4a52c02589 100644
--- a/be/test/io/cache/block_file_cache_test.cpp
+++ b/be/test/io/cache/block_file_cache_test.cpp
@@ -182,7 +182,7 @@ TEST_F(BlockFileCacheTest, init) {
         {
             "path" : "/mnt/ssd01/clickbench/hot/be/file_cache",
             "total_size" : "193273528320",
-            "query_limit" : 38654705664
+            "query_limit" : -1
         }
         ]
         )");
@@ -194,14 +194,19 @@ TEST_F(BlockFileCacheTest, init) {
         [
         {
             "path" : "/mnt/ssd01/clickbench/hot/be/file_cache",
-            "normal" : 193273528320,
-            "persistent" : 193273528320,
-            "query_limit" : "38654705664"
+            "total_size" : -1
         }
         ]
         )");
     cache_paths.clear();
     EXPECT_FALSE(parse_conf_cache_paths(err_string, cache_paths));
+
+    err_string = std::string(R"(
+        [
+        ]
+        )");
+    cache_paths.clear();
+    EXPECT_FALSE(parse_conf_cache_paths(err_string, cache_paths));
 }
 
 void test_file_cache(io::FileCacheType cache_type) {
@@ -4239,4 +4244,79 @@ TEST_F(BlockFileCacheTest, ttl_reserve_with_evict_using_lru) {
     }
 }
 
+TEST_F(BlockFileCacheTest, reset_capacity) {
+    if (fs::exists(cache_base_path)) {
+        fs::remove_all(cache_base_path);
+    }
+    fs::create_directories(cache_base_path);
+    TUniqueId query_id;
+    query_id.hi = 1;
+    query_id.lo = 1;
+    io::FileCacheSettings settings;
+    settings.query_queue_size = 30;
+    settings.query_queue_elements = 5;
+    settings.index_queue_size = 30;
+    settings.index_queue_elements = 5;
+    settings.disposable_queue_size = 30;
+    settings.disposable_queue_elements = 5;
+    settings.capacity = 90;
+    settings.max_file_block_size = 30;
+    settings.max_query_cache_size = 30;
+    io::CacheContext context;
+    context.query_id = query_id;
+    auto key = io::BlockFileCache::hash("key1");
+    auto key2 = io::BlockFileCache::hash("key2");
+    io::BlockFileCache cache(cache_base_path, settings);
+    auto sp = SyncPoint::get_instance();
+    Defer defer {[sp] {
+        sp->clear_call_back("BlockFileCache::set_remove_batch");
+        sp->clear_call_back("BlockFileCache::set_sleep_time");
+    }};
+    sp->set_call_back("BlockFileCache::set_sleep_time",
+                      [](auto&& args) { *try_any_cast<int64_t*>(args[0]) = 1; });
+    sp->set_call_back("BlockFileCache::set_remove_batch",
+                      [](auto&& args) { *try_any_cast<int*>(args[0]) = 2; });
+    sp->enable_processing();
+    ASSERT_TRUE(cache.initialize());
+    for (int i = 0; i < 100; i++) {
+        if (cache.get_lazy_open_success()) {
+            break;
+        };
+        std::this_thread::sleep_for(std::chrono::milliseconds(1));
+    }
+    for (int64_t offset = 0; offset < 45; offset += 5) {
+        context.cache_type = static_cast<io::FileCacheType>((offset / 5) % 3);
+        auto holder = cache.get_or_set(key, offset, 5, context);
+        auto segments = fromHolder(holder);
+        ASSERT_EQ(segments.size(), 1);
+        assert_range(1, segments[0], io::FileBlock::Range(offset, offset + 4),
+                     io::FileBlock::State::EMPTY);
+        ASSERT_TRUE(segments[0]->get_or_set_downloader() == io::FileBlock::get_caller_id());
+        download(segments[0]);
+        assert_range(1, segments[0], io::FileBlock::Range(offset, offset + 4),
+                     io::FileBlock::State::DOWNLOADED);
+    }
+    context.cache_type = io::FileCacheType::TTL;
+    int64_t cur_time = UnixSeconds();
+    context.expiration_time = cur_time + 120;
+    for (int64_t offset = 45; offset < 90; offset += 5) {
+        auto holder = cache.get_or_set(key2, offset, 5, context);
+        auto segments = fromHolder(holder);
+        ASSERT_EQ(segments.size(), 1);
+        assert_range(1, segments[0], io::FileBlock::Range(offset, offset + 4),
+                     io::FileBlock::State::EMPTY);
+        ASSERT_TRUE(segments[0]->get_or_set_downloader() == io::FileBlock::get_caller_id());
+        download(segments[0]);
+        assert_range(1, segments[0], io::FileBlock::Range(offset, offset + 4),
+                     io::FileBlock::State::DOWNLOADED);
+    }
+    std::cout << cache.reset_capacity(30) << std::endl;
+    while (cache._async_clear_file_cache)
+        ;
+    EXPECT_EQ(cache._cur_cache_size, 30);
+    if (fs::exists(cache_base_path)) {
+        fs::remove_all(cache_base_path);
+    }
+}
+
 } // namespace doris::io
diff --git a/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy b/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy
index 716f623cfdb..8975d92f2ee 100644
--- a/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy
+++ b/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy
@@ -34,14 +34,14 @@ suite("test_stale_rowset") {
         }
     }
     String backendId = backendId_to_backendIP.keySet()[0]
-    def url = backendId_to_backendIP.get(backendId) + ":" + backendId_to_backendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendId_to_backendIP.get(backendId) + ":" + backendId_to_backendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/http/test_clear_cache.groovy b/regression-test/suites/cloud_p0/cache/http/test_clear_cache.groovy
index ff729981c59..6407261b6fe 100644
--- a/regression-test/suites/cloud_p0/cache/http/test_clear_cache.groovy
+++ b/regression-test/suites/cloud_p0/cache/http/test_clear_cache.groovy
@@ -35,14 +35,13 @@ suite("test_clear_cache") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
-    url = url + "?sync=true"
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info("clear file cache URL:" + url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
+            op "get"
             body ""
             check check_func
         }
diff --git a/regression-test/suites/cloud_p0/cache/http/test_clear_cache_async.groovy b/regression-test/suites/cloud_p0/cache/http/test_clear_cache_async.groovy
index db2a7f4537e..15f6f95b776 100644
--- a/regression-test/suites/cloud_p0/cache/http/test_clear_cache_async.groovy
+++ b/regression-test/suites/cloud_p0/cache/http/test_clear_cache_async.groovy
@@ -36,14 +36,13 @@ suite("test_clear_cache_async") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
-    url = url + "?sync=false"
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=false"""
     logger.info("clear file cache URL:" + url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
+            op "get"
             body ""
             check check_func
         }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy b/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy
similarity index 51%
copy from regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy
copy to regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy
index 0f8cc2f91e2..ab27f40925d 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy
+++ b/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy
@@ -17,32 +17,64 @@
 
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
-suite("test_ttl_preempt") {
+// 1. clear file cache
+// 2. load 19.5G ttl table data into cache (cache capacity is 20G)
+// 3. check ttl size and total size
+// 4. load 1.3G normal table data into cache (only a little data will be cached)
+// 5. select some data from the normal table; it will read from s3
+// 6. select some data from the ttl table; it will not read from s3
+// 7. wait for the ttl data to time out
+// 8. drop the normal table and load again. All normal table data will be cached this time.
+// 9. select some data from the normal table to check whether all data is cached
+suite("test_reset_capacity") {
     sql """ use @regression_cluster_name1 """
-    def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="120") """
-    String[][] backends = sql """ show backends """
-    String backendId;
+    sql """ set global enable_auto_analyze = false; """
+    def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="4200") """
+    //doris show backends: BackendId  Host  HeartbeatPort  BePort  HttpPort  BrpcPort  ArrowFlightSqlPort  LastStartTime  LastHeartbeat  Alive  SystemDecommissioned  TabletNum  DataUsedCapacity  TrashUsedCapcacity  AvailCapacity  TotalCapacity  UsedPct  MaxDiskUsedPct  RemoteUsedCapacity  Tag  ErrMsg  Version  Status  HeartbeatFailureCounter  NodeRole
+    def backends = sql_return_maparray "show backends;"
+    assertTrue(backends.size() > 0)
+    String backend_id;
     def backendIdToBackendIP = [:]
     def backendIdToBackendHttpPort = [:]
     def backendIdToBackendBrpcPort = [:]
-    for (String[] backend in backends) {
-        if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) {
-            backendIdToBackendIP.put(backend[0], backend[1])
-            backendIdToBackendHttpPort.put(backend[0], backend[4])
-            backendIdToBackendBrpcPort.put(backend[0], backend[5])
+    String host = ''
+    for (def backend in backends) {
+        if (backend.keySet().contains('Host')) {
+            host = backend.Host
+        } else {
+            host = backend.IP
+        }
+        def cloud_tag = parseJson(backend.Tag)
+        if (backend.Alive.equals("true") && cloud_tag.cloud_cluster_name.contains("regression_cluster_name1")) {
+            backendIdToBackendIP.put(backend.BackendId, host)
+            backendIdToBackendHttpPort.put(backend.BackendId, backend.HttpPort)
+            backendIdToBackendBrpcPort.put(backend.BackendId, backend.BrpcPort)
         }
     }
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
+            check check_func
+        }
+    }
+
+    def resetUrl = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=reset&capacity="""
+    def capacity = "0"
+    logger.info(resetUrl)
+    def resetFileCache = { check_func ->
+        httpTest {
+            endpoint ""
+            uri resetUrl + capacity
+            op "get"
+            body ""
             check check_func
         }
     }
@@ -61,13 +93,12 @@ suite("test_ttl_preempt") {
         |"AWS_ACCESS_KEY" = "${getS3AK()}",
         |"AWS_SECRET_KEY" = "${getS3SK()}",
         |"AWS_ENDPOINT" = "${getS3Endpoint()}",
-        |"AWS_REGION" = "${getS3Region()}",
-        |"provider" = "${getS3Provider()}")
+        |"AWS_REGION" = "${getS3Region()}")
         |PROPERTIES(
         |"exec_mem_limit" = "8589934592",
         |"load_parallelism" = "3")""".stripMargin()
-    
-    
+
+
     sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text
     sql new File("""${context.file.parent}/../ddl/customer_delete.sql""").text
     def load_customer_ttl_once =  { String table ->
@@ -124,22 +155,25 @@ suite("test_ttl_preempt") {
 
     // one customer table would take about 1.3GB, the total cache size is 20GB
     // the following would take 19.5G all
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
-    load_customer_once("customer")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
 
+    // The max ttl cache size is 90% of the cache capacity
+    long ttl_cache_size = 0
+    sleep(30000)
     getMetricsMethod.call() {
         respCode, body ->
             assertEquals("${respCode}".toString(), "200")
@@ -153,14 +187,82 @@ suite("test_ttl_preempt") {
                         continue
                     }
                     def i = line.indexOf(' ')
-                    assertEquals(line.substring(i).toLong(), 0)
+                    ttl_cache_size = line.substring(i).toLong()
+                    logger.info("current ttl_cache_size " + ttl_cache_size);
+                    assertTrue(ttl_cache_size <= 19327352832)
                     flag1 = true
                 }
             }
             assertTrue(flag1)
     }
-    // will cache all datas
+    capacity = "-1"
+    resetFileCache.call() {
+        respCode, body -> {
+            assertFalse("${respCode}".toString().equals("200"))
+        }
+    }
+
+    capacity = "-faf13r1r"
+    resetFileCache.call() {
+        respCode, body -> {
+            assertFalse("${respCode}".toString().equals("200"))
+        }
+    }
+
+    capacity = "0"
+    resetFileCache.call() {
+        respCode, body -> {
+            assertFalse("${respCode}".toString().equals("200"))
+        }
+    }
+
+    capacity = "1073741824&path=/xxxxxx" // 1GB
+    resetFileCache.call() {
+        respCode, body -> {
+            assertEquals("${respCode}".toString(), "200")
+        }
+    }
+
+    capacity = "1073741824" // 1GB
+    resetFileCache.call() {
+        respCode, body -> {
+            assertEquals("${respCode}".toString(), "200")
+        }
+    }
+
+    sleep(60000)
+    getMetricsMethod.call() {
+        respCode, body ->
+            assertEquals("${respCode}".toString(), "200")
+            String out = "${body}".toString()
+            def strs = out.split('\n')
+            Boolean flag1 = false;
+            for (String line in strs) {
+                if (flag1) break;
+                if (line.contains("ttl_cache_size")) {
+                    if (line.startsWith("#")) {
+                        continue
+                    }
+                    def i = line.indexOf(' ')
+                    ttl_cache_size = line.substring(i).toLong()
+                    logger.info("current ttl_cache_size " + ttl_cache_size);
+                    assertTrue(ttl_cache_size <= 1073741824)
+                    flag1 = true
+                }
+            }
+            assertTrue(flag1)
+    }
+
+    capacity = "1099511627776" // 1TB
+    resetFileCache.call() {
+        respCode, body -> {
+            assertEquals("${respCode}".toString(), "200")
+        }
+    }
     load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+    load_customer_ttl_once("customer_ttl")
+
     sleep(30000)
     getMetricsMethod.call() {
         respCode, body ->
@@ -175,10 +277,21 @@ suite("test_ttl_preempt") {
                         continue
                     }
                     def i = line.indexOf(' ')
-                    assertTrue(line.substring(i).toLong() > 1073741824)
+                    ttl_cache_size = line.substring(i).toLong()
+                    logger.info("current ttl_cache_size " + ttl_cache_size);
+                    assertTrue(ttl_cache_size > 1073741824)
                     flag1 = true
                 }
             }
             assertTrue(flag1)
     }
-}
+
+    capacity = "21474836480" // 20GB
+    resetFileCache.call() {
+        respCode, body -> {
+            assertEquals("${respCode}".toString(), "200")
+        }
+    }
+
+    sql new File("""${context.file.parent}/../ddl/customer_delete.sql""").text
+}
\ No newline at end of file
diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/sync_insert.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/sync_insert.groovy
index bfeb1c74c8f..c7731134e4a 100644
--- a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/sync_insert.groovy
+++ b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/sync_insert.groovy
@@ -47,9 +47,9 @@ suite("sync_insert") {
     def clearFileCache = { ip, port ->
         httpTest {
             endpoint ""
-            uri ip + ":" + port + """/api/clear_file_cache"""
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            uri ip + ":" + port + """/api/file_cache?op=clear&sync=true"""
+            op "get"
+            body ""
         }
     }
 
diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy
index d2de945f3e3..7d03f4daf82 100644
--- a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy
+++ b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy
@@ -48,8 +48,8 @@ suite("test_multi_stale_rowset") {
     def clearFileCache = { ip, port ->
         httpTest {
             endpoint ""
-            uri ip + ":" + port + """/api/clear_file_cache?sync=true"""
-            op "post"
+            uri ip + ":" + port + """/api/file_cache?op=clear&sync=true"""
+            op "get"
             body ""
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy
index adc9697e15b..3d22b75e98d 100644
--- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy
+++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy
@@ -76,8 +76,8 @@ suite("test_warm_up_cluster") {
     def clearFileCache = { ip, port ->
         httpTest {
             endpoint ""
-            uri ip + ":" + port + """/api/clear_file_cache?sync=true"""
-            op "post"
+            uri ip + ":" + port + """/api/file_cache?op=clear&sync=true"""
+            op "get"
             body ""
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy
index 8841582ca6c..f9a5004a84e 100644
--- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy
+++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy
@@ -75,9 +75,9 @@ suite("test_warm_up_cluster_batch") {
     def clearFileCache = { ip, port ->
         httpTest {
             endpoint ""
-            uri ip + ":" + port + """/api/clear_file_cache"""
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            uri ip + ":" + port + """/api/file_cache?op=clear&sync=true"""
+            op "get"
+            body ""
         }
     }
 
diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy
index 4c3aa6cbb86..e9be62cf982 100644
--- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy
+++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy
@@ -76,9 +76,9 @@ suite("test_warm_up_cluster_bigsize") {
     def clearFileCache = { ip, port ->
         httpTest {
             endpoint ""
-            uri ip + ":" + port + """/api/clear_file_cache"""
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            uri ip + ":" + port + """/api/file_cache?op=clear&sync=true"""
+            op "get"
+            body ""
         }
     }
 
diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy
index 3af7956e9ec..bf3121b269f 100644
--- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy
+++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy
@@ -102,9 +102,9 @@ suite("test_warm_up_cluster_empty") {
     def clearFileCache = { ip, port ->
         httpTest {
             endpoint ""
-            uri ip + ":" + port + """/api/clear_file_cache"""
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            uri ip + ":" + port + """/api/file_cache?op=clear&sync=true"""
+            op "get"
+            body ""
         }
     }
 
diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy
index 912f10ce584..0eb93f2896c 100644
--- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy
+++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy
@@ -95,9 +95,9 @@ suite("test_warm_up_partition") {
     def clearFileCache = { ip, port ->
         httpTest {
             endpoint ""
-            uri ip + ":" + port + """/api/clear_file_cache"""
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            uri ip + ":" + port + """/api/file_cache?op=clear&sync=true"""
+            op "get"
+            body ""
         }
     }
 
diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy
index ea6fba9d724..ffce02f4f64 100644
--- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy
+++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy
@@ -95,8 +95,8 @@ suite("test_warm_up_table") {
     def clearFileCache = { ip, port ->
         httpTest {
             endpoint ""
-            uri ip + ":" + port + """/api/clear_file_cache?sync=true"""
-            op "post"
+            uri ip + ":" + port + """/api/file_cache?op=clear&sync=true"""
+            op "get"
             body ""
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy
index c558513fb0d..bf39e922802 100644
--- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy
+++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy
@@ -118,9 +118,9 @@ suite("test_warm_up_tables") {
     def clearFileCache = { ip, port ->
         httpTest {
             endpoint ""
-            uri ip + ":" + port + """/api/clear_file_cache"""
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            uri ip + ":" + port + """/api/file_cache?op=clear&sync=true"""
+            op "get"
+            body ""
         }
     }
 
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy
index 05dfd0fe64f..299608f4091 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy
@@ -35,14 +35,14 @@ suite("alter_ttl_1") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy
index 83e1e64178f..660e822075d 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy
@@ -35,14 +35,14 @@ suite("alter_ttl_2") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy
index 65783d9a995..bd88c5287f9 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy
@@ -35,14 +35,14 @@ suite("alter_ttl_3") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy
index 11b7c0f4211..2731abaef0a 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy
@@ -35,14 +35,14 @@ suite("alter_ttl_4") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy
index 77a8bbdf78a..19f946e2998 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy
@@ -35,14 +35,14 @@ suite("test_ttl_max_int64") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy
index 4b8e7ebb5ee..7ad86c2cc53 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy
@@ -35,14 +35,14 @@ suite("test_ttl_random") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy
index 39092582a7b..6d72e51c4c0 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy
@@ -35,14 +35,14 @@ suite("test_ttl_seconds") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy b/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy
index df4ccf886be..47c458971bb 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy
@@ -34,14 +34,14 @@ suite("create_table_as_select") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
 def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy b/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy
index 320609f92a2..9c927f5c025 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy
@@ -34,14 +34,14 @@ suite("create_table_like") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
 def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy b/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy
index d4101fa2840..d9f928ebd89 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy
@@ -35,14 +35,14 @@ suite("test_ttl") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }
diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy
index 0f8cc2f91e2..e8008a05e13 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy
@@ -35,14 +35,14 @@ suite("test_ttl_preempt") {
     assertEquals(backendIdToBackendIP.size(), 1)
 
     backendId = backendIdToBackendIP.keySet()[0]
-    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache"""
+    def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true"""
     logger.info(url)
     def clearFileCache = { check_func ->
         httpTest {
             endpoint ""
             uri url
-            op "post"
-            body "{\"sync\"=\"true\"}"
+            op "get"
+            body ""
             check check_func
         }
     }

