This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 8ea0fd03e7a [Chore](gutil) use shared_ptr to replace scoped_ptr 
(#54741)
8ea0fd03e7a is described below

commit 8ea0fd03e7acdbf27b3ac3842df45e28c8f19ca1
Author: Pxl <[email protected]>
AuthorDate: Fri Aug 15 17:46:20 2025 +0800

    [Chore](gutil) use shared_ptr to replace scoped_ptr (#54741)
    
    ### What problem does this PR solve?
    
    Issue Number: close #xxx
    
    Related PR: #xxx
    
    Problem Summary:
    
    ### Release note
    
    None
    
    ### Check List (For Author)
    
    - Test <!-- At least one of them must be included. -->
        - [ ] Regression test
        - [ ] Unit Test
        - [ ] Manual test (add detailed scripts or steps below)
        - [ ] No need to test or manual test. Explain why:
    - [ ] This is a refactor/code format and no logic has been changed.
            - [ ] Previous test can cover this change.
            - [ ] No code files have been changed.
            - [ ] Other reason <!-- Add your reason?  -->
    
    - Behavior changed:
        - [ ] No.
        - [ ] Yes. <!-- Explain the behavior change -->
    
    - Does this need documentation?
        - [ ] No.
    - [ ] Yes. <!-- Add document PR link here. eg:
    https://github.com/apache/doris-website/pull/1214 -->
    
    ### Check List (For Reviewer who merge this PR)
    
    - [ ] Confirm the release note
    - [ ] Confirm test cases
    - [ ] Confirm document
    - [ ] Add branch pick label <!-- Add branch pick label that this PR
    should merge into -->
---
 be/CMakeLists.txt                                  |   2 -
 be/src/agent/task_worker_pool.h                    |   5 +-
 be/src/cloud/cloud_storage_engine.h                |   2 +-
 be/src/cloud/cloud_txn_delete_bitmap_cache.h       |   2 +-
 be/src/common/daemon.h                             |   2 +-
 be/src/gutil/CMakeLists.txt                        |  55 ---
 be/src/gutil/atomic_refcount.h                     | 145 -------
 be/src/gutil/atomicops-internals-gcc.h             | 196 ---------
 be/src/gutil/atomicops-internals-tsan.h            | 189 ---------
 be/src/gutil/atomicops-internals-x86.cc            | 105 -----
 be/src/gutil/atomicops-internals-x86.h             | 462 ---------------------
 be/src/gutil/atomicops.h                           | 268 ------------
 be/src/gutil/ref_counted.cc                        |  53 ---
 be/src/gutil/ref_counted.h                         | 237 -----------
 be/src/gutil/threading/thread_collision_warner.cc  |  76 ----
 be/src/gutil/threading/thread_collision_warner.h   | 225 ----------
 be/src/io/fs/file_handle_cache.h                   |   2 +-
 be/src/olap/olap_server.cpp                        |   2 +-
 .../olap/rowset/segment_v2/ngram_bloom_filter.cpp  |   2 +-
 be/src/olap/storage_engine.h                       |  34 +-
 be/src/olap/wal/wal_manager.h                      |   4 +-
 be/src/runtime/broker_mgr.h                        |   3 +-
 be/src/runtime/external_scan_context_mgr.h         |   3 +-
 be/src/runtime/fragment_mgr.h                      |   3 +-
 be/src/runtime/load_channel_mgr.h                  |   2 +-
 be/src/runtime/load_path_mgr.h                     |   3 +-
 be/src/runtime/result_buffer_mgr.h                 |   3 +-
 be/src/runtime/routine_load/data_consumer_pool.h   |   2 +-
 .../workload_sched_policy_mgr.h                    |   2 +-
 be/src/{gutil => util}/hash/city.cc                |  19 +-
 be/src/{gutil => util}/hash/city.h                 |  17 +
 be/src/util/hash_util.hpp                          |   2 +-
 be/src/util/thread.cpp                             |  25 +-
 be/src/util/thread.h                               |  56 +--
 be/src/vec/common/string_ref.h                     |   2 +-
 be/src/vec/common/uint128.h                        |   2 +-
 be/src/vec/spill/spill_stream_manager.h            |   2 +-
 be/test/util/cityhash_test.cpp                     |   2 +-
 be/test/util/countdown_latch_test.cpp              |   3 +-
 be/test/util/thread_test.cpp                       |  11 +-
 40 files changed, 116 insertions(+), 2114 deletions(-)

diff --git a/be/CMakeLists.txt b/be/CMakeLists.txt
index 6e50ea6971e..48cd273ee59 100644
--- a/be/CMakeLists.txt
+++ b/be/CMakeLists.txt
@@ -514,7 +514,6 @@ set(DORIS_LINK_LIBS
     Common
     Exec
     Exprs
-    Gutil
     IO
     Olap
     Runtime
@@ -774,7 +773,6 @@ add_subdirectory(${SRC_DIR}/exec)
 add_subdirectory(${SRC_DIR}/exprs)
 add_subdirectory(${SRC_DIR}/gen_cpp)
 add_subdirectory(${SRC_DIR}/geo)
-add_subdirectory(${SRC_DIR}/gutil)
 add_subdirectory(${SRC_DIR}/http)
 add_subdirectory(${SRC_DIR}/io)
 add_subdirectory(${SRC_DIR}/olap)
diff --git a/be/src/agent/task_worker_pool.h b/be/src/agent/task_worker_pool.h
index a46c63d6ae5..fe1048a2c23 100644
--- a/be/src/agent/task_worker_pool.h
+++ b/be/src/agent/task_worker_pool.h
@@ -27,7 +27,6 @@
 #include <string_view>
 
 #include "common/status.h"
-#include "gutil/ref_counted.h"
 
 namespace doris {
 
@@ -104,7 +103,7 @@ private:
     std::condition_variable _high_prior_condv;
     std::deque<std::unique_ptr<TAgentTaskRequest>> _high_prior_queue;
 
-    std::vector<scoped_refptr<Thread>> _workers;
+    std::vector<std::shared_ptr<Thread>> _workers;
 
     std::function<void(const TAgentTaskRequest&)> _callback;
 };
@@ -125,7 +124,7 @@ public:
 
 private:
     std::string _name;
-    scoped_refptr<Thread> _thread;
+    std::shared_ptr<Thread> _thread;
 
     std::mutex _mtx;
     std::condition_variable _condv;
diff --git a/be/src/cloud/cloud_storage_engine.h 
b/be/src/cloud/cloud_storage_engine.h
index f513bfbdcd7..1bb69d00527 100644
--- a/be/src/cloud/cloud_storage_engine.h
+++ b/be/src/cloud/cloud_storage_engine.h
@@ -190,7 +190,7 @@ private:
     mutable std::mutex _latest_fs_mtx;
     io::RemoteFileSystemSPtr _latest_fs;
 
-    std::vector<scoped_refptr<Thread>> _bg_threads;
+    std::vector<std::shared_ptr<Thread>> _bg_threads;
 
     // ATTN: Compactions in maps depend on `CloudTabletMgr` and `CloudMetaMgr`
     mutable std::mutex _compaction_mtx;
diff --git a/be/src/cloud/cloud_txn_delete_bitmap_cache.h 
b/be/src/cloud/cloud_txn_delete_bitmap_cache.h
index 91a0531c60a..9da94d204ef 100644
--- a/be/src/cloud/cloud_txn_delete_bitmap_cache.h
+++ b/be/src/cloud/cloud_txn_delete_bitmap_cache.h
@@ -108,7 +108,7 @@ private:
     std::map<TxnKey, TxnVal> _txn_map;
     std::multimap<int64_t, TxnKey> _expiration_txn;
     std::shared_mutex _rwlock;
-    scoped_refptr<Thread> _clean_thread;
+    std::shared_ptr<Thread> _clean_thread;
     CountDownLatch _stop_latch;
 };
 
diff --git a/be/src/common/daemon.h b/be/src/common/daemon.h
index 63cb0c6f906..e12c25c9cb7 100644
--- a/be/src/common/daemon.h
+++ b/be/src/common/daemon.h
@@ -48,6 +48,6 @@ private:
     void calculate_workload_group_metrics_thread();
 
     CountDownLatch _stop_background_threads_latch;
-    std::vector<scoped_refptr<Thread>> _threads;
+    std::vector<std::shared_ptr<Thread>> _threads;
 };
 } // namespace doris
diff --git a/be/src/gutil/CMakeLists.txt b/be/src/gutil/CMakeLists.txt
deleted file mode 100644
index 0a1b8375b3b..00000000000
--- a/be/src/gutil/CMakeLists.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR})
-
-# where to put generated libraries
-set(LIBRARY_OUTPUT_PATH "${BUILD_DIR}/src/gutil")
-
-# where to put generated binaries
-set(EXECUTABLE_OUTPUT_PATH "${BUILD_DIR}/src/gutil")
-
-SET(SOURCE_FILES
-        hash/city.cc
-        ref_counted.cc
-        threading/thread_collision_warner.cc)
-
-if ("${CMAKE_BUILD_TARGET_ARCH}" STREQUAL "x86" OR 
"${CMAKE_BUILD_TARGET_ARCH}" STREQUAL "x86_64")
-    set(SOURCE_FILES ${SOURCE_FILES} atomicops-internals-x86.cc)
-endif()
-
-add_library(Gutil STATIC ${SOURCE_FILES})
-
-set_target_properties(Gutil PROPERTIES COMPILE_FLAGS "-funsigned-char 
-Wno-deprecated")
-target_compile_options(Gutil PRIVATE -Wno-char-subscripts 
-Wno-implicit-fallthrough)
-# target_link_libraries(Gutil glog protobuf rt)
-
-#set(GUTIL_LIBS
-#  glog
-#  protobuf)
-
-#if (NOT APPLE)
-#  set(GUTIL_LIBS
-#    ${GUTIL_LIBS}
-#    rt) # clock_gettime() requires -lrt
-#endif()
-
-#ADD_EXPORTABLE_LIBRARY(gutil
-#  SRCS ${GUTIL_SRCS}
-#  DEPS ${GUTIL_LIBS}
-  # Disable warnings which trigger a lot in the Google code:
-#  COMPILE_FLAGS "-funsigned-char -Wno-deprecated -Wno-char-subscripts")
diff --git a/be/src/gutil/atomic_refcount.h b/be/src/gutil/atomic_refcount.h
deleted file mode 100644
index b96788027de..00000000000
--- a/be/src/gutil/atomic_refcount.h
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2008 Google Inc.
-// All rights reserved.
-
-// Atomic increment and decrement for reference counting.
-// For atomic operations on statistics counters and sequence numbers,
-// see atomic_stats_counter.h and atomic_sequence_num.h respectively.
-
-// Some clients use atomic operations for reference counting.
-// you use one of them:
-//         util/refcount/reference_counted.h
-//         util/gtl/refcounted_ptr.h
-//         util/gtl/shared_ptr.h
-// Alternatively, use a Mutex to maintain your reference count.
-// If you really must build your own reference counts with atomic operations,
-// use the following routines in the way suggested by this example:
-//    AtomicWord ref_count_;  // remember to initialize this to 0
-//    ...
-//    void Ref() {
-//      base::RefCountInc(&this->ref_count_);
-//    }
-//    void Unref() {
-//      if (!base::RefCountDec(&this->ref_count_)) {
-//        delete this;
-//      }
-//    }
-// Using these routines (rather than the ones in atomicops.h) will provide the
-// correct semantics; in particular, the memory ordering needed to make
-// reference counting work will be guaranteed.
-// You need not declare the reference count word "volatile".  After
-// initialization you should use the word only via the routines below; the
-// "volatile" in the signatures below is for backwards compatibility.
-//
-// If you need to do something very different from this, use a Mutex.
-
-#pragma once
-
-#include <glog/logging.h>
-
-#include "gutil/atomicops.h"
-
-namespace base {
-
-// These calls are available for both Atomic32, and AtomicWord types,
-// and also for base::subtle::Atomic64 if available on the platform.
-
-// Normally, clients are expected to use RefCountInc/RefCountDec.
-// In rare cases, it may be necessary to adjust the reference count by
-// more than 1, in which case they may use RefCountIncN/RefCountDecN.
-
-// Increment a reference count by "increment", which must exceed 0.
-inline void RefCountIncN(volatile Atomic32* ptr, Atomic32 increment) {
-    DCHECK_GT(increment, 0);
-    base::subtle::NoBarrier_AtomicIncrement(ptr, increment);
-}
-
-// Decrement a reference count by "decrement", which must exceed 0,
-// and return whether the result is non-zero.
-// Insert barriers to ensure that state written before the reference count
-// became zero will be visible to a thread that has just made the count zero.
-inline bool RefCountDecN(volatile Atomic32* ptr, Atomic32 decrement) {
-    DCHECK_GT(decrement, 0);
-    bool res = base::subtle::Barrier_AtomicIncrement(ptr, -decrement) != 0;
-    return res;
-}
-
-// Increment a reference count by 1.
-inline void RefCountInc(volatile Atomic32* ptr) {
-    base::RefCountIncN(ptr, 1);
-}
-
-// Decrement a reference count by 1 and return whether the result is non-zero.
-// Insert barriers to ensure that state written before the reference count
-// became zero will be visible to a thread that has just made the count zero.
-inline bool RefCountDec(volatile Atomic32* ptr) {
-    return base::RefCountDecN(ptr, 1);
-}
-
-// Return whether the reference count is one.
-// If the reference count is used in the conventional way, a
-// reference count of 1 implies that the current thread owns the
-// reference and no other thread shares it.
-// This call performs the test for a reference count of one, and
-// performs the memory barrier needed for the owning thread
-// to act on the object, knowing that it has exclusive access to the
-// object.
-inline bool RefCountIsOne(const volatile Atomic32* ptr) {
-    return base::subtle::Acquire_Load(ptr) == 1;
-}
-
-// Return whether the reference count is zero.  With conventional object
-// referencing counting, the object will be destroyed, so the reference count
-// should never be zero.  Hence this is generally used for a debug check.
-inline bool RefCountIsZero(const volatile Atomic32* ptr) {
-    return subtle::Acquire_Load(ptr) == 0;
-}
-
-#ifdef BASE_HAS_ATOMIC64
-// Implementations for Atomic64, if available.
-inline void RefCountIncN(volatile base::subtle::Atomic64* ptr, 
base::subtle::Atomic64 increment) {
-    DCHECK_GT(increment, 0);
-    base::subtle::NoBarrier_AtomicIncrement(ptr, increment);
-}
-inline bool RefCountDecN(volatile base::subtle::Atomic64* ptr, 
base::subtle::Atomic64 decrement) {
-    DCHECK_GT(decrement, 0);
-    return base::subtle::Barrier_AtomicIncrement(ptr, -decrement) != 0;
-}
-inline void RefCountInc(volatile base::subtle::Atomic64* ptr) {
-    base::RefCountIncN(ptr, 1);
-}
-inline bool RefCountDec(volatile base::subtle::Atomic64* ptr) {
-    return base::RefCountDecN(ptr, 1);
-}
-inline bool RefCountIsOne(const volatile base::subtle::Atomic64* ptr) {
-    return base::subtle::Acquire_Load(ptr) == 1;
-}
-inline bool RefCountIsZero(const volatile base::subtle::Atomic64* ptr) {
-    return base::subtle::Acquire_Load(ptr) == 0;
-}
-#endif
-
-#ifdef AtomicWordCastType
-// Implementations for AtomicWord, if it's a different type from the above.
-inline void RefCountIncN(volatile AtomicWord* ptr, AtomicWord increment) {
-    base::RefCountIncN(reinterpret_cast<volatile AtomicWordCastType*>(ptr), 
increment);
-}
-inline bool RefCountDecN(volatile AtomicWord* ptr, AtomicWord decrement) {
-    return base::RefCountDecN(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr), decrement);
-}
-inline void RefCountInc(volatile AtomicWord* ptr) {
-    base::RefCountIncN(ptr, 1);
-}
-inline bool RefCountDec(volatile AtomicWord* ptr) {
-    return base::RefCountDecN(ptr, 1);
-}
-inline bool RefCountIsOne(const volatile AtomicWord* ptr) {
-    return base::subtle::Acquire_Load(reinterpret_cast<const volatile 
AtomicWordCastType*>(ptr)) ==
-           1;
-}
-inline bool RefCountIsZero(const volatile AtomicWord* ptr) {
-    return base::subtle::Acquire_Load(reinterpret_cast<const volatile 
AtomicWordCastType*>(ptr)) ==
-           0;
-}
-#endif
-
-} // namespace base
diff --git a/be/src/gutil/atomicops-internals-gcc.h 
b/be/src/gutil/atomicops-internals-gcc.h
deleted file mode 100644
index 3fb02f2c6a2..00000000000
--- a/be/src/gutil/atomicops-internals-gcc.h
+++ /dev/null
@@ -1,196 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2014, Linaro
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// ---
-//
-// Author: Riku Voipio, [email protected]
-//
-// atomic primitives implemented with gcc atomic intrinsics:
-// http://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
-//
-
-#pragma once
-
-#include <stdio.h>
-#include <stdlib.h>
-
-typedef int32_t Atomic32;
-
-namespace base {
-namespace subtle {
-
-typedef int64_t Atomic64;
-
-inline void MemoryBarrier() {
-    __sync_synchronize();
-}
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                         Atomic32 new_value) {
-    Atomic32 prev_value = old_value;
-    __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, 
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
-    return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 
new_value) {
-    return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, 
__ATOMIC_RELAXED);
-}
-
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 
new_value) {
-    return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, 
__ATOMIC_ACQUIRE);
-}
-
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 
new_value) {
-    return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, 
__ATOMIC_RELEASE);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 
increment) {
-    return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 
increment) {
-    return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                       Atomic32 new_value) {
-    Atomic32 prev_value = old_value;
-    __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, 
__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
-    return prev_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                       Atomic32 new_value) {
-    Atomic32 prev_value = old_value;
-    __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, 
__ATOMIC_RELEASE, __ATOMIC_RELAXED);
-    return prev_value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-    *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-    *ptr = value;
-    MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-    MemoryBarrier();
-    *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-    return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-    Atomic32 value = *ptr;
-    MemoryBarrier();
-    return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-    MemoryBarrier();
-    return *ptr;
-}
-
-// 64-bit versions
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                         Atomic64 new_value) {
-    Atomic64 prev_value = old_value;
-    __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, 
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
-    return prev_value;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_value) {
-    return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, 
__ATOMIC_RELAXED);
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_value) {
-    return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, 
__ATOMIC_ACQUIRE);
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_value) {
-    return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, 
__ATOMIC_RELEASE);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 
increment) {
-    return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 
increment) {
-    return __sync_add_and_fetch(ptr, increment);
-}
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                       Atomic64 new_value) {
-    Atomic64 prev_value = old_value;
-    __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, 
__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
-    return prev_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                       Atomic64 new_value) {
-    Atomic64 prev_value = old_value;
-    __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, 
__ATOMIC_RELEASE, __ATOMIC_RELAXED);
-    return prev_value;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-    *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-    *ptr = value;
-    MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-    MemoryBarrier();
-    *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-    return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-    Atomic64 value = *ptr;
-    MemoryBarrier();
-    return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-    MemoryBarrier();
-    return *ptr;
-}
-
-} // namespace subtle
-} // namespace base
diff --git a/be/src/gutil/atomicops-internals-tsan.h 
b/be/src/gutil/atomicops-internals-tsan.h
deleted file mode 100644
index a6868d249ee..00000000000
--- a/be/src/gutil/atomicops-internals-tsan.h
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation for compiler-based
-// ThreadSanitizer. Use base/atomicops.h instead.
-
-#pragma once
-
-// Workaround for Chromium BASE_EXPORT definition
-#ifndef BASE_EXPORT
-#define BASE_EXPORT
-#endif
-
-// This struct is not part of the public API of this module; clients may not
-// use it.  (However, it's exported via BASE_EXPORT because clients implicitly
-// do use it at link time by inlining these functions.)
-// Features of this x86.  Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
-    bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do 
lfence
-                              // after acquire compare-and-swap.
-    bool has_sse2;            // Processor has SSE2.
-};
-BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct 
AtomicOps_Internalx86CPUFeatures;
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-#include <sanitizer/tsan_interface_atomic.h>
-
-typedef int32_t Atomic32;
-typedef int64_t Atomic64;
-
-namespace base {
-namespace subtle {
-
-typedef int32_t Atomic32;
-typedef int64_t Atomic64;
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                         Atomic32 new_value) {
-    Atomic32 cmp = old_value;
-    __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, 
__tsan_memory_order_relaxed,
-                                            __tsan_memory_order_relaxed);
-    return cmp;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 
new_value) {
-    return __tsan_atomic32_exchange(ptr, new_value, 
__tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 
new_value) {
-    return __tsan_atomic32_exchange(ptr, new_value, 
__tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 
new_value) {
-    return __tsan_atomic32_exchange(ptr, new_value, 
__tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 
increment) {
-    return increment + __tsan_atomic32_fetch_add(ptr, increment, 
__tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 
increment) {
-    return increment + __tsan_atomic32_fetch_add(ptr, increment, 
__tsan_memory_order_acq_rel);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                       Atomic32 new_value) {
-    Atomic32 cmp = old_value;
-    __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, 
__tsan_memory_order_acquire,
-                                            __tsan_memory_order_acquire);
-    return cmp;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                       Atomic32 new_value) {
-    Atomic32 cmp = old_value;
-    __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, 
__tsan_memory_order_release,
-                                            __tsan_memory_order_relaxed);
-    return cmp;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-    __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-    __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
-    __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-    __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-    return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-    return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-    __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-    return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                         Atomic64 new_value) {
-    Atomic64 cmp = old_value;
-    __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, 
__tsan_memory_order_relaxed,
-                                            __tsan_memory_order_relaxed);
-    return cmp;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_value) {
-    return __tsan_atomic64_exchange(ptr, new_value, 
__tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_value) {
-    return __tsan_atomic64_exchange(ptr, new_value, 
__tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_value) {
-    return __tsan_atomic64_exchange(ptr, new_value, 
__tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 
increment) {
-    return increment + __tsan_atomic64_fetch_add(ptr, increment, 
__tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 
increment) {
-    return increment + __tsan_atomic64_fetch_add(ptr, increment, 
__tsan_memory_order_acq_rel);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-    __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-    __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
-    __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-    __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-    return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-    return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-    __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-    return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                       Atomic64 new_value) {
-    Atomic64 cmp = old_value;
-    __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, 
__tsan_memory_order_acquire,
-                                            __tsan_memory_order_acquire);
-    return cmp;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                       Atomic64 new_value) {
-    Atomic64 cmp = old_value;
-    __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, 
__tsan_memory_order_release,
-                                            __tsan_memory_order_relaxed);
-    return cmp;
-}
-
-inline void MemoryBarrier() {
-    __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void PauseCPU() {}
-
-} // namespace subtle
-} // namespace base
-
-#undef ATOMICOPS_COMPILER_BARRIER
diff --git a/be/src/gutil/atomicops-internals-x86.cc 
b/be/src/gutil/atomicops-internals-x86.cc
deleted file mode 100644
index edc2e367564..00000000000
--- a/be/src/gutil/atomicops-internals-x86.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2007 Google, Inc.
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// All rights reserved.
-
-// This module gets enough CPU information to optimize the
-// atomicops module on x86.
-
-#include "gutil/atomicops-internals-x86.h"
-
-// This file only makes sense with atomicops-internals-x86.h -- it
-// depends on structs that are defined in that file.  If atomicops.h
-// doesn't sub-include that file, then we aren't needed, and shouldn't
-// try to do anything.
-#ifdef GUTIL_ATOMICOPS_INTERNALS_X86_H_
-
-// This macro was copied from //util/cpuid/cpuid.cc
-// Inline cpuid instruction.  In PIC compilations, %ebx contains the address
-// of the global offset table.  To avoid breaking such executables, this code
-// must preserve that register's value across cpuid instructions.
-#if defined(__i386__)
-#define cpuid(a, b, c, d, inp)               \
-    asm("mov %%ebx, %%edi\n"                 \
-        "cpuid\n"                            \
-        "xchg %%edi, %%ebx\n"                \
-        : "=a"(a), "=D"(b), "=c"(c), "=d"(d) \
-        : "a"(inp))
-#elif defined(__x86_64__)
-#define cpuid(a, b, c, d, inp)               \
-    asm("mov %%rbx, %%rdi\n"                 \
-        "cpuid\n"                            \
-        "xchg %%rdi, %%rbx\n"                \
-        : "=a"(a), "=D"(b), "=c"(c), "=d"(d) \
-        : "a"(inp))
-#endif
-
-#if defined(cpuid) // initialize the struct only on x86
-
-// Set the flags so that code will run correctly and conservatively
-// until InitGoogle() is called.
-struct GutilAtomicOps_x86CPUFeatureStruct 
GutilAtomicOps_Internalx86CPUFeatures = {
-        false, // no SSE2
-        false  // no cmpxchg16b
-};
-
-// Initialize the GutilAtomicOps_Internalx86CPUFeatures struct.
-static void AtomicOps_Internalx86CPUFeaturesInit() {
-    uint32 eax;
-    uint32 ebx;
-    uint32 ecx;
-    uint32 edx;
-
-    // Get vendor string (issue CPUID with eax = 0)
-    cpuid(eax, ebx, ecx, edx, 0);
-    char vendor[13];
-    memcpy(vendor, &ebx, 4);
-    memcpy(vendor + 4, &edx, 4);
-    memcpy(vendor + 8, &ecx, 4);
-    vendor[12] = 0;
-
-    // get feature flags in ecx/edx, and family/model in eax
-    cpuid(eax, ebx, ecx, edx, 1);
-
-    int family = (eax >> 8) & 0xf; // family and model fields
-    int model = (eax >> 4) & 0xf;
-    if (family == 0xf) { // use extended family and model fields
-        family += (eax >> 20) & 0xff;
-        model += ((eax >> 16) & 0xf) << 4;
-    }
-
-    // edx bit 26 is SSE2 which we use to tell use whether we can use mfence
-    GutilAtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
-
-    // ecx bit 13 indicates whether the cmpxchg16b instruction is supported
-    GutilAtomicOps_Internalx86CPUFeatures.has_cmpxchg16b = ((ecx >> 13) & 1);
-
-    VLOG_CRITICAL << "vendor " << vendor << "  family " << family << "  model 
" << model << "  sse2 "
-            << GutilAtomicOps_Internalx86CPUFeatures.has_sse2 << "  cmpxchg16b 
"
-            << GutilAtomicOps_Internalx86CPUFeatures.has_cmpxchg16b;
-}
-
-// AtomicOps initialisation routine for external use.
-void AtomicOps_x86CPUFeaturesInit() {
-    AtomicOps_Internalx86CPUFeaturesInit();
-}
-
-#endif
-
-#endif // GUTIL_ATOMICOPS_INTERNALS_X86_H_
diff --git a/be/src/gutil/atomicops-internals-x86.h 
b/be/src/gutil/atomicops-internals-x86.h
deleted file mode 100644
index 141bc569172..00000000000
--- a/be/src/gutil/atomicops-internals-x86.h
+++ /dev/null
@@ -1,462 +0,0 @@
-// Copyright 2003 Google Inc.
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// All Rights Reserved.
-//
-//
-// Implementation of atomic operations for x86.  This file should not
-// be included directly.  Clients should instead include
-// "base/atomicops.h".
-
-#pragma once
-
-#include "common/logging.h"
-#include <stdint.h>
-#include <ostream>
-
-#define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
-
-// NOTE(user): x86 does not need to define AtomicWordCastType, because it
-// already matches Atomic32 or Atomic64, depending on the platform.
-
-// This struct is not part of the public API of this module; clients may not
-// use it.
-// Features of this x86.  Values may not be correct before InitGoogle() is run,
-// but are set conservatively.
-// Modify AtomicOps_x86CPUFeatureStruct to GutilAtomicOps_x86CPUFeatureStruct 
for brpc
-struct GutilAtomicOps_x86CPUFeatureStruct {
-    bool has_sse2;       // Processor has SSE2.
-    bool has_cmpxchg16b; // Processor supports cmpxchg16b instruction.
-};
-extern struct GutilAtomicOps_x86CPUFeatureStruct 
GutilAtomicOps_Internalx86CPUFeatures;
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-// AtomicOps initialisation for open source use.
-void AtomicOps_x86CPUFeaturesInit();
-
-typedef int32_t Atomic32;
-typedef int64_t Atomic64;
-
-namespace base {
-namespace subtle {
-
-typedef int32_t Atomic32;
-typedef int64_t Atomic64;
-
-// These atomic primitives don't work atomically, and can cause really nasty
-// hard-to-track-down bugs, if the pointer isn't naturally aligned. Check 
alignment
-// in debug mode.
-template <class T>
-void CheckNaturalAlignment(const T* ptr) {
-    DCHECK_EQ(0, reinterpret_cast<const uintptr_t>(ptr) & (sizeof(T) - 1))
-            << "unaligned pointer not allowed for atomics";
-}
-
-// 32-bit low-level operations on any platform.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                         Atomic32 new_value) {
-    CheckNaturalAlignment(ptr);
-    Atomic32 prev;
-    __asm__ __volatile__("lock; cmpxchgl %1,%2"
-                         : "=a"(prev)
-                         : "q"(new_value), "m"(*ptr), "0"(old_value)
-                         : "memory");
-    return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 
new_value) {
-    CheckNaturalAlignment(ptr);
-    __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
-                         : "=r"(new_value)
-                         : "m"(*ptr), "0"(new_value)
-                         : "memory");
-    return new_value; // Now it's the previous value.
-}
-
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 
new_value) {
-    CheckNaturalAlignment(ptr);
-    Atomic32 old_val = NoBarrier_AtomicExchange(ptr, new_value);
-    return old_val;
-}
-
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 
new_value) {
-    return NoBarrier_AtomicExchange(ptr, new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 
increment) {
-    CheckNaturalAlignment(ptr);
-    Atomic32 temp = increment;
-    __asm__ __volatile__("lock; xaddl %0,%1" : "+r"(temp), "+m"(*ptr) : : 
"memory");
-    // temp now holds the old value of *ptr
-    return temp + increment;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 
increment) {
-    CheckNaturalAlignment(ptr);
-    Atomic32 temp = increment;
-    __asm__ __volatile__("lock; xaddl %0,%1" : "+r"(temp), "+m"(*ptr) : : 
"memory");
-    // temp now holds the old value of *ptr
-    return temp + increment;
-}
-
-// On x86, the NoBarrier_CompareAndSwap() uses a locked instruction and so also
-// provides both acquire and release barriers.
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                       Atomic32 new_value) {
-    return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                       Atomic32 new_value) {
-    return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Barrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                       Atomic32 new_value) {
-    return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-    CheckNaturalAlignment(ptr);
-    *ptr = value;
-}
-
-// Issue the x86 "pause" instruction, which tells the CPU that we
-// are in a spinlock wait loop and should allow other hyperthreads
-// to run, not speculate memory access, etc.
-inline void PauseCPU() {
-    __asm__ __volatile__("pause" : : : "memory");
-}
-
-#if defined(__x86_64__)
-
-// 64-bit implementations of memory barrier can be simpler, because it
-// "mfence" is guaranteed to exist.
-inline void MemoryBarrier() {
-    __asm__ __volatile__("mfence" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-    CheckNaturalAlignment(ptr);
-    *ptr = value;
-    MemoryBarrier();
-}
-
-#else
-
-inline void MemoryBarrier() {
-    if (GutilAtomicOps_Internalx86CPUFeatures.has_sse2) {
-        __asm__ __volatile__("mfence" : : : "memory");
-    } else { // mfence is faster but not present on PIII
-        Atomic32 x = 0;
-        Acquire_AtomicExchange(&x, 0);
-    }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-    if (GutilAtomicOps_Internalx86CPUFeatures.has_sse2) {
-        CheckNaturalAlignment(ptr);
-        *ptr = value;
-        __asm__ __volatile__("mfence" : : : "memory");
-    } else {
-        Acquire_AtomicExchange(ptr, value);
-    }
-}
-#endif
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-    CheckNaturalAlignment(ptr);
-    ATOMICOPS_COMPILER_BARRIER();
-    *ptr = value; // An x86 store acts as a release barrier.
-                  // See comments in Atomic64 version of Release_Store(), 
below.
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-    CheckNaturalAlignment(ptr);
-    return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-    CheckNaturalAlignment(ptr);
-    Atomic32 value = *ptr; // An x86 load acts as a acquire barrier.
-    // See comments in Atomic64 version of Release_Store(), below.
-    ATOMICOPS_COMPILER_BARRIER();
-    return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-    CheckNaturalAlignment(ptr);
-    MemoryBarrier();
-    return *ptr;
-}
-
-#if defined(__x86_64__)
-
-// 64-bit low-level operations on 64-bit platform.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                         Atomic64 new_value) {
-    Atomic64 prev;
-    CheckNaturalAlignment(ptr);
-    __asm__ __volatile__("lock; cmpxchgq %1,%2"
-                         : "=a"(prev)
-                         : "q"(new_value), "m"(*ptr), "0"(old_value)
-                         : "memory");
-    return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_value) {
-    CheckNaturalAlignment(ptr);
-    __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
-                         : "=r"(new_value)
-                         : "m"(*ptr), "0"(new_value)
-                         : "memory");
-    return new_value; // Now it's the previous value.
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_value) {
-    Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_value);
-    return old_val;
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_value) {
-    return NoBarrier_AtomicExchange(ptr, new_value);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 
increment) {
-    Atomic64 temp = increment;
-    CheckNaturalAlignment(ptr);
-    __asm__ __volatile__("lock; xaddq %0,%1" : "+r"(temp), "+m"(*ptr) : : 
"memory");
-    // temp now contains the previous value of *ptr
-    return temp + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 
increment) {
-    Atomic64 temp = increment;
-    CheckNaturalAlignment(ptr);
-    __asm__ __volatile__("lock; xaddq %0,%1" : "+r"(temp), "+m"(*ptr) : : 
"memory");
-    // temp now contains the previous value of *ptr
-    return temp + increment;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-    CheckNaturalAlignment(ptr);
-    *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-    CheckNaturalAlignment(ptr);
-    *ptr = value;
-    MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-    ATOMICOPS_COMPILER_BARRIER();
-    CheckNaturalAlignment(ptr);
-    *ptr = value; // An x86 store acts as a release barrier
-                  // for current AMD/Intel chips as of Jan 2008.
-                  // See also Acquire_Load(), below.
-
-    // When new chips come out, check:
-    //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
-    //  System Programming Guide, Chatper 7: Multiple-processor management,
-    //  Section 7.2, Memory Ordering.
-    // Last seen at:
-    //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
-    //
-    // x86 stores/loads fail to act as barriers for a few instructions (clflush
-    // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
-    // not generated by the compiler, and are rare.  Users of these 
instructions
-    // need to know about cache behaviour in any case since all of these 
involve
-    // either flushing cache lines or non-temporal cache hints.
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-    CheckNaturalAlignment(ptr);
-    return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-    CheckNaturalAlignment(ptr);
-    Atomic64 value = *ptr; // An x86 load acts as a acquire barrier,
-                           // for current AMD/Intel chips as of Jan 2008.
-                           // See also Release_Store(), above.
-    ATOMICOPS_COMPILER_BARRIER();
-    return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-    CheckNaturalAlignment(ptr);
-    MemoryBarrier();
-    return *ptr;
-}
-
-#else // defined(__x86_64__)
-
-// 64-bit low-level operations on 32-bit platform.
-
-#if !((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
-// For compilers older than gcc 4.1, we use inline asm.
-//
-// Potential pitfalls:
-//
-// 1. %ebx points to Global offset table (GOT) with -fPIC.
-//    We need to preserve this register.
-// 2. When explicit registers are used in inline asm, the
-//    compiler may not be aware of it and might try to reuse
-//    the same register for another argument which has constraints
-//    that allow it ("r" for example).
-
-inline Atomic64 __sync_val_compare_and_swap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                            Atomic64 new_value) {
-    CheckNaturalAlignment(ptr);
-    Atomic64 prev;
-    __asm__ __volatile__(
-            "push %%ebx\n\t"
-            "movl (%3), %%ebx\n\t"     // Move 64-bit new_value into
-            "movl 4(%3), %%ecx\n\t"    // ecx:ebx
-            "lock; cmpxchg8b (%1)\n\t" // If edx:eax (old_value) same
-            "pop %%ebx\n\t"
-            : "=A"(prev)      // as contents of ptr:
-            : "D"(ptr),       //   ecx:ebx => ptr
-              "0"(old_value), // else:
-              "S"(&new_value) //   old *ptr => edx:eax
-            : "memory", "%ecx");
-    return prev;
-}
-#endif // Compiler < gcc-4.1
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_val,
-                                         Atomic64 new_val) {
-    CheckNaturalAlignment(ptr);
-    return __sync_val_compare_and_swap(ptr, old_val, new_val);
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_val) {
-    Atomic64 old_val;
-    CheckNaturalAlignment(ptr);
-
-    do {
-        old_val = *ptr;
-    } while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val);
-
-    return old_val;
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_val) {
-    CheckNaturalAlignment(ptr);
-    Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_val);
-    return old_val;
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 
new_val) {
-    return NoBarrier_AtomicExchange(ptr, new_val);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 
increment) {
-    CheckNaturalAlignment(ptr);
-    Atomic64 old_val, new_val;
-
-    do {
-        old_val = *ptr;
-        new_val = old_val + increment;
-    } while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val);
-
-    return old_val + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 
increment) {
-    CheckNaturalAlignment(ptr);
-    Atomic64 new_val = NoBarrier_AtomicIncrement(ptr, increment);
-    return new_val;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-    CheckNaturalAlignment(ptr);
-    __asm__ __volatile__(
-            "movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
-            "movq %%mm0, %0\n\t" // moves (ptr could be read-only)
-            "emms\n\t"           // Empty mmx state/Reset FP regs
-            : "=m"(*ptr)
-            : "m"(value)
-            : // mark the FP stack and mmx registers as clobbered
-            "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", 
"st(7)", "mm0", "mm1",
-            "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-    NoBarrier_Store(ptr, value);
-    MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-    ATOMICOPS_COMPILER_BARRIER();
-    NoBarrier_Store(ptr, value);
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-    CheckNaturalAlignment(ptr);
-    Atomic64 value;
-    __asm__ __volatile__(
-            "movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
-            "movq %%mm0, %0\n\t" // moves (ptr could be read-only)
-            "emms\n\t"           // Empty mmx state/Reset FP regs
-            : "=m"(value)
-            : "m"(*ptr)
-            : // mark the FP stack and mmx registers as clobbered
-            "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", 
"st(7)", "mm0", "mm1",
-            "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
-    return value;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-    CheckNaturalAlignment(ptr);
-    Atomic64 value = NoBarrier_Load(ptr);
-    ATOMICOPS_COMPILER_BARRIER();
-    return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-    MemoryBarrier();
-    return NoBarrier_Load(ptr);
-}
-
-#endif // defined(__x86_64__)
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                       Atomic64 new_value) {
-    return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                       Atomic64 new_value) {
-    return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Barrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 
old_value,
-                                       Atomic64 new_value) {
-    return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-} // namespace subtle
-} // namespace base
-
-#undef ATOMICOPS_COMPILER_BARRIER
diff --git a/be/src/gutil/atomicops.h b/be/src/gutil/atomicops.h
deleted file mode 100644
index 5e7c606792b..00000000000
--- a/be/src/gutil/atomicops.h
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2003 Google Inc.
-// All Rights Reserved.
-//
-
-// For atomic operations on statistics counters, see atomic_stats_counter.h.
-// For atomic operations on sequence numbers, see atomic_sequence_num.h.
-// For atomic operations on reference counts, see atomic_refcount.h.
-
-// Some fast atomic operations -- typically with machine-dependent
-// implementations.  This file may need editing as Google code is
-// ported to different architectures.
-
-// The routines exported by this module are subtle.  If you use them, even if
-// you get the code right, it will depend on careful reasoning about atomicity
-// and memory ordering; it will be less readable, and harder to maintain.  If
-// you plan to use these routines, you should have a good reason, such as solid
-// evidence that performance would otherwise suffer, or there being no
-// alternative.  You should assume only properties explicitly guaranteed by the
-// specifications in this file.  You are almost certainly _not_ writing code
-// just for the x86; if you assume x86 semantics, x86 hardware bugs and
-// implementations on other architectures will cause your code to break.  If 
you
-// do not know what you are doing, avoid these routines, and use a Mutex.
-//
-// These following lower-level operations are typically useful only to people
-// implementing higher-level synchronization operations like spinlocks,
-// mutexes, and condition-variables.  They combine CompareAndSwap(),
-// addition, exchange, a load, or a store with appropriate memory-ordering
-// instructions.  "Acquire" operations ensure that no later memory access by
-// the same thread can be reordered ahead of the operation.  "Release"
-// operations ensure that no previous memory access by the same thread can be
-// reordered after the operation.  "Barrier" operations have both "Acquire" and
-// "Release" semantics.  A MemoryBarrier() has "Barrier" semantics, but does no
-// memory access.  "NoBarrier" operations have no barrier:  the CPU is
-// permitted to reorder them freely (as seen by other threads), even in ways
-// the appear to violate functional dependence, just as it can for any normal
-// variable access.
-//
-// It is incorrect to make direct assignments to/from an atomic variable.
-// You should use one of the Load or Store routines.  The NoBarrier
-// versions are provided when no barriers are needed:
-//   NoBarrier_Store()
-//   NoBarrier_Load()
-// Although there are currently no compiler enforcement, you are encouraged
-// to use these.  Moreover, if you choose to use base::subtle::Atomic64 type,
-// you MUST use one of the Load or Store routines to get correct behavior
-// on 32-bit platforms.
-//
-// The intent is eventually to put all of these routines in namespace
-// base::subtle
-
-#pragma once
-
-#include <stdint.h>
-
-// ------------------------------------------------------------------------
-// Include the platform specific implementations of the types
-// and operations listed below.  Implementations are to provide Atomic32
-// and Atomic64 operations. If there is a mismatch between intptr_t and
-// the Atomic32 or Atomic64 types for a platform, the platform-specific header
-// should define the macro, AtomicWordCastType in a clause similar to the
-// following:
-// #if ...pointers are 64 bits...
-// # define AtomicWordCastType base::subtle::Atomic64
-// #else
-// # define AtomicWordCastType Atomic32
-// #endif
-// ------------------------------------------------------------------------
-
-#define GUTILS_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + 
__GNUC_PATCHLEVEL__)
-
-#define GUTILS_CLANG_VERSION \
-    (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
-
-// ThreadSanitizer provides own implementation of atomicops.
-#if defined(THREAD_SANITIZER)
-#include "gutil/atomicops-internals-tsan.h" // IWYU pragma: export
-#elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__))
-#include "gutil/atomicops-internals-x86.h" // IWYU pragma: export
-#elif defined(__GNUC__) && GUTILS_GCC_VERSION >= 40700
-#include "gutil/atomicops-internals-gcc.h" // IWYU pragma: export
-#elif defined(__clang__) && GUTILS_CLANG_VERSION >= 30400
-#include "gutil/atomicops-internals-gcc.h" // IWYU pragma: export
-#else
-#error You need to implement atomic operations for this architecture
-#endif
-
-// Signed type that can hold a pointer and supports the atomic ops below, as
-// well as atomic loads and stores.  Instances must be naturally-aligned.
-typedef intptr_t AtomicWord;
-
-#ifdef AtomicWordCastType
-// ------------------------------------------------------------------------
-// This section is needed only when explicit type casting is required to
-// cast AtomicWord to one of the basic atomic types (Atomic64 or Atomic32).
-// It also serves to document the AtomicWord interface.
-// ------------------------------------------------------------------------
-
-namespace base {
-namespace subtle {
-
-// Atomically execute:
-//      result = *ptr;
-//      if (*ptr == old_value)
-//        *ptr = new_value;
-//      return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr, 
AtomicWord old_value,
-                                           AtomicWord new_value) {
-    return NoBarrier_CompareAndSwap(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr), old_value,
-                                    new_value);
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr.  This routine implies no memory barriers.
-inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr, 
AtomicWord new_value) {
-    return NoBarrier_AtomicExchange(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr), new_value);
-}
-
-inline AtomicWord Acquire_AtomicExchange(volatile AtomicWord* ptr, AtomicWord 
new_value) {
-    return Acquire_AtomicExchange(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr), new_value);
-}
-
-inline AtomicWord Release_AtomicExchange(volatile AtomicWord* ptr, AtomicWord 
new_value) {
-    return Release_AtomicExchange(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr), new_value);
-}
-
-// Atomically increment *ptr by "increment".  Returns the new value of
-// *ptr with the increment applied.  This routine implies no memory
-// barriers.
-inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr, 
AtomicWord increment) {
-    return NoBarrier_AtomicIncrement(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr),
-                                     increment);
-}
-
-inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr, AtomicWord 
increment) {
-    return Barrier_AtomicIncrement(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord 
old_value,
-                                         AtomicWord new_value) {
-    return base::subtle::Acquire_CompareAndSwap(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr),
-                                                old_value, new_value);
-}
-
-inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord 
old_value,
-                                         AtomicWord new_value) {
-    return base::subtle::Release_CompareAndSwap(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr),
-                                                old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
-    NoBarrier_Store(reinterpret_cast<volatile AtomicWordCastType*>(ptr), 
value);
-}
-
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
-    return base::subtle::Acquire_Store(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr), value);
-}
-
-inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
-    return base::subtle::Release_Store(reinterpret_cast<volatile 
AtomicWordCastType*>(ptr), value);
-}
-
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
-    return NoBarrier_Load(reinterpret_cast<volatile const 
AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
-    return base::subtle::Acquire_Load(reinterpret_cast<volatile const 
AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
-    return base::subtle::Release_Load(reinterpret_cast<volatile const 
AtomicWordCastType*>(ptr));
-}
-
-} // namespace subtle
-} // namespace base
-#endif // AtomicWordCastType
-
-// ------------------------------------------------------------------------
-// Commented out type definitions and method declarations for documentation
-// of the interface provided by this module.
-// ------------------------------------------------------------------------
-
-// ------------------------------------------------------------------------
-// The following are to be deprecated when all uses have been changed to
-// use the base::subtle namespace.
-// ------------------------------------------------------------------------
-
-#ifdef AtomicWordCastType
-// AtomicWord versions to be deprecated
-inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord 
old_value,
-                                         AtomicWord new_value) {
-    return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord 
old_value,
-                                         AtomicWord new_value) {
-    return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
-    return base::subtle::Acquire_Store(ptr, value);
-}
-
-inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
-    return base::subtle::Release_Store(ptr, value);
-}
-
-inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
-    return base::subtle::Acquire_Load(ptr);
-}
-
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
-    return base::subtle::Release_Load(ptr);
-}
-#endif // AtomicWordCastType
-
-// 32-bit Acquire/Release operations to be deprecated.
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                       Atomic32 new_value) {
-    return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 
old_value,
-                                       Atomic32 new_value) {
-    return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
-}
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-    base::subtle::Acquire_Store(ptr, value);
-}
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-    return base::subtle::Release_Store(ptr, value);
-}
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-    return base::subtle::Acquire_Load(ptr);
-}
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-    return base::subtle::Release_Load(ptr);
-}
-
-// 64-bit Acquire/Release operations to be deprecated.
-
-inline base::subtle::Atomic64 Acquire_CompareAndSwap(volatile 
base::subtle::Atomic64* ptr,
-                                                     base::subtle::Atomic64 
old_value,
-                                                     base::subtle::Atomic64 
new_value) {
-    return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-inline base::subtle::Atomic64 Release_CompareAndSwap(volatile 
base::subtle::Atomic64* ptr,
-                                                     base::subtle::Atomic64 
old_value,
-                                                     base::subtle::Atomic64 
new_value) {
-    return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
-}
-inline void Acquire_Store(volatile base::subtle::Atomic64* ptr, 
base::subtle::Atomic64 value) {
-    base::subtle::Acquire_Store(ptr, value);
-}
-inline void Release_Store(volatile base::subtle::Atomic64* ptr, 
base::subtle::Atomic64 value) {
-    return base::subtle::Release_Store(ptr, value);
-}
-inline base::subtle::Atomic64 Acquire_Load(volatile const 
base::subtle::Atomic64* ptr) {
-    return base::subtle::Acquire_Load(ptr);
-}
-inline base::subtle::Atomic64 Release_Load(volatile const 
base::subtle::Atomic64* ptr) {
-    return base::subtle::Release_Load(ptr);
-}
diff --git a/be/src/gutil/ref_counted.cc b/be/src/gutil/ref_counted.cc
deleted file mode 100644
index 0392d54265f..00000000000
--- a/be/src/gutil/ref_counted.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gutil/ref_counted.h"
-
-#include "gutil/atomic_refcount.h"
-
-namespace doris {
-
-namespace subtle {
-
-bool RefCountedThreadSafeBase::HasOneRef() const {
-    return 
base::RefCountIsOne(&const_cast<RefCountedThreadSafeBase*>(this)->ref_count_);
-}
-
-RefCountedThreadSafeBase::RefCountedThreadSafeBase() : ref_count_(0) {
-#ifndef NDEBUG
-    in_dtor_ = false;
-#endif
-}
-
-RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
-#ifndef NDEBUG
-    DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
-                        "calling Release()";
-#endif
-}
-
-void RefCountedThreadSafeBase::AddRef() const {
-#ifndef NDEBUG
-    DCHECK(!in_dtor_);
-#endif
-    base::RefCountInc(&ref_count_);
-}
-
-bool RefCountedThreadSafeBase::Release() const {
-#ifndef NDEBUG
-    DCHECK(!in_dtor_);
-    DCHECK(!base::RefCountIsZero(&ref_count_));
-#endif
-    if (!base::RefCountDec(&ref_count_)) {
-#ifndef NDEBUG
-        in_dtor_ = true;
-#endif
-        return true;
-    }
-    return false;
-}
-
-} // namespace subtle
-
-} // namespace doris
diff --git a/be/src/gutil/ref_counted.h b/be/src/gutil/ref_counted.h
deleted file mode 100644
index 534d0889caf..00000000000
--- a/be/src/gutil/ref_counted.h
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#pragma once
-
-#include <cassert>
-#include <cstddef>
-#include <utility> // IWYU pragma: keep
-
-#include "butil/macros.h"
-#include "gutil/atomicops.h"
-
-namespace doris {
-namespace subtle {
-
-typedef Atomic32 AtomicRefCount;
-
-class RefCountedThreadSafeBase {
-public:
-    bool HasOneRef() const;
-
-protected:
-    RefCountedThreadSafeBase();
-    ~RefCountedThreadSafeBase();
-
-    void AddRef() const;
-
-    // Returns true if the object should self-delete.
-    bool Release() const;
-
-private:
-    mutable AtomicRefCount ref_count_;
-#ifndef NDEBUG
-    mutable bool in_dtor_;
-#endif
-
-    DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
-};
-
-} // namespace subtle
-
-// Forward declaration.
-template <class T, typename Traits>
-class RefCountedThreadSafe;
-
-// Default traits for RefCountedThreadSafe<T>.  Deletes the object when its ref
-// count reaches 0.  Overload to delete it on a different thread etc.
-template <typename T>
-struct DefaultRefCountedThreadSafeTraits {
-    static void Destruct(const T* x) {
-        // Delete through RefCountedThreadSafe to make child classes only need 
to be
-        // friend with RefCountedThreadSafe instead of this struct, which is an
-        // implementation detail.
-        RefCountedThreadSafe<T, 
DefaultRefCountedThreadSafeTraits>::DeleteInternal(x);
-    }
-};
-
-//
-// A thread-safe variant of RefCounted<T>
-//
-//   class MyFoo : public RefCountedThreadSafe<MyFoo> {
-//    ...
-//   };
-//
-// If you're using the default trait, then you should add compile time
-// asserts that no one else is deleting your object.  i.e.
-//    private:
-//     friend class RefCountedThreadSafe<MyFoo>;
-//     ~MyFoo();
-template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T>>
-class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
-public:
-    RefCountedThreadSafe() {}
-
-    void AddRef() const { subtle::RefCountedThreadSafeBase::AddRef(); }
-
-    void Release() const {
-        if (subtle::RefCountedThreadSafeBase::Release()) {
-            Traits::Destruct(static_cast<const T*>(this));
-        }
-    }
-
-protected:
-    ~RefCountedThreadSafe() {}
-
-private:
-    friend struct DefaultRefCountedThreadSafeTraits<T>;
-    static void DeleteInternal(const T* x) { delete x; }
-
-    DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafe);
-};
-
-} // namespace doris
-
-//
-// A smart pointer class for reference counted objects.  Use this class instead
-// of calling AddRef and Release manually on a reference counted object to
-// avoid common memory leaks caused by forgetting to Release an object
-// reference.  Sample usage:
-//
-//   class MyFoo : public RefCounted<MyFoo> {
-//    ...
-//   };
-//
-//   void some_function() {
-//     scoped_refptr<MyFoo> foo = new MyFoo();
-//     foo->Method(param);
-//     // |foo| is released when this function returns
-//   }
-//
-//   void some_other_function() {
-//     scoped_refptr<MyFoo> foo = new MyFoo();
-//     ...
-//     foo = NULL;  // explicitly releases |foo|
-//     ...
-//     if (foo)
-//       foo->Method(param);
-//   }
-//
-// The above examples show how scoped_refptr<T> acts like a pointer to T.
-// Given two scoped_refptr<T> classes, it is also possible to exchange
-// references between the two objects, like so:
-//
-//   {
-//     scoped_refptr<MyFoo> a = new MyFoo();
-//     scoped_refptr<MyFoo> b;
-//
-//     b.swap(a);
-//     // now, |b| references the MyFoo object, and |a| references NULL.
-//   }
-//
-// To make both |a| and |b| in the above example reference the same MyFoo
-// object, simply use the assignment operator:
-//
-//   {
-//     scoped_refptr<MyFoo> a = new MyFoo();
-//     scoped_refptr<MyFoo> b;
-//
-//     b = a;
-//     // now, |a| and |b| each own a reference to the same MyFoo object.
-//   }
-//
-template <class T>
-class scoped_refptr {
-public:
-    typedef T element_type;
-
-    scoped_refptr() : ptr_(NULL) {}
-
-    scoped_refptr(T* p) : ptr_(p) {
-        if (ptr_) ptr_->AddRef();
-    }
-
-    // Copy constructor.
-    scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
-        if (ptr_) ptr_->AddRef();
-    }
-
-    // Copy conversion constructor.
-    template <typename U>
-    scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
-        if (ptr_) ptr_->AddRef();
-    }
-
-    // Move constructor. This is required in addition to the conversion
-    // constructor below in order for clang to warn about pessimizing moves.
-    scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.get()) { // NOLINT
-        r.ptr_ = nullptr;
-    }
-
-    // Move conversion constructor.
-    template <typename U>
-    scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.get()) { // NOLINT
-        r.ptr_ = nullptr;
-    }
-
-    ~scoped_refptr() {
-        if (ptr_) ptr_->Release();
-    }
-
-    T* get() const { return ptr_; }
-
-    typedef T* scoped_refptr::* Testable;
-    operator Testable() const { return ptr_ ? &scoped_refptr::ptr_ : NULL; }
-
-    T* operator->() const {
-        assert(ptr_ != NULL);
-        return ptr_;
-    }
-
-    scoped_refptr<T>& operator=(T* p) {
-        // AddRef first so that self assignment should work
-        if (p) p->AddRef();
-        T* old_ptr = ptr_;
-        ptr_ = p;
-        if (old_ptr) old_ptr->Release();
-        return *this;
-    }
-
-    scoped_refptr<T>& operator=(const scoped_refptr<T>& r) { return *this = 
r.ptr_; }
-
-    template <typename U>
-    scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
-        return *this = r.get();
-    }
-
-    scoped_refptr<T>& operator=(scoped_refptr<T>&& r) {
-        scoped_refptr<T>(std::move(r)).swap(*this);
-        return *this;
-    }
-
-    template <typename U>
-    scoped_refptr<T>& operator=(scoped_refptr<U>&& r) {
-        scoped_refptr<T>(std::move(r)).swap(*this);
-        return *this;
-    }
-
-    void swap(T** pp) {
-        T* p = ptr_;
-        ptr_ = *pp;
-        *pp = p;
-    }
-
-    void swap(scoped_refptr<T>& r) { swap(&r.ptr_); }
-
-    // Like gscoped_ptr::reset(), drops a reference on the currently held 
object
-    // (if any), and adds a reference to the passed-in object (if not NULL).
-    void reset(T* p = NULL) { *this = p; }
-
-protected:
-    T* ptr_ = nullptr;
-
-private:
-    template <typename U>
-    friend class scoped_refptr;
-};
diff --git a/be/src/gutil/threading/thread_collision_warner.cc 
b/be/src/gutil/threading/thread_collision_warner.cc
deleted file mode 100644
index fd51a9195d6..00000000000
--- a/be/src/gutil/threading/thread_collision_warner.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gutil/threading/thread_collision_warner.h"
-
-#include "common/exception.h"
-#include "common/status.h"
-
-#ifdef __linux__
-#include <syscall.h>
-#else
-#include <sys/syscall.h>
-#endif
-
-#include <glog/logging.h>
-#include <unistd.h>
-
-#include <cstdint>
-#include <ostream>
-
-namespace base {
-
-void DCheckAsserter::warn(int64_t previous_thread_id, int64_t 
current_thread_id) {
-    throw doris::Exception(doris::Status::FatalError(
-            "Thread Collision! Previous thread id: {}, current thread id: {}", 
previous_thread_id,
-            current_thread_id));
-}
-
-static subtle::Atomic64 CurrentThread() {
-#if defined(__APPLE__)
-    uint64_t tid;
-    CHECK_EQ(0, pthread_threadid_np(NULL, &tid));
-    return tid;
-#elif defined(__linux__)
-    return syscall(__NR_gettid);
-#endif
-}
-
-void ThreadCollisionWarner::EnterSelf() {
-    // If the active thread is 0 then I'll write the current thread ID
-    // if two or more threads arrive here only one will succeed to
-    // write on valid_thread_id_ the current thread ID.
-    subtle::Atomic64 current_thread_id = CurrentThread();
-
-    int64_t previous_thread_id =
-            subtle::NoBarrier_CompareAndSwap(&valid_thread_id_, 0, 
current_thread_id);
-    if (previous_thread_id != 0 && previous_thread_id != current_thread_id) {
-        // gotcha! a thread is trying to use the same class and that is
-        // not current thread.
-        asserter_->warn(previous_thread_id, current_thread_id);
-    }
-
-    subtle::NoBarrier_AtomicIncrement(&counter_, 1);
-}
-
-void ThreadCollisionWarner::Enter() {
-    subtle::Atomic64 current_thread_id = CurrentThread();
-
-    int64_t previous_thread_id =
-            subtle::NoBarrier_CompareAndSwap(&valid_thread_id_, 0, 
current_thread_id);
-    if (previous_thread_id != 0) {
-        // gotcha! another thread is trying to use the same class.
-        asserter_->warn(previous_thread_id, current_thread_id);
-    }
-
-    subtle::NoBarrier_AtomicIncrement(&counter_, 1);
-}
-
-void ThreadCollisionWarner::Leave() {
-    if (subtle::Barrier_AtomicIncrement(&counter_, -1) == 0) {
-        subtle::NoBarrier_Store(&valid_thread_id_, 0);
-    }
-}
-
-} // namespace base
diff --git a/be/src/gutil/threading/thread_collision_warner.h 
b/be/src/gutil/threading/thread_collision_warner.h
deleted file mode 100644
index 5761c9a1c79..00000000000
--- a/be/src/gutil/threading/thread_collision_warner.h
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#pragma once
-
-#include <cstdint>
-
-#include "butil/macros.h"
-#include "gutil/atomicops.h"
-
-#ifndef BASE_EXPORT
-#define BASE_EXPORT
-#endif
-
-// A helper class alongside macros to be used to verify assumptions about 
thread
-// safety of a class.
-//
-// Example: Queue implementation non thread-safe but still usable if clients
-//          are synchronized somehow.
-//
-//          In this case the macro DFAKE_SCOPED_LOCK has to be
-//          used, it checks that if a thread is inside the push/pop then
-//          noone else is still inside the pop/push
-//
-// class NonThreadSafeQueue {
-//  public:
-//   ...
-//   void push(int) { DFAKE_SCOPED_LOCK(push_pop_); ... }
-//   int pop() { DFAKE_SCOPED_LOCK(push_pop_); ... }
-//   ...
-//  private:
-//   DFAKE_MUTEX(push_pop_);
-// };
-//
-//
-// Example: Queue implementation non thread-safe but still usable if clients
-//          are synchronized somehow, it calls a method to "protect" from
-//          a "protected" method
-//
-//          In this case the macro DFAKE_SCOPED_RECURSIVE_LOCK
-//          has to be used, it checks that if a thread is inside the push/pop
-//          then noone else is still inside the pop/push
-//
-// class NonThreadSafeQueue {
-//  public:
-//   void push(int) {
-//     DFAKE_SCOPED_LOCK(push_pop_);
-//     ...
-//   }
-//   int pop() {
-//     DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_);
-//     bar();
-//     ...
-//   }
-//   void bar() { DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_); ... }
-//   ...
-//  private:
-//   DFAKE_MUTEX(push_pop_);
-// };
-//
-//
-// Example: Queue implementation not usable even if clients are synchronized,
-//          so only one thread in the class life cycle can use the two members
-//          push/pop.
-//
-//          In this case the macro DFAKE_SCOPED_LOCK_THREAD_LOCKED pins the
-//          specified
-//          critical section the first time a thread enters push or pop, from
-//          that time on only that thread is allowed to execute push or pop.
-//
-// class NonThreadSafeQueue {
-//  public:
-//   ...
-//   void push(int) { DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_); ... }
-//   int pop() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_); ... }
-//   ...
-//  private:
-//   DFAKE_MUTEX(push_pop_);
-// };
-//
-//
-// Example: Class that has to be contructed/destroyed on same thread, it has
-//          a "shareable" method (with external synchronization) and a not
-//          shareable method (even with external synchronization).
-//
-//          In this case 3 Critical sections have to be defined
-//
-// class ExoticClass {
-//  public:
-//   ExoticClass() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
-//   ~ExoticClass() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
-//
-//   void Shareable() { DFAKE_SCOPED_LOCK(shareable_section_); ... }
-//   void NotShareable() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
-//   ...
-//  private:
-//   DFAKE_MUTEX(ctor_dtor_);
-//   DFAKE_MUTEX(shareable_section_);
-// };
-
-#if !defined(NDEBUG)
-
-// Defines a class member that acts like a mutex. It is used only as a
-// verification tool.
-#define DFAKE_MUTEX(obj) mutable base::ThreadCollisionWarner obj
-// Asserts the call is never called simultaneously in two threads. Used at
-// member function scope.
-#define DFAKE_SCOPED_LOCK(obj) base::ThreadCollisionWarner::ScopedCheck 
s_check_##obj(&obj)
-// Asserts the call is never called simultaneously in two threads. Used at
-// member function scope. Same as DFAKE_SCOPED_LOCK but allows recursive locks.
-#define DFAKE_SCOPED_RECURSIVE_LOCK(obj) \
-    base::ThreadCollisionWarner::ScopedRecursiveCheck sr_check_##obj(&obj)
-// Asserts the code is always executed in the same thread.
-#define DFAKE_SCOPED_LOCK_THREAD_LOCKED(obj) 
base::ThreadCollisionWarner::Check check_##obj(&obj)
-
-#else
-
-#define DFAKE_MUTEX(obj) typedef void InternalFakeMutexType##obj
-#define DFAKE_SCOPED_LOCK(obj) ((void)0)
-#define DFAKE_SCOPED_RECURSIVE_LOCK(obj) ((void)0)
-#define DFAKE_SCOPED_LOCK_THREAD_LOCKED(obj) ((void)0)
-
-#endif
-
-namespace base {
-
-// The class ThreadCollisionWarner uses an Asserter to notify the collision
-// AsserterBase is the interfaces and DCheckAsserter is the default asserter
-// used. During the unit tests is used another class that doesn't "DCHECK"
-// in case of collision (check thread_collision_warner_unittests.cc)
-struct BASE_EXPORT AsserterBase {
-    virtual ~AsserterBase() {}
-    virtual void warn(int64_t previous_thread_id, int64_t current_thread_id) = 
0;
-};
-
-struct BASE_EXPORT DCheckAsserter : public AsserterBase {
-    virtual ~DCheckAsserter() {}
-    void warn(int64_t previous_thread_id, int64_t current_thread_id) override;
-};
-
-class BASE_EXPORT ThreadCollisionWarner {
-public:
-    // The parameter asserter is there only for test purpose
-    explicit ThreadCollisionWarner(AsserterBase* asserter = new 
DCheckAsserter())
-            : valid_thread_id_(0), counter_(0), asserter_(asserter) {}
-
-    ~ThreadCollisionWarner() { delete asserter_; }
-
-    // This class is meant to be used through the macro
-    // DFAKE_SCOPED_LOCK_THREAD_LOCKED
-    // it doesn't leave the critical section, as opposed to ScopedCheck,
-    // because the critical section being pinned is allowed to be used only
-    // from one thread
-    class BASE_EXPORT Check {
-    public:
-        explicit Check(ThreadCollisionWarner* warner) : warner_(warner) { 
warner_->EnterSelf(); }
-
-        ~Check() {}
-
-    private:
-        ThreadCollisionWarner* warner_ = nullptr;
-
-        DISALLOW_COPY_AND_ASSIGN(Check);
-    };
-
-    // This class is meant to be used through the macro
-    // DFAKE_SCOPED_LOCK
-    class BASE_EXPORT ScopedCheck {
-    public:
-        explicit ScopedCheck(ThreadCollisionWarner* warner) : warner_(warner) 
{ warner_->Enter(); }
-
-        ~ScopedCheck() { warner_->Leave(); }
-
-    private:
-        ThreadCollisionWarner* warner_ = nullptr;
-
-        DISALLOW_COPY_AND_ASSIGN(ScopedCheck);
-    };
-
-    // This class is meant to be used through the macro
-    // DFAKE_SCOPED_RECURSIVE_LOCK
-    class BASE_EXPORT ScopedRecursiveCheck {
-    public:
-        explicit ScopedRecursiveCheck(ThreadCollisionWarner* warner) : 
warner_(warner) {
-            warner_->EnterSelf();
-        }
-
-        ~ScopedRecursiveCheck() { warner_->Leave(); }
-
-    private:
-        ThreadCollisionWarner* warner_ = nullptr;
-
-        DISALLOW_COPY_AND_ASSIGN(ScopedRecursiveCheck);
-    };
-
-private:
-    // This method stores the current thread identifier and does a DCHECK
-    // if a another thread has already done it, it is safe if same thread
-    // calls this multiple time (recursion allowed).
-    void EnterSelf();
-
-    // Same as EnterSelf but recursion is not allowed.
-    void Enter();
-
-    // Removes the thread_id stored in order to allow other threads to
-    // call EnterSelf or Enter.
-    void Leave();
-
-    // This stores the thread id that is inside the critical section, if the
-    // value is 0 then no thread is inside.
-    volatile subtle::Atomic64 valid_thread_id_;
-
-    // Counter to trace how many time a critical section was "pinned"
-    // (when allowed) in order to unpin it when counter_ reaches 0.
-    volatile subtle::Atomic64 counter_;
-
-    // Here only for class unit tests purpose, during the test I need to not
-    // DCHECK but notify the collision with something else.
-    AsserterBase* asserter_ = nullptr;
-
-    DISALLOW_COPY_AND_ASSIGN(ThreadCollisionWarner);
-};
-
-} // namespace base
diff --git a/be/src/io/fs/file_handle_cache.h b/be/src/io/fs/file_handle_cache.h
index af048b010e5..057bdefc61d 100644
--- a/be/src/io/fs/file_handle_cache.h
+++ b/be/src/io/fs/file_handle_cache.h
@@ -188,7 +188,7 @@ private:
 
     /// Thread to check for unused file handles to evict. This thread will 
exit when
     /// the _shut_down_promise is set.
-    scoped_refptr<Thread> _eviction_thread;
+    std::shared_ptr<Thread> _eviction_thread;
     std::atomic<bool> _is_shut_down = {false};
 };
 
diff --git a/be/src/olap/olap_server.cpp b/be/src/olap/olap_server.cpp
index 6a84081327f..836652d183c 100644
--- a/be/src/olap/olap_server.cpp
+++ b/be/src/olap/olap_server.cpp
@@ -305,7 +305,7 @@ Status 
StorageEngine::start_bg_threads(std::shared_ptr<WorkloadGroup> wg_sptr) {
     // path scan and gc thread
     if (config::path_gc_check) {
         for (auto data_dir : get_stores()) {
-            scoped_refptr<Thread> path_gc_thread;
+            std::shared_ptr<Thread> path_gc_thread;
             RETURN_IF_ERROR(Thread::create(
                     "StorageEngine", "path_gc_thread",
                     [this, data_dir]() { 
this->_path_gc_thread_callback(data_dir); },
diff --git a/be/src/olap/rowset/segment_v2/ngram_bloom_filter.cpp 
b/be/src/olap/rowset/segment_v2/ngram_bloom_filter.cpp
index 6ebdbb58216..be6bd3256f3 100644
--- a/be/src/olap/rowset/segment_v2/ngram_bloom_filter.cpp
+++ b/be/src/olap/rowset/segment_v2/ngram_bloom_filter.cpp
@@ -21,7 +21,7 @@
 #include <glog/logging.h>
 
 #include "absl/strings/substitute.h"
-#include "gutil/hash/city.h"
+#include "util/hash/city.h"
 
 namespace doris::segment_v2 {
 #include "common/compile_check_begin.h"
diff --git a/be/src/olap/storage_engine.h b/be/src/olap/storage_engine.h
index be03f550225..03945c3fbb8 100644
--- a/be/src/olap/storage_engine.h
+++ b/be/src/olap/storage_engine.h
@@ -168,7 +168,7 @@ protected:
     // Hold reference of quering rowsets
     std::mutex _quering_rowsets_mutex;
     std::unordered_map<RowsetId, RowsetSharedPtr> _querying_rowsets;
-    scoped_refptr<Thread> _evict_quering_rowset_thread;
+    std::shared_ptr<Thread> _evict_quering_rowset_thread;
 
     int64_t _memory_limitation_bytes_for_schema_change;
 
@@ -481,23 +481,23 @@ private:
     PendingRowsetSet _pending_local_rowsets;
     PendingRowsetSet _pending_remote_rowsets;
 
-    scoped_refptr<Thread> _unused_rowset_monitor_thread;
+    std::shared_ptr<Thread> _unused_rowset_monitor_thread;
     // thread to monitor snapshot expiry
-    scoped_refptr<Thread> _garbage_sweeper_thread;
+    std::shared_ptr<Thread> _garbage_sweeper_thread;
     // thread to monitor disk stat
-    scoped_refptr<Thread> _disk_stat_monitor_thread;
+    std::shared_ptr<Thread> _disk_stat_monitor_thread;
     // thread to produce both base and cumulative compaction tasks
-    scoped_refptr<Thread> _compaction_tasks_producer_thread;
-    scoped_refptr<Thread> _update_replica_infos_thread;
-    scoped_refptr<Thread> _cache_clean_thread;
+    std::shared_ptr<Thread> _compaction_tasks_producer_thread;
+    std::shared_ptr<Thread> _update_replica_infos_thread;
+    std::shared_ptr<Thread> _cache_clean_thread;
     // threads to clean all file descriptor not actively in use
-    std::vector<scoped_refptr<Thread>> _path_gc_threads;
+    std::vector<std::shared_ptr<Thread>> _path_gc_threads;
     // thread to produce tablet checkpoint tasks
-    scoped_refptr<Thread> _tablet_checkpoint_tasks_producer_thread;
+    std::shared_ptr<Thread> _tablet_checkpoint_tasks_producer_thread;
     // thread to check tablet path
-    scoped_refptr<Thread> _tablet_path_check_thread;
+    std::shared_ptr<Thread> _tablet_path_check_thread;
     // thread to clean tablet lookup cache
-    scoped_refptr<Thread> _lookup_cache_clean_thread;
+    std::shared_ptr<Thread> _lookup_cache_clean_thread;
 
     std::mutex _engine_task_mutex;
 
@@ -537,11 +537,11 @@ private:
     // we use unordered_map to store all cumulative compaction policy sharded 
ptr
     CumuCompactionPolicyTable _cumulative_compaction_policies;
 
-    scoped_refptr<Thread> _cooldown_tasks_producer_thread;
-    scoped_refptr<Thread> _remove_unused_remote_files_thread;
-    scoped_refptr<Thread> _cold_data_compaction_producer_thread;
+    std::shared_ptr<Thread> _cooldown_tasks_producer_thread;
+    std::shared_ptr<Thread> _remove_unused_remote_files_thread;
+    std::shared_ptr<Thread> _cold_data_compaction_producer_thread;
 
-    scoped_refptr<Thread> _cache_file_cleaner_tasks_producer_thread;
+    std::shared_ptr<Thread> _cache_file_cleaner_tasks_producer_thread;
 
     std::unique_ptr<PriorityThreadPool> _cooldown_thread_pool;
 
@@ -556,7 +556,7 @@ private:
     // tablet_id, publish_version, transaction_id, partition_id
     std::map<int64_t, std::map<int64_t, std::pair<int64_t, int64_t>>> 
_async_publish_tasks;
     // aync publish for discontinuous versions of merge_on_write table
-    scoped_refptr<Thread> _async_publish_thread;
+    std::shared_ptr<Thread> _async_publish_thread;
     std::shared_mutex _async_publish_lock;
 
     std::atomic<bool> _need_clean_trash {false};
@@ -569,7 +569,7 @@ private:
     std::unique_ptr<SnapshotManager> _snapshot_mgr;
 
     // thread to check tablet delete bitmap count tasks
-    scoped_refptr<Thread> _check_delete_bitmap_score_thread;
+    std::shared_ptr<Thread> _check_delete_bitmap_score_thread;
 
     int64_t _last_get_peers_replica_backends_time_ms {0};
 
diff --git a/be/src/olap/wal/wal_manager.h b/be/src/olap/wal/wal_manager.h
index 1baa7573fe6..6805b37b86d 100644
--- a/be/src/olap/wal/wal_manager.h
+++ b/be/src/olap/wal/wal_manager.h
@@ -127,11 +127,11 @@ private:
 
     // wal back pressure
     std::vector<std::string> _wal_dirs;
-    scoped_refptr<Thread> _update_wal_dirs_info_thread;
+    std::shared_ptr<Thread> _update_wal_dirs_info_thread;
     std::unique_ptr<WalDirsInfo> _wal_dirs_info;
 
     // replay wal
-    scoped_refptr<Thread> _replay_thread;
+    std::shared_ptr<Thread> _replay_thread;
     std::unique_ptr<doris::ThreadPool> _thread_pool;
 
     std::shared_mutex _table_lock;
diff --git a/be/src/runtime/broker_mgr.h b/be/src/runtime/broker_mgr.h
index deabc623de8..765d4a1a048 100644
--- a/be/src/runtime/broker_mgr.h
+++ b/be/src/runtime/broker_mgr.h
@@ -23,7 +23,6 @@
 #include <string>
 #include <unordered_set>
 
-#include "gutil/ref_counted.h"
 #include "util/countdown_latch.h"
 #include "util/hash_util.hpp" // IWYU pragma: keep
 
@@ -50,7 +49,7 @@ private:
     std::unordered_set<TNetworkAddress> _broker_set;
 
     CountDownLatch _stop_background_threads_latch;
-    scoped_refptr<Thread> _ping_thread;
+    std::shared_ptr<Thread> _ping_thread;
 };
 
 } // namespace doris
diff --git a/be/src/runtime/external_scan_context_mgr.h 
b/be/src/runtime/external_scan_context_mgr.h
index b5c7fee1a4c..52d4f4c3796 100644
--- a/be/src/runtime/external_scan_context_mgr.h
+++ b/be/src/runtime/external_scan_context_mgr.h
@@ -28,7 +28,6 @@
 #include <utility>
 
 #include "common/status.h"
-#include "gutil/ref_counted.h"
 #include "util/countdown_latch.h"
 
 namespace doris {
@@ -67,7 +66,7 @@ private:
     void gc_expired_context();
 
     CountDownLatch _stop_background_threads_latch;
-    scoped_refptr<Thread> _keep_alive_reaper;
+    std::shared_ptr<Thread> _keep_alive_reaper;
 
     std::mutex _lock;
 };
diff --git a/be/src/runtime/fragment_mgr.h b/be/src/runtime/fragment_mgr.h
index 64608259417..c23c4b768ec 100644
--- a/be/src/runtime/fragment_mgr.h
+++ b/be/src/runtime/fragment_mgr.h
@@ -33,7 +33,6 @@
 
 #include "common/be_mock_util.h"
 #include "common/status.h"
-#include "gutil/ref_counted.h"
 #include "http/rest_monitor_iface.h"
 #include "runtime/query_context.h"
 #include "runtime_filter/runtime_filter_mgr.h"
@@ -218,7 +217,7 @@ private:
     std::unordered_map<TUniqueId, std::unordered_map<int, int64_t>> 
_bf_size_map;
 
     CountDownLatch _stop_background_threads_latch;
-    scoped_refptr<Thread> _cancel_thread;
+    std::shared_ptr<Thread> _cancel_thread;
     // This pool is used as global async task pool
     std::unique_ptr<ThreadPool> _thread_pool;
 
diff --git a/be/src/runtime/load_channel_mgr.h 
b/be/src/runtime/load_channel_mgr.h
index 356916c49e6..74ced68a1dd 100644
--- a/be/src/runtime/load_channel_mgr.h
+++ b/be/src/runtime/load_channel_mgr.h
@@ -100,7 +100,7 @@ protected:
 
     CountDownLatch _stop_background_threads_latch;
     // thread to clean timeout load channels
-    scoped_refptr<Thread> _load_channels_clean_thread;
+    std::shared_ptr<Thread> _load_channels_clean_thread;
     Status _start_load_channels_clean();
 };
 
diff --git a/be/src/runtime/load_path_mgr.h b/be/src/runtime/load_path_mgr.h
index 41656e03c4c..528cee4f6e8 100644
--- a/be/src/runtime/load_path_mgr.h
+++ b/be/src/runtime/load_path_mgr.h
@@ -25,7 +25,6 @@
 #include <vector>
 
 #include "common/status.h"
-#include "gutil/ref_counted.h"
 #include "util/countdown_latch.h"
 #include "util/once.h"
 
@@ -77,7 +76,7 @@ private:
     uint32_t _next_shard;
     uint32_t _error_path_next_shard;
     CountDownLatch _stop_background_threads_latch;
-    scoped_refptr<Thread> _clean_thread;
+    std::shared_ptr<Thread> _clean_thread;
     DorisCallOnce<Status> _init_once;
 };
 
diff --git a/be/src/runtime/result_buffer_mgr.h 
b/be/src/runtime/result_buffer_mgr.h
index 66716d0f27c..3e66fb7a37f 100644
--- a/be/src/runtime/result_buffer_mgr.h
+++ b/be/src/runtime/result_buffer_mgr.h
@@ -30,7 +30,6 @@
 #include <vector>
 
 #include "common/status.h"
-#include "gutil/ref_counted.h"
 #include "util/countdown_latch.h"
 #include "util/hash_util.hpp"
 #include "vec/sink/varrow_flight_result_writer.h"
@@ -101,7 +100,7 @@ private:
     TimeoutMap _timeout_map;
 
     CountDownLatch _stop_background_threads_latch;
-    scoped_refptr<Thread> _clean_thread;
+    std::shared_ptr<Thread> _clean_thread;
 };
 
 } // namespace doris
diff --git a/be/src/runtime/routine_load/data_consumer_pool.h 
b/be/src/runtime/routine_load/data_consumer_pool.h
index ac491a8507c..b055a9de285 100644
--- a/be/src/runtime/routine_load/data_consumer_pool.h
+++ b/be/src/runtime/routine_load/data_consumer_pool.h
@@ -71,7 +71,7 @@ private:
     std::list<std::shared_ptr<DataConsumer>> _pool;
 
     CountDownLatch _stop_background_threads_latch;
-    scoped_refptr<Thread> _clean_idle_consumer_thread;
+    std::shared_ptr<Thread> _clean_idle_consumer_thread;
 };
 
 } // end namespace doris
diff --git a/be/src/runtime/workload_management/workload_sched_policy_mgr.h 
b/be/src/runtime/workload_management/workload_sched_policy_mgr.h
index 52b41eacf4b..ecdb0913bb9 100644
--- a/be/src/runtime/workload_management/workload_sched_policy_mgr.h
+++ b/be/src/runtime/workload_management/workload_sched_policy_mgr.h
@@ -43,7 +43,7 @@ private:
 
     std::shared_mutex _stop_lock;
     CountDownLatch _stop_latch;
-    scoped_refptr<Thread> _thread;
+    std::shared_ptr<Thread> _thread;
     ExecEnv* _exec_env;
 };
 
diff --git a/be/src/gutil/hash/city.cc b/be/src/util/hash/city.cc
similarity index 89%
rename from be/src/gutil/hash/city.cc
rename to be/src/util/hash/city.cc
index 91c2fcbc776..bad4a7585d9 100644
--- a/be/src/gutil/hash/city.cc
+++ b/be/src/util/hash/city.cc
@@ -1,3 +1,20 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
 // Copyright 2010 Google Inc. All Rights Reserved.
 // Authors: [email protected] (Geoff Pike), [email protected] (Jyrki Alakuijala)
 //
@@ -14,7 +31,7 @@
 // optimize the code here by writing a program that systematically explores
 // more of the space of possible hash functions, or by using SIMD instructions.
 
-#include "gutil/hash/city.h"
+#include "util/hash/city.h"
 
 #include <algorithm>
 #include <utility>
diff --git a/be/src/gutil/hash/city.h b/be/src/util/hash/city.h
similarity index 68%
rename from be/src/gutil/hash/city.h
rename to be/src/util/hash/city.h
index 2fb931443c9..c819ca01c51 100644
--- a/be/src/gutil/hash/city.h
+++ b/be/src/util/hash/city.h
@@ -1,3 +1,20 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
 // Copyright 2010 Google Inc. All Rights Reserved.
 // Authors: [email protected] (Geoff Pike), [email protected] (Jyrki Alakuijala)
 //
diff --git a/be/src/util/hash_util.hpp b/be/src/util/hash_util.hpp
index 247fa46d5e7..c5dce6c7292 100644
--- a/be/src/util/hash_util.hpp
+++ b/be/src/util/hash_util.hpp
@@ -28,8 +28,8 @@
 #include <functional>
 
 #include "common/compiler_util.h" // IWYU pragma: keep
-#include "gutil/hash/city.h"
 #include "util/cpu_info.h"
+#include "util/hash/city.h"
 #include "util/murmur_hash3.h"
 #include "util/sse_util.hpp"
 
diff --git a/be/src/util/thread.cpp b/be/src/util/thread.cpp
index b502cccc3a2..258f92af5a0 100644
--- a/be/src/util/thread.cpp
+++ b/be/src/util/thread.cpp
@@ -54,7 +54,6 @@
 #include "absl/strings/substitute.h"
 #include "common/config.h"
 #include "common/logging.h"
-#include "gutil/atomicops.h"
 #include "http/web_page_handler.h"
 #include "runtime/thread_context.h"
 #include "util/easy_json.h"
@@ -314,9 +313,9 @@ void Thread::join() {
 }
 
 int64_t Thread::tid() const {
-    int64_t t = base::subtle::Acquire_Load(&_tid);
+    int64_t t = _tid.load();
     if (t != PARENT_WAITING_TID) {
-        return _tid;
+        return t;
     }
     return wait_for_tid();
 }
@@ -364,7 +363,7 @@ int64_t Thread::current_thread_id() {
 int64_t Thread::wait_for_tid() const {
     int loop_count = 0;
     while (true) {
-        int64_t t = Acquire_Load(&_tid);
+        int64_t t = _tid.load();
         if (t != PARENT_WAITING_TID) {
             return t;
         }
@@ -388,11 +387,11 @@ int64_t Thread::wait_for_tid() const {
 }
 
 Status Thread::start_thread(const std::string& category, const std::string& 
name,
-                            const ThreadFunctor& functor, 
scoped_refptr<Thread>* holder) {
+                            const ThreadFunctor& functor, 
std::shared_ptr<Thread>* holder) {
     std::call_once(once, init_threadmgr);
 
     // Temporary reference for the duration of this function.
-    scoped_refptr<Thread> t(new Thread(category, name, functor));
+    auto t = std::make_shared<Thread>(category, name, functor);
 
     // Optional, and only set if the thread was successfully created.
     //
@@ -408,13 +407,13 @@ Status Thread::start_thread(const std::string& category, 
const std::string& name
     // access the thread object, and we have no guarantee that our caller
     // won't drop the reference as soon as we return. This is dereferenced
     // in FinishThread().
-    t->AddRef();
+    t->_shared_self = t;
 
     int ret = pthread_create(&t->_thread, nullptr, &Thread::supervise_thread, 
t.get());
     if (ret) {
         // If we failed to create the thread, we need to undo all of our prep 
work.
         t->_tid = INVALID_TID;
-        t->Release();
+        t->_shared_self.reset();
         return Status::RuntimeError("Could not create thread. (error {}) {}", 
ret, strerror(ret));
     }
 
@@ -438,7 +437,7 @@ void* Thread::supervise_thread(void* arg) {
 
     // Set up the TLS.
     //
-    // We could store a scoped_refptr in the TLS itself, but as its
+    // We could store a shared_ptr in the TLS itself, but as its
     // lifecycle is poorly defined, we'll use a bare pointer. We
     // already incremented the reference count in StartThread.
     Thread::_tls = t;
@@ -448,10 +447,10 @@ void* Thread::supervise_thread(void* arg) {
 
     // Publish our tid to '_tid', which unblocks any callers waiting in
     // WaitForTid().
-    Release_Store(&t->_tid, system_tid);
+    t->_tid.store(system_tid);
 
     std::string name = absl::Substitute("$0-$1", t->name(), system_tid);
-    thread_manager->set_thread_name(name, t->_tid);
+    ThreadMgr::set_thread_name(name, t->_tid);
     thread_manager->add_thread(pthread_self(), name, t->category(), t->_tid);
 
     // FinishThread() is guaranteed to run (even if functor_ throws an
@@ -477,7 +476,7 @@ void Thread::finish_thread(void* arg) {
     t->_done.count_down();
 
     VLOG_CRITICAL << "Ended thread " << t->_tid << " - " << t->category() << 
":" << t->name();
-    t->Release();
+    t->_shared_self.reset();
     // NOTE: the above 'Release' call could be the last reference to 'this',
     // so 'this' could be destructed at this point. Do not add any code
     // following here!
@@ -526,7 +525,7 @@ Status ThreadJoiner::join() {
     while (keep_trying) {
         if (waited_ms >= _warn_after_ms) {
             LOG(WARNING) << absl::Substitute("Waited for $0ms trying to join 
with $1 (tid $2)",
-                                             waited_ms, _thread->_name, 
_thread->_tid);
+                                             waited_ms, _thread->_name, 
_thread->_tid.load());
         }
 
         int remaining_before_giveup = std::numeric_limits<int>::max();
diff --git a/be/src/util/thread.h b/be/src/util/thread.h
index b6834a31c89..5026ff0673f 100644
--- a/be/src/util/thread.h
+++ b/be/src/util/thread.h
@@ -28,59 +28,45 @@
 #include <utility>
 
 #include "common/status.h"
-#include "gutil/ref_counted.h"
 #include "util/countdown_latch.h"
 
 namespace doris {
 class WebPageHandler;
 
-class Thread : public RefCountedThreadSafe<Thread> {
+class Thread {
 public:
     template <class F>
     static Status create(const std::string& category, const std::string& name, 
const F& f,
-                         scoped_refptr<Thread>* holder) {
+                         std::shared_ptr<Thread>* holder) {
         return start_thread(category, name, f, holder);
     }
 
     template <class F, class A1>
     static Status create(const std::string& category, const std::string& name, 
const F& f,
-                         const A1& a1, scoped_refptr<Thread>* holder) {
+                         const A1& a1, std::shared_ptr<Thread>* holder) {
         return start_thread(category, name, std::bind(f, a1), holder);
     }
 
     template <class F, class A1, class A2>
     static Status create(const std::string& category, const std::string& name, 
const F& f,
-                         const A1& a1, const A2& a2, scoped_refptr<Thread>* 
holder) {
+                         const A1& a1, const A2& a2, std::shared_ptr<Thread>* 
holder) {
         return start_thread(category, name, std::bind(f, a1, a2), holder);
     }
 
     template <class F, class A1, class A2, class A3>
     static Status create(const std::string& category, const std::string& name, 
const F& f,
-                         const A1& a1, const A2& a2, const A3& a3, 
scoped_refptr<Thread>* holder) {
+                         const A1& a1, const A2& a2, const A3& a3,
+                         std::shared_ptr<Thread>* holder) {
         return start_thread(category, name, std::bind(f, a1, a2, a3), holder);
     }
 
     template <class F, class A1, class A2, class A3, class A4>
     static Status create(const std::string& category, const std::string& name, 
const F& f,
                          const A1& a1, const A2& a2, const A3& a3, const A4& 
a4,
-                         scoped_refptr<Thread>* holder) {
+                         std::shared_ptr<Thread>* holder) {
         return start_thread(category, name, std::bind(f, a1, a2, a3, a4), 
holder);
     }
 
-    template <class F, class A1, class A2, class A3, class A4, class A5>
-    static Status create(const std::string& category, const std::string& name, 
const F& f,
-                         const A1& a1, const A2& a2, const A3& a3, const A4& 
a4, const A5& a5,
-                         scoped_refptr<Thread>* holder) {
-        return start_thread(category, name, std::bind(f, a1, a2, a3, a4, a5), 
holder);
-    }
-
-    template <class F, class A1, class A2, class A3, class A4, class A5, class 
A6>
-    static Status create(const std::string& category, const std::string& name, 
const F& f,
-                         const A1& a1, const A2& a2, const A3& a3, const A4& 
a4, const A5& a5,
-                         const A6& a6, scoped_refptr<Thread>* holder) {
-        return start_thread(category, name, std::bind(f, a1, a2, a3, a4, a5, 
a6), holder);
-    }
-
     static void set_self_name(const std::string& name);
 
 #ifndef __APPLE__
@@ -138,17 +124,9 @@ public:
     // unique and stable thread ID, not necessarily the system thread ID.
     static int64_t current_thread_id();
 
-private:
-    friend class ThreadJoiner;
-
-    enum {
-        INVALID_TID = -1,
-        PARENT_WAITING_TID = -2,
-    };
-
     // User function to be executed by this thread.
-    typedef std::function<void()> ThreadFunctor;
-    Thread(const std::string& category, const std::string& name, ThreadFunctor 
functor)
+    using ThreadFunctor = std::function<void()>;
+    Thread(std::string category, std::string name, ThreadFunctor functor)
             : _thread(0),
               _tid(INVALID_TID),
               _functor(std::move(functor)),
@@ -157,6 +135,14 @@ private:
               _done(1),
               _joinable(false) {}
 
+private:
+    friend class ThreadJoiner;
+
+    enum {
+        INVALID_TID = -1,
+        PARENT_WAITING_TID = -2,
+    };
+
     // Library-specific thread ID.
     pthread_t _thread;
 
@@ -169,7 +155,7 @@ private:
     //    thread has not yet begun running. Therefore the TID is not yet known
     //    but it will be set once the thread starts.
     // 3. <positive value>: the thread is running.
-    int64_t _tid;
+    std::atomic<int64_t> _tid;
 
     const ThreadFunctor _functor;
 
@@ -189,6 +175,10 @@ private:
     // thread is not a Thread.
     static __thread Thread* _tls;
 
+    // Keeps this Thread object alive while the thread is running.
+    // The reference is released in finish_thread(), once the thread exits.
+    std::shared_ptr<Thread> _shared_self;
+
     // Wait for the running thread to publish its tid.
     int64_t wait_for_tid() const;
 
@@ -197,7 +187,7 @@ private:
     // thread that initialisation is complete before returning. On success, 
stores a
     // reference to the thread in holder.
     static Status start_thread(const std::string& category, const std::string& 
name,
-                               const ThreadFunctor& functor, 
scoped_refptr<Thread>* holder);
+                               const ThreadFunctor& functor, 
std::shared_ptr<Thread>* holder);
 
     // Wrapper for the user-supplied function. Invoked from the new thread,
     // with the Thread as its only argument. Executes _functor, but before
diff --git a/be/src/vec/common/string_ref.h b/be/src/vec/common/string_ref.h
index d3389f163f6..508399c4af9 100644
--- a/be/src/vec/common/string_ref.h
+++ b/be/src/vec/common/string_ref.h
@@ -32,7 +32,7 @@
 #include <string_view>
 #include <vector>
 
-#include "gutil/hash/city.h"
+#include "util/hash/city.h"
 #include "util/hash_util.hpp"
 #include "util/slice.h"
 #include "util/sse_util.hpp"
diff --git a/be/src/vec/common/uint128.h b/be/src/vec/common/uint128.h
index bf50921afce..961a4958955 100644
--- a/be/src/vec/common/uint128.h
+++ b/be/src/vec/common/uint128.h
@@ -25,7 +25,7 @@
 #include <sstream>
 #include <tuple>
 
-#include "gutil/hash/city.h"
+#include "util/hash/city.h"
 #include "util/sse_util.hpp"
 #include "vec/core/extended_types.h"
 #include "vec/core/types.h"
diff --git a/be/src/vec/spill/spill_stream_manager.h 
b/be/src/vec/spill/spill_stream_manager.h
index eb3e86f6d7b..2d7a6dcea44 100644
--- a/be/src/vec/spill/spill_stream_manager.h
+++ b/be/src/vec/spill/spill_stream_manager.h
@@ -157,7 +157,7 @@ private:
 
     CountDownLatch _stop_background_threads_latch;
     std::unique_ptr<ThreadPool> _spill_io_thread_pool;
-    scoped_refptr<Thread> _spill_gc_thread;
+    std::shared_ptr<Thread> _spill_gc_thread;
 
     std::atomic_uint64_t id_ = 0;
 
diff --git a/be/test/util/cityhash_test.cpp b/be/test/util/cityhash_test.cpp
index 399c7954fa0..bd940c8360a 100644
--- a/be/test/util/cityhash_test.cpp
+++ b/be/test/util/cityhash_test.cpp
@@ -23,9 +23,9 @@
 #include <iostream>
 
 #include "gtest/gtest_pred_impl.h"
-#include "gutil/hash/city.h"
 #include "olap/rowset/segment_v2/ngram_bloom_filter.h"
 #include "testutil/any_type.h"
+#include "util/hash/city.h"
 
 namespace doris {
 
diff --git a/be/test/util/countdown_latch_test.cpp 
b/be/test/util/countdown_latch_test.cpp
index 86999ca929d..c412f158dae 100644
--- a/be/test/util/countdown_latch_test.cpp
+++ b/be/test/util/countdown_latch_test.cpp
@@ -26,7 +26,6 @@
 
 #include "common/status.h"
 #include "gtest/gtest_pred_impl.h"
-#include "gutil/ref_counted.h"
 #include "util/thread.h"
 #include "util/threadpool.h"
 
@@ -66,7 +65,7 @@ TEST(TestCountDownLatch, TestLatch) {
 // continue.
 TEST(TestCountDownLatch, TestResetToZero) {
     CountDownLatch cdl(100);
-    scoped_refptr<Thread> t;
+    std::shared_ptr<Thread> t;
     EXPECT_TRUE(Thread::create("test", "cdl-test", &CountDownLatch::wait, 
&cdl, &t).ok());
 
     // Sleep for a bit until it's likely the other thread is waiting on the 
latch.
diff --git a/be/test/util/thread_test.cpp b/be/test/util/thread_test.cpp
index 554b9936cf9..5957412409d 100644
--- a/be/test/util/thread_test.cpp
+++ b/be/test/util/thread_test.cpp
@@ -27,7 +27,6 @@
 #include "common/logging.h"
 #include "common/status.h"
 #include "gtest/gtest_pred_impl.h"
-#include "gutil/ref_counted.h"
 #include "util/runtime_profile.h"
 #include "util/time.h"
 
@@ -44,7 +43,7 @@ public:
 // Join with a thread and emit warnings while waiting to join.
 // This has to be manually verified.
 TEST_F(ThreadTest, TestJoinAndWarn) {
-    scoped_refptr<Thread> holder;
+    std::shared_ptr<Thread> holder;
     Status status = Thread::create("test", "sleeper thread", SleepForMs, 1000, 
&holder);
     EXPECT_TRUE(status.ok());
     status = 
ThreadJoiner(holder.get()).warn_after_ms(10).warn_every_ms(100).join();
@@ -52,7 +51,7 @@ TEST_F(ThreadTest, TestJoinAndWarn) {
 }
 
 TEST_F(ThreadTest, TestFailedJoin) {
-    scoped_refptr<Thread> holder;
+    std::shared_ptr<Thread> holder;
     Status status = Thread::create("test", "sleeper thread", SleepForMs, 1000, 
&holder);
     EXPECT_TRUE(status.ok());
     status = ThreadJoiner(holder.get()).give_up_after_ms(50).join();
@@ -67,14 +66,14 @@ static void TryJoinOnSelf() {
 
 // Try to join on the thread that is currently running.
 TEST_F(ThreadTest, TestJoinOnSelf) {
-    scoped_refptr<Thread> holder;
+    std::shared_ptr<Thread> holder;
     EXPECT_TRUE(Thread::create("test", "test", TryJoinOnSelf, &holder).ok());
     holder->join();
     // Actual assertion is done by the thread spawned above.
 }
 
 TEST_F(ThreadTest, TestDoubleJoinIsNoOp) {
-    scoped_refptr<Thread> holder;
+    std::shared_ptr<Thread> holder;
     Status status = Thread::create("test", "sleeper thread", SleepForMs, 0, 
&holder);
     EXPECT_TRUE(status.ok());
     ThreadJoiner joiner(holder.get());
@@ -85,7 +84,7 @@ TEST_F(ThreadTest, TestDoubleJoinIsNoOp) {
 }
 
 TEST_F(ThreadTest, ThreadStartBenchmark) {
-    std::vector<scoped_refptr<Thread>> threads(1000);
+    std::vector<std::shared_ptr<Thread>> threads(1000);
     {
         int64_t thread_creation_ns = 0;
         {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to