On Fri, Dec 5, 2025 at 9:18 AM Tomasz Kamiński <[email protected]> wrote:

> This patch implements P3860R1 (accepted as a DR against C++20) and
> LWG 4472.
>
> The two constraints on the constructor (that T and U are similar types and
> is_convertible_v<U*, T*>) are combined into a single check:
> is_convertible_v<_Up(*)[1], _Tp(*)[1]>. While this check is not equivalent
> for array-of-known-bound to array-of-unknown-bound conversions (T[N] to
> T[]), this is irrelevant for atomic_ref, since instantiation with an
> array type is ill-formed (due to the return type of load and other
> members).
>
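For illustration, a minimal sketch of what the single check accepts and
rejects (int and long are just example types here; the nested-pointer case
mirrors conversions exercised by the new test):

  #include <type_traits>

  // const can be added, so e.g. atomic_ref<const int> is constructible
  // from atomic_ref<int>:
  static_assert( std::is_convertible_v<int(*)[1], const int(*)[1]> );
  // ...but const cannot be stripped:
  static_assert( !std::is_convertible_v<const int(*)[1], int(*)[1]> );
  // dissimilar types are rejected:
  static_assert( !std::is_convertible_v<int(*)[1], long(*)[1]> );
  // qualification conversions on similar nested pointers are accepted,
  // e.g. atomic_ref<const int* const> from atomic_ref<int*>:
  static_assert( std::is_convertible_v<int*(*)[1],
                                       const int* const(*)[1]> );
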
> The __atomic_ref_base constructor is modified to accept _Tp* instead of
> _Tp&. This allows both the atomic_ref(atomic_ref<_Up>) and atomic_ref(_Tp&)
> constructors to delegate to it. The precondition check on alignment is
> moved specifically to the atomic_ref(_Tp&) constructor, preventing
> redundant checks during atomic_ref conversion.
>
> A deleted atomic_ref(_Tp&&) constructor is introduced (per LWG 4472).
> This prevents the construction of atomic_ref<T> from atomic_ref<volatile T>
> via the conversion operator.
>
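As a concrete illustration (int is just an example type, and the volatile
case assumes a target where std::atomic_ref<int> is lock-free, the same way
the new test guards its volatile checks), both of the following are now
rejected:

  #include <atomic>
  #include <type_traits>

  // LWG 4472: the constructor no longer binds to temporaries, even for
  // atomic_ref<const T>:
  static_assert( !std::is_constructible_v<std::atomic_ref<const int>, int> );
  // ...and there is no route from atomic_ref<volatile T> to atomic_ref<T>
  // through the conversion operator (which performs a load):
  static_assert( !std::is_convertible_v<std::atomic_ref<volatile int>,
                                        std::atomic_ref<int>> );
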
> libstdc++-v3/ChangeLog:
>
>         * include/bits/atomic_base.h
>         (__atomic_ref_base<const _Tp>::__atomic_ref_base): Accept
>         pointer instead of reference. Remove precondition check and
>         mark as noexcept.
>         (__atomic_ref_base<_Tp>::__atomic_ref_base): Accept pointer
>         instead of reference, and mark as noexcept.
>         * include/std/atomic (atomic_ref::atomic_ref(_Tp&)): Add
>         precondition check and take address of argument.
>         (atomic_ref::atomic_ref(_Tp&&)): Define as deleted.
>         (atomic_ref::atomic_ref(atomic_ref<_Up>)): Define.
>         * include/bits/shared_ptr_atomic.h (_Sp_atomic::_Atomic_count):
>         Pass address to __atomic_ref constructor.
>         * include/std/barrier (__tree_barrier_base::_M_arrive)
>         (__tree_barrier::arrive): Pass address to __atomic_ref constructor.
>         * testsuite/29_atomics/atomic_ref/ctor.cc: New test.
> ---
> v3: adjusted the Overload set in the test to more closely reflect the
> example from the reflector.
>
>
>  libstdc++-v3/include/bits/atomic_base.h       |  11 +-
>  libstdc++-v3/include/bits/shared_ptr_atomic.h |  16 +-
>  libstdc++-v3/include/std/atomic               |  21 +-
>  libstdc++-v3/include/std/barrier              |   6 +-
>  .../testsuite/29_atomics/atomic_ref/ctor.cc   | 193 ++++++++++++++++++
>  5 files changed, 228 insertions(+), 19 deletions(-)
>  create mode 100644 libstdc++-v3/testsuite/29_atomics/atomic_ref/ctor.cc
>
> diff --git a/libstdc++-v3/include/bits/atomic_base.h
> b/libstdc++-v3/include/bits/atomic_base.h
> index 90b8df55ede..d07cb0f13c8 100644
> --- a/libstdc++-v3/include/bits/atomic_base.h
> +++ b/libstdc++-v3/include/bits/atomic_base.h
> @@ -1561,11 +1561,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>        __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;
>
>        explicit
> -      __atomic_ref_base(const _Tp& __t)
> -       : _M_ptr(const_cast<_Tp*>(std::addressof(__t)))
> -      {
> -       __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment)
> == 0);
> -      }
> +      __atomic_ref_base(const _Tp* __ptr) noexcept
> +      : _M_ptr(const_cast<_Tp*>(__ptr))
> +      { }
>
>        __atomic_ref_base(const __atomic_ref_base&) noexcept = default;
>
> @@ -1607,7 +1605,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>        using value_type = typename __atomic_ref_base<const
> _Tp>::value_type;
>
>        explicit
> -      __atomic_ref_base(_Tp& __t) : __atomic_ref_base<const _Tp>(__t)
> +      __atomic_ref_base(_Tp* __ptr) noexcept
> +      : __atomic_ref_base<const _Tp>(__ptr)
>        { }
>
>        value_type
> diff --git a/libstdc++-v3/include/bits/shared_ptr_atomic.h
> b/libstdc++-v3/include/bits/shared_ptr_atomic.h
> index cbc4bf621f4..b16d93e6697 100644
> --- a/libstdc++-v3/include/bits/shared_ptr_atomic.h
> +++ b/libstdc++-v3/include/bits/shared_ptr_atomic.h
> @@ -421,7 +421,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>
>         ~_Atomic_count()
>         {
> -         auto __val = _AtomicRef(_M_val).load(memory_order_relaxed);
> +         auto __val = _AtomicRef(&_M_val).load(memory_order_relaxed);
>           _GLIBCXX_TSAN_MUTEX_DESTROY(&_M_val);
>           __glibcxx_assert(!(__val & _S_lock_bit));
>           if (auto __pi = reinterpret_cast<pointer>(__val))
> @@ -443,7 +443,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>         {
>           // To acquire the lock we flip the LSB from 0 to 1.
>
> -         _AtomicRef __aref(_M_val);
> +         _AtomicRef __aref(&_M_val);
>           auto __current = __aref.load(memory_order_relaxed);
>           while (__current & _S_lock_bit)
>             {
> @@ -476,7 +476,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>         unlock(memory_order __o) const noexcept
>         {
>           _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
> -         _AtomicRef(_M_val).fetch_sub(1, __o);
> +         _AtomicRef(&_M_val).fetch_sub(1, __o);
>           _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
>         }
>
> @@ -489,7 +489,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>             __o = memory_order_release;
>           auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
>           _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
> -         __x = _AtomicRef(_M_val).exchange(__x, __o);
> +         __x = _AtomicRef(&_M_val).exchange(__x, __o);
>           _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
>           __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
>         }
> @@ -502,7 +502,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>           auto __old_ptr = __ptr;
>           _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
>           uintptr_t __old_pi
> -           = _AtomicRef(_M_val).fetch_sub(1, memory_order_relaxed) - 1u;
> +           = _AtomicRef(&_M_val).fetch_sub(1, memory_order_relaxed) - 1u;
>           _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
>
>           // Ensure that the correct value of _M_ptr is visible after
> locking,
> @@ -528,14 +528,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>                 // wake up if either of the values changed
>                 return __new_pi != __old_pi || __new_ptr != __old_ptr;
>               },
> -           [__o, this] { return _AtomicRef(_M_val).load(__o); });
> +           [__o, this] { return _AtomicRef(&_M_val).load(__o); });
>         }
>
>         void
>         notify_one() noexcept
>         {
>           _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
> -         _AtomicRef(_M_val).notify_one();
> +         _AtomicRef(&_M_val).notify_one();
>           _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
>         }
>
> @@ -543,7 +543,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>         notify_all() noexcept
>         {
>           _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
> -         _AtomicRef(_M_val).notify_all();
> +         _AtomicRef(&_M_val).notify_all();
>           _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
>         }
>  #endif
> diff --git a/libstdc++-v3/include/std/atomic
> b/libstdc++-v3/include/std/atomic
> index ccb77fa6327..15372ea7994 100644
> --- a/libstdc++-v3/include/std/atomic
> +++ b/libstdc++-v3/include/std/atomic
> @@ -1767,14 +1767,31 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>      struct atomic_ref : __atomic_ref<_Tp>
>      {
>        explicit
> -      atomic_ref(_Tp& __t) noexcept : __atomic_ref<_Tp>(__t)
> -      { }
> +      atomic_ref(_Tp& __t) noexcept
> +      : __atomic_ref<_Tp>(std::addressof(__t))
> +      {
> +       __glibcxx_assert(((__UINTPTR_TYPE__)this->_M_ptr %
> this->required_alignment) == 0);
> +      }
> +
> +      // _GLIBCXX_RESOLVE_LIB_DEFECTS
> +      // 4472. std::atomic_ref<const T> can be constructed from
> temporaries
> +      explicit
> +      atomic_ref(_Tp&&) = delete;
> +
> +      template<typename _Up>
> +       requires is_convertible_v<_Up(*)[1], _Tp(*)[1]>
> +       atomic_ref(atomic_ref<_Up> __other) noexcept
> +       : __atomic_ref<_Tp>(__other._M_ptr)
>
I have changed `__atomic_ref_base` to accept _Tp* (a pointer) instead of a
reference, so I can pass `__other._M_ptr` directly here. Accepting a
reference would require writing `*__other._M_ptr` at this point, which would
trigger ADL for operator*; passing the pointer sidesteps that issue
entirely.

> +       { }
>
>        atomic_ref& operator=(const atomic_ref&) = delete;
>
>        atomic_ref(const atomic_ref&) = default;
>
>        using __atomic_ref<_Tp>::operator=;
> +
> +      template<typename>
> +       friend struct atomic_ref;
>      };
>  #endif // __cpp_lib_atomic_ref
>
> diff --git a/libstdc++-v3/include/std/barrier
> b/libstdc++-v3/include/std/barrier
> index 56270c99e05..978c12a8067 100644
> --- a/libstdc++-v3/include/std/barrier
> +++ b/libstdc++-v3/include/std/barrier
> @@ -146,7 +146,7 @@ It looks different from literature pseudocode for two
> main reasons:
>               if (__current == __end_node)
>                 __current = 0;
>               auto __expect = __old_phase;
> -             __atomic_phase_ref_t __phase(__state[__current]
> +             __atomic_phase_ref_t __phase(&__state[__current]
>                                               .__tickets[__round]);
>               if (__current == __last_node && (__current_expected & 1))
>                 {
> @@ -198,7 +198,7 @@ It looks different from literature pseudocode for two
> main reasons:
>
>         std::hash<std::thread::id> __hasher;
>         size_t __current = __hasher(std::this_thread::get_id());
> -       __atomic_phase_ref_t __phase(_M_phase);
> +       __atomic_phase_ref_t __phase(&_M_phase);
>         const auto __old_phase = __phase.load(memory_order_relaxed);
>         const auto __cur = static_cast<unsigned char>(__old_phase);
>
> @@ -230,7 +230,7 @@ It looks different from literature pseudocode for two
> main reasons:
>        void
>        wait(arrival_token&& __old_phase) const
>        {
> -       __atomic_phase_const_ref_t __phase(_M_phase);
> +       __atomic_phase_const_ref_t __phase(&_M_phase);
>         __phase.wait(__old_phase, memory_order_acquire);
>        }
>
> diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/ctor.cc
> b/libstdc++-v3/testsuite/29_atomics/atomic_ref/ctor.cc
> new file mode 100644
> index 00000000000..28ab56fa32b
> --- /dev/null
> +++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/ctor.cc
> @@ -0,0 +1,193 @@
> +// { dg-do run { target c++26 } }
> +// { dg-require-atomic-cmpxchg-word "" }
> +// { dg-add-options libatomic }
> +
> +#include <atomic>
> +#include <testsuite_hooks.h>
> +
> +struct X
> +{
> +  X() = default;
> +  X(int i) : i(i) { }
> +  int i;
> +
> +  friend bool
> +  operator==(X, X) = default;
> +};
> +
> +template<typename T>
> +void testTemporary()
> +{
> +  static_assert( !std::is_constructible_v<std::atomic_ref<T>, T> );
> +  static_assert( !std::is_constructible_v<std::atomic_ref<const T>, T> );
> +  static_assert( !std::is_constructible_v<std::atomic_ref<T>, const T> );
> +  static_assert( !std::is_constructible_v<std::atomic_ref<const T>, const
> T> );
> +
> +  if constexpr (std::atomic_ref<T>::is_always_lock_free)
> +  {
> +    static_assert( !std::is_constructible_v<std::atomic_ref<volatile T>,
> T> );
> +    static_assert( !std::is_constructible_v<std::atomic_ref<volatile T>,
> volatile T> );
> +    static_assert( !std::is_constructible_v<std::atomic_ref<const
> volatile T>, T> );
> +    static_assert( !std::is_constructible_v<std::atomic_ref<const
> volatile T>, const volatile T> );
> +  }
> +
> +  struct X { X(T) {} };
> +  struct Overload
> +  {
> +    static int operator()(X) { return 1; }
> +    static int operator()(std::atomic_ref<T>) { return 2; }
> +  };
> +  VERIFY( Overload{}(T()) == 1 );
> +  static_assert( !requires { Overload{}({T()}); } );
> +}
> +
> +template<typename T>
> +bool same_address(const std::atomic_ref<T>& t, const
> std::type_identity_t<T>& u)
> +{
> +#if (__cplusplus >  202302L)
> +  return t.address() == &u;
> +#endif
> +  return true;
> +}
> +
> +template<typename From, typename To = From>
> +void
> +testConv()
> +{
> +  alignas(std::atomic_ref<From>::required_alignment) From val{};
> +  std::atomic_ref<From> src(val);
> +  std::atomic_ref<const From> csrc(val);
> +
> +  std::atomic_ref<const To> d1(src);
> +  VERIFY( same_address(d1, val) );
> +  std::atomic_ref<const To> d2(csrc);
> +  VERIFY( same_address(d2, val) );
> +  static_assert( !std::is_convertible_v<std::atomic_ref<const From>,
> +                                       std::atomic_ref<To>> );
> +
> +  if constexpr (std::atomic_ref<From>::is_always_lock_free)
> +  {
> +    std::atomic_ref<const volatile To> d4(src);
> +    VERIFY( same_address(d4, val) );
> +    std::atomic_ref<const volatile To> d5(csrc);
> +    VERIFY( same_address(d5, val) );
> +    if constexpr (std::is_same_v<From, To>)
> +    {
> +      std::atomic_ref<volatile To> d3(src);
> +      VERIFY( same_address(d3, val) );
> +    }
> +
> +    static_assert( !std::is_convertible_v<std::atomic_ref<volatile From>,
> +                                         std::atomic_ref<To>> );
> +    static_assert( !std::is_convertible_v<std::atomic_ref<volatile From>,
> +                                         std::atomic_ref<const To>> );
> +    static_assert( !std::is_convertible_v<std::atomic_ref<const volatile
> From>,
> +                                         std::atomic_ref<To>> );
> +    static_assert( !std::is_convertible_v<std::atomic_ref<const volatile
> From>,
> +                                         std::atomic_ref<const To>> );
> +  }
> +}
> +
> +template<typename From, typename To>
> +void
> +testSimilarConv()
> +{
> +  testConv<From, To>();
> +  static_assert( !std::is_convertible_v<      To,       From> );
> +  static_assert( !std::is_convertible_v<      To, const From> );
> +  static_assert( !std::is_convertible_v<const To,       From> );
> +  static_assert( !std::is_convertible_v<const To, const From> );
> +
> +  if constexpr (std::atomic_ref<From>::is_always_lock_free)
> +  {
> +    static_assert( !std::is_convertible_v<volatile To,          From> );
> +    static_assert( !std::is_convertible_v<         To, volatile From> );
> +    static_assert( !std::is_convertible_v<volatile To, volatile From> );
> +
> +    static_assert( !std::is_convertible_v<const volatile To,
>   From> );
> +    static_assert( !std::is_convertible_v<               To, const
> volatile From> );
> +    static_assert( !std::is_convertible_v<const volatile To, const
> volatile From> );
> +
> +    static_assert( !std::is_convertible_v<      To, volatile From> );
> +    static_assert( !std::is_convertible_v<const To, volatile From> );
> +    static_assert( !std::is_convertible_v<      To, const volatile From>
> );
> +    static_assert( !std::is_convertible_v<const To, const volatile From>
> );
> +
> +    static_assert( !std::is_convertible_v<volatile To,       From> );
> +    static_assert( !std::is_convertible_v<volatile To, const From> );
> +    static_assert( !std::is_convertible_v<const volatile To,       From>
> );
> +    static_assert( !std::is_convertible_v<const volatile To, const From>
> );
> +  }
> +}
> +
> +template<typename T, template<typename> typename MakePtr =
> std::add_pointer_t>
> +void
> +testPtrConv()
> +{
> +  testConv<MakePtr<T>>();
> +  testSimilarConv<MakePtr<T>, MakePtr<const T>>();
> +  testSimilarConv<MakePtr<T*>, MakePtr<const T* const>>();
> +  testSimilarConv<MakePtr<const T*>, MakePtr<const T* const>>();
> +  testSimilarConv<MakePtr<T* const>, MakePtr<const T* const>>();
> +
> +  testSimilarConv<MakePtr<T[2]>, MakePtr<const T[2]>>();
> +  testSimilarConv<MakePtr<T[]>, MakePtr<const T[]>>();
> +
> +  testSimilarConv<MakePtr<T[2]>, MakePtr<T[]>>();
> +  testSimilarConv<MakePtr<T[2]>, MakePtr<const T[]>>();
> +  testSimilarConv<MakePtr<const T[2]>, MakePtr<const T[]>>();
> +
> +  if constexpr (std::atomic_ref<MakePtr<T>>::is_always_lock_free)
> +  {
> +    testSimilarConv<MakePtr<T>, MakePtr<volatile T>>();
> +    testSimilarConv<MakePtr<T>, MakePtr<const volatile T>>();
> +    testSimilarConv<MakePtr<volatile T>, MakePtr<const volatile T>>();
> +    testSimilarConv<MakePtr<T*>, MakePtr<volatile T* const>>();
> +    testSimilarConv<MakePtr<volatile T*>, MakePtr<volatile T* const>>();
> +    testSimilarConv<MakePtr<T*>, MakePtr<const volatile T* const>>();
> +    testSimilarConv<MakePtr<volatile T*>, MakePtr<const volatile T*
> const>>();
> +    testSimilarConv<MakePtr<volatile T* const>, MakePtr<const volatile T*
> const>>();
> +
> +    testSimilarConv<MakePtr<T[2]>, MakePtr<volatile T[2]>>();
> +    testSimilarConv<MakePtr<T[2]>, MakePtr<const volatile T[2]>>();
> +    testSimilarConv<MakePtr<const T[2]>, MakePtr<const volatile T[2]>>();
> +    testSimilarConv<MakePtr<volatile T[2]>, MakePtr<const volatile
> T[2]>>();
> +
> +    testSimilarConv<MakePtr<T[]>, MakePtr<volatile T[]>>();
> +    testSimilarConv<MakePtr<T[]>, MakePtr<const volatile T[]>>();
> +    testSimilarConv<MakePtr<const T[]>, MakePtr<const volatile T[]>>();
> +    testSimilarConv<MakePtr<volatile T[]>, MakePtr<const volatile
> T[]>>();
> +
> +    testSimilarConv<MakePtr<T[2]>, MakePtr<volatile T[]>>();
> +    testSimilarConv<MakePtr<volatile T[2]>, MakePtr<volatile T[]>>();
> +    testSimilarConv<MakePtr<const T[2]>, MakePtr<const volatile T[]>>();
> +    testSimilarConv<MakePtr<volatile T[2]>, MakePtr<const volatile
> T[]>>();
> +    testSimilarConv<MakePtr<const volatile T[2]>, MakePtr<const volatile
> T[]>>();
> +  }
> +}
> +
> +struct D : X {};
> +static_assert( !std::is_convertible_v<std::atomic_ref<D>,
> std::atomic_ref<X>> );
> +static_assert( !std::is_convertible_v<std::atomic_ref<D>,
> std::atomic_ref<const X>> );
> +static_assert( !std::is_convertible_v<std::atomic_ref<D*>,
> std::atomic_ref<const X* const>> );
> +static_assert( !std::is_convertible_v<std::atomic_ref<const D*>,
> std::atomic_ref<const X* const>> );
> +
> +template<typename T>
> +using member_pointer_t = T X::*;
> +
> +int
> +main()
> +{
> +  testTemporary<bool>();
> +  testTemporary<int>();
> +  testTemporary<float>();
> +  testTemporary<int*>();
> +  testTemporary<X>();
> +
> +  testConv<bool>();
> +  testConv<int>();
> +  testConv<float>();
> +  testConv<X>();
> +  testPtrConv<int>();
> +  testPtrConv<int, member_pointer_t>();
> +}
> --
> 2.52.0
>
>
