Hi mclow.lists,
In some places in libc++ we need to use the `__atomic_*` builtins. This patch
adds a header that provides access to those builtins in a uniform way.
http://reviews.llvm.org/D10406
Files:
include/__atomic
include/__config
include/memory
include/mutex
src/memory.cpp
src/mutex.cpp
test/std/thread/thread.mutex/thread.once/thread.once.callonce/call_once.pass.cpp
test/std/thread/thread.mutex/thread.once/thread.once.callonce/call_once11.pass.cpp
test/std/utilities/memory/util.smartptr/util.smartptr.shared/race_condition.pass.cpp
EMAIL PREFERENCES
http://reviews.llvm.org/settings/panel/emailpreferences/
Index: include/__atomic
===================================================================
--- /dev/null
+++ include/__atomic
@@ -0,0 +1,122 @@
+// -*- C++ -*-
+//===--------------------------- __atomic ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ATOMIC
+#define _LIBCPP___ATOMIC
+
+#include <__config>
+#include <type_traits>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if defined(_LIBCPP_HAS_ATOMIC_BUILTINS)
+
+enum __libcpp_atomic_order {
+ _AO_Relaxed = __ATOMIC_RELAXED,
+ _AO_Consume = __ATOMIC_CONSUME,
+ _AO_Acquire = __ATOMIC_ACQUIRE,
+ _AO_Release = __ATOMIC_RELEASE,
+ _AO_Acq_Rel = __ATOMIC_ACQ_REL,
+ _AO_Seq = __ATOMIC_SEQ_CST
+};
+
+template <class _ValueType, class _FromType>
+inline _LIBCPP_INLINE_VISIBILITY
+void __libcpp_atomic_store(_ValueType* __dest, _FromType __val,
+ int __order = _AO_Seq)
+{
+ _ValueType __from = __val;
+ __atomic_store(__dest, &__from, __order);
+}
+
+
+template <class _ValueType>
+inline _LIBCPP_INLINE_VISIBILITY
+_ValueType __libcpp_atomic_load(_ValueType* __val,
+ int __order = _AO_Seq)
+{
+ typename std::remove_cv<_ValueType>::type __x = 0;
+ __atomic_load(__val, &__x, __order);
+ return __x;
+}
+
+template <class _ValueType, class _AddType>
+inline _LIBCPP_INLINE_VISIBILITY
+_ValueType __libcpp_atomic_add(_ValueType* __val, _AddType __a,
+ int __order = _AO_Seq)
+{
+ return __atomic_add_fetch(__val, __a, __order);
+}
+
+template <class _ValueType>
+inline _LIBCPP_INLINE_VISIBILITY
+bool __libcpp_atomic_compare_exchange(_ValueType* __val,
+ _ValueType* __expected, _ValueType* __after,
+ int __success_order = _AO_Seq,
+ int __fail_order = _AO_Seq)
+{
+ return __atomic_compare_exchange(__val, __expected, __after, false,
+ __success_order, __fail_order); // strong CAS: matches the non-builtin fallback
+}
+
+#else
+
+enum __libcpp_atomic_order {
+ _AO_Relaxed,
+ _AO_Consume,
+ _AO_Acquire,
+ _AO_Release,
+ _AO_Acq_Rel,
+ _AO_Seq
+};
+
+template <class _ValueType, class _FromType>
+inline _LIBCPP_INLINE_VISIBILITY
+void __libcpp_atomic_store(_ValueType* __dest, _FromType __val,
+ int = 0)
+{
+ *__dest = __val;
+}
+
+template <class _ValueType>
+inline _LIBCPP_INLINE_VISIBILITY
+_ValueType __libcpp_atomic_load(_ValueType* __val,
+ int = 0)
+{
+ return *__val;
+}
+
+template <class _ValueType, class _AddType>
+inline _LIBCPP_INLINE_VISIBILITY
+_ValueType __libcpp_atomic_add(_ValueType* __val, _AddType __a,
+ int = 0)
+{
+ return *__val += __a;
+}
+
+template <class _ValueType>
+inline _LIBCPP_INLINE_VISIBILITY
+bool __libcpp_atomic_compare_exchange(_ValueType* __val,
+ _ValueType* __expected, _ValueType* __after,
+ int = 0, int = 0)
+{
+ if (*__val == *__expected) {
+ *__val = *__after;
+ return true;
+ }
+ *__expected = *__val;
+ return false;
+}
+
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___ATOMIC
\ No newline at end of file
Index: include/__config
===================================================================
--- include/__config
+++ include/__config
@@ -751,4 +751,22 @@
#define _LIBCPP_PROVIDES_DEFAULT_RUNE_TABLE
#endif
+
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+#if defined(__clang__) && __has_builtin(__atomic_load) \
+ && __has_builtin(__atomic_store) \
+ && __has_builtin(__atomic_add_fetch) \
+ && __has_builtin(__atomic_compare_exchange) \
+ && defined(__ATOMIC_RELAXED) \
+ && defined(__ATOMIC_CONSUME) \
+ && defined(__ATOMIC_ACQUIRE) \
+ && defined(__ATOMIC_RELEASE) \
+ && defined(__ATOMIC_ACQ_REL) \
+ && defined(__ATOMIC_SEQ_CST)
+# define _LIBCPP_HAS_ATOMIC_BUILTINS
+#elif !defined(__clang__) && defined(_GNUC_VER) && _GNUC_VER >= 407
+# define _LIBCPP_HAS_ATOMIC_BUILTINS
+#endif
+#endif // !defined(_LIBCPP_HAS_NO_THREADS)
+
#endif // _LIBCPP_CONFIG
Index: include/memory
===================================================================
--- include/memory
+++ include/memory
@@ -596,6 +596,7 @@
*/
#include <__config>
+#include <__atomic>
#include <type_traits>
#include <typeinfo>
#include <cstddef>
@@ -3676,7 +3677,9 @@
void __add_shared() _NOEXCEPT;
bool __release_shared() _NOEXCEPT;
_LIBCPP_INLINE_VISIBILITY
- long use_count() const _NOEXCEPT {return __shared_owners_ + 1;}
+ long use_count() const _NOEXCEPT {
+ return __libcpp_atomic_load(&__shared_owners_, _AO_Relaxed) + 1;
+ }
};
class _LIBCPP_TYPE_VIS __shared_weak_count
Index: include/mutex
===================================================================
--- include/mutex
+++ include/mutex
@@ -174,6 +174,7 @@
#include <__config>
#include <__mutex_base>
+#include <__atomic>
#include <functional>
#ifndef _LIBCPP_HAS_NO_VARIADICS
#include <tuple>
@@ -541,7 +542,7 @@
void
call_once(once_flag& __flag, _Callable&& __func, _Args&&... __args)
{
- if (__flag.__state_ != ~0ul)
+ if (__libcpp_atomic_load(&__flag.__state_, _AO_Relaxed) != ~0ul)
{
typedef tuple<typename decay<_Callable>::type, typename decay<_Args>::type...> _Gp;
__call_once_param<_Gp> __p(_Gp(__decay_copy(_VSTD::forward<_Callable>(__func)),
@@ -557,7 +558,7 @@
void
call_once(once_flag& __flag, _Callable __func)
{
- if (__flag.__state_ != ~0ul)
+ if (__libcpp_atomic_load(&__flag.__state_, _AO_Relaxed) != ~0ul)
{
__call_once_param<_Callable> __p(__func);
__call_once(__flag.__state_, &__p, &__call_once_proxy<_Callable>);
Index: src/memory.cpp
===================================================================
--- src/memory.cpp
+++ src/memory.cpp
@@ -16,25 +16,6 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-namespace
-{
-
-template <class T>
-inline T
-increment(T& t) _NOEXCEPT
-{
- return __sync_add_and_fetch(&t, 1);
-}
-
-template <class T>
-inline T
-decrement(T& t) _NOEXCEPT
-{
- return __sync_add_and_fetch(&t, -1);
-}
-
-} // namespace
-
const allocator_arg_t allocator_arg = allocator_arg_t();
bad_weak_ptr::~bad_weak_ptr() _NOEXCEPT {}
@@ -52,13 +33,13 @@
void
__shared_count::__add_shared() _NOEXCEPT
{
- increment(__shared_owners_);
+ __libcpp_atomic_add(&__shared_owners_, 1);
}
bool
__shared_count::__release_shared() _NOEXCEPT
{
- if (decrement(__shared_owners_) == -1)
+ if (__libcpp_atomic_add(&__shared_owners_, -1) == -1)
{
__on_zero_shared();
return true;
@@ -79,7 +60,7 @@
void
__shared_weak_count::__add_weak() _NOEXCEPT
{
- increment(__shared_weak_owners_);
+ __libcpp_atomic_add(&__shared_weak_owners_, 1);
}
void
@@ -92,23 +73,23 @@
void
__shared_weak_count::__release_weak() _NOEXCEPT
{
- if (decrement(__shared_weak_owners_) == -1)
+ if (__libcpp_atomic_add(&__shared_weak_owners_, -1) == -1)
__on_zero_shared_weak();
}
__shared_weak_count*
__shared_weak_count::lock() _NOEXCEPT
{
- long object_owners = __shared_owners_;
+ long object_owners = __libcpp_atomic_load(&__shared_owners_);
while (object_owners != -1)
{
- if (__sync_bool_compare_and_swap(&__shared_owners_,
- object_owners,
- object_owners+1))
+ long object_owners_plus = object_owners + 1;
+ if (__libcpp_atomic_compare_exchange(&__shared_owners_,
+ &object_owners,
+ &object_owners_plus))
return this;
- object_owners = __shared_owners_;
}
- return 0;
+ return nullptr;
}
#if !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)
Index: src/mutex.cpp
===================================================================
--- src/mutex.cpp
+++ src/mutex.cpp
@@ -252,11 +252,11 @@
try
{
#endif // _LIBCPP_NO_EXCEPTIONS
- flag = 1;
+ __libcpp_atomic_store(&flag, 1, _AO_Relaxed);
pthread_mutex_unlock(&mut);
func(arg);
pthread_mutex_lock(&mut);
- flag = ~0ul;
+ __libcpp_atomic_store(&flag, ~0ul, _AO_Relaxed);
pthread_mutex_unlock(&mut);
pthread_cond_broadcast(&cv);
#ifndef _LIBCPP_NO_EXCEPTIONS
@@ -264,7 +264,7 @@
catch (...)
{
pthread_mutex_lock(&mut);
- flag = 0ul;
+ __libcpp_atomic_store(&flag, 0ul, _AO_Relaxed);
pthread_mutex_unlock(&mut);
pthread_cond_broadcast(&cv);
throw;
Index: test/std/thread/thread.mutex/thread.once/thread.once.callonce/call_once.pass.cpp
===================================================================
--- test/std/thread/thread.mutex/thread.once/thread.once.callonce/call_once.pass.cpp
+++ test/std/thread/thread.mutex/thread.once/thread.once.callonce/call_once.pass.cpp
@@ -62,43 +62,6 @@
}
}
-#ifndef _LIBCPP_HAS_NO_VARIADICS
-
-struct init1
-{
- static int called;
-
- void operator()(int i) {called += i;}
-};
-
-int init1::called = 0;
-
-std::once_flag flg1;
-
-void f1()
-{
- std::call_once(flg1, init1(), 1);
-}
-
-struct init2
-{
- static int called;
-
- void operator()(int i, int j) const {called += i + j;}
-};
-
-int init2::called = 0;
-
-std::once_flag flg2;
-
-void f2()
-{
- std::call_once(flg2, init2(), 2, 3);
- std::call_once(flg2, init2(), 4, 5);
-}
-
-#endif // _LIBCPP_HAS_NO_VARIADICS
-
std::once_flag flg41;
std::once_flag flg42;
@@ -131,29 +94,6 @@
std::call_once(flg41, init41);
}
-#ifndef _LIBCPP_HAS_NO_VARIADICS
-
-class MoveOnly
-{
-#if !defined(__clang__)
- // GCC 4.8 complains about the following being private
-public:
- MoveOnly(const MoveOnly&)
- {
- }
-#else
- MoveOnly(const MoveOnly&);
-#endif
-public:
- MoveOnly() {}
- MoveOnly(MoveOnly&&) {}
-
- void operator()(MoveOnly&&)
- {
- }
-};
-
-#endif
int main()
{
@@ -183,26 +123,4 @@
assert(init41_called == 1);
assert(init42_called == 1);
}
-#ifndef _LIBCPP_HAS_NO_VARIADICS
- // check functors with 1 arg
- {
- std::thread t0(f1);
- std::thread t1(f1);
- t0.join();
- t1.join();
- assert(init1::called == 1);
- }
- // check functors with 2 args
- {
- std::thread t0(f2);
- std::thread t1(f2);
- t0.join();
- t1.join();
- assert(init2::called == 5);
- }
- {
- std::once_flag f;
- std::call_once(f, MoveOnly(), MoveOnly());
- }
-#endif // _LIBCPP_HAS_NO_VARIADICS
}
Index: test/std/thread/thread.mutex/thread.once/thread.once.callonce/call_once11.pass.cpp
===================================================================
--- /dev/null
+++ test/std/thread/thread.mutex/thread.once/thread.once.callonce/call_once11.pass.cpp
@@ -0,0 +1,94 @@
+//===----------------------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// UNSUPPORTED: libcpp-has-no-threads
+// UNSUPPORTED: c++98, c++03
+
+// <mutex>
+
+// struct once_flag;
+
+// template<class Callable, class ...Args>
+// void call_once(once_flag& flag, Callable func, Args&&... args);
+
+#include <mutex>
+#include <thread>
+#include <cassert>
+
+
+struct init1
+{
+ static int called;
+
+ void operator()(int i) {called += i;}
+};
+
+int init1::called = 0;
+
+std::once_flag flg1;
+
+void f1()
+{
+ std::call_once(flg1, init1(), 1);
+}
+
+struct init2
+{
+ static int called;
+
+ void operator()(int i, int j) const {called += i + j;}
+};
+
+int init2::called = 0;
+
+std::once_flag flg2;
+
+void f2()
+{
+ std::call_once(flg2, init2(), 2, 3);
+ std::call_once(flg2, init2(), 4, 5);
+}
+
+
+class MoveOnly
+{
+public:
+ MoveOnly() = default;
+ MoveOnly(const MoveOnly&) = delete;
+ MoveOnly(MoveOnly &&) = default;
+
+
+ void operator()(MoveOnly&&)
+ {
+ }
+};
+
+int main()
+{
+ // check functors with 1 arg
+ {
+ std::thread t0(f1);
+ std::thread t1(f1);
+ t0.join();
+ t1.join();
+ assert(init1::called == 1);
+ }
+ // check functors with 2 args
+ {
+ std::thread t0(f2);
+ std::thread t1(f2);
+ t0.join();
+ t1.join();
+ assert(init2::called == 5);
+ }
+ {
+ std::once_flag f;
+ std::call_once(f, MoveOnly(), MoveOnly());
+ }
+}
Index: test/std/utilities/memory/util.smartptr/util.smartptr.shared/race_condition.pass.cpp
===================================================================
--- /dev/null
+++ test/std/utilities/memory/util.smartptr/util.smartptr.shared/race_condition.pass.cpp
@@ -0,0 +1,72 @@
+//===----------------------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// UNSUPPORTED: libcpp-has-no-threads
+//
+// <memory>
+//
+// class shared_ptr
+//
+// This test attempts to create a race condition surrounding use_count()
+// with the hope that TSAN will diagnose it.
+
+#include <memory>
+#include <atomic>
+#include <thread>
+#include <cassert>
+
+typedef std::shared_ptr<int> Ptr;
+
+std::atomic_bool KeepRunning;
+
+struct TestRunner {
+ TestRunner(Ptr xx) : x(xx) {}
+ void operator()() {
+ while (KeepRunning) {
+ // loop to prevent always checking the atomic.
+ for (int i=0; i < 10000; ++i)
+ Ptr x2 = x;
+ }
+ }
+ Ptr x;
+};
+
+void run_test(Ptr p) {
+ KeepRunning = true;
+ TestRunner r(p);
+ assert(p.use_count() == 3);
+ std::thread t1(r);
+ // Run until we witness 25 use count changes.
+ int changes_count = 0;
+ while (changes_count < 25) {
+ volatile long last = p.use_count();
+ volatile long new_val = p.use_count();
+ assert(last >= 3);
+ assert(new_val >= 3);
+ if (last != new_val) ++changes_count;
+ }
+ KeepRunning = false;
+ t1.join();
+ assert(p.use_count() == 3);
+}
+
+int main() {
+ {
+ // Test with out-of-place shared_count.
+ Ptr p(new int(42));
+ run_test(p);
+ assert(p.use_count() == 1);
+ }
+ {
+ // Test with in-place shared_count.
+ Ptr p = std::make_shared<int>(42);
+ run_test(p);
+ assert(p.use_count() == 1);
+ }
+}
_______________________________________________
cfe-commits mailing list
[email protected]
http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits