On 11/10/2011 11:48 AM, Andrew MacLeod wrote:
On 11/10/2011 11:47 AM, Richard Henderson wrote:
On 11/10/2011 08:35 AM, Andrew MacLeod wrote:
Currently I don't issue any warnings ...
What are those then?
And, obviously the cris test should be an effective target test.
Oh, those are gone, I must not have re-svn'd
Just a minute
Andrew
Doh, sorry about that.
Andrew
libstdc++-v3
* include/bits/atomic_base.h (atomic_thread_fence): Call built-in.
(atomic_signal_fence): Call built-in.
(test_and_set, clear): Call new atomic built-ins.
gcc
* builtins.c (expand_builtin_atomic_clear): New. Expand atomic_clear.
(expand_builtin_atomic_test_and_set): New. Expand atomic test_and_set.
(expand_builtin): Add cases for test_and_set and clear.
* sync-builtins.def (BUILT_IN_ATOMIC_TEST_AND_SET): New.
(BUILT_IN_ATOMIC_CLEAR): New.
testsuite
* gcc.dg/atomic-invalid.c: Add test for invalid __atomic_clear models.
* gcc.dg/atomic-flag.c: New. Test __atomic_test_and_set and
__atomic_clear.
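
For context, a rough sketch (not part of the patch) of how the two new
built-ins are expected to be used from C, mirroring the new atomic-flag.c
test further down; all names here are illustrative only:

  #include <stdbool.h>

  static bool flag;

  void
  example (void)
  {
    /* Atomically set the flag and return whether it was already set.  */
    bool was_set = __atomic_test_and_set (&flag, __ATOMIC_ACQUIRE);

    if (!was_set)
      {
	/* ... flag is owned here ... */

	/* Reset the flag; only relaxed/release/seq_cst models are valid.  */
	__atomic_clear (&flag, __ATOMIC_RELEASE);
      }
  }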
Index: libstdc++-v3/include/bits/atomic_base.h
===================================================================
*** libstdc++-v3/include/bits/atomic_base.h (revision 181119)
--- libstdc++-v3/include/bits/atomic_base.h (working copy)
*************** _GLIBCXX_BEGIN_NAMESPACE_VERSION
*** 68,78 ****
return __mo2;
}
! void
! atomic_thread_fence(memory_order __m) noexcept;
! void
! atomic_signal_fence(memory_order __m) noexcept;
/// kill_dependency
template<typename _Tp>
--- 68,84 ----
return __mo2;
}
! inline void
! atomic_thread_fence(memory_order __m) noexcept
! {
! __atomic_thread_fence (__m);
! }
! inline void
! atomic_signal_fence(memory_order __m) noexcept
! {
! __atomic_signal_fence (__m);
! }
/// kill_dependency
template<typename _Tp>
*************** _GLIBCXX_BEGIN_NAMESPACE_VERSION
*** 261,295 ****
bool
test_and_set(memory_order __m = memory_order_seq_cst) noexcept
{
! /* The standard *requires* this to be lock free. If exchange is not
! always lock free, the resort to the old test_and_set. */
! if (__atomic_always_lock_free (sizeof (_M_i), 0))
! return __atomic_exchange_n(&_M_i, 1, __m);
! else
! {
! /* Sync test and set is only guaranteed to be acquire. */
! if (__m == memory_order_seq_cst || __m == memory_order_release
! || __m == memory_order_acq_rel)
! atomic_thread_fence (__m);
! return __sync_lock_test_and_set (&_M_i, 1);
! }
}
bool
test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
{
! /* The standard *requires* this to be lock free. If exchange is not
! always lock free, the resort to the old test_and_set. */
! if (__atomic_always_lock_free (sizeof (_M_i), 0))
! return __atomic_exchange_n(&_M_i, 1, __m);
! else
! {
! /* Sync test and set is only guaranteed to be acquire. */
! if (__m == memory_order_seq_cst || __m == memory_order_release
! || __m == memory_order_acq_rel)
! atomic_thread_fence (__m);
! return __sync_lock_test_and_set (&_M_i, 1);
! }
}
void
--- 267,279 ----
bool
test_and_set(memory_order __m = memory_order_seq_cst) noexcept
{
! return __atomic_test_and_set (&_M_i, __m);
}
bool
test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
{
! return __atomic_test_and_set (&_M_i, __m);
}
void
*************** _GLIBCXX_BEGIN_NAMESPACE_VERSION
*** 299,315 ****
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
! /* The standard *requires* this to be lock free. If store is not always
! lock free, the resort to the old style __sync_lock_release. */
! if (__atomic_always_lock_free (sizeof (_M_i), 0))
! __atomic_store_n(&_M_i, 0, __m);
! else
! {
! __sync_lock_release (&_M_i, 0);
! /* __sync_lock_release is only guaranteed to be a release barrier. */
! if (__m == memory_order_seq_cst)
! atomic_thread_fence (__m);
! }
}
void
--- 283,289 ----
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
! __atomic_clear (&_M_i, __m);
}
void
*************** _GLIBCXX_BEGIN_NAMESPACE_VERSION
*** 319,335 ****
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
! /* The standard *requires* this to be lock free. If store is not always
! lock free, the resort to the old style __sync_lock_release. */
! if (__atomic_always_lock_free (sizeof (_M_i), 0))
! __atomic_store_n(&_M_i, 0, __m);
! else
! {
! __sync_lock_release (&_M_i, 0);
! /* __sync_lock_release is only guaranteed to be a release barrier. */
! if (__m == memory_order_seq_cst)
! atomic_thread_fence (__m);
! }
}
};
--- 293,299 ----
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
! __atomic_clear (&_M_i, __m);
}
};
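
With test_and_set and clear now forwarding straight to the built-ins, the
usual spin-lock idiom generates the same code whether written against
std::atomic_flag or the built-ins directly.  A rough C sketch, assuming a
bool-sized flag (the lock/unlock names are illustrative, not part of the
patch):

  #include <stdbool.h>

  static bool lock_flag;

  static void
  spin_lock (void)
  {
    /* Loop until the previous value was clear; acquire on success.  */
    while (__atomic_test_and_set (&lock_flag, __ATOMIC_ACQUIRE))
      ;  /* spin */
  }

  static void
  spin_unlock (void)
  {
    __atomic_clear (&lock_flag, __ATOMIC_RELEASE);
  }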
Index: gcc/builtins.c
===================================================================
*** gcc/builtins.c (revision 181111)
--- gcc/builtins.c (working copy)
*************** expand_builtin_atomic_fetch_op (enum mac
*** 5465,5470 ****
--- 5465,5535 ----
return ret;
}
+
+ /* Expand an atomic clear operation.
+ void __atomic_clear (BOOL *obj, enum memmodel)
+ EXP is the call expression. */
+
+ static rtx
+ expand_builtin_atomic_clear (tree exp)
+ {
+ enum machine_mode mode;
+ rtx mem, ret;
+ enum memmodel model;
+
+ mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ model = get_memmodel (CALL_EXPR_ARG (exp, 1));
+
+ if (model == MEMMODEL_ACQUIRE || model == MEMMODEL_ACQ_REL)
+ {
+ error ("invalid memory model for %<__atomic_store%>");
+ return const0_rtx;
+ }
+
+ /* Try issuing an __atomic_store, and allow fallback to __sync_lock_release.
+ Failing that, a store is issued by __atomic_store. The only way this can
+ fail is if the bool type is larger than a word size. Unlikely, but
+ handle it anyway for completeness. Assume a single threaded model since
+ there is no atomic support in this case, and no barriers are required. */
+ ret = expand_atomic_store (mem, const0_rtx, model, true);
+ if (!ret)
+ emit_move_insn (mem, const0_rtx);
+ return const0_rtx;
+ }
+
+ /* Expand an atomic test_and_set operation.
+ bool __atomic_test_and_set (BOOL *obj, enum memmodel)
+ EXP is the call expression. */
+
+ static rtx
+ expand_builtin_atomic_test_and_set (tree exp)
+ {
+ rtx mem, ret;
+ enum memmodel model;
+ enum machine_mode mode;
+
+ mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ model = get_memmodel (CALL_EXPR_ARG (exp, 1));
+
+ /* Try issuing an exchange. If it is lock free, or if there is a limited
+ functionality __sync_lock_test_and_set, this will utilize it. */
+ ret = expand_atomic_exchange (NULL_RTX, mem, const1_rtx, model, true);
+ if (ret)
+ return ret;
+
+ /* Otherwise, there is no lock free support for test and set. Simply
+ perform a load and a store. Since this presumes a non-atomic architecture,
+ also assume single threadedness and don't issue barriers either. */
+
+ ret = gen_reg_rtx (mode);
+ emit_move_insn (ret, mem);
+ emit_move_insn (mem, const1_rtx);
+ return ret;
+ }
+
+
/* Return true if (optional) argument ARG1 of size ARG0 is always lock free on
this architecture. If ARG1 is NULL, use typical alignment for size ARG0.
*/
*************** expand_builtin (tree exp, rtx target, rt
*** 6693,6698 ****
--- 6758,6769 ----
if (target)
return target;
break;
+
+ case BUILT_IN_ATOMIC_TEST_AND_SET:
+ return expand_builtin_atomic_test_and_set (exp);
+
+ case BUILT_IN_ATOMIC_CLEAR:
+ return expand_builtin_atomic_clear (exp);
case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE:
return expand_builtin_atomic_always_lock_free (exp);
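
Since expand_builtin_atomic_clear rejects the acquire-flavoured models, user
code along these lines should be diagnosed at compile time (this is exactly
what the new atomic-invalid.c cases below exercise; the wording comes from
the error () call above):

  #include <stdbool.h>

  bool f;

  void
  bad_clear (void)
  {
    /* A clear is a store, so acquire semantics are meaningless here.  */
    __atomic_clear (&f, __ATOMIC_ACQUIRE);  /* invalid memory model */
    __atomic_clear (&f, __ATOMIC_ACQ_REL);  /* invalid memory model */
  }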
Index: gcc/sync-builtins.def
===================================================================
*** gcc/sync-builtins.def (revision 181110)
--- gcc/sync-builtins.def (working copy)
*************** DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SYNCHRON
*** 259,264 ****
--- 259,270 ----
/* __sync* builtins for the C++ memory model. */
+ DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_TEST_AND_SET, "__atomic_test_and_set",
+ BT_FN_BOOL_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
+
+ DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_CLEAR, "__atomic_clear", BT_FN_VOID_VPTR_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+
DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE,
"__atomic_exchange",
BT_FN_VOID_SIZE_VPTR_PTR_PTR_INT, ATTR_NOTHROW_LEAF_LIST)
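
For readers not fluent in the type codes: BT_FN_BOOL_VPTR_INT and
BT_FN_VOID_VPTR_INT correspond roughly to the following prototypes (the
pointer parameter is a volatile void * in the builtin machinery):

  bool __atomic_test_and_set (volatile void *ptr, int memorder);
  void __atomic_clear (volatile void *ptr, int memorder);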
Index: gcc/testsuite/gcc.dg/atomic-invalid.c
===================================================================
*** gcc/testsuite/gcc.dg/atomic-invalid.c (revision 181110)
--- gcc/testsuite/gcc.dg/atomic-invalid.c (working copy)
***************
*** 4,12 ****
--- 4,14 ----
/* { dg-require-effective-target sync_int_long } */
#include <stddef.h>
+ #include <stdbool.h>
int i, e, b;
size_t s;
+ bool x;
main ()
{
*************** main ()
*** 26,29 ****
--- 28,36 ----
i = __atomic_always_lock_free (s, NULL); /* { dg-error "non-constant argument" } */
__atomic_load_n (&i, 44); /* { dg-warning "invalid memory model" } */
+
+ __atomic_clear (&x, __ATOMIC_ACQUIRE); /* { dg-error "invalid memory model" } */
+
+ __atomic_clear (&x, __ATOMIC_ACQ_REL); /* { dg-error "invalid memory model" } */
+
}
Index: gcc/testsuite/gcc.dg/atomic-flag.c
===================================================================
*** gcc/testsuite/gcc.dg/atomic-flag.c (revision 0)
--- gcc/testsuite/gcc.dg/atomic-flag.c (revision 0)
***************
*** 0 ****
--- 1,33 ----
+ /* Test __atomic routines for existence and execution. */
+ /* { dg-do run } */
+
+ #include <stdbool.h>
+
+ /* Test that __atomic_test_and_set and __atomic_clear builtins execute. */
+
+ extern void abort(void);
+ bool a;
+
+ main ()
+ {
+ bool b;
+
+ __atomic_clear (&a, __ATOMIC_RELAXED);
+ if (a != 0)
+ abort ();
+
+ b = __atomic_test_and_set (&a, __ATOMIC_SEQ_CST);
+ if (a != 1 || b != 0)
+ abort ();
+
+ b = __atomic_test_and_set (&a, __ATOMIC_ACQ_REL);
+ if (b != 1 || a != 1)
+ abort ();
+
+ __atomic_clear (&a, __ATOMIC_SEQ_CST);
+ if (a != 0)
+ abort ();
+
+ return 0;
+ }
+