> bits/atomicity.h has volatile qualifiers on the _Atomic_word* arguments to
> the __*_single and __*_dispatch variants of the atomic operations. This
> especially hurts the single-threaded optimization variants, which are
> usually inlined. Removing those qualifiers reduces code size significantly,
> as can be seen in the following simple testcase:
I've been able to reproduce this with your example and the following
patch. Thanks for looking at this.
without volatile:
19: 00000000 546 FUNC GLOBAL DEFAULT 2 _Z3fooPKcS0_
with volatile:
19: 00000000 578 FUNC GLOBAL DEFAULT 2 _Z3fooPKcS0_
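For reference, _Z3fooPKcS0_ demangles to foo(char const*, char const*);
a minimal testcase along these lines (my reconstruction, not necessarily
the original one) is something like

  #include <string>

  // Reconstructed testcase (assumption, not necessarily the original):
  // string construction and concatenation exercise the inlined
  // __atomic_add_dispatch / __exchange_and_add_dispatch refcount helpers.
  std::string
  foo(const char* a, const char* b)
  {
    std::string s(a);
    s += b;
    return s;
  }

compiled at -O2, the 546- vs 578-byte figures being the symbol sizes
readelf -s reports for the resulting object.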
I don't understand the ABI objections to your suggestion, and feel like
there must be a misunderstanding somewhere. These helper functions are
not exported at all, in fact. Also, the *_dispatch and *_single parts
of the atomicity.h interface are new with 4.2, so I'd like to get the
correct signatures in with their introduction, and not have to patch
this up later.
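For context, a rough sketch of how these helpers get used inside the
library (modelled on the string reference counting; names and structure
here are illustrative, not the actual library source):

  #include <bits/atomicity.h>  // internal header this patch touches

  struct _Sketch_rep
  {
    _Atomic_word _M_refcount;

    void
    _M_add_reference()
    { __gnu_cxx::__atomic_add_dispatch(&_M_refcount, 1); }

    void
    _M_remove_reference()
    {
      // In a single-threaded program the dispatch helpers inline down to
      // a plain load/add, which is where the code-size win comes from.
      if (__gnu_cxx::__exchange_and_add_dispatch(&_M_refcount, -1) <= 0)
        _M_destroy();
    }

    void _M_destroy();  // illustrative only, defined elsewhere
  };

Since only the _single/_dispatch inlines change signature and the exported
__atomic_add/__exchange_and_add keep theirs, nothing users link against
changes.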
tested x86/linux
abi tested x86/linux
-benjamin
2006-08-30  Benjamin Kosnik  <[EMAIL PROTECTED]>
            Richard Guenther  <[EMAIL PROTECTED]>

        * config/abi/pre/gnu.ver: Spell out exact signatures for atomic
        access functions.
        * include/bits/atomicity.h (__atomic_add_dispatch): Remove
        volatile qualification for _Atomic_word argument.
        (__atomic_add_single): Same.
        (__exchange_and_add_dispatch): Same.
        (__exchange_and_add_single): Same.
Index: include/bits/atomicity.h
===================================================================
--- include/bits/atomicity.h (revision 116581)
+++ include/bits/atomicity.h (working copy)
@@ -60,7 +60,7 @@
 #endif
 
   static inline _Atomic_word
-  __exchange_and_add_single(volatile _Atomic_word* __mem, int __val)
+  __exchange_and_add_single(_Atomic_word* __mem, int __val)
   {
     _Atomic_word __result = *__mem;
     *__mem += __val;
@@ -68,12 +68,12 @@
   }
 
   static inline void
-  __atomic_add_single(volatile _Atomic_word* __mem, int __val)
+  __atomic_add_single(_Atomic_word* __mem, int __val)
   { *__mem += __val; }
 
   static inline _Atomic_word
   __attribute__ ((__unused__))
-  __exchange_and_add_dispatch(volatile _Atomic_word* __mem, int __val)
+  __exchange_and_add_dispatch(_Atomic_word* __mem, int __val)
   {
 #ifdef __GTHREADS
     if (__gthread_active_p())
@@ -87,7 +87,7 @@
 
   static inline void
   __attribute__ ((__unused__))
-  __atomic_add_dispatch(volatile _Atomic_word* __mem, int __val)
+  __atomic_add_dispatch(_Atomic_word* __mem, int __val)
   {
 #ifdef __GTHREADS
     if (__gthread_active_p())
@@ -101,8 +101,9 @@
 
 _GLIBCXX_END_NAMESPACE
 
-// Even if the CPU doesn't need a memory barrier, we need to ensure that
-// the compiler doesn't reorder memory accesses across the barriers.
+// Even if the CPU doesn't need a memory barrier, we need to ensure
+// that the compiler doesn't reorder memory accesses across the
+// barriers.
 #ifndef _GLIBCXX_READ_MEM_BARRIER
 #define _GLIBCXX_READ_MEM_BARRIER __asm __volatile ("":::"memory")
 #endif
Index: config/abi/pre/gnu.ver
===================================================================
--- config/abi/pre/gnu.ver (revision 116581)
+++ config/abi/pre/gnu.ver (working copy)
@@ -378,8 +378,8 @@
 
     # __gnu_cxx::__atomic_add
     # __gnu_cxx::__exchange_and_add
-    _ZN9__gnu_cxx12__atomic_add*;
-    _ZN9__gnu_cxx18__exchange_and_add*;
+    _ZN9__gnu_cxx12__atomic_addEPVii;
+    _ZN9__gnu_cxx18__exchange_and_addEPVii;
 
     # debug mode
     _ZN10__gnu_norm15_List_node_base4hook*;
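
For the record, the two spelled-out names are just the mangled forms of
__gnu_cxx::__atomic_add(volatile _Atomic_word*, int) and
__gnu_cxx::__exchange_and_add(volatile _Atomic_word*, int), with
_Atomic_word being int as on x86/linux. A quick way to sanity-check them
is a stub translation unit like the one below (the bodies are
placeholders, only the mangling matters) plus nm on the object:

  // mangle-check.cc: not library code, just the exported signatures.
  typedef int _Atomic_word;  // assumes the x86/linux bits/atomic_word.h

  namespace __gnu_cxx
  {
    _Atomic_word
    __exchange_and_add(volatile _Atomic_word* __mem, int __val)
    {
      _Atomic_word __old = *__mem;  // placeholder, not actually atomic
      *__mem += __val;
      return __old;
    }

    void
    __atomic_add(volatile _Atomic_word* __mem, int __val)
    { *__mem += __val; }            // placeholder, not actually atomic
  }

  // g++ -c mangle-check.cc && nm mangle-check.o
  //   T _ZN9__gnu_cxx12__atomic_addEPVii
  //   T _ZN9__gnu_cxx18__exchange_and_addEPVii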