Next step: change the C++ header files to use the new __sync_mem builtins.  Pretty straightforward.

Mostly.

It turns out C++ will let you specify the memory model as a run time variable of type enum memory_order... WTF? I expect that to be pretty uncommon, and to get it right we'd need a switch statement in each header routine that calls the appropriate __sync_mem* builtin with the appropriate constant parameter, something like the sketch below.
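
A hypothetical sketch of what that dispatch would look like for fetch_add (using the names from atomic_2.h; this is not part of the patch):

      __int_type
      fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
      {
        // Forward the run time value as a compile time constant by
        // switching on every possible memory_order.
        switch (__m)
          {
          case memory_order_relaxed:
            return __sync_mem_fetch_add(&_M_i, __i, memory_order_relaxed);
          case memory_order_consume:
            return __sync_mem_fetch_add(&_M_i, __i, memory_order_consume);
          case memory_order_acquire:
            return __sync_mem_fetch_add(&_M_i, __i, memory_order_acquire);
          case memory_order_release:
            return __sync_mem_fetch_add(&_M_i, __i, memory_order_release);
          case memory_order_acq_rel:
            return __sync_mem_fetch_add(&_M_i, __i, memory_order_acq_rel);
          default:
            return __sync_mem_fetch_add(&_M_i, __i, memory_order_seq_cst);
          }
      }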

That would be quite ugly, and you get what you deserve if you do that. Instead, I changed the builtins so that if the memory model parameter is not a compile-time constant, it simply defaults to __SYNC_MEM_SEQ_CST, which is always safe. That is standard-compliant (verified), and if anyone is really unhappy about it, the C++ headers can be uglified later with a bunch of switch statements to handle this twisted case.
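
For example (a sketch of the kind of caller this covers, not part of the patch), something like

      void
      bump (std::atomic<int> &a, std::memory_order m)
      {
        a.fetch_add (1, m);   // m is not a compile time constant here
      }

still expands to a single __sync_mem_fetch_add call; get_memmodel just sees a non-INTEGER_CST argument and treats it as __SYNC_MEM_SEQ_CST, which is at least as strong as whatever m turns out to be at run time.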

Bootstraps with no new regressions. (In fact, it fixes one of the atomic verification tests!)

Andrew


        * gcc/builtins.c (get_memmodel): Allow non-constant parameters and
        default to MEMMODEL_SEQ_CST mode for these cases.

        * libstdc++-v3/include/bits/atomic_2.h (__atomic2): Use new
        __sync_mem routines.

Index: gcc/builtins.c
===================================================================
*** gcc/builtins.c      (revision 177737)
--- gcc/builtins.c      (working copy)
*************** get_memmodel (tree exp)
*** 5225,5240 ****
  {
    rtx op;
  
    if (TREE_CODE (exp) != INTEGER_CST)
!     {
!       error ("invalid memory model argument to builtin");
!       return MEMMODEL_RELAXED;
!     }
    op = expand_normal (exp);
    if (INTVAL (op) < 0 || INTVAL (op) >= MEMMODEL_LAST)
      {
        error ("invalid memory model argument to builtin");
!       return MEMMODEL_RELAXED;
      }
    return (enum memmodel) INTVAL (op);
  }
--- 5225,5240 ----
  {
    rtx op;
  
+   /* If the parameter is not a constant, it's a run time value so we'll just
+      convert it to MEMMODEL_SEQ_CST to avoid annoying runtime checking.  */
    if (TREE_CODE (exp) != INTEGER_CST)
!     return MEMMODEL_SEQ_CST;
! 
    op = expand_normal (exp);
    if (INTVAL (op) < 0 || INTVAL (op) >= MEMMODEL_LAST)
      {
        error ("invalid memory model argument to builtin");
!       return MEMMODEL_SEQ_CST;
      }
    return (enum memmodel) INTVAL (op);
  }
Index: libstdc++-v3/include/bits/atomic_2.h
===================================================================
*** libstdc++-v3/include/bits/atomic_2.h        (revision 177737)
--- libstdc++-v3/include/bits/atomic_2.h        (working copy)
*************** namespace __atomic2
*** 60,78 ****
      bool
      test_and_set(memory_order __m = memory_order_seq_cst)
      {
!       // Redundant synchronize if built-in for lock is a full barrier.
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
!       __sync_synchronize();
!       return __sync_lock_test_and_set(&_M_i, 1);
      }
  
      bool
      test_and_set(memory_order __m = memory_order_seq_cst) volatile
      {
!       // Redundant synchronize if built-in for lock is a full barrier.
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
!       __sync_synchronize();
!       return __sync_lock_test_and_set(&_M_i, 1);
      }
  
      void
--- 60,72 ----
      bool
      test_and_set(memory_order __m = memory_order_seq_cst)
      {
!       return __sync_mem_flag_test_and_set(&_M_i, __m);
      }
  
      bool
      test_and_set(memory_order __m = memory_order_seq_cst) volatile
      {
!       return __sync_mem_flag_test_and_set(&_M_i, __m);
      }
  
      void
*************** namespace __atomic2
*** 82,90 ****
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_lock_release(&_M_i);
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
!       __sync_synchronize();
      }
  
      void
--- 76,82 ----
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_mem_flag_clear(&_M_i, __m);
      }
  
      void
*************** namespace __atomic2
*** 94,102 ****
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_lock_release(&_M_i);
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
!       __sync_synchronize();
      }
    };
  
--- 86,92 ----
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_mem_flag_clear(&_M_i, __m);
      }
    };
  
*************** namespace __atomic2
*** 180,238 ****
  
        __int_type
        operator++()
!       { return __sync_add_and_fetch(&_M_i, 1); }
  
        __int_type
        operator++() volatile
!       { return __sync_add_and_fetch(&_M_i, 1); }
  
        __int_type
        operator--()
!       { return __sync_sub_and_fetch(&_M_i, 1); }
  
        __int_type
        operator--() volatile
!       { return __sync_sub_and_fetch(&_M_i, 1); }
  
        __int_type
        operator+=(__int_type __i)
!       { return __sync_add_and_fetch(&_M_i, __i); }
  
        __int_type
        operator+=(__int_type __i) volatile
!       { return __sync_add_and_fetch(&_M_i, __i); }
  
        __int_type
        operator-=(__int_type __i)
!       { return __sync_sub_and_fetch(&_M_i, __i); }
  
        __int_type
        operator-=(__int_type __i) volatile
!       { return __sync_sub_and_fetch(&_M_i, __i); }
  
        __int_type
        operator&=(__int_type __i)
!       { return __sync_and_and_fetch(&_M_i, __i); }
  
        __int_type
        operator&=(__int_type __i) volatile
!       { return __sync_and_and_fetch(&_M_i, __i); }
  
        __int_type
        operator|=(__int_type __i)
!       { return __sync_or_and_fetch(&_M_i, __i); }
  
        __int_type
        operator|=(__int_type __i) volatile
!       { return __sync_or_and_fetch(&_M_i, __i); }
  
        __int_type
        operator^=(__int_type __i)
!       { return __sync_xor_and_fetch(&_M_i, __i); }
  
        __int_type
        operator^=(__int_type __i) volatile
!       { return __sync_xor_and_fetch(&_M_i, __i); }
  
        bool
        is_lock_free() const
--- 170,228 ----
  
        __int_type
        operator++()
!       { return __sync_mem_add_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator++() volatile
!       { return __sync_mem_add_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator--()
!       { return __sync_mem_sub_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator--() volatile
!       { return __sync_mem_sub_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator+=(__int_type __i)
!       { return __sync_mem_add_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator+=(__int_type __i) volatile
!       { return __sync_mem_add_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator-=(__int_type __i)
!       { return __sync_mem_sub_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator-=(__int_type __i) volatile
!       { return __sync_mem_sub_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator&=(__int_type __i)
!       { return __sync_mem_and_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator&=(__int_type __i) volatile
!       { return __sync_mem_and_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator|=(__int_type __i)
!       { return __sync_mem_or_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator|=(__int_type __i) volatile
!       { return __sync_mem_or_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator^=(__int_type __i)
!       { return __sync_mem_xor_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator^=(__int_type __i) volatile
!       { return __sync_mem_xor_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        bool
        is_lock_free() const
*************** namespace __atomic2
*** 249,263 ****
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
  
!       if (__m == memory_order_relaxed)
!         _M_i = __i;
!       else
!         {
!           // write_mem_barrier();
!           _M_i = __i;
!           if (__m == memory_order_seq_cst)
!             __sync_synchronize();
!         }
        }
  
        void
--- 239,245 ----
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
  
!       __sync_mem_store (&_M_i, __i, __m);
        }
  
        void
*************** namespace __atomic2
*** 267,281 ****
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
  
!       if (__m == memory_order_relaxed)
!         _M_i = __i;
!       else
!         {
!           // write_mem_barrier();
!           _M_i = __i;
!           if (__m == memory_order_seq_cst)
!             __sync_synchronize();
!         }
        }
  
        __int_type
--- 249,255 ----
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
  
!       __sync_mem_store (&_M_i, __i, __m);
        }
  
        __int_type
*************** namespace __atomic2
*** 284,293 ****
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_synchronize();
!       __int_type __ret = _M_i;
!       __sync_synchronize();
!       return __ret;
        }
  
        __int_type
--- 258,264 ----
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       return __sync_mem_load (const_cast <__int_type *>(&_M_i), __m);
        }
  
        __int_type
*************** namespace __atomic2
*** 296,320 ****
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_synchronize();
!       __int_type __ret = _M_i;
!       __sync_synchronize();
!       return __ret;
        }
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
        {
!       // XXX built-in assumes memory_order_acquire.
!       return __sync_lock_test_and_set(&_M_i, __i);
        }
  
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
        {
!       // XXX built-in assumes memory_order_acquire.
!       return __sync_lock_test_and_set(&_M_i, __i);
        }
  
        bool
--- 267,286 ----
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       return __sync_mem_load (const_cast <__int_type *>(&_M_i), __m);
        }
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
        {
!       return __sync_mem_exchange (&_M_i, __i, __m);
        }
  
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
        {
!       return __sync_mem_exchange (&_M_i, __i, __m);
        }
  
        bool
*************** namespace __atomic2
*** 352,357 ****
--- 318,324 ----
        __glibcxx_assert(__m2 <= __m1);
  
        __int_type __i1o = __i1;
+       // Compare_and_swap is a full barrier already.
        __int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
  
        // Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 368,373 ****
--- 335,341 ----
        __glibcxx_assert(__m2 <= __m1);
  
        __int_type __i1o = __i1;
+       // Compare_and_swap is a full barrier already.
        __int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
  
        // Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 393,440 ****
  
        __int_type
        fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_add(&_M_i, __i); }
  
        __int_type
        fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_add(&_M_i, __i); }
  
        __int_type
        fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_sub(&_M_i, __i); }
  
        __int_type
        fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_sub(&_M_i, __i); }
  
        __int_type
        fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_and(&_M_i, __i); }
  
        __int_type
        fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_and(&_M_i, __i); }
  
        __int_type
        fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_or(&_M_i, __i); }
  
        __int_type
        fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_or(&_M_i, __i); }
  
        __int_type
        fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_xor(&_M_i, __i); }
  
        __int_type
        fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_xor(&_M_i, __i); }
      };
  
  
--- 361,408 ----
  
        __int_type
        fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_add(&_M_i, __i, __m); }
  
        __int_type
        fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_add(&_M_i, __i, __m); }
  
        __int_type
        fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_sub(&_M_i, __i, __m); }
  
        __int_type
        fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_sub(&_M_i, __i, __m); }
  
        __int_type
        fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_and(&_M_i, __i, __m); }
  
        __int_type
        fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_and(&_M_i, __i, __m); }
  
        __int_type
        fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_or(&_M_i, __i, __m); }
  
        __int_type
        fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_or(&_M_i, __i, __m); }
  
        __int_type
        fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_xor(&_M_i, __i, __m); }
  
        __int_type
        fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_xor(&_M_i, __i, __m); }
      };
  
  
*************** namespace __atomic2
*** 495,529 ****
  
        __pointer_type
        operator++()
!       { return fetch_add(1) + 1; }
  
        __pointer_type
        operator++() volatile
!       { return fetch_add(1) + 1; }
  
        __pointer_type
        operator--()
!       { return fetch_sub(1) -1; }
  
        __pointer_type
        operator--() volatile
!       { return fetch_sub(1) -1; }
  
        __pointer_type
        operator+=(ptrdiff_t __d)
!       { return fetch_add(__d) + __d; }
  
        __pointer_type
        operator+=(ptrdiff_t __d) volatile
!       { return fetch_add(__d) + __d; }
  
        __pointer_type
        operator-=(ptrdiff_t __d)
!       { return fetch_sub(__d) - __d; }
  
        __pointer_type
        operator-=(ptrdiff_t __d) volatile
!       { return fetch_sub(__d) - __d; }
  
        bool
        is_lock_free() const
--- 463,497 ----
  
        __pointer_type
        operator++()
!       { return __sync_mem_add_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator++() volatile
!       { return __sync_mem_add_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator--()
!       { return __sync_mem_sub_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator--() volatile
!       { return __sync_mem_sub_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator+=(ptrdiff_t __d)
!       { return __sync_mem_add_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator+=(ptrdiff_t __d) volatile
!       { return __sync_mem_add_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator-=(ptrdiff_t __d)
!       { return __sync_mem_sub_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator-=(ptrdiff_t __d) volatile
!       { return __sync_mem_sub_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        bool
        is_lock_free() const
*************** namespace __atomic2
*** 540,554 ****
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
  
!       if (__m == memory_order_relaxed)
!         _M_p = __p;
!       else
!         {
!           // write_mem_barrier();
!           _M_p = __p;
!           if (__m == memory_order_seq_cst)
!             __sync_synchronize();
!         }
        }
  
        void
--- 508,514 ----
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
  
!       __sync_mem_store (&_M_p, __p, __m);
        }
  
        void
*************** namespace __atomic2
*** 559,573 ****
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
  
!       if (__m == memory_order_relaxed)
!         _M_p = __p;
!       else
!         {
!           // write_mem_barrier();
!           _M_p = __p;
!           if (__m == memory_order_seq_cst)
!             __sync_synchronize();
!         }
        }
  
        __pointer_type
--- 519,525 ----
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
  
!       __sync_mem_store (&_M_p, __p, __m);
        }
  
        __pointer_type
*************** namespace __atomic2
*** 576,585 ****
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_synchronize();
!       __pointer_type __ret = _M_p;
!       __sync_synchronize();
!       return __ret;
        }
  
        __pointer_type
--- 528,534 ----
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       return __sync_mem_load (const_cast <__pointer_type *>(&_M_p), __m);
        }
  
        __pointer_type
*************** namespace __atomic2
*** 588,604 ****
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_synchronize();
!       __pointer_type __ret = _M_p;
!       __sync_synchronize();
!       return __ret;
        }
  
        __pointer_type
        exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
        {
!       // XXX built-in assumes memory_order_acquire.
!       return __sync_lock_test_and_set(&_M_p, __p);
        }
  
  
--- 537,549 ----
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       return __sync_mem_load (const_cast <__pointer_type *>(&_M_p), __m);
        }
  
        __pointer_type
        exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
        {
!       return __sync_mem_exchange (&_M_p, __p, __m);
        }
  
  
*************** namespace __atomic2
*** 606,613 ****
        exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile
        {
!       // XXX built-in assumes memory_order_acquire.
!       return __sync_lock_test_and_set(&_M_p, __p);
        }
  
        bool
--- 551,557 ----
        exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile
        {
!       return __sync_mem_exchange (&_M_p, __p, __m);
        }
  
        bool
*************** namespace __atomic2
*** 619,624 ****
--- 563,569 ----
        __glibcxx_assert(__m2 <= __m1);
  
        __pointer_type __p1o = __p1;
+       // Compare_and_swap is a full barrier already.
        __pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
  
        // Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 635,640 ****
--- 580,586 ----
        __glibcxx_assert(__m2 <= __m1);
  
        __pointer_type __p1o = __p1;
+       // Compare_and_swap is a full barrier already.
        __pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
  
        // Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 644,664 ****
  
        __pointer_type
        fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_add(&_M_p, __d); }
  
        __pointer_type
        fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_add(&_M_p, __d); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_sub(&_M_p, __d); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_sub(&_M_p, __d); }
      };
  
  } // namespace __atomic2
--- 590,610 ----
  
        __pointer_type
        fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_add(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_add(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_sub(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_sub(&_M_p, __d, __m); }
      };
  
  } // namespace __atomic2
