Author: Kai Luo
Date: 2021-10-28T02:18:43Z
New Revision: 6ea2431d3f109aefa31cd4d520cc234a5aa5484a
URL: https://github.com/llvm/llvm-project/commit/6ea2431d3f109aefa31cd4d520cc234a5aa5484a
DIFF: https://github.com/llvm/llvm-project/commit/6ea2431d3f109aefa31cd4d520cc234a5aa5484a.diff

LOG: [clang][compiler-rt][atomics] Add `__c11_atomic_fetch_nand` builtin and support `__atomic_fetch_nand` libcall

Add `__c11_atomic_fetch_nand` builtin to language extensions and support
`__atomic_fetch_nand` libcall in compiler-rt.

Reviewed By: theraven

Differential Revision: https://reviews.llvm.org/D112400

Added:

Modified:
    clang/docs/LanguageExtensions.rst
    clang/include/clang/Basic/Builtins.def
    clang/lib/AST/Expr.cpp
    clang/lib/CodeGen/CGAtomic.cpp
    clang/lib/Sema/SemaChecking.cpp
    clang/test/Sema/atomic-implicit-seq_cst.c
    clang/test/Sema/atomic-ops.c
    compiler-rt/lib/builtins/atomic.c
    compiler-rt/test/builtins/Unit/atomic_test.c

Removed:

################################################################################
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index 143b2359b58d7..da2c90778ef46 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -2866,6 +2866,7 @@ the corresponding C11 operations, are:
 * ``__c11_atomic_fetch_and``
 * ``__c11_atomic_fetch_or``
 * ``__c11_atomic_fetch_xor``
+* ``__c11_atomic_fetch_nand`` (Nand is not presented in ``<stdatomic.h>``)
 * ``__c11_atomic_fetch_max``
 * ``__c11_atomic_fetch_min``
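For context (not part of the patch): like the other `__c11_atomic_fetch_*` builtins documented above, the new builtin takes a pointer to an `_Atomic`-qualified object, a value, and a memory-order argument; it atomically stores `~(old & value)` and returns the old value. A minimal usage sketch, with an illustrative function name:

  // Illustrative only: atomically performs *obj = ~(*obj & mask) and
  // returns the previous value.
  static int fetch_nand_relaxed(_Atomic(int) *obj, int mask) {
    return __c11_atomic_fetch_nand(obj, mask, __ATOMIC_RELAXED);
  }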
diff --git a/clang/include/clang/Basic/Builtins.def b/clang/include/clang/Basic/Builtins.def
index 874bc655a00b7..7d331a86126f1 100644
--- a/clang/include/clang/Basic/Builtins.def
+++ b/clang/include/clang/Basic/Builtins.def
@@ -796,6 +796,7 @@ ATOMIC_BUILTIN(__c11_atomic_fetch_sub, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_and, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_or, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_nand, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_max, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_min, "v.", "t")
 BUILTIN(__c11_atomic_thread_fence, "vi", "n")
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index e9ee624e499da..415b6e52b564b 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -4695,6 +4695,7 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
   case AO__c11_atomic_fetch_and:
   case AO__c11_atomic_fetch_or:
   case AO__c11_atomic_fetch_xor:
+  case AO__c11_atomic_fetch_nand:
   case AO__c11_atomic_fetch_max:
   case AO__c11_atomic_fetch_min:
   case AO__atomic_fetch_add:
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index b6722ad4e4f18..326ca8d50533e 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -664,6 +664,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__atomic_nand_fetch:
     PostOp = llvm::Instruction::And; // the NOT is special cased below
     LLVM_FALLTHROUGH;
+  case AtomicExpr::AO__c11_atomic_fetch_nand:
   case AtomicExpr::AO__atomic_fetch_nand:
     Op = llvm::AtomicRMWInst::Nand;
     break;
@@ -906,6 +907,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__c11_atomic_fetch_and:
   case AtomicExpr::AO__c11_atomic_fetch_or:
   case AtomicExpr::AO__c11_atomic_fetch_xor:
+  case AtomicExpr::AO__c11_atomic_fetch_nand:
   case AtomicExpr::AO__c11_atomic_fetch_max:
   case AtomicExpr::AO__c11_atomic_fetch_min:
   case AtomicExpr::AO__opencl_atomic_fetch_and:
@@ -972,6 +974,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__c11_atomic_fetch_or:
   case AtomicExpr::AO__opencl_atomic_fetch_or:
   case AtomicExpr::AO__atomic_fetch_or:
+  case AtomicExpr::AO__c11_atomic_fetch_nand:
   case AtomicExpr::AO__atomic_fetch_nand:
   case AtomicExpr::AO__c11_atomic_fetch_sub:
   case AtomicExpr::AO__opencl_atomic_fetch_sub:
@@ -1211,6 +1214,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__atomic_nand_fetch:
     PostOp = llvm::Instruction::And; // the NOT is special cased below
     LLVM_FALLTHROUGH;
+  case AtomicExpr::AO__c11_atomic_fetch_nand:
   case AtomicExpr::AO__atomic_fetch_nand:
     LibCallName = "__atomic_fetch_nand";
     AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
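For reference (not part of the patch): when the access can be done lock-free, the codegen changes above lower the builtin to an `atomicrmw nand` instruction; otherwise clang falls back to the `__atomic_fetch_nand` libcall family that compiler-rt implements later in this commit. The sized entry points have the following shape (prototypes mirror the declarations added to atomic_test.c below; which sizes exist depends on the target):

  #include <stdint.h>

  // Each call returns the previous value of *ptr and stores ~(old & val),
  // using the memory order encoded in `model`.
  uint8_t  __atomic_fetch_nand_1(uint8_t *ptr, uint8_t val, int model);
  uint16_t __atomic_fetch_nand_2(uint16_t *ptr, uint16_t val, int model);
  uint32_t __atomic_fetch_nand_4(uint32_t *ptr, uint32_t val, int model);
  uint64_t __atomic_fetch_nand_8(uint64_t *ptr, uint64_t val, int model);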
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 3eaeae197648a..147f50aeed97f 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -5287,6 +5287,7 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
   case AtomicExpr::AO__c11_atomic_fetch_and:
   case AtomicExpr::AO__c11_atomic_fetch_or:
   case AtomicExpr::AO__c11_atomic_fetch_xor:
+  case AtomicExpr::AO__c11_atomic_fetch_nand:
   case AtomicExpr::AO__opencl_atomic_fetch_and:
   case AtomicExpr::AO__opencl_atomic_fetch_or:
   case AtomicExpr::AO__opencl_atomic_fetch_xor:
diff --git a/clang/test/Sema/atomic-implicit-seq_cst.c b/clang/test/Sema/atomic-implicit-seq_cst.c
index fff7b2444906c..562a9df6d758b 100644
--- a/clang/test/Sema/atomic-implicit-seq_cst.c
+++ b/clang/test/Sema/atomic-implicit-seq_cst.c
@@ -178,6 +178,14 @@ int bad_bitor_2(int i) {
   return i | atom; // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
 }
 
+int bad_bitnand_1(int i) {
+  return ~(atom & i); // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
+}
+
+int bad_bitnand_2(int i) {
+  return ~(i & atom); // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
+}
+
 int bad_and_1(int i) {
   return atom && i; // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
 }
@@ -315,6 +323,7 @@ int good_c11_atomic_fetch_sub(int i) { return __c11_atomic_fetch_sub(&atom, i, _
 int good_c11_atomic_fetch_and(int i) { return __c11_atomic_fetch_and(&atom, i, __ATOMIC_RELAXED); }
 int good_c11_atomic_fetch_or(int i) { return __c11_atomic_fetch_or(&atom, i, __ATOMIC_RELAXED); }
 int good_c11_atomic_fetch_xor(int i) { return __c11_atomic_fetch_xor(&atom, i, __ATOMIC_RELAXED); }
+int good_c11_atomic_fetch_nand(int i) { return __c11_atomic_fetch_nand(&atom, i, __ATOMIC_RELAXED); }
 
 void good_cast_to_void(void) { (void)atom; }
 _Atomic(int) * good_address_of(void) { return &atom; }
diff --git a/clang/test/Sema/atomic-ops.c b/clang/test/Sema/atomic-ops.c
index 59ecdbf960832..8b5757067d2bc 100644
--- a/clang/test/Sema/atomic-ops.c
+++ b/clang/test/Sema/atomic-ops.c
@@ -362,6 +362,13 @@ void memory_checks(_Atomic(int) *Ap, int *p, int val) {
   (void)__c11_atomic_fetch_xor(Ap, val, memory_order_acq_rel);
   (void)__c11_atomic_fetch_xor(Ap, val, memory_order_seq_cst);
 
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_relaxed);
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_acquire);
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_consume);
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_release);
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_acq_rel);
+  (void)__c11_atomic_fetch_nand(Ap, val, memory_order_seq_cst);
+
   (void)__c11_atomic_fetch_min(Ap, val, memory_order_relaxed);
   (void)__c11_atomic_fetch_min(Ap, val, memory_order_acquire);
   (void)__c11_atomic_fetch_min(Ap, val, memory_order_consume);
@@ -602,6 +609,8 @@ void nullPointerWarning() {
   (void)__c11_atomic_fetch_or((_Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
   (void)__c11_atomic_fetch_xor((volatile _Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
   (void)__c11_atomic_fetch_xor((_Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
+  (void)__c11_atomic_fetch_nand((volatile _Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
+  (void)__c11_atomic_fetch_nand((_Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
 
   __atomic_store_n((volatile int*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
   __atomic_store_n((int*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
@@ -680,6 +689,8 @@ void nullPointerWarning() {
   (void)__c11_atomic_fetch_or(&ai, 0, memory_order_relaxed);
   (void)__c11_atomic_fetch_xor(&vai, 0, memory_order_relaxed);
   (void)__c11_atomic_fetch_xor(&ai, 0, memory_order_relaxed);
+  (void)__c11_atomic_fetch_nand(&vai, 0, memory_order_relaxed);
+  (void)__c11_atomic_fetch_nand(&ai, 0, memory_order_relaxed);
 
   // Ditto.
   __atomic_store_n(&vi, 0, memory_order_relaxed);
diff --git a/compiler-rt/lib/builtins/atomic.c b/compiler-rt/lib/builtins/atomic.c
index 64bf72dfa345c..4c3ebb99a5136 100644
--- a/compiler-rt/lib/builtins/atomic.c
+++ b/compiler-rt/lib/builtins/atomic.c
@@ -336,6 +336,18 @@ OPTIMISED_CASES
     return tmp;                                                              \
   }
 
+#define ATOMIC_RMW_NAND(n, lockfree, type)                                   \
+  type __atomic_fetch_nand_##n(type *ptr, type val, int model) {             \
+    if (lockfree(ptr))                                                       \
+      return __c11_atomic_fetch_nand((_Atomic(type) *)ptr, val, model);      \
+    Lock *l = lock_for_pointer(ptr);                                         \
+    lock(l);                                                                 \
+    type tmp = *ptr;                                                         \
+    *ptr = ~(tmp & val);                                                     \
+    unlock(l);                                                               \
+    return tmp;                                                              \
+  }
+
 #define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
@@ -351,3 +363,6 @@ OPTIMISED_CASES
 #define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW_NAND(n, lockfree, type)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
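A small, self-contained reference model (not from the patch; the helper name and sample values are illustrative) of the result each `ATOMIC_RMW_NAND` instantiation must produce, matching the `~((V + m) & ONES)` checks in the unit test below. The real entry points additionally take a memory-order argument and fall back to the lock table via `lock_for_pointer` when the access is not lock-free:

  #include <stdint.h>
  #include <stdio.h>

  // Non-atomic model of fetch-nand: return the old value, store ~(old & val).
  static uint32_t model_fetch_nand_4(uint32_t *ptr, uint32_t val) {
    uint32_t old = *ptr;
    *ptr = ~(old & val);
    return old;
  }

  int main(void) {
    uint32_t x = 0xF0F0F0F0u;
    uint32_t old = model_fetch_nand_4(&x, 0xFFFF0000u);
    // Prints: old=f0f0f0f0 new=0fffffff
    printf("old=%08x new=%08x\n", (unsigned)old, (unsigned)x);
    return 0;
  }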
diff --git a/compiler-rt/test/builtins/Unit/atomic_test.c b/compiler-rt/test/builtins/Unit/atomic_test.c
index c512998341615..f8281f3649379 100644
--- a/compiler-rt/test/builtins/Unit/atomic_test.c
+++ b/compiler-rt/test/builtins/Unit/atomic_test.c
@@ -96,6 +96,11 @@ uint16_t __atomic_fetch_xor_2(uint16_t *ptr, uint16_t val, int model);
 uint32_t __atomic_fetch_xor_4(uint32_t *ptr, uint32_t val, int model);
 uint64_t __atomic_fetch_xor_8(uint64_t *ptr, uint64_t val, int model);
 
+uint8_t __atomic_fetch_nand_1(uint8_t *ptr, uint8_t val, int model);
+uint16_t __atomic_fetch_nand_2(uint16_t *ptr, uint16_t val, int model);
+uint32_t __atomic_fetch_nand_4(uint32_t *ptr, uint32_t val, int model);
+uint64_t __atomic_fetch_nand_8(uint64_t *ptr, uint64_t val, int model);
+
 // We conditionally test the *_16 atomic function variants based on the same
 // condition that compiler_rt (atomic.c) uses to conditionally generate them.
 // Currently atomic.c tests if __SIZEOF_INT128__ is defined (which can be the
@@ -119,6 +124,7 @@ uint128_t __atomic_fetch_sub_16(uint128_t *ptr, uint128_t val, int model);
 uint128_t __atomic_fetch_and_16(uint128_t *ptr, uint128_t val, int model);
 uint128_t __atomic_fetch_or_16(uint128_t *ptr, uint128_t val, int model);
 uint128_t __atomic_fetch_xor_16(uint128_t *ptr, uint128_t val, int model);
+uint128_t __atomic_fetch_nand_16(uint128_t *ptr, uint128_t val, int model);
 #else
 typedef uint64_t maxuint_t;
 #endif
@@ -540,6 +546,28 @@ void test_fetch_op(void) {
     abort();
 #endif
 
+  // Fetch nand.
+
+  set_a_values(V + m);
+  set_b_values(0);
+  b8 = __atomic_fetch_nand_1(&a8, U8(ONES), model);
+  if (b8 != U8(V + m) || a8 != U8(~((V + m) & ONES)))
+    abort();
+  b16 = __atomic_fetch_nand_2(&a16, U16(ONES), model);
+  if (b16 != U16(V + m) || a16 != U16(~((V + m) & ONES)))
+    abort();
+  b32 = __atomic_fetch_nand_4(&a32, U32(ONES), model);
+  if (b32 != U32(V + m) || a32 != U32(~((V + m) & ONES)))
+    abort();
+  b64 = __atomic_fetch_nand_8(&a64, U64(ONES), model);
+  if (b64 != U64(V + m) || a64 != U64(~((V + m) & ONES)))
+    abort();
+#ifdef TEST_16
+  b128 = __atomic_fetch_nand_16(&a128, ONES, model);
+  if (b128 != (V + m) || a128 != ~((V + m) & ONES))
+    abort();
+#endif
+
   // Check signed integer overflow behavior
   set_a_values(V + m);

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits