---
 gcc/config/arm/arm.h   |    6 ++++
 gcc/config/arm/sync.md |   63 +++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 68 insertions(+), 1 deletions(-)

diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 31f4856..33e5b8e 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -276,6 +276,12 @@ extern void (*arm_lang_output_object_attributes_hook)(void);
 /* Nonzero if this chip implements a memory barrier instruction.  */
 #define TARGET_HAVE_MEMORY_BARRIER (TARGET_HAVE_DMB || TARGET_HAVE_DMB_MCR)
 
+/* Nonzero if this chip supports swp and swpb.  These are technically still
+   present post-armv6, but deprecated there.  Never use them if we have OS
+   support, as swp is not well-defined on SMP systems.  */
+#define TARGET_HAVE_SWP \
+  (TARGET_ARM && arm_arch4 && !arm_arch6 && arm_abi != ARM_ABI_AAPCS_LINUX)
+
 /* Nonzero if this chip supports ldrex and strex */
 #define TARGET_HAVE_LDREX      ((arm_arch6 && TARGET_ARM) || arm_arch7)
 
diff --git a/gcc/config/arm/sync.md b/gcc/config/arm/sync.md
index 124ebf0..72e7181 100644
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
@@ -26,6 +26,10 @@
    (DI "TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN
        && TARGET_HAVE_MEMORY_BARRIER")])
 
+(define_mode_attr swp_predtab
+  [(QI "TARGET_HAVE_SWP") (HI "false")
+   (SI "TARGET_HAVE_SWP") (DI "false")])
+
 (define_code_iterator syncop [plus minus ior xor and])
 
 (define_code_attr sync_optab
@@ -132,7 +136,41 @@
     DONE;
   })
 
-(define_insn_and_split "atomic_exchange<mode>"
+(define_expand "atomic_exchange<mode>"
+  [(match_operand:QHSD 0 "s_register_operand" "")
+   (match_operand:QHSD 1 "mem_noofs_operand" "")
+   (match_operand:QHSD 2 "s_register_operand" "r")
+   (match_operand:SI 3 "const_int_operand" "")]
+  "<sync_predtab> || <swp_predtab>"
+{
+  if (<sync_predtab>)
+    emit_insn (gen_atomic_exchange<mode>_rex (operands[0], operands[1],
+                                              operands[2], operands[3]));
+  else
+    {
+      /* Memory barriers are introduced in armv6, which also gains the
+         ldrex insns.  Therefore we can ignore the memory model argument
+         when issuing a SWP instruction.  */
+      gcc_checking_assert (!TARGET_HAVE_MEMORY_BARRIER);
+
+      if (<MODE>mode == QImode)
+        {
+          rtx x = gen_reg_rtx (SImode);
+          emit_insn (gen_atomic_exchangeqi_swp (x, operands[1], operands[2]));
+          emit_move_insn (operands[0], gen_lowpart (QImode, x));
+        }
+      else if (<MODE>mode == SImode)
+        {
+          emit_insn (gen_atomic_exchangesi_swp
+                     (operands[0], operands[1], operands[2]));
+        }
+      else
+        gcc_unreachable ();
+    }
+  DONE;
+})
+
+(define_insn_and_split "atomic_exchange<mode>_rex"
   [(set (match_operand:QHSD 0 "s_register_operand" "=&r")      ;; output
        (match_operand:QHSD 1 "mem_noofs_operand" "+Ua"))       ;; memory
    (set (match_dup 1)
@@ -152,6 +190,29 @@
     DONE;
   })
 
+(define_insn "atomic_exchangeqi_swp"
+  [(set (match_operand:SI 0 "s_register_operand" "=&r")                ;; 
output
+       (zero_extend:SI
+         (match_operand:QI 1 "mem_noofs_operand" "+Ua")))      ;; memory
+   (set (match_dup 1)
+       (unspec_volatile:QI
+         [(match_operand:QI 2 "s_register_operand" "r")]       ;; input
+         VUNSPEC_ATOMIC_XCHG))]
+  "TARGET_HAVE_SWP"
+  "swpb%?\t%0, %2, %C1"
+  [(set_attr "predicable" "yes")])
+
+(define_insn "atomic_exchangesi_swp"
+  [(set (match_operand:SI 0 "s_register_operand" "=&r")                ;; 
output
+       (match_operand:SI 1 "mem_noofs_operand" "+Ua"))         ;; memory
+   (set (match_dup 1)
+       (unspec_volatile:SI
+         [(match_operand:SI 2 "s_register_operand" "r")]       ;; input
+         VUNSPEC_ATOMIC_XCHG))]
+  "TARGET_HAVE_SWP"
+  "swp%?\t%0, %2, %C1"
+  [(set_attr "predicable" "yes")])
+
 (define_mode_attr atomic_op_operand
   [(QI "reg_or_int_operand")
    (HI "reg_or_int_operand")
-- 
1.7.6.4
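
For illustration (not part of the patch): a minimal C sketch of what the
new atomic_exchange<mode> expander handles.  The target and -march flag
below are assumptions for the sake of the example, not taken from the
patch; the point is any configuration where TARGET_HAVE_SWP holds (ARM
mode, armv4/armv5, not the AAPCS-Linux ABI), e.g. arm-none-eabi with
-march=armv4t.

    /* Sketch only; assumes ARM mode with -march=armv4t on a non-Linux
       ABI, so that TARGET_HAVE_SWP holds.  */

    unsigned char
    exchange_byte (unsigned char *p, unsigned char v)
    {
      /* QImode: should expand via atomic_exchangeqi_swp and emit swpb,
         with the SImode result truncated back to a byte.  */
      return __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST);
    }

    unsigned int
    exchange_word (unsigned int *p, unsigned int v)
    {
      /* SImode: should expand via atomic_exchangesi_swp and emit swp.  */
      return __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST);
    }

    unsigned short
    exchange_half (unsigned short *p, unsigned short v)
    {
      /* HImode: swp_predtab is "false" (there is no halfword swp), so
         with no ldrex/strex available either, this should fall back to
         a library call (__atomic_exchange_2).  */
      return __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST);
    }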
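
A note on the shape of the two insns: swpb zero-extends the byte it
loads into the destination register, which is why atomic_exchangeqi_swp
describes its result as an SImode zero_extend and why the expander goes
through a fresh SImode temporary, taking the QImode lowpart afterwards.
The memory model operand is dropped entirely on the SWP path; as the
expander's comment and its gcc_checking_assert spell out, a target old
enough to need swp has no memory barrier instruction, so no model
requires any extra code.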