Hi,

This patch adds support for the vcond and vcondu standard pattern
names to the AArch64 backend.
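
These are the pattern names the vectoriser uses when it expands a
VEC_COND_EXPR, so loops of roughly the following shape should now be
vectorisable using a compare followed by a bit select.  (This is only
an illustrative sketch of mine; the functions below are not taken from
the patch or the testsuite.)

    void
    f (int *a, int *b, int *c, int *d, int n)
    {
      for (int i = 0; i < n; i++)
        a[i] = b[i] > c[i] ? b[i] : d[i];
    }

    /* The unsigned comparison is expanded through vcondu rather than
       vcond.  */
    void
    g (unsigned *a, unsigned *b, unsigned *c, unsigned *d, int n)
    {
      for (int i = 0; i < n; i++)
        a[i] = b[i] > c[i] ? b[i] : d[i];
    }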

Tested with no regressions on aarch64-none-elf.

OK for aarch64-branch?

(If so, someone will have to commit for me, as I do not
have commit rights.)

Thanks
James Greenhalgh

---
2012-09-11  James Greenhalgh  <james.greenha...@arm.com>
            Tejas Belagod  <tejas.bela...@arm.com>

        * config/aarch64/aarch64-simd.md
        (aarch64_simd_bsl<mode>_internal): New pattern.
        (aarch64_simd_bsl<mode>): Likewise.
        (aarch64_vcond_internal<mode>): Likewise.
        (vcondu<mode><mode>): Likewise.
        (vcond<mode><mode>): Likewise.
        * config/aarch64/iterators.md (UNSPEC_BSL): Add to define_constants.
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index a7ddfb1..c9b5e17 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1467,6 +1467,150 @@
    (set_attr "simd_mode" "V2SI")]
 )
 
+;; vbsl_* intrinsics may compile to any of bsl/bit/bif depending on register
+;; allocation.  For an intrinsic of form:
+;;   vD = vbsl_* (vS, vN, vM)
+;; We can use any of:
+;;   bsl vS, vN, vM  (if D = S)
+;;   bit vD, vN, vS  (if D = M, so 1-bits in vS choose bits from vN, else vM)
+;;   bif vD, vM, vS  (if D = N, so 0-bits in vS choose bits from vM, else vN)
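+;; In each case the result is the same bitwise select:
+;;   vD = (vS & vN) | (~vS & vM)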
+
+(define_insn "aarch64_simd_bsl<mode>_internal"
+  [(set (match_operand:VDQ 0 "register_operand"		     "=w,w,w")
+	(unspec:VDQ [(match_operand:VDQ 1 "register_operand" " 0,w,w")
+		     (match_operand:VDQ 2 "register_operand" " w,w,0")
+                     (match_operand:VDQ 3 "register_operand" " w,0,w")]
+                    UNSPEC_BSL))]
+  "TARGET_SIMD"
+  "@
+  bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
+  bit\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+  bif\\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>"
+)
+
+(define_expand "aarch64_simd_bsl<mode>"
+  [(set (match_operand:VDQ 0 "register_operand")
+        (unspec:VDQ [(match_operand:<V_cmp_result> 1 "register_operand")
+                      (match_operand:VDQ 2 "register_operand")
+                      (match_operand:VDQ 3 "register_operand")]
+                     UNSPEC_BSL))]
+  "TARGET_SIMD"
+{
+  /* We can't alias operands together if they have different modes.  */
+  operands[1] = gen_lowpart (<MODE>mode, operands[1]);
+})
+
+(define_expand "aarch64_vcond_internal<mode>"
+  [(set (match_operand:VDQ 0 "register_operand")
+	(if_then_else:VDQ
+	  (match_operator 3 "comparison_operator"
+	    [(match_operand:VDQ 4 "register_operand")
+	     (match_operand:VDQ 5 "nonmemory_operand")])
+	  (match_operand:VDQ 1 "register_operand")
+	  (match_operand:VDQ 2 "register_operand")))]
+  "TARGET_SIMD"
+{
+  int inverse = 0, has_zero_imm_form = 0;
+  rtx mask = gen_reg_rtx (<MODE>mode);
+
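+  /* AArch64 provides cmge, cmgt and cmeq for signed comparisons and
+     equality, and cmhs and cmhi for unsigned comparisons.  The remaining
+     comparison codes are handled by emitting the opposite comparison and
+     swapping the two value operands of the bit select below.  Only the
+     signed comparisons and eq/ne have a compare-against-immediate-zero
+     form.  */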
+  switch (GET_CODE (operands[3]))
+    {
+    case LE:
+    case LT:
+    case NE:
+      inverse = 1;
+      /* Fall through.  */
+    case GE:
+    case GT:
+    case EQ:
+      has_zero_imm_form = 1;
+      break;
+    case LEU:
+    case LTU:
+      inverse = 1;
+      break;
+    default:
+      break;
+    }
+
+  if (!REG_P (operands[5])
+      && (operands[5] != CONST0_RTX (<MODE>mode) || !has_zero_imm_form))
+    operands[5] = force_reg (<MODE>mode, operands[5]);
+
+  switch (GET_CODE (operands[3]))
+    {
+    case LT:
+    case GE:
+      emit_insn (gen_aarch64_cmge<mode> (mask, operands[4], operands[5]));
+      break;
+
+    case LE:
+    case GT:
+      emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
+      break;
+
+    case LTU:
+    case GEU:
+      emit_insn (gen_aarch64_cmhs<mode> (mask, operands[4], operands[5]));
+      break;
+
+    case LEU:
+    case GTU:
+      emit_insn (gen_aarch64_cmhi<mode> (mask, operands[4], operands[5]));
+      break;
+
+    case NE:
+    case EQ:
+      emit_insn (gen_aarch64_cmeq<mode> (mask, operands[4], operands[5]));
+      break;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  if (inverse)
+    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
+				    operands[1]));
+  else
+    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
+				    operands[2]));
+
+  DONE;
+})
+
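+;; vcond<mode><mode> and vcondu<mode><mode> are the standard pattern names
+;; the vectoriser expects for vector selects; vcondu is used when the
+;; comparison operands are unsigned.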
+(define_expand "vcond<mode><mode>"
+  [(set (match_operand:VDQ 0 "register_operand")
+	(if_then_else:VDQ
+	  (match_operator 3 "comparison_operator"
+	    [(match_operand:VDQ 4 "register_operand")
+	     (match_operand:VDQ 5 "nonmemory_operand")])
+	  (match_operand:VDQ 1 "register_operand")
+	  (match_operand:VDQ 2 "register_operand")))]
+  "TARGET_SIMD"
+{
+  emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
+					       operands[2], operands[3],
+					       operands[4], operands[5]));
+  DONE;
+})
+
+
+(define_expand "vcondu<mode><mode>"
+  [(set (match_operand:VDQ 0 "register_operand")
+	(if_then_else:VDQ
+	  (match_operator 3 "comparison_operator"
+	    [(match_operand:VDQ 4 "register_operand")
+	     (match_operand:VDQ 5 "nonmemory_operand")])
+	  (match_operand:VDQ 1 "register_operand")
+	  (match_operand:VDQ 2 "register_operand")))]
+  "TARGET_SIMD"
+{
+  emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
+					       operands[2], operands[3],
+					       operands[4], operands[5]));
+  DONE;
+})
+
 ;; Patterns for AArch64 SIMD Intrinsics.
 
 (define_expand "aarch64_create<mode>"
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index bf2041e..8d5d4b0 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -227,6 +227,7 @@
     UNSPEC_CMTST	; Used in aarch64-simd.md.
     UNSPEC_FMAX		; Used in aarch64-simd.md.
     UNSPEC_FMIN		; Used in aarch64-simd.md.
+    UNSPEC_BSL		; Used in aarch64-simd.md.
 ])
 
 ;; -------------------------------------------------------------------
