Hi,

The attached patch implements support for the ARMv8-A AES crypto instructions (AESE, AESD, AESMC and AESIMC) and the corresponding arm_neon.h intrinsics (vaeseq_u8, vaesdq_u8, vaesmcq_u8, vaesimcq_u8).
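
For reference, here is a rough usage sketch (not part of the patch) of how the
new intrinsics compose; the helper names and file name are made up for
illustration, and it needs -march=armv8-a+crypto, e.g.
aarch64-none-elf-gcc -march=armv8-a+crypto -c aes_example.c:

#include <arm_neon.h>

/* One AES encryption round: AESE performs AddRoundKey, ShiftRows and
   SubBytes; AESMC performs MixColumns.  */
uint8x16_t
aes_enc_round (uint8x16_t state, uint8x16_t round_key)
{
  return vaesmcq_u8 (vaeseq_u8 (state, round_key));
}

/* The matching decryption step via AESD and AESIMC (assuming suitably
   transformed round keys for the equivalent inverse cipher).  */
uint8x16_t
aes_dec_round (uint8x16_t state, uint8x16_t round_key)
{
  return vaesimcq_u8 (vaesdq_u8 (state, round_key));
}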

Tested on aarch64-none-elf. OK for trunk?

Thanks,
Tejas.

2013-12-06  Tejas Belagod  <tejas.bela...@arm.com>

gcc/
        * config/aarch64/aarch64-simd-builtins.def: Add crypto_aese,
        crypto_aesd, crypto_aesmc and crypto_aesimc builtins.
        * config/aarch64/aarch64-simd.md (aarch64_crypto_aes<aes_op>v16qi,
        aarch64_crypto_aes<aesmc_op>v16qi): New.
        * config/aarch64/arm_neon.h (vaeseq_u8, vaesdq_u8, vaesmcq_u8,
        vaesimcq_u8): New.
        * config/aarch64/iterators.md (UNSPEC_AESE, UNSPEC_AESD, UNSPEC_AESMC,
        UNSPEC_AESIMC): New.
        (CRYPTO_AES, CRYPTO_AESMC): New int iterators.
        (aes_op, aesmc_op): New int attributes.

testsuite/
        * gcc.target/aarch64/aes.c: New.
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index c18b150..49ab482 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -362,3 +362,8 @@
   /* Implemented by fma<mode>4.  */
   BUILTIN_VDQF (TERNOP, fma, 4)
 
+  /* Implemented by aarch64_crypto_aes<op><mode>.  */
+  VAR1 (BINOP, crypto_aese, 0, v16qi)
+  VAR1 (BINOP, crypto_aesd, 0, v16qi)
+  VAR1 (UNOP, crypto_aesmc, 0, v16qi)
+  VAR1 (UNOP, crypto_aesimc, 0, v16qi)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 5dcbc62..4b17748 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -4074,3 +4074,25 @@
       (gen_aarch64_get_lane<mode> (operands[0], operands[1], operands[2]));
     DONE;
 })
+
+;; aes
+
+(define_insn "aarch64_crypto_aes<aes_op>v16qi"
+  [(set (match_operand:V16QI 0 "register_operand" "=w")
+        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0")
+                      (match_operand:V16QI 2 "register_operand" "w")]
+         CRYPTO_AES))]
+  "TARGET_SIMD && TARGET_CRYPTO"
+  "aes<aes_op>\\t%0.16b, %2.16b"
+  [(set_attr "type" "crypto_aes")]
+)
+
+(define_insn "aarch64_crypto_aes<aesmc_op>v16qi"
+  [(set (match_operand:V16QI 0 "register_operand" "=w")
+        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "w")]
+         CRYPTO_AESMC))]
+  "TARGET_SIMD && TARGET_CRYPTO"
+  "aes<aesmc_op>\\t%0.16b, %1.16b"
+  [(set_attr "type" "crypto_aes")]
+)
+
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index dc56170..9f35e09 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -15793,6 +15793,42 @@ vaddvq_f64 (float64x2_t __a)
   return vgetq_lane_f64 (__t, __LANE0 (2));
 }
 
+#ifdef __ARM_FEATURE_CRYPTO
+
+/* vaes  */
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaeseq_u8 (uint8x16_t data, uint8x16_t key)
+{
+  return
+    (uint8x16_t) __builtin_aarch64_crypto_aesev16qi ((int8x16_t) data,
+                                                    (int8x16_t) key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesdq_u8 (uint8x16_t data, uint8x16_t key)
+{
+  return
+    (uint8x16_t) __builtin_aarch64_crypto_aesdv16qi ((int8x16_t) data,
+                                                    (int8x16_t) key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesmcq_u8 (uint8x16_t data)
+{
+  return
+    (uint8x16_t) __builtin_aarch64_crypto_aesmcv16qi ((int8x16_t) data);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesimcq_u8 (uint8x16_t data)
+{
+  return
+    (uint8x16_t) __builtin_aarch64_crypto_aesimcv16qi ((int8x16_t) data);
+}
+
+#endif
+
 /* vcage  */
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index fd7152c..91d6f74 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -263,6 +263,10 @@
     UNSPEC_UZP2                ; Used in vector permute patterns.
     UNSPEC_TRN1                ; Used in vector permute patterns.
     UNSPEC_TRN2                ; Used in vector permute patterns.
+    UNSPEC_AESE         ; Used in aarch64-simd.md.
+    UNSPEC_AESD         ; Used in aarch64-simd.md.
+    UNSPEC_AESMC        ; Used in aarch64-simd.md.
+    UNSPEC_AESIMC       ; Used in aarch64-simd.md.
 ])
 
 ;; -------------------------------------------------------------------
@@ -843,6 +847,9 @@
 
 (define_int_iterator FRECP [UNSPEC_FRECPE UNSPEC_FRECPX])
 
+(define_int_iterator CRYPTO_AES [UNSPEC_AESE UNSPEC_AESD])
+(define_int_iterator CRYPTO_AESMC [UNSPEC_AESMC UNSPEC_AESIMC])
+
 ;; -------------------------------------------------------------------
 ;; Int Iterators Attributes.
 ;; -------------------------------------------------------------------
@@ -959,3 +966,7 @@
                            (UNSPEC_UZP1 "1") (UNSPEC_UZP2 "2")])
 
 (define_int_attr frecp_suffix  [(UNSPEC_FRECPE "e") (UNSPEC_FRECPX "x")])
+
+(define_int_attr aes_op [(UNSPEC_AESE "e") (UNSPEC_AESD "d")])
+(define_int_attr aesmc_op [(UNSPEC_AESMC "mc") (UNSPEC_AESIMC "imc")])
+
diff --git a/gcc/testsuite/gcc.target/aarch64/aes.c b/gcc/testsuite/gcc.target/aarch64/aes.c
new file mode 100644
index 0000000..82665fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aes.c
@@ -0,0 +1,40 @@
+
+/* { dg-do compile } */
+/* { dg-options "-march=armv8-a+crypto" } */
+
+#include "arm_neon.h"
+
+uint8x16_t
+test_vaeseq_u8 (uint8x16_t data, uint8x16_t key)
+{
+  return vaeseq_u8 (data, key);
+}
+
+/* { dg-final { scan-assembler "aese\\tv\[0-9\]+\.16b, v\[0-9\]+\.16b" } } */
+
+uint8x16_t
+test_vaesdq_u8 (uint8x16_t data, uint8x16_t key)
+{
+  return vaesdq_u8 (data, key);
+}
+
+/* { dg-final { scan-assembler "aesd\\tv\[0-9\]+\.16b, v\[0-9\]+\.16b" } } */
+
+uint8x16_t
+test_vaesmcq_u8 (uint8x16_t data)
+{
+  return vaesmcq_u8 (data);
+}
+
+/* { dg-final { scan-assembler "aesmc\\tv\[0-9\]+\.16b, v\[0-9\]+\.16b" } } */
+
+uint8x16_t
+test_vaesimcq_u8 (uint8x16_t data)
+{
+  return vaesimcq_u8 (data);
+}
+
+/* { dg-final { scan-assembler "aesimc\\tv\[0-9\]+\.16b, v\[0-9\]+\.16b" } } */
+
+
+/* { dg-final { cleanup-saved-temps } } */
