The current vmaxnm/vminnm float intrinsics are implemented using
__builtin_aarch64_smax/min<mode>, which map to backend patterns
built on the smin/smax RTL operators.  However, as documented in rtl.def:

  "Further, if both operands are zeros, or if either operand is NaN, then
  it is unspecified which of the two operands is returned as the result."

There is therefore no guarantee that a number will be returned by the
smin/smax operators, and further testing shows GCC will fold something
like smin (1.0f, NaN) to NaN, so the current vmaxnm and vminnm intrinsics
fail the newly added testcases included in this patch.

This patch:

  * Migrate the vminnm/vmaxnm float intrinsics to the "<fmaxmin><mode>3"
    pattern, which guarantees fminnm/fmaxnm semantics.

  * Add new testcases for the vminnm and vmaxnm intrinsics, which were
    missing previously (their expected values follow the scalar reference
    sketched below).  They are skipped on arm*-*-* as ARM hasn't
    implemented these intrinsics.
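
As a scalar reference for the expected values in the new testcases (a
sketch only, not part of the patch), C99 fmaxf/fminf follow the same
"return the number if exactly one operand is a quiet NaN" rule as
FMAXNM/FMINNM:

  #include <math.h>
  #include <stdio.h>

  int
  main (void)
  {
    /* Both print 1.000000: the non-NaN operand is returned.  */
    printf ("%f\n", fmaxf (1.0f, NAN));
    printf ("%f\n", fminf (1.0f, NAN));
    /* Infinities behave as ordinary ordered values: inf and -inf.  */
    printf ("%f\n", fmaxf (INFINITY, 1.0f));
    printf ("%f\n", fminf (-INFINITY, 1.0f));
    return 0;
  }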

OK for trunk?

2016-07-06  Jiong Wang  <jiong.w...@arm.com>

gcc/
  * config/aarch64/aarch64-simd-builtins.def (smax): Remove float variants.
  (smin): Likewise.
  (fmax): New entry.
  (fmin): Likewise.
  * config/aarch64/arm_neon.h (vmaxnm_f32): Use __builtin_aarch64_fmaxv2sf.
  (vmaxnmq_f32): Likewise.
  (vmaxnmq_f64): Likewise.
  (vminnm_f32): Likewise.
  (vminnmq_f32): Likewise.
  (vminnmq_f64): Likewise.

gcc/testsuite/
  * gcc.target/aarch64/advsimd-intrinsics/binary_op_no64.inc: Support
  HAS_INTEGER_VARIANT.
  * gcc.target/aarch64/advsimd-intrinsics/vrhadd.c: Define HAS_INTEGER_VARIANT.
  * gcc.target/aarch64/advsimd-intrinsics/vhadd.c: Define HAS_INTEGER_VARIANT.
  * gcc.target/aarch64/advsimd-intrinsics/vhsub.c: Define HAS_INTEGER_VARIANT.
  * gcc.target/aarch64/advsimd-intrinsics/vmax.c: Define HAS_INTEGER_VARIANT.
  * gcc.target/aarch64/advsimd-intrinsics/vmin.c: Define HAS_INTEGER_VARIANT.
  * gcc.target/aarch64/advsimd-intrinsics/vmaxnm.c: New.
  * gcc.target/aarch64/advsimd-intrinsics/vminnm.c: New.

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 3e4740c460a335d8a4d5ce8b19fc311aa14a47d4..f1ad325f464f89c981cbdee8a8f6afafa938639a 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -244,13 +244,17 @@
   /* Implemented by <maxmin><mode>3.
      smax variants map to fmaxnm,
      smax_nan variants map to fmax.  */
-  BUILTIN_VDQIF (BINOP, smax, 3)
-  BUILTIN_VDQIF (BINOP, smin, 3)
+  BUILTIN_VDQ_BHSI (BINOP, smax, 3)
+  BUILTIN_VDQ_BHSI (BINOP, smin, 3)
   BUILTIN_VDQ_BHSI (BINOP, umax, 3)
   BUILTIN_VDQ_BHSI (BINOP, umin, 3)
   BUILTIN_VDQF (BINOP, smax_nan, 3)
   BUILTIN_VDQF (BINOP, smin_nan, 3)
 
+  /* Implemented by <fmaxmin><mode>3.  */
+  BUILTIN_VDQF (BINOP, fmax, 3)
+  BUILTIN_VDQF (BINOP, fmin, 3)
+
   /* Implemented by aarch64_<maxmin_uns>p<mode>.  */
   BUILTIN_VDQ_BHSI (BINOP, smaxp, 0)
   BUILTIN_VDQ_BHSI (BINOP, sminp, 0)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 475e200a683436af5026edafa568f16126f4340a..300e7951f47a30a5b125899b240913023b94de0b 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -17352,19 +17352,19 @@ vpminnms_f32 (float32x2_t a)
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
 vmaxnm_f32 (float32x2_t __a, float32x2_t __b)
 {
-  return __builtin_aarch64_smaxv2sf (__a, __b);
+  return __builtin_aarch64_fmaxv2sf (__a, __b);
 }
 
 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
 vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
 {
-  return __builtin_aarch64_smaxv4sf (__a, __b);
+  return __builtin_aarch64_fmaxv4sf (__a, __b);
 }
 
 __extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
 vmaxnmq_f64 (float64x2_t __a, float64x2_t __b)
 {
-  return __builtin_aarch64_smaxv2df (__a, __b);
+  return __builtin_aarch64_fmaxv2df (__a, __b);
 }
 
 /* vmaxv  */
@@ -17582,19 +17582,19 @@ vminq_u32 (uint32x4_t __a, uint32x4_t __b)
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
 vminnm_f32 (float32x2_t __a, float32x2_t __b)
 {
-  return __builtin_aarch64_sminv2sf (__a, __b);
+  return __builtin_aarch64_fminv2sf (__a, __b);
 }
 
 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
 vminnmq_f32 (float32x4_t __a, float32x4_t __b)
 {
-  return __builtin_aarch64_sminv4sf (__a, __b);
+  return __builtin_aarch64_fminv4sf (__a, __b);
 }
 
 __extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
 vminnmq_f64 (float64x2_t __a, float64x2_t __b)
 {
-  return __builtin_aarch64_sminv2df (__a, __b);
+  return __builtin_aarch64_fminv2df (__a, __b);
 }
 
 /* vminv  */
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/binary_op_no64.inc b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/binary_op_no64.inc
index 1eb9271b7f52aff96694f45a987c5368f2c9f95d..58082b2c95b2d6801ce5507070f8f828927adbb9 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/binary_op_no64.inc
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/binary_op_no64.inc
@@ -26,13 +26,16 @@ void FNNAME (INSN_NAME) (void)
 
   clean_results ();
 
+#ifdef HAS_INTEGER_VARIANT
   /* Initialize input "vector" from "buffer".  */
   TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+#endif
 #ifdef HAS_FLOAT_VARIANT
   VLOAD(vector, buffer, , float, f, 32, 2);
   VLOAD(vector, buffer, q, float, f, 32, 4);
 #endif
 
+#ifdef HAS_INTEGER_VARIANT
   /* Choose init value arbitrarily, will be used as comparison value.  */
   VDUP(vector2, , int, s, 8, 8, -13);
   VDUP(vector2, , int, s, 16, 4, -14);
@@ -46,6 +49,7 @@ void FNNAME (INSN_NAME) (void)
   VDUP(vector2, q, uint, u, 8, 16, 0xf9);
   VDUP(vector2, q, uint, u, 16, 8, 0xfff2);
   VDUP(vector2, q, uint, u, 32, 4, 0xfffffff1);
+#endif
 #ifdef HAS_FLOAT_VARIANT
   VDUP(vector2, , float, f, 32, 2, -15.5f);
   VDUP(vector2, q, float, f, 32, 4, -14.5f);
@@ -59,6 +63,7 @@ void FNNAME (INSN_NAME) (void)
 #define FLOAT_VARIANT(MACRO, VAR)
 #endif
 
+#ifdef HAS_INTEGER_VARIANT
 #define TEST_MACRO_NO64BIT_VARIANT_1_5(MACRO, VAR)	\
   MACRO(VAR, , int, s, 8, 8);				\
   MACRO(VAR, , int, s, 16, 4);				\
@@ -73,10 +78,15 @@ void FNNAME (INSN_NAME) (void)
   MACRO(VAR, q, uint, u, 16, 8);			\
   MACRO(VAR, q, uint, u, 32, 4);			\
   FLOAT_VARIANT(MACRO, VAR)
+#else
+#define TEST_MACRO_NO64BIT_VARIANT_1_5(MACRO, VAR)	\
+  FLOAT_VARIANT(MACRO, VAR)
+#endif
 
   /* Apply a binary operator named INSN_NAME.  */
   TEST_MACRO_NO64BIT_VARIANT_1_5(TEST_BINARY_OP, INSN_NAME);
 
+#ifdef HAS_INTEGER_VARIANT
   CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, "");
   CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, "");
   CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, "");
@@ -89,6 +99,7 @@ void FNNAME (INSN_NAME) (void)
   CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, "");
   CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, "");
   CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, "");
+#endif
 
 #ifdef HAS_FLOAT_VARIANT
   CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected, "");
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vhadd.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vhadd.c
index d8a09ca294eddcda9cc0b48db31f425e3a641c25..ebd7f58ebe3d2b1b91534f75ce00da457817ff4c 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vhadd.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vhadd.c
@@ -5,6 +5,8 @@
 #define INSN_NAME vhadd
 #define TEST_MSG "VHADD/VHADDQ"
 
+#define HAS_INTEGER_VARIANT
+
 /* Expected results.  */
 VECT_VAR_DECL(expected,int,8,8) [] = { 0xf1, 0xf2, 0xf2, 0xf3,
 				       0xf3, 0xf4, 0xf4, 0xf5 };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vhsub.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vhsub.c
index 0fe808028e4f5a938e0f62460d235e2364c0d77c..04279052b5f3c7785fae75d8edb5bc8eff141a1e 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vhsub.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vhsub.c
@@ -5,6 +5,8 @@
 #define INSN_NAME vhsub
 #define TEST_MSG "VHSUB/VHSUBQ"
 
+#define HAS_INTEGER_VARIANT
+
 /* Expected results.  */
 VECT_VAR_DECL(expected,int,8,8) [] = { 0xfe, 0xff, 0xff, 0x0,
 				       0x0, 0x1, 0x1, 0x2 };
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmax.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmax.c
index 830603dff6a328b919c7eced364cab3cbbeaad3f..4a0db99023f7cd5e3f43fb0f1e127632aee5ba91 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmax.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmax.c
@@ -5,6 +5,7 @@
 #define INSN_NAME vmax
 #define TEST_MSG "VMAX/VMAXQ"
 
+#define HAS_INTEGER_VARIANT
 #define HAS_FLOAT_VARIANT
 
 /* Expected results.  */
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmaxnm.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmaxnm.c
new file mode 100644
index 0000000000000000000000000000000000000000..12fd08c4601710cfecf454e731ec7fee6cb0f4b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmaxnm.c
@@ -0,0 +1,29 @@
+/* { dg-skip-if "ARM hasn't implemented these intrinsics" { arm*-*-* } } */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define INSN_NAME vmaxnm
+#define TEST_MSG "VMAXNM/VMAXNMQ"
+
+#define HAS_FLOAT_VARIANT
+
+/* Expected results.  */
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0xc1780000, 0xc1700000 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0xc1680000, 0xc1680000,
+					   0xc1600000, 0xc1500000 };
+/* Expected results with special FP values.  */
+VECT_VAR_DECL(expected_nan,hfloat,32,4) [] = { 0x3f800000, 0x3f800000,
+					       0x3f800000, 0x3f800000 };
+VECT_VAR_DECL(expected_mnan,hfloat,32,4) [] = { 0x3f800000, 0x3f800000,
+						0x3f800000, 0x3f800000 };
+VECT_VAR_DECL(expected_inf,hfloat,32,4) [] = { 0x7f800000, 0x7f800000,
+					       0x7f800000, 0x7f800000 };
+VECT_VAR_DECL(expected_minf,hfloat,32,4) [] = { 0x3f800000, 0x3f800000,
+						0x3f800000, 0x3f800000 };
+VECT_VAR_DECL(expected_zero1,hfloat,32,4) [] = { 0x00000000, 0x00000000,
+						 0x00000000, 0x00000000 };
+VECT_VAR_DECL(expected_zero2,hfloat,32,4) [] = { 0x00000000, 0x00000000,
+						 0x00000000, 0x00000000 };
+#include "binary_op_no64.inc"
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmin.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmin.c
index 8ad2703c3db661e0677e48eb7a2d60ba58c9cefe..8102edf345862a9732833e1e5fc0be05dad99e2b 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmin.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmin.c
@@ -5,6 +5,7 @@
 #define INSN_NAME vmin
 #define TEST_MSG "VMIN/VMINQ"
 
+#define HAS_INTEGER_VARIANT
 #define HAS_FLOAT_VARIANT
 
 /* Expected results.  */
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vminnm.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vminnm.c
new file mode 100644
index 0000000000000000000000000000000000000000..eb0d3179266e39d11d4d88b1919cd76ee85f3406
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vminnm.c
@@ -0,0 +1,29 @@
+/* { dg-skip-if "ARM hasn't implemented these intrinsics" { arm*-*-* } } */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define INSN_NAME vminnm
+#define TEST_MSG "VMINNM/VMINNMQ"
+
+#define HAS_FLOAT_VARIANT
+
+/* Expected results.  */
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0xc1800000, 0xc1780000 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
+					   0xc1680000, 0xc1680000 };
+/* Expected results with special FP values.  */
+VECT_VAR_DECL(expected_nan,hfloat,32,4) [] = { 0x3f800000, 0x3f800000,
+					       0x3f800000, 0x3f800000 };
+VECT_VAR_DECL(expected_mnan,hfloat,32,4) [] = { 0x3f800000, 0x3f800000,
+						0x3f800000, 0x3f800000 };
+VECT_VAR_DECL(expected_inf,hfloat,32,4) [] = { 0x3f800000, 0x3f800000,
+					       0x3f800000, 0x3f800000 };
+VECT_VAR_DECL(expected_minf,hfloat,32,4) [] = { 0xff800000, 0xff800000,
+						0xff800000, 0xff800000 };
+VECT_VAR_DECL(expected_zero1,hfloat,32,4) [] = { 0x80000000, 0x80000000,
+						 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_zero2,hfloat,32,4) [] = { 0x80000000, 0x80000000,
+						 0x80000000, 0x80000000 };
+#include "binary_op_no64.inc"
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrhadd.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrhadd.c
index eb820026ae7e709dc51244f2069f675c9fcb0d08..009dd82b2933f92083fbe7c481f820fe148250a4 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrhadd.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrhadd.c
@@ -5,6 +5,8 @@
 #define INSN_NAME vrhadd
 #define TEST_MSG "VRHADD/VRHADDQ"
 
+#define HAS_INTEGER_VARIANT
+
 /* Expected results.  */
 VECT_VAR_DECL(expected,int,8,8) [] = { 0xf2, 0xf2, 0xf3, 0xf3,
 				       0xf4, 0xf4, 0xf5, 0xf5 };
