Hi all,

I noticed that these scan-assembler tests fail when testing with -mabi=ilp32
because the 64-bit operation they expect is not generated for the 32-bit
'long' types in that configuration.

The easy fix is to change the 'long' types to 'long long' so that they are
always 64-bit. With this patch the tests pass both with and without
-mabi=ilp32.
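
For anyone curious about the failure mode, here is a minimal sketch (the
function below is made up for illustration, not one of the affected tests).
Under the ILP32 data model 'long' is only 32 bits wide, so the arithmetic is
done on w registers and the 64-bit x-register pattern the tests scan for
never appears; 'long long' stays 64-bit under both -mabi=lp64 and
-mabi=ilp32.

  /* Hypothetical example, not one of the patched tests.
     With 'long' instead of 'long long' this becomes 32-bit
     (w-register) arithmetic under -mabi=ilp32.  */
  long long
  example (long long x)
  {
    return x > 100 ? x + 4 : x + 3;
  }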

Ok for trunk?

Thanks,
Kyrill

2016-06-29  Kyrylo Tkachov  <kyrylo.tkac...@arm.com>

    * gcc.target/aarch64/cinc_common_1.c: Use long long instead of long.
    * gcc.target/aarch64/combine_bfi_1.c: Likewise.
    * gcc.target/aarch64/fmul_fcvt_1.c: Likewise.
    * gcc.target/aarch64/mult-synth_4.c: Likewise.
    * gcc.target/aarch64/target_attr_3.c: Likewise.
diff --git a/gcc/testsuite/gcc.target/aarch64/cinc_common_1.c b/gcc/testsuite/gcc.target/aarch64/cinc_common_1.c
index d04126331a7456dc267fc999f12366acbddf7927..f93364f74ba04e8dc11914a5a1bc342edf5e0751 100644
--- a/gcc/testsuite/gcc.target/aarch64/cinc_common_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/cinc_common_1.c
@@ -15,14 +15,14 @@ barsi (int x)
   return x > 100 ? x + 4 : x + 3;
 }
 
-long
-foodi (long x)
+long long
+foodi (long long x)
 {
   return x > 100 ? x - 2 : x - 1;
 }
 
-long
-bardi (long x)
+long long
+bardi (long long x)
 {
   return x > 100 ? x + 4 : x + 3;
 }
diff --git a/gcc/testsuite/gcc.target/aarch64/combine_bfi_1.c b/gcc/testsuite/gcc.target/aarch64/combine_bfi_1.c
index accf14410938a6c0119f7b9b5c76db38170e8e09..9cc3bdb3ddfc286bfb6c079aaba27fb5edb1806c 100644
--- a/gcc/testsuite/gcc.target/aarch64/combine_bfi_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/combine_bfi_1.c
@@ -25,8 +25,8 @@ f4 (int x, int y)
   return (x & ~0xff) | (y & 0xff);
 }
 
-long
-f5 (long x, long y)
+long long
+f5 (long long x, long long y)
 {
   return (x & ~0xffffffffull) | (y & 0xffffffff);
 }
diff --git a/gcc/testsuite/gcc.target/aarch64/fmul_fcvt_1.c b/gcc/testsuite/gcc.target/aarch64/fmul_fcvt_1.c
index 05e06e3729e59a73018ed647057ea42d9c9aa952..af155f2dda216bb7ace03209d3a8a809c4ee2f8d 100644
--- a/gcc/testsuite/gcc.target/aarch64/fmul_fcvt_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/fmul_fcvt_1.c
@@ -14,26 +14,26 @@ usffoo##__a (float x)	\
   return x * __a##.0f;	\
 }			\
 			\
-long			\
+long long		\
 lsffoo##__a (float x)	\
 {			\
   return x * __a##.0f;	\
 }			\
 			\
-unsigned long		\
+unsigned long long	\
 ulsffoo##__a (float x)	\
 {			\
   return x * __a##.0f;	\
 }
 
 #define FUNC_DEFD(__a)	\
-long			\
+long long		\
 dffoo##__a (double x)	\
 {			\
   return x * __a##.0;	\
 }			\
 			\
-unsigned long		\
+unsigned long long	\
 udffoo##__a (double x)	\
 {			\
   return x * __a##.0;	\
diff --git a/gcc/testsuite/gcc.target/aarch64/mult-synth_4.c b/gcc/testsuite/gcc.target/aarch64/mult-synth_4.c
index 4b607782f9e3257e3444b355d84970580f90a41e..f1283e84e200bc24986b6e0c57e33694e655b532 100644
--- a/gcc/testsuite/gcc.target/aarch64/mult-synth_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/mult-synth_4.c
@@ -1,10 +1,10 @@
 /* { dg-do compile } */
 /* { dg-options "-O2 -mcpu=cortex-a57 -save-temps" } */
 
-long
+long long
 foo (int x, int y)
 {
-   return (long)x * 6L;
+   return (long long)x * 6L;
 }
 
 /* { dg-final { scan-assembler-times "smull\tx\[0-9\]+, w\[0-9\]+, w\[0-9\]+" 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/target_attr_3.c b/gcc/testsuite/gcc.target/aarch64/target_attr_3.c
index 50e52520ef0d1082681661999cb034f01ffc3fab..7887208fd7d83723c8f5e21000ed4359a27d0dae 100644
--- a/gcc/testsuite/gcc.target/aarch64/target_attr_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/target_attr_3.c
@@ -5,12 +5,12 @@
    and the fix is applied once.  */
 
 __attribute__ ((target ("fix-cortex-a53-835769")))
-unsigned long
-test (unsigned long a, double b, unsigned long c,
-      unsigned long d, unsigned long *e)
+unsigned long long
+test (unsigned long long a, double b, unsigned long long c,
+      unsigned long long d, unsigned long long *e)
 {
   double result;
-  volatile unsigned long tmp = *e;
+  volatile unsigned long long tmp = *e;
   __asm__ __volatile ("// %0, %1"
 			: "=w" (result)
 			: "0" (b)
@@ -18,12 +18,12 @@ test (unsigned long a, double b, unsigned long c,
   return c * d + d;
 }
 
-unsigned long
-test2 (unsigned long a, double b, unsigned long c,
-       unsigned long d, unsigned long *e)
+unsigned long long
+test2 (unsigned long long a, double b, unsigned long long c,
+       unsigned long long d, unsigned long long *e)
 {
   double result;
-  volatile unsigned long tmp = *e;
+  volatile unsigned long long tmp = *e;
   __asm__ __volatile ("// %0, %1"
 			: "=w" (result)
 			: "0" (b)
