On Thu, Jul 18, 2024 at 3:46 AM Haochen Jiang <[email protected]> wrote:
>
> Hi all,
>
> For compile tests, we should generate valid asm except for special purposes.
> Fix the compile tests that generate invalid asm.
>
> Regtested on x86-64-pc-linux-gnu. Ok for trunk?
>
> Thx,
> Haochen
>
> gcc/testsuite/ChangeLog:
>
> * gcc.target/i386/apx-egprs-names.c: Use ax for short and
> al for char instead of eax.
> * gcc.target/i386/avx512bw-kandnq-1.c: Do not run the test
> under -m32, since kmovq with a general-purpose register
> operand is invalid there. Use long long so that a 64-bit
> register is used for kmovq instead of a 32-bit register.
> * gcc.target/i386/avx512bw-kandq-1.c: Ditto.
> * gcc.target/i386/avx512bw-knotq-1.c: Ditto.
> * gcc.target/i386/avx512bw-korq-1.c: Ditto.
> * gcc.target/i386/avx512bw-kshiftlq-1.c: Ditto.
> * gcc.target/i386/avx512bw-kshiftrq-1.c: Ditto.
> * gcc.target/i386/avx512bw-kxnorq-1.c: Ditto.
> * gcc.target/i386/avx512bw-kxorq-1.c: Ditto.
> ---
> gcc/testsuite/gcc.target/i386/apx-egprs-names.c | 4 ++--
> gcc/testsuite/gcc.target/i386/avx512bw-kandnq-1.c | 6 +++---
> gcc/testsuite/gcc.target/i386/avx512bw-kandq-1.c | 6 +++---
> gcc/testsuite/gcc.target/i386/avx512bw-knotq-1.c | 4 ++--
> gcc/testsuite/gcc.target/i386/avx512bw-korq-1.c | 6 +++---
> gcc/testsuite/gcc.target/i386/avx512bw-kshiftlq-1.c | 4 ++--
> gcc/testsuite/gcc.target/i386/avx512bw-kshiftrq-1.c | 4 ++--
> gcc/testsuite/gcc.target/i386/avx512bw-kxnorq-1.c | 6 +++---
> gcc/testsuite/gcc.target/i386/avx512bw-kxorq-1.c | 6 +++---
> 9 files changed, 23 insertions(+), 23 deletions(-)
>
> diff --git a/gcc/testsuite/gcc.target/i386/apx-egprs-names.c
> b/gcc/testsuite/gcc.target/i386/apx-egprs-names.c
> index f0517e47c33..5b342aa385b 100644
> --- a/gcc/testsuite/gcc.target/i386/apx-egprs-names.c
> +++ b/gcc/testsuite/gcc.target/i386/apx-egprs-names.c
> @@ -12,6 +12,6 @@ void foo ()
> register char d __asm ("r28");
> __asm__ __volatile__ ("mov %0, %%rax" : : "r" (a) : "rax");
> __asm__ __volatile__ ("mov %0, %%eax" : : "r" (b) : "eax");
> - __asm__ __volatile__ ("mov %0, %%eax" : : "r" (c) : "eax");
> - __asm__ __volatile__ ("mov %0, %%eax" : : "r" (d) : "eax");
> + __asm__ __volatile__ ("mov %0, %%ax" : : "r" (c) : "ax");
> + __asm__ __volatile__ ("mov %0, %%al" : : "r" (d) : "al");
You can use the insn suffix (movq, movl, movw and movb) to make the
asm even more robust.
Uros.
> }
> diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-kandnq-1.c
> b/gcc/testsuite/gcc.target/i386/avx512bw-kandnq-1.c
> index e8b7a5f9aa2..f9f03c90782 100644
> --- a/gcc/testsuite/gcc.target/i386/avx512bw-kandnq-1.c
> +++ b/gcc/testsuite/gcc.target/i386/avx512bw-kandnq-1.c
> @@ -1,4 +1,4 @@
> -/* { dg-do compile } */
> +/* { dg-do compile { target { ! ia32 } } } */
> /* { dg-options "-mavx512bw -O2" } */
> /* { dg-final { scan-assembler-times "kandnq\[
> \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
>
> @@ -10,8 +10,8 @@ avx512bw_test ()
> __mmask64 k1, k2, k3;
> volatile __m512i x = _mm512_setzero_si512 ();
>
> - __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
> - __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
> + __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
> + __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2ULL) );
>
> k3 = _kandn_mask64 (k1, k2);
> x = _mm512_mask_add_epi8 (x, k3, x, x);
> diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-kandq-1.c
> b/gcc/testsuite/gcc.target/i386/avx512bw-kandq-1.c
> index a1aaed67c66..6ad836087ad 100644
> --- a/gcc/testsuite/gcc.target/i386/avx512bw-kandq-1.c
> +++ b/gcc/testsuite/gcc.target/i386/avx512bw-kandq-1.c
> @@ -1,4 +1,4 @@
> -/* { dg-do compile } */
> +/* { dg-do compile { target { ! ia32 } } } */
> /* { dg-options "-mavx512bw -O2" } */
> /* { dg-final { scan-assembler-times "kandq\[
> \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
>
> @@ -10,8 +10,8 @@ avx512bw_test ()
> __mmask64 k1, k2, k3;
> volatile __m512i x = _mm512_setzero_epi32();
>
> - __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
> - __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
> + __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
> + __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2ULL) );
>
> k3 = _kand_mask64 (k1, k2);
> x = _mm512_mask_add_epi8 (x, k3, x, x);
> diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-knotq-1.c
> b/gcc/testsuite/gcc.target/i386/avx512bw-knotq-1.c
> index deb65795760..341bbc03847 100644
> --- a/gcc/testsuite/gcc.target/i386/avx512bw-knotq-1.c
> +++ b/gcc/testsuite/gcc.target/i386/avx512bw-knotq-1.c
> @@ -1,4 +1,4 @@
> -/* { dg-do compile } */
> +/* { dg-do compile { target { ! ia32 } } } */
> /* { dg-options "-mavx512bw -O2" } */
> /* { dg-final { scan-assembler-times "knotq\[
> \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
>
> @@ -10,7 +10,7 @@ avx512bw_test ()
> __mmask64 k1, k2;
> volatile __m512i x = _mm512_setzero_si512 ();
>
> - __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (45) );
> + __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (45ULL) );
>
> k2 = _knot_mask64 (k1);
> x = _mm512_mask_add_epi8 (x, k1, x, x);
> diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-korq-1.c
> b/gcc/testsuite/gcc.target/i386/avx512bw-korq-1.c
> index 89753f02340..6e211491224 100644
> --- a/gcc/testsuite/gcc.target/i386/avx512bw-korq-1.c
> +++ b/gcc/testsuite/gcc.target/i386/avx512bw-korq-1.c
> @@ -1,4 +1,4 @@
> -/* { dg-do compile } */
> +/* { dg-do compile { target { ! ia32 } } } */
> /* { dg-options "-mavx512bw -O2" } */
> /* { dg-final { scan-assembler-times "korq\[
> \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
>
> @@ -10,8 +10,8 @@ avx512bw_test ()
> __mmask64 k1, k2, k3;
> volatile __m512i x = _mm512_setzero_si512 ();
>
> - __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
> - __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
> + __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
> + __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2ULL) );
>
> k3 = _kor_mask64 (k1, k2);
> x = _mm512_mask_add_epi8 (x, k3, x, x);
> diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-kshiftlq-1.c
> b/gcc/testsuite/gcc.target/i386/avx512bw-kshiftlq-1.c
> index 70a4b676a18..dec2251b9f4 100644
> --- a/gcc/testsuite/gcc.target/i386/avx512bw-kshiftlq-1.c
> +++ b/gcc/testsuite/gcc.target/i386/avx512bw-kshiftlq-1.c
> @@ -1,4 +1,4 @@
> -/* { dg-do compile } */
> +/* { dg-do compile { target { ! ia32 } } } */
> /* { dg-options "-mavx512bw -O2" } */
> /* { dg-final { scan-assembler-times "kshiftlq\[
> \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
>
> @@ -11,7 +11,7 @@ avx512bw_test ()
> unsigned int i = 5;
> volatile __m512i x = _mm512_setzero_si512 ();
>
> - __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
> + __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
>
> k2 = _kshiftli_mask64 (k1, i);
> x = _mm512_mask_add_epi8 (x, k2, x, x);
> diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-kshiftrq-1.c
> b/gcc/testsuite/gcc.target/i386/avx512bw-kshiftrq-1.c
> index b0051b5ecf9..f1bb4ca9b93 100644
> --- a/gcc/testsuite/gcc.target/i386/avx512bw-kshiftrq-1.c
> +++ b/gcc/testsuite/gcc.target/i386/avx512bw-kshiftrq-1.c
> @@ -1,4 +1,4 @@
> -/* { dg-do compile } */
> +/* { dg-do compile { target { ! ia32 } } } */
> /* { dg-options "-mavx512bw -O2" } */
> /* { dg-final { scan-assembler-times "kshiftrq\[
> \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
>
> @@ -11,7 +11,7 @@ avx512bw_test ()
> unsigned int i = 5;
> volatile __m512i x = _mm512_setzero_si512 ();
>
> - __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
> + __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
>
> k2 = _kshiftri_mask64 (k1, i);
> x = _mm512_mask_add_epi8 (x, k2, x, x);
> diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-kxnorq-1.c
> b/gcc/testsuite/gcc.target/i386/avx512bw-kxnorq-1.c
> index ba72e1ff086..bdcaccee7ee 100644
> --- a/gcc/testsuite/gcc.target/i386/avx512bw-kxnorq-1.c
> +++ b/gcc/testsuite/gcc.target/i386/avx512bw-kxnorq-1.c
> @@ -1,4 +1,4 @@
> -/* { dg-do compile } */
> +/* { dg-do compile { target { ! ia32 } } } */
> /* { dg-options "-mavx512bw -O2" } */
> /* { dg-final { scan-assembler-times "kxnorq\[
> \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
>
> @@ -10,8 +10,8 @@ avx512bw_test ()
> __mmask64 k1, k2, k3;
> volatile __m512i x = _mm512_setzero_si512 ();
>
> - __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
> - __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
> + __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
> + __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2ULL) );
>
> k3 = _kxnor_mask64 (k1, k2);
> x = _mm512_mask_add_epi8 (x, k3, x, x);
> diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-kxorq-1.c
> b/gcc/testsuite/gcc.target/i386/avx512bw-kxorq-1.c
> index abf42809651..1120371cd5b 100644
> --- a/gcc/testsuite/gcc.target/i386/avx512bw-kxorq-1.c
> +++ b/gcc/testsuite/gcc.target/i386/avx512bw-kxorq-1.c
> @@ -1,4 +1,4 @@
> -/* { dg-do compile } */
> +/* { dg-do compile { target { ! ia32 } } } */
> /* { dg-options "-mavx512bw -O2" } */
> /* { dg-final { scan-assembler-times "kxorq\[
> \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
>
> @@ -10,8 +10,8 @@ avx512bw_test ()
> __mmask64 k1, k2, k3;
> volatile __m512i x = _mm512_setzero_si512 ();
>
> - __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
> - __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
> + __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
> + __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2ULL) );
>
> k3 = _kxor_mask64 (k1, k2);
> x = _mm512_mask_add_epi8 (x, k3, x, x);
> --
> 2.31.1
>