Christoph Müllner <christoph.muell...@vrull.eu> writes:
> The avoid-store-forwarding pass is disabled by default and therefore
> at risk of bit-rotting.  This patch addresses this by enabling
> the pass at O2 or higher.
>
> The assembly patterns in `bitfield-bitint-abi-align16.c` and
> `bitfield-bitint-abi-align8.c` have been updated to account for
> the ASF transformations.
>
> This was bootstrapped on x86-64 and AArch64 and showed no
> regressions in the test suite (--enable-checking=yes,extra and
> all languages).
>
> gcc/ChangeLog:
>
>       * doc/invoke.texi: Document asf as an O2-enabled option.
>       * opts.cc: Enable asf at O2.
>
> gcc/testsuite/ChangeLog:
>
>       * gcc.target/aarch64/bitfield-bitint-abi-align16.c:
>       Modify testcases to account for the asf transformations.
>       * gcc.target/aarch64/bitfield-bitint-abi-align8.c: Likewise.
>       * gcc.target/aarch64/avoid-store-forwarding-6.c: New test.

Thanks for doing this.  Just a question about the testsuite updates:

> diff --git a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c 
> b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
> index c29a230a771..b4501d81c45 100644
> --- a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
> +++ b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
> @@ -91,10 +91,11 @@
>  **   mov     (w[0-9]+), 0
>  **   bfi     \3, w\2, 0, 1
>  **   and     x3, x\2, 9223372036854775807
> -**   mov     x2, 0
> +**   mov     (x[0-9]+), 0
> +**   bfi     \4, (x[0-9]+), 0, 8
>  **   str     xzr, \[sp\]
>  **   strb    \3, \[sp\]
> -**   ldr     x1, \[sp\]
> +**   mov     \5, 0
>  **   add     sp, sp, 16
>  **   b       fp
>  */

This looks odd, in that \5 is used to match an output, whereas:

> +**   bfi     \4, (x[0-9]+), 0, 8

allows any input.  I would have expected (x[0-9]+) to only be used for
temporary results, with hard-coded registers being used for ABI-facing
results.  Similarly, all uses were previously \n or hard-coded registers.

Thanks,
Richard

> @@ -183,19 +184,21 @@
>  **   sxtw    (x[0-9]+), w1
>  **   mov     x0, \2
>  **   and     x7, \2, 9223372036854775807
> +**   mov     (x[0-9]+), 0
>  **   mov     (w[0-9]+), 0
> -**   bfi     \3, w\1, 0, 1
> +**   bfi     \4, w\1, 0, 1
> +**   mov     (x[0-9]+), \3
> +**   bfi     \5, (x[0-9]+), 0, 8
> +**   stp     x7, \5, \[sp\]
>  **   strb    wzr, \[sp, 16\]
>  **   mov     x6, x7
>  **   mov     x5, x7
>  **   mov     x4, x7
> -**   mov     x3, x7
> -**   mov     x2, x7
> -**   str     xzr, \[sp, 48\]
> -**   strb    \3, \[sp, 48\]
> -**   ldr     (x[0-9]+), \[sp, 48\]
> -**   stp     x7, \4, \[sp\]
> -**   mov     x1, x7
> +**   mov     \5, x7
> +**   str     \3, \[sp, 48\]
> +**   strb    \4, \[sp, 48\]
> +**   mov     \3, x7
> +**   mov     \6, x7
>  **   bl      fp_stack
>  **   sbfx    x0, x0, 0, 63
>  **...
> @@ -343,10 +346,11 @@
>  **   mov     w0, w1
>  **   mov     (w[0-9]+), 0
>  **   bfi     \2, w\1, 0, 1
> -**   mov     x2, 0
> +**   mov     (x[0-9]+), 0
> +**   bfi     \3, (x[0-9]+), 0, 8
>  **   str     xzr, \[sp\]
>  **   strb    \2, \[sp\]
> -**   ldr     x1, \[sp\]
> +**   mov     \4, 0
>  **...
>  **   b       fp_stdarg
>  */
> diff --git a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c 
> b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
> index 13ffbf416ca..a9ac917d3a6 100644
> --- a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
> +++ b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
> @@ -91,10 +91,11 @@
>  **   mov     (w[0-9]+), 0
>  **   bfi     \3, w\2, 0, 1
>  **   and     x3, x\2, 9223372036854775807
> -**   mov     x2, 0
> +**   mov     (x[0-9]+), 0
> +**   bfi     \4, (x[0-9]+), 0, 8
>  **   str     xzr, \[sp\]
>  **   strb    \3, \[sp\]
> -**   ldr     x1, \[sp\]
> +**   mov     \5, 0
>  **   add     sp, sp, 16
>  **   b       fp
>  */
> @@ -183,19 +184,21 @@
>  **   sxtw    (x[0-9]+), w1
>  **   mov     x0, \2
>  **   and     x7, \2, 9223372036854775807
> +**   mov     (x[0-9]+), 0
>  **   mov     (w[0-9]+), 0
> -**   bfi     \3, w\1, 0, 1
> +**   bfi     \4, w\1, 0, 1
> +**   mov     (x[0-9]+), \3
> +**   bfi     \5, (x[0-9]+), 0, 8
> +**   stp     x7, \5, \[sp\]
>  **   strb    wzr, \[sp, 16\]
>  **   mov     x6, x7
>  **   mov     x5, x7
>  **   mov     x4, x7
> -**   mov     x3, x7
> -**   mov     x2, x7
> -**   str     xzr, \[sp, 48\]
> -**   strb    \3, \[sp, 48\]
> -**   ldr     (x[0-9]+), \[sp, 48\]
> -**   stp     x7, \4, \[sp\]
> -**   mov     x1, x7
> +**   mov     \5, x7
> +**   str     \3, \[sp, 48\]
> +**   strb    \4, \[sp, 48\]
> +**   mov     \3, x7
> +**   mov     \6, x7
>  **   bl      fp_stack
>  **   sbfx    x0, x0, 0, 63
>  **...
> @@ -345,10 +348,11 @@
>  **   mov     w0, w1
>  **   mov     (w[0-9]+), 0
>  **   bfi     \2, w\1, 0, 1
> -**   mov     x2, 0
> +**   mov     (x[0-9]+), 0
> +**   bfi     \3, (x[0-9]+), 0, 8
>  **   str     xzr, \[sp\]
>  **   strb    \2, \[sp\]
> -**   ldr     x1, \[sp\]
> +**   mov     \4, 0
>  **...
>  **   b       fp_stdarg
>  */

Reply via email to