https://gcc.gnu.org/g:71f6540fc54036cc8f71db497cc22816f794549a

commit r15-7483-g71f6540fc54036cc8f71db497cc22816f794549a
Author: Jeff Law <j...@ventanamicro.com>
Date:   Tue Feb 11 16:55:03 2025 -0700

    [PR target/115478] Accept ADD, IOR or XOR when combining objects with no bits in common
    
    So the change to prefer ADD over IOR for combining two objects with no
    bits in common is (IMHO) generally good.  It has some minor fallout.
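
    For reference, the identity being exploited: if (a & b) == 0 then no
    carries can occur, so a + b, a | b and a ^ b all produce the same value.
    A minimal C illustration (not part of the patch, just for context):

        unsigned int
        combine_halves (unsigned int x, unsigned int y)
        {
          unsigned int lo = x & 0x0000ffffu;   /* low 16 bits only  */
          unsigned int hi = y & 0xffff0000u;   /* high 16 bits only */
          /* (lo & hi) == 0, so lo + hi == (lo | hi) == (lo ^ hi).  */
          return lo + hi;
        }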
    
    In particular, the aarch64 port (and I suspect others) has patterns that
    recognize IOR, but not PLUS or XOR, for these cases, and thus tests that
    expected the IOR form to be optimized are no longer optimizing.
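
    For example, the *extr<mode>5_insn patterns match an IOR of two shifts
    whose counts sum to the mode's bitsize.  A hand-written example like the
    one below (illustrative only, not taken from the testsuite) used to
    combine to EXTR via IOR, but may now reach combine as PLUS or XOR:

        unsigned long
        extr_like (unsigned long a, unsigned long b)
        {
          /* The two shifted values share no bits, so combine may
             canonicalize the IOR to PLUS; without the iterator the EXTR
             pattern would then fail to match.  */
          return (a << 8) | (b >> 56);
        }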
    
    Roger suggested using a code iterator for this purpose.  Richard S.
    suggested a new match operator to cover those cases.
    
    I really like the match operator idea, but as Richard S. notes in the PR,
    it would require either not validating the "no bits in common" property,
    which dramatically reduces its utility IMHO, or additional work to allow
    consistent results without polluting the nonzero-bits cache.
    
    So this patch goes back to Roger's idea of just using a code iterator in
    the aarch64 backend (and presumably anywhere else we see this popping up).
    
    Bootstrapped and regression tested on aarch64-linux-gnu where it fixes
    bitint-args.c (as expected).
    
            PR target/115478
    gcc/
            * config/aarch64/iterators.md (any_or_plus): New code iterator.
            * config/aarch64/aarch64.md (extr<mode>5_insn): Use any_or_plus.
            (extr<mode>5_insn_alt, extrsi5_insn_uxtw): Likewise.
            (extrsi5_insn_uxtw_alt, extrsi5_insn_di): Likewise.
    
    gcc/testsuite/
            * gcc.target/aarch64/bitint-args.c: Update expected output.

Diff:
---
 gcc/config/aarch64/aarch64.md                  | 50 ++++++++++++++------------
 gcc/config/aarch64/iterators.md                |  4 +++
 gcc/testsuite/gcc.target/aarch64/bitint-args.c |  4 +--
 3 files changed, 33 insertions(+), 25 deletions(-)

diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 071058dbeb33..cfe730f3732c 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -6194,10 +6194,11 @@
 
 (define_insn "*extr<mode>5_insn"
   [(set (match_operand:GPI 0 "register_operand" "=r")
-       (ior:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
-                            (match_operand 3 "const_int_operand" "n"))
-                (lshiftrt:GPI (match_operand:GPI 2 "register_operand" "r")
-                              (match_operand 4 "const_int_operand" "n"))))]
+       (any_or_plus:GPI
+         (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
+                     (match_operand 3 "const_int_operand" "n"))
+         (lshiftrt:GPI (match_operand:GPI 2 "register_operand" "r")
+                     (match_operand 4 "const_int_operand" "n"))))]
   "UINTVAL (operands[3]) < GET_MODE_BITSIZE (<MODE>mode) &&
   (UINTVAL (operands[3]) + UINTVAL (operands[4]) == GET_MODE_BITSIZE (<MODE>mode))"
   "extr\\t%<w>0, %<w>1, %<w>2, %4"
@@ -6208,10 +6209,11 @@
 ;; so we have to match both orderings.
 (define_insn "*extr<mode>5_insn_alt"
   [(set (match_operand:GPI 0 "register_operand" "=r")
-       (ior:GPI  (lshiftrt:GPI (match_operand:GPI 2 "register_operand" "r")
-                               (match_operand 4 "const_int_operand" "n"))
-                 (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
-                             (match_operand 3 "const_int_operand" "n"))))]
+       (any_or_plus:GPI
+         (lshiftrt:GPI (match_operand:GPI 2 "register_operand" "r")
+                       (match_operand 4 "const_int_operand" "n"))
+         (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
+                     (match_operand 3 "const_int_operand" "n"))))]
   "UINTVAL (operands[3]) < GET_MODE_BITSIZE (<MODE>mode)
    && (UINTVAL (operands[3]) + UINTVAL (operands[4])
        == GET_MODE_BITSIZE (<MODE>mode))"
@@ -6223,10 +6225,11 @@
 (define_insn "*extrsi5_insn_uxtw"
   [(set (match_operand:DI 0 "register_operand" "=r")
        (zero_extend:DI
-        (ior:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
-                           (match_operand 3 "const_int_operand" "n"))
-                (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
-                             (match_operand 4 "const_int_operand" "n")))))]
+        (any_or_plus:SI
+          (ashift:SI (match_operand:SI 1 "register_operand" "r")
+                     (match_operand 3 "const_int_operand" "n"))
+          (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+                     (match_operand 4 "const_int_operand" "n")))))]
   "UINTVAL (operands[3]) < 32 &&
    (UINTVAL (operands[3]) + UINTVAL (operands[4]) == 32)"
   "extr\\t%w0, %w1, %w2, %4"
@@ -6236,10 +6239,11 @@
 (define_insn "*extrsi5_insn_uxtw_alt"
   [(set (match_operand:DI 0 "register_operand" "=r")
        (zero_extend:DI
-        (ior:SI (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
-                              (match_operand 4 "const_int_operand" "n"))
-                (ashift:SI (match_operand:SI 1 "register_operand" "r")
-                           (match_operand 3 "const_int_operand" "n")))))]
+        (any_or_plus:SI
+          (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+                       (match_operand 4 "const_int_operand" "n"))
+          (ashift:SI (match_operand:SI 1 "register_operand" "r")
+                     (match_operand 3 "const_int_operand" "n")))))]
   "UINTVAL (operands[3]) < 32 &&
    (UINTVAL (operands[3]) + UINTVAL (operands[4]) == 32)"
   "extr\\t%w0, %w1, %w2, %4"
@@ -6248,13 +6252,13 @@
 
 (define_insn "*extrsi5_insn_di"
   [(set (match_operand:SI 0 "register_operand" "=r")
-       (ior:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
-                          (match_operand 3 "const_int_operand" "n"))
-               (match_operator:SI 6 "subreg_lowpart_operator"
-                 [(zero_extract:DI
-                    (match_operand:DI 2 "register_operand" "r")
-                    (match_operand 5 "const_int_operand" "n")
-                    (match_operand 4 "const_int_operand" "n"))])))]
+       (any_or_plus:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
+                                   (match_operand 3 "const_int_operand" "n"))
+                        (match_operator:SI 6 "subreg_lowpart_operator"
+                         [(zero_extract:DI
+                            (match_operand:DI 2 "register_operand" "r")
+                            (match_operand 5 "const_int_operand" "n")
+                            (match_operand 4 "const_int_operand" "n"))])))]
   "UINTVAL (operands[3]) < 32
    && UINTVAL (operands[3]) + UINTVAL (operands[4]) == 32
    && INTVAL (operands[3]) == INTVAL (operands[5])"
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 9fbd74939882..5bfd6e7d362a 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -2657,6 +2657,10 @@
 ;; Code iterator for logical operations
 (define_code_iterator LOGICAL [and ior xor])
 
+;; Code iterator for operations that are equivalent when the
+;; two input operands are known to have disjoint bits set.
+(define_code_iterator any_or_plus [plus ior xor])
+
 ;; LOGICAL with plus, for when | gets converted to +.
 (define_code_iterator LOGICAL_OR_PLUS [and ior xor plus])
 
diff --git a/gcc/testsuite/gcc.target/aarch64/bitint-args.c b/gcc/testsuite/gcc.target/aarch64/bitint-args.c
index e7e1099c303b..06b47246fa50 100644
--- a/gcc/testsuite/gcc.target/aarch64/bitint-args.c
+++ b/gcc/testsuite/gcc.target/aarch64/bitint-args.c
@@ -69,7 +69,7 @@ CHECK_ARG(65)
 ** f65:
 **     extr    (x[0-9]+), x3, x2, 1
 **     and     (x[0-9]+), x2, 1
-**     orr     (x[0-9]+), \2, \1, lsl 1
+**     add     (x[0-9]+), \2, \1, lsl 1
 **     asr     (x[0-9]+), \1, 63
 **     stp     \3, \4, \[x0\]
 **     ret
@@ -80,7 +80,7 @@ CHECK_ARG(127)
 ** f127:
 **     extr    (x[0-9]+), x3, x2, 63
 **     and     (x[0-9]+), x2, 9223372036854775807
-**     orr     (x[0-9]+), \2, \1, lsl 63
+**     add     (x[0-9]+), \2, \1, lsl 63
 **     asr     (x[0-9]+), \1, 1
 **     stp     \3, \4, \[x0\]
 **     ret
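
A rough sketch of the kind of function the updated scan patterns come from
(the real test generates these with a CHECK_ARG macro, so the exact name and
signature here are assumptions):

    void
    f65 (_BitInt(65) *p, _BitInt(65) x)
    {
      /* Storing the argument reassembles the top limb with a shift plus a
         disjoint-bits combination, which is now emitted as "add" rather
         than "orr".  */
      *p = x;
    }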
