Hi All,

SVE has a much bigger immediate encoding range for bitmasks than Advanced
SIMD does.  So on an SVE-capable system, when an Advanced SIMD inclusive-OR
by immediate would otherwise require reloading the constant, we emit an
unpredicated SVE ORR instead.

This improves both speed and code size.
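
For example, OR-ing the sign bit into a DFmode value (as in the fneg-abs_2.c
test updated below) currently goes through a GP register and a NEON ORR,
whereas with this patch it becomes a single instruction.  Register numbers
are shown for illustration; the tests match them with regexps:

  Before (Advanced SIMD, constant reloaded via x0):
	mov     x0, -9223372036854775808
	fmov    d0, x0
	orr     v0.8b, v0.8b, v0.8b

  After (unpredicated SVE ORR by immediate):
	orr     z0.d, z0.d, #0x8000000000000000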

Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

        PR tree-optimization/109154
        * config/aarch64/aarch64.md (<optab><mode>3): Convert to the new
        syntax and add an SVE split case.
        * config/aarch64/iterators.md (VCONV, vconv): New.

gcc/testsuite/ChangeLog:

        PR tree-optimization/109154
        * gcc.target/aarch64/sve/fneg-abs_2.c: Updated.
        * gcc.target/aarch64/sve/fneg-abs_4.c: Updated.

--- inline copy of patch -- 
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 60c92213c75a2a4c18a6b59ae52fe45d1e872718..377c5cafedd43d8d1320489a36267cc6e5f15239 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -4551,17 +4551,27 @@ (define_insn_and_split "*aarch64_and<mode>_imm2"
   }
 )
 
-(define_insn "<optab><mode>3"
-  [(set (match_operand:GPI 0 "register_operand" "=r,rk,w")
-       (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "%r,r,w")
-                    (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>,w")))]
-  ""
-  "@
-  <logical>\\t%<w>0, %<w>1, %<w>2
-  <logical>\\t%<w>0, %<w>1, %2
-  <logical>\\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
-  [(set_attr "type" "logic_reg,logic_imm,neon_logic")
-   (set_attr "arch" "*,*,simd")]
+(define_insn_and_split "<optab><mode>3"
+  [(set (match_operand:GPI 0 "register_operand")
+       (LOGICAL:GPI (match_operand:GPI 1 "register_operand")
+                    (match_operand:GPI 2 "aarch64_logical_operand")))]
+  ""
+  {@ [cons: =0, 1, 2; attrs: type, arch]
+     [r , %r, r       ; logic_reg , *   ] <logical>\t%<w>0, %<w>1, %<w>2
+     [rk, r , <lconst>; logic_imm , *   ] <logical>\t%<w>0, %<w>1, %2
+     [w , 0 , <lconst>; *         , sve ] #
+     [w , w , w       ; neon_logic, simd] <logical>\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+  }
+  "&& TARGET_SVE && rtx_equal_p (operands[0], operands[1])
+   && satisfies_constraint_<lconst> (operands[2])
+   && FP_REGNUM_P (REGNO (operands[0]))"
+  [(const_int 0)]
+  {
+    rtx op1 = lowpart_subreg (<VCONV>mode, operands[1], <MODE>mode);
+    rtx op2 = gen_const_vec_duplicate (<VCONV>mode, operands[2]);
+    emit_insn (gen_<optab><vconv>3 (op1, op1, op2));
+    DONE;
+  }
 )
 
 ;; zero_extend version of above
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index d17becc37e230684beaee3c69e2a0f0ce612eda5..568cd5d1a3a9e00475376177ad13de72609df3d8 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -1432,6 +1432,11 @@ (define_mode_attr VCONQ [(V8QI "V16QI") (V16QI "V16QI")
                         (HI   "V8HI") (QI   "V16QI")
                         (SF   "V4SF") (DF   "V2DF")])
 
+;; SVE vector container modes corresponding to the inner or scalar source
+;; modes, used when operating on the lower part of an SVE register.
+(define_mode_attr VCONV [(SI "VNx4SI") (DI "VNx2DI")])
+(define_mode_attr vconv [(SI "vnx4si") (DI "vnx2di")])
+
 ;; Half modes of all vector modes.
 (define_mode_attr VHALF [(V8QI "V4QI")  (V16QI "V8QI")
                         (V4HI "V2HI")  (V8HI  "V4HI")
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
index a60cd31b9294af2dac69eed1c93f899bd5c78fca..fe9f27bf91b8fb18205a5891a5d5e847a5d88e4b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
@@ -7,8 +7,7 @@
 
 /*
 ** f1:
-**     movi    v[0-9]+.2s, 0x80, lsl 24
-**     orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+**     orr     z0.s, z0.s, #0x80000000
 **     ret
 */
 float32_t f1 (float32_t a)
@@ -18,9 +17,7 @@ float32_t f1 (float32_t a)
 
 /*
 ** f2:
-**     mov     x0, -9223372036854775808
-**     fmov    d[0-9]+, x0
-**     orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+**     orr     z0.d, z0.d, #0x8000000000000000
 **     ret
 */
 float64_t f2 (float64_t a)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_4.c b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_4.c
index 21f2a8da2a5d44e3d01f6604ca7be87e3744d494..707bcb0b6c53e212b55a255f500e9e548e9ccd80 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_4.c
@@ -6,9 +6,7 @@
 
 /*
 ** negabs:
-**     mov     x0, -9223372036854775808
-**     fmov    d[0-9]+, x0
-**     orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+**     orr     z0.d, z0.d, #0x8000000000000000
 **     ret
 */
 double negabs (double x)
@@ -22,8 +20,7 @@ double negabs (double x)
 
 /*
 ** negabsf:
-**     movi    v[0-9]+.2s, 0x80, lsl 24
-**     orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+**     orr     z0.s, z0.s, #0x80000000
 **     ret
 */
 float negabsf (float x)