Hi Tamar,

> -----Original Message-----
> From: Tamar Christina <tamar.christ...@arm.com>
> Sent: Thursday, January 4, 2024 11:06 AM
> To: Tamar Christina <tamar.christ...@arm.com>; gcc-patches@gcc.gnu.org
> Cc: nd <n...@arm.com>; Ramana Radhakrishnan <ramana.radhakrish...@arm.com>;
> Richard Earnshaw <richard.earns...@arm.com>; ni...@redhat.com;
> Kyrylo Tkachov <kyrylo.tkac...@arm.com>
> Subject: RE: [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
>
> Ping,
>
> ---
>
> Hi All,
>
> This adds an implementation of the conditional branch optab for AArch32.
> The previous version only allowed operand 0, but it looks like cbranch
> expansion does not check with the target, so we have to implement them all.
>
> I therefore did not commit it.  This is a larger version.  I've also dropped
> the MVE version because the mid-end can rewrite the comparison into comparing
> two predicates without checking with the backend.  Since MVE only has one
> predicate register this would need to go through memory and two MRS calls.
> It's unlikely to be beneficial, so that's for GCC 15 when I can fix the
> middle-end.
>
> The cases where AArch32 is skipped in the testsuite are all missed
> optimizations due to AArch32 missing some optabs.
Does the testsuite have vect_* checks that can be used instead of target arm*?
If so, let's use those.  Otherwise it's okay as is.

Thanks,
Kyrill

>
> For example:
>
> void f1 ()
> {
>   for (int i = 0; i < N; i++)
>     {
>       b[i] += a[i];
>       if (a[i] > 0)
>         break;
>     }
> }
>
> For 128-bit vectors we generate:
>
>         vcgt.s32        q8, q9, #0
>         vpmax.u32       d7, d16, d17
>         vpmax.u32       d7, d7, d7
>         vmov    r3, s14 @ int
>         cmp     r3, #0
>
> and for 64-bit vectors we can omit one vpmax as we still need to compress to
> 32 bits.
>
> Bootstrapped and regtested on arm-none-linux-gnueabihf with no issues.
>
> Ok for master?
>
> Thanks,
> Tamar
>
> gcc/ChangeLog:
>
> 	* config/arm/neon.md (cbranch<mode>4): New.
>
> gcc/testsuite/ChangeLog:
>
> 	* gcc.dg/vect/vect-early-break_2.c: Skip Arm.
> 	* gcc.dg/vect/vect-early-break_7.c: Likewise.
> 	* gcc.dg/vect/vect-early-break_75.c: Likewise.
> 	* gcc.dg/vect/vect-early-break_77.c: Likewise.
> 	* gcc.dg/vect/vect-early-break_82.c: Likewise.
> 	* gcc.dg/vect/vect-early-break_88.c: Likewise.
> 	* lib/target-supports.exp (add_options_for_vect_early_break,
> 	check_effective_target_vect_early_break_hw,
> 	check_effective_target_vect_early_break): Support AArch32.
> 	* gcc.target/arm/vect-early-break-cbranch.c: New test.
>
> --- inline version of patch ---
>
> diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
> index d213369ffc38fb88ad0357d848cc7da5af73bab7..ed659ab736862da416d1ff6241d0d3e6c6b96ff1 100644
> --- a/gcc/config/arm/neon.md
> +++ b/gcc/config/arm/neon.md
> @@ -408,6 +408,55 @@ (define_insn "vec_extract<mode><V_elem_l>"
>    [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
>  )
>
> +;; Patterns comparing two vectors and branching conditionally.
> +;; Advanced SIMD lacks a vector != comparison, but this is quite a common
> +;; operation.  To avoid paying the penalty for inverting == we can map our
> +;; "any" comparisons to "all", i.e. any(~x) => all(x).
> +;;
> +;; However, unlike the AArch64 version, we can't optimize this further as the
> +;; chain is too long for combine due to these being unspecs, so it doesn't
> +;; fold the operation to something simpler.
> +(define_expand "cbranch<mode>4"
> +  [(set (pc) (if_then_else
> +	       (match_operator 0 "expandable_comparison_operator"
> +		[(match_operand:VDQI 1 "register_operand")
> +		 (match_operand:VDQI 2 "reg_or_zero_operand")])
> +	       (label_ref (match_operand 3 "" ""))
> +	       (pc)))]
> +  "TARGET_NEON"
> +{
> +  rtx mask = operands[1];
> +
> +  /* If comparing against a non-zero vector we have to do a comparison first
> +     so we can have a != 0 comparison with the result.  */
> +  if (operands[2] != CONST0_RTX (<MODE>mode))
> +    {
> +      mask = gen_reg_rtx (<MODE>mode);
> +      emit_insn (gen_xor<mode>3 (mask, operands[1], operands[2]));
> +    }
> +
> +  /* For 128-bit vectors we need an additional reduction.  */
> +  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
> +    {
> +      /* Always reduce using a V4SI.  */
> +      mask = gen_reg_rtx (V2SImode);
> +      rtx low = gen_reg_rtx (V2SImode);
> +      rtx high = gen_reg_rtx (V2SImode);
> +      rtx op1 = lowpart_subreg (V4SImode, operands[1], <MODE>mode);
> +      emit_insn (gen_neon_vget_lowv4si (low, op1));
> +      emit_insn (gen_neon_vget_highv4si (high, op1));
> +      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
> +    }
> +
> +  rtx op1 = lowpart_subreg (V2SImode, mask, GET_MODE (mask));
> +  emit_insn (gen_neon_vpumaxv2si (op1, op1, op1));
> +
> +  rtx val = gen_reg_rtx (SImode);
> +  emit_move_insn (val, gen_lowpart (SImode, mask));
> +  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
> +  DONE;
> +})
> +
>  ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
>  ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
>  ;; by define_expand in vec-common.md file.
> diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> index 5c32bf94409e9743e72429985ab3bf13aab8f2c1..dec0b492ab883de6e02944a95fd554a109a68a39 100644
> --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> @@ -5,7 +5,7 @@
>
>  /* { dg-additional-options "-Ofast" } */
>
> -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
>
>  #include <complex.h>
>
> diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> index 8c86c5034d7522b3733543fb384a23c5d6ed0fcf..d218a0686719fee4c167684dcf26402851b53260 100644
> --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> @@ -5,7 +5,7 @@
>
>  /* { dg-additional-options "-Ofast" } */
>
> -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
>
>  #include <complex.h>
>
> diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> index ed27f8635730ff0d8803517c72693625a2feddef..9dcc3372acd657458df8d94ce36c4bd96f02fd52 100644
> --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> @@ -3,7 +3,7 @@
>  /* { dg-require-effective-target vect_int } */
>
>  /* { dg-additional-options "-O3" } */
> -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-*" } } } } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-* arm*-*-*" } } } } */
>
>  #include <limits.h>
>  #include <assert.h>
> diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> index 225106aab0a3efc7536de6f6e45bc6ff16210ea8..9fa7e6948ebfb5f1723833653fd6ad1fc65f4e8e 100644
> --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> @@ -3,7 +3,7 @@
>  /* { dg-require-effective-target vect_int } */
>
>  /* { dg-additional-options "-O3" } */
> -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
"arm*-*- > *" } } } } */ > > #include "tree-vect.h" > > diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c > b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c > index > 0e9b2d8d385c556063a3c6fcb14383317b056a79..7cd21d33485f3abb823e1943c > 87e9481c41fd2c3 100644 > --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c > +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c > @@ -5,7 +5,7 @@ > > /* { dg-additional-options "-Ofast" } */ > > -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */ > +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*- > *" } } } } */ > > #include <complex.h> > > diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c > b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c > index > b392dd46553994d813761da41c42989a79b90119..59ed57c5fb5f3e8197fc20058 > eeb0a81a55815cc 100644 > --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c > +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c > @@ -3,7 +3,7 @@ > /* { dg-require-effective-target vect_int } */ > > /* { dg-additional-options "-Ofast --param vect-partial-vector-usage=2" } */ > -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */ > +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*- > *" } } } } */ > > #include "tree-vect.h" > > diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c > b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c > new file mode 100644 > index > 0000000000000000000000000000000000000000..0e9a39d231fdf4cb56590945e > 7cedfabd11d39b5 > --- /dev/null > +++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c > @@ -0,0 +1,138 @@ > +/* { dg-do compile } */ > +/* { dg-require-effective-target vect_early_break } */ > +/* { dg-require-effective-target arm_neon_ok } */ > +/* { dg-require-effective-target arm32 } */ > +/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard -fno- > schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */ > +/* { dg-final { check-function-bodies "**" "" "" } } */ > + > +#define N 640 > +int a[N] = {0}; > +int b[N] = {0}; > + > +/* > +** f1: > +** ... > +** vcgt.s32 q[0-9]+, q[0-9]+, #0 > +** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+ > +** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+ > +** vmov r[0-9]+, s[0-9]+ @ int > +** cmp r[0-9]+, #0 > +** bne \.L[0-9]+ > +** ... > +*/ > +void f1 () > +{ > + for (int i = 0; i < N; i++) > + { > + b[i] += a[i]; > + if (a[i] > 0) > + break; > + } > +} > + > +/* > +** f2: > +** ... > +** vcge.s32 q[0-9]+, q[0-9]+, #0 > +** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+ > +** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+ > +** vmov r[0-9]+, s[0-9]+ @ int > +** cmp r[0-9]+, #0 > +** bne \.L[0-9]+ > +** ... > +*/ > +void f2 () > +{ > + for (int i = 0; i < N; i++) > + { > + b[i] += a[i]; > + if (a[i] >= 0) > + break; > + } > +} > + > +/* > +** f3: > +** ... > +** vceq.i32 q[0-9]+, q[0-9]+, #0 > +** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+ > +** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+ > +** vmov r[0-9]+, s[0-9]+ @ int > +** cmp r[0-9]+, #0 > +** bne \.L[0-9]+ > +** ... > +*/ > +void f3 () > +{ > + for (int i = 0; i < N; i++) > + { > + b[i] += a[i]; > + if (a[i] == 0) > + break; > + } > +} > + > +/* > +** f4: > +** ... > +** vceq.i32 q[0-9]+, q[0-9]+, #0 > +** vmvn q[0-9]+, q[0-9]+ > +** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+ > +** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+ > +** vmov r[0-9]+, s[0-9]+ @ int > +** cmp r[0-9]+, #0 > +** bne \.L[0-9]+ > +** ... 
> +*/
> +void f4 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] != 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f5:
> +**	...
> +**	vclt.s32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f5 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] < 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f6:
> +**	...
> +**	vcle.s32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f6 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] <= 0)
> +	break;
> +    }
> +}
> +
> diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
> index 05fc417877bcd658931061b7245eb8ba5abd2e09..24a937dbb59b5723af038bd9e0b89369595fcf87 100644
> --- a/gcc/testsuite/lib/target-supports.exp
> +++ b/gcc/testsuite/lib/target-supports.exp
> @@ -4059,6 +4059,7 @@ proc check_effective_target_vect_early_break { } {
>      return [check_cached_effective_target_indexed vect_early_break {
>        expr {
>  	[istarget aarch64*-*-*]
> +	|| [check_effective_target_arm_v8_neon_ok]
>  	|| [check_effective_target_sse4]
>  	}}]
>  }
> @@ -4072,6 +4073,7 @@ proc check_effective_target_vect_early_break_hw { } {
>      return [check_cached_effective_target_indexed vect_early_break_hw {
>        expr {
>  	[istarget aarch64*-*-*]
> +	|| [check_effective_target_arm_v8_neon_hw]
>  	|| [check_sse4_hw_available]
>  	}}]
>  }
> @@ -4081,6 +4083,11 @@ proc add_options_for_vect_early_break { flags } {
>  	return "$flags"
>      }
>
> +    if { [check_effective_target_arm_v8_neon_ok] } {
> +	global et_arm_v8_neon_flags
> +	return "$flags $et_arm_v8_neon_flags -march=armv8-a"
> +    }
> +
>      if { [check_effective_target_sse4] } {
>  	return "$flags -msse4.1"
>      }
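
As an aside for readers following along (not part of the patch): the expander above relies on the fact that every lane of a NEON comparison mask is either all-ones or all-zeros, so reducing the mask with pairwise unsigned max (the vpmax.u32 steps) and then comparing the surviving 32-bit lane against zero answers "is any lane set?", which is exactly what the early-break cbranch needs.  A minimal scalar sketch of that idea in C follows; the helper name and lane values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Scalar model of the V4SI case: two pairwise unsigned-max steps
   (the two vpmax.u32 instructions) followed by a compare of the
   remaining 32-bit lane against zero (vmov + cmp + bne).  */
static int
any_lane_set (const uint32_t mask[4])
{
  uint32_t d0 = mask[0] > mask[1] ? mask[0] : mask[1];	/* first vpmax  */
  uint32_t d1 = mask[2] > mask[3] ? mask[2] : mask[3];
  uint32_t s = d0 > d1 ? d0 : d1;			/* second vpmax */
  return s != 0;					/* cmp ..., #0  */
}

int
main (void)
{
  uint32_t none[4] = { 0, 0, 0, 0 };
  uint32_t some[4] = { 0, 0, 0xffffffffu, 0 };
  printf ("%d %d\n", any_lane_set (none), any_lane_set (some));	/* prints: 0 1 */
  return 0;
}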