https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87064
--- Comment #14 from Jakub Jelinek <jakub at gcc dot gnu.org> ---
And if I disable that define_insn_and_split altogether (by prepending 0 && to
its condition so the pattern never matches), the assembly change is:
--- reduction-3.s2 2019-01-18 18:19:42.184057246 +0100
+++ reduction-3.s4 2019-01-18 18:26:23.079506011 +0100
@@ -9,26 +9,16 @@ MAIN__._omp_fn.0:
.cfi_startproc
ld 10,0(3)
lxvdsx 0,0,10
- addi 9,1,-16
- xxpermdi 0,0,0,2
- stxvd2x 0,0,9
ld 9,8(3)
li 8,5
mtctr 8
.L2:
- lxvd2x 0,0,9
- addi 8,1,-16
- lxvd2x 12,0,8
- xxpermdi 12,12,12,2
- xvmaxdp 0,12,0
- xxpermdi 0,0,0,2
- stxvd2x 0,0,8
- xxpermdi 0,0,0,2
+ lxvd2x 12,0,9
+ xvmaxdp 0,0,12
addi 9,9,16
bdnz .L2
- # vec_extract to same register
- lfd 12,-16(1)
- xsmaxdp 0,12,0
+ xxsldwi 12,0,0,2
+ xvmaxdp 0,12,0
stfd 0,0(10)
blr
.long 0
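For reference, a minimal C analogue of the reduction this outlined function
appears to compute, reconstructed purely from the assembly above (assumptions:
a MAX reduction over 10 doubles, i.e. 5 bdnz iterations times 2 doubles per
lxvd2x; the struct layout and all names are illustrative, not taken from the
testcase):

/* Hypothetical sketch; the two-pointer layout is inferred from
   ld 10,0(3) and ld 9,8(3), the trip count from li 8,5 (CTR)
   and the 16-byte address strides.  */
struct omp_data { double *result; const double *a; };

static void omp_fn (struct omp_data *d)
{
  double m = *d->result;        /* lxvdsx broadcasts the initial value  */
  for (int i = 0; i < 10; i++)  /* 5 CTR iterations, 2 doubles each     */
    if (d->a[i] > m)
      m = d->a[i];
  *d->result = m;               /* stfd 0,0(10) stores it back          */
}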
The new sequence looks much better: the old loop spilled the accumulator to
-16(1) and reloaded it on every iteration (the stxvd2x/lxvd2x pair plus the
xxpermdi swaps), while the new one keeps it in a vector register and finishes
with a single in-register horizontal max. So, what is the reason for this
define_insn_and_split? Perhaps it is only useful for BYTES_BIG_ENDIAN?
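For completeness, a sketch of the in-register horizontal max that the new
xxsldwi/xvmaxdp epilogue performs, replacing the old lfd-from-stack plus
xsmaxdp tail (assumptions: vec_splat is used instead of the xxsldwi rotate
for clarity, hmax_v2df is an illustrative name, and this needs -mvsx):

#include <altivec.h>

/* Sketch only: the compiler emits xxsldwi to rotate the accumulator;
   splatting lane 1 has the same effect for this purpose.  */
static double hmax_v2df (vector double acc)
{
  vector double other = vec_splat (acc, 1);  /* both lanes = acc[1]    */
  vector double m = vec_max (acc, other);    /* xvmaxdp: lane-wise max */
  return vec_extract (m, 0);                 /* max of the two doubles */
}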