Hi! Since the x86 backend enabled V2SImode vectorization (with TARGET_MMX_WITH_SSE), slp vectorization can kick in and emit movq (%rdi), %xmm1 pshufd $225, %xmm1, %xmm0 movq %xmm0, (%rdi) instead of the rolq $32, (%rdi) that we used to emit (or still emit when slp vectorization is disabled). I think the rotate is both smaller and faster, so this patch adds a combiner splitter to optimize that back.
Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk? 2021-02-13 Jakub Jelinek <ja...@redhat.com> PR target/96166 * config/i386/mmx.md (*mmx_pshufd_1): Add a combine splitter for swap of V2SImode elements in memory into DImode memory rotate by 32. * gcc.target/i386/pr96166.c: New test. --- gcc/config/i386/mmx.md.jj 2021-02-03 09:10:28.741347389 +0100 +++ gcc/config/i386/mmx.md 2021-02-12 15:27:38.159393941 +0100 @@ -2076,6 +2076,17 @@ (define_insn "*mmx_pshufd_1" (set_attr "length_immediate" "1") (set_attr "mode" "TI")]) +;; Optimize V2SImode load from memory, swapping the elements and +;; storing back into the memory into DImode rotate of the memory by 32. +(define_split + [(set (match_operand:V2SI 0 "memory_operand") + (vec_select:V2SI (match_dup 0) + (parallel [(const_int 1) (const_int 0)])))] + "TARGET_64BIT && (TARGET_READ_MODIFY_WRITE || optimize_insn_for_size_p ())" + [(set (match_dup 0) + (rotate:DI (match_dup 0) (const_int 32)))] + "operands[0] = adjust_address (operands[0], DImode, 0);") + (define_insn "mmx_pswapdv2si2" [(set (match_operand:V2SI 0 "register_operand" "=y,Yv") (vec_select:V2SI --- gcc/testsuite/gcc.target/i386/pr96166.c.jj 2021-02-12 15:36:31.251410875 +0100 +++ gcc/testsuite/gcc.target/i386/pr96166.c 2021-02-12 15:37:26.467792818 +0100 @@ -0,0 +1,21 @@ +/* PR target/96166 */ +/* { dg-do compile { target { ! ia32 } } } */ +/* { dg-options "-O3 -mtune=generic -masm=att" } */ +/* { dg-final { scan-assembler "rolq\\s\\\$32, \\\(%\[re]di\\\)" } } */ + +static inline void +swap (int *x, int *y) +{ + int tmp = *x; + *x = *y; + *y = tmp; +} + +void +bar (int (*x)[2]) +{ + int y[2]; + __builtin_memcpy (&y, x, sizeof *x); + swap (&y[0], &y[1]); + __builtin_memcpy (x, &y, sizeof *x); +} Jakub