https://gcc.gnu.org/bugzilla/show_bug.cgi?id=114908
Bug ID: 114908
Summary: fails to optimize avx2 in-register permute written
with std::experimental::simd
Product: gcc
Version: 14.0
Status: UNCONFIRMED
Severity: normal
Priority: P3
Component: target
Assignee: unassigned at gcc dot gnu.org
Reporter: lee.imple at gmail dot com
Target Milestone: ---
I am trying to write simd code with std::experimental::simd.
Here is the same function written in both std::experimental::simd and GNU
vector extension versions (available online at https://godbolt.org/z/dc169rY3o
).
The purpose is to permute the register from [w, x, y, z] into [0, w, x, y].
```c++
#include <experimental/simd>
#include <cstdint>
namespace stdx = std::experimental;
using data_t = std::uint64_t;
constexpr std::size_t data_size = 4;
template <std::size_t N>
using simd_of = std::experimental::simd<data_t,
std::experimental::simd_abi::deduce_t<data_t, N>>;
using simd_t = simd_of<data_size>;
template <std::size_t N>
constexpr simd_of<N> zero = {};
// stdx version
simd_t permute_simd(simd_t data) {
auto [carry, _] = split<data_size-1, 1>(data);
return concat(zero<1>, carry);
}
typedef data_t vector_t [[gnu::vector_size(data_size * sizeof(data_t))]];
constexpr vector_t zero_v = {0};
// gnu vector extension version
vector_t permute_vector(vector_t data) {
return __builtin_shufflevector(data, zero_v, 4, 0, 1, 2);
}
```
The code is compiled with the options `-O3 -march=x86-64-v3 -std=c++20`.
Although the two functions should have the same functionality, the assembly
generated by GCC differs significantly between them.
```asm
permute_simd(std::experimental::parallelism_v2::simd<unsigned long,
std::experimental::parallelism_v2::simd_abi::_VecBuiltin<32> >):
pushq %rbp
vpxor %xmm1, %xmm1, %xmm1
movq %rsp, %rbp
andq $-32, %rsp
subq $8, %rsp
vmovdqa %ymm0, -120(%rsp)
vmovdqa %ymm1, -56(%rsp)
movq -104(%rsp), %rax
vmovdqa %xmm0, -56(%rsp)
movq -48(%rsp), %rdx
movq $0, -88(%rsp)
movq %rax, -40(%rsp)
movq -56(%rsp), %rax
vmovdqa -56(%rsp), %ymm2
vmovq %rax, %xmm0
vmovdqa %ymm2, -24(%rsp)
movq -8(%rsp), %rax
vpinsrq $1, %rdx, %xmm0, %xmm0
vmovdqu %xmm0, -80(%rsp)
movq %rax, -64(%rsp)
vmovdqa -88(%rsp), %ymm0
leave
ret
permute_vector(unsigned long __vector(4)):
vpxor %xmm1, %xmm1, %xmm1
vpermq $144, %ymm0, %ymm0
vpblendd $3, %ymm1, %ymm0, %ymm0
ret
```
However, Clang optimizes `permute_simd` into the same assembly as
`permute_vector`, so I believe this is a missed optimization in GCC rather than
a bug in the std::experimental::simd implementation.
```asm
permute_simd(std::experimental::parallelism_v2::simd<unsigned long,
std::experimental::parallelism_v2::simd_abi::_VecBuiltin<32> >): #
@permute_simd(std::experimental::parallelism_v2::simd<unsigned long,
std::experimental::parallelism_v2::simd_abi::_VecBuiltin<32> >)
vpermpd $144, %ymm0, %ymm0 # ymm0 = ymm0[0,0,1,2]
vxorps %xmm1, %xmm1, %xmm1
vblendps $3, %ymm1, %ymm0, %ymm0 # ymm0 =
ymm1[0,1],ymm0[2,3,4,5,6,7]
retq
permute_vector(unsigned long __vector(4)): #
@permute_vector(unsigned long __vector(4))
vpermpd $144, %ymm0, %ymm0 # ymm0 = ymm0[0,0,1,2]
vxorps %xmm1, %xmm1, %xmm1
vblendps $3, %ymm1, %ymm0, %ymm0 # ymm0 =
ymm1[0,1],ymm0[2,3,4,5,6,7]
retq
```