https://gcc.gnu.org/g:e60ec9b0e02e8647c289d204342e992e91750011
commit r12-10422-ge60ec9b0e02e8647c289d204342e992e91750011
Author: Matthias Kretz <m.kr...@gsi.de>
Date:   Fri Jun 2 13:44:22 2023 +0200

    libstdc++: Replace use of incorrect non-temporal store

    The call to the base implementation sometimes didn't find a matching
    signature because the _Abi parameter of _SimdImpl* was "wrong" after
    conversion. It has to call into <new ABI tag>::_SimdImpl instead of the
    current ABI tag's _SimdImpl. This also reduces the number of possible
    template instantiations.

    Signed-off-by: Matthias Kretz <m.kr...@gsi.de>

    libstdc++-v3/ChangeLog:

            PR libstdc++/110054
            * include/experimental/bits/simd_builtin.h (_S_masked_store):
            Call into deduced ABI's SimdImpl after conversion.
            * include/experimental/bits/simd_x86.h (_S_masked_store_nocvt):
            Don't use _mm_maskmoveu_si128. Use the generic fall-back
            implementation. Also fix masked stores without SSE2, which
            were not doing anything before.

    (cherry picked from commit 27e45b7597d6fb1a71927d658a0294797b720c0a)

Diff:
---
 .../include/experimental/bits/simd_builtin.h      |  6 ++--
 libstdc++-v3/include/experimental/bits/simd_x86.h | 38 +++-------------------
 2 files changed, 7 insertions(+), 37 deletions(-)

diff --git a/libstdc++-v3/include/experimental/bits/simd_builtin.h b/libstdc++-v3/include/experimental/bits/simd_builtin.h
index 9ea6259bfda2..8923a82da39e 100644
--- a/libstdc++-v3/include/experimental/bits/simd_builtin.h
+++ b/libstdc++-v3/include/experimental/bits/simd_builtin.h
@@ -1628,7 +1628,7 @@ template <typename _Abi, typename>
           if constexpr (_UW_size == _TV_size) // one convert+store
             {
               const _UW __converted = __convert<_UW>(__v);
-              _SuperImpl::_S_masked_store_nocvt(
+              _UAbi::_SimdImpl::_S_masked_store_nocvt(
                 __converted, __mem,
                 _UAbi::_MaskImpl::template _S_convert<
                   __int_for_sizeof_t<_Up>>(__k));
@@ -1643,7 +1643,7 @@ template <typename _Abi, typename>
               const array<_UV, _NAllStores> __converted
                 = __convert_all<_UV, _NAllStores>(__v);
               __execute_n_times<_NFullStores>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
-                _SuperImpl::_S_masked_store_nocvt(
+                _UAbi::_SimdImpl::_S_masked_store_nocvt(
                   _UW(__converted[__i]), __mem + __i * _UW_size,
                   _UAbi::_MaskImpl::template _S_convert<
                     __int_for_sizeof_t<_Up>>(
@@ -1651,7 +1651,7 @@ template <typename _Abi, typename>
               });
               if constexpr (_NAllStores > _NFullStores) // one partial at the end
-                _SuperImpl::_S_masked_store_nocvt(
+                _UAbi::_SimdImpl::_S_masked_store_nocvt(
                   _UW(__converted[_NFullStores]),
                   __mem + _NFullStores * _UW_size,
                   _UAbi::_MaskImpl::template _S_convert<
diff --git a/libstdc++-v3/include/experimental/bits/simd_x86.h b/libstdc++-v3/include/experimental/bits/simd_x86.h
index 557462893964..03febe7044c9 100644
--- a/libstdc++-v3/include/experimental/bits/simd_x86.h
+++ b/libstdc++-v3/include/experimental/bits/simd_x86.h
@@ -1106,31 +1106,6 @@ template <typename _Abi, typename>
         else
           _mm512_mask_storeu_pd(__mem, __k, __vi);
       }
-#if 0 // with KNL either sizeof(_Tp) >= 4 or sizeof(_vi) <= 32
-    // with Skylake-AVX512, __have_avx512bw is true
-    else if constexpr (__have_sse2)
-      {
-        using _M = __vector_type_t<_Tp, _Np>;
-        using _MVT = _VectorTraits<_M>;
-        _mm_maskmoveu_si128(__auto_bitcast(__extract<0, 4>(__v._M_data)),
-                            __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(__k._M_data)),
-                            reinterpret_cast<char*>(__mem));
-        _mm_maskmoveu_si128(__auto_bitcast(__extract<1, 4>(__v._M_data)),
-                            __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
-                              __k._M_data >> 1 * _MVT::_S_full_size)),
-                            reinterpret_cast<char*>(__mem) + 1 * 16);
-        _mm_maskmoveu_si128(__auto_bitcast(__extract<2, 4>(__v._M_data)),
-                            __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
-                              __k._M_data >> 2 * _MVT::_S_full_size)),
-                            reinterpret_cast<char*>(__mem) + 2 * 16);
-        if constexpr (_Np > 48 / sizeof(_Tp))
-          _mm_maskmoveu_si128(
-            __auto_bitcast(__extract<3, 4>(__v._M_data)),
-            __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
-              __k._M_data >> 3 * _MVT::_S_full_size)),
-            reinterpret_cast<char*>(__mem) + 3 * 16);
-      }
-#endif
     else
       __assert_unreachable<_Tp>();
   }
@@ -1233,8 +1208,8 @@ template <typename _Abi, typename>
         else if constexpr (__have_avx && sizeof(_Tp) == 8)
           _mm_maskstore_pd(reinterpret_cast<double*>(__mem), __ki,
                            __vector_bitcast<double>(__vi));
-        else if constexpr (__have_sse2)
-          _mm_maskmoveu_si128(__vi, __ki, reinterpret_cast<char*>(__mem));
+        else
+          _Base::_S_masked_store_nocvt(__v, __mem, __k);
       }
     else if constexpr (sizeof(__v) == 32)
       {
@@ -1259,13 +1234,8 @@ template <typename _Abi, typename>
         else if constexpr (__have_avx && sizeof(_Tp) == 8)
           _mm256_maskstore_pd(reinterpret_cast<double*>(__mem), __ki,
                               __vector_bitcast<double>(__v));
-        else if constexpr (__have_sse2)
-          {
-            _mm_maskmoveu_si128(__lo128(__vi), __lo128(__ki),
-                                reinterpret_cast<char*>(__mem));
-            _mm_maskmoveu_si128(__hi128(__vi), __hi128(__ki),
-                                reinterpret_cast<char*>(__mem) + 16);
-          }
+        else
+          _Base::_S_masked_store_nocvt(__v, __mem, __k);
       }
     else
       __assert_unreachable<_Tp>();
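
For context, the code path touched in simd_builtin.h is the converting masked
store: a where()-masked copy_to() whose destination has a different element
type. Below is a minimal user-level sketch (not part of the commit; it assumes
GCC with -std=c++17 and the Parallelism TS 2 header <experimental/simd>, built
e.g. with "g++ -std=c++17 -O2 -march=native"). A float-to-double masked store
like this should, as far as the dispatch is understood here, go through
_S_masked_store and then into the deduced ABI's _S_masked_store_nocvt that the
patch now calls directly.

    #include <experimental/simd>
    #include <cstdio>

    namespace stdx = std::experimental;

    int main()
    {
      using V = stdx::native_simd<float>;
      V v([](int i) { return 1.5f * i; });   // 0, 1.5, 3, 4.5, ...
      const auto k = v > 2.f;                // mask selecting which lanes to store

      double out[V::size()] = {};            // destination with a different element type
      // Masked, converting store: only lanes with k[i] == true are written.
      stdx::where(k, v).copy_to(out, stdx::element_aligned);

      for (std::size_t i = 0; i < V::size(); ++i)
        std::printf("%g ", out[i]);
      std::printf("\n");
    }

As for the simd_x86.h change: _mm_maskmoveu_si128 (MASKMOVDQU) is a byte-masked
non-temporal store, i.e. it bypasses the cache and is weakly ordered, which is
why the commit title calls it an incorrect choice for an ordinary masked store.
The generic fall-back has plain store semantics; conceptually it writes each
selected element individually, roughly like the illustrative sketch below (a
simplification with hypothetical names, not the library's actual fall-back
code).

    #include <cstddef>

    // Illustrative only: write each selected element with an ordinary store,
    // never touching unselected elements and never bypassing the cache.
    template <typename T, std::size_t N>
    inline void
    masked_store_fallback(const T (&value)[N], T* mem, const bool (&mask)[N])
    {
      for (std::size_t i = 0; i < N; ++i)
        if (mask[i])
          mem[i] = value[i];   // plain store, not non-temporal
    }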