================
@@ -1454,24 +1454,52 @@ __m128d test_mm_permutevar_pd(__m128d A, __m128i B) {
// CHECK: call {{.*}}<2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>
%{{.*}}, <2 x i64> %{{.*}})
return _mm_permutevar_pd(A, B);
}
+TEST_CONSTEXPR(match_m128d(
+ _mm_permutevar_pd(
+ ((__m128d){0.0, 1.0}),
+ ((__m128i){0b10, 0b00})
+ ),
+ 1.0, 0.0
+));
__m256d test_mm256_permutevar_pd(__m256d A, __m256i B) {
// CHECK-LABEL: test_mm256_permutevar_pd
// CHECK: call {{.*}}<4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x
double> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_permutevar_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(
+ _mm256_permutevar_pd(
+ ((__m256d){0.0, 1.0, 2.0, 3.0}),
+ ((__m256i){0b10, 0b00, 0b10, 0b00})
----------------
RKSimon wrote:
Avoid matching lane masks? It helps avoid bugs where we've reused the wrong
lane offsets.
https://github.com/llvm/llvm-project/pull/168861
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits