Author: ctopper
Date: Sat Jul 7 15:03:16 2018
New Revision: 336498

URL: http://llvm.org/viewvc/llvm-project?rev=336498&view=rev
Log:
[X86] Fix a few intrinsics that were ignoring their rounding mode argument
and hardcoded _MM_FROUND_CUR_DIRECTION internally.

I believe these have been broken since their introduction into clang.

I've enhanced the tests for these intrinsics to use a real rounding mode and
to check all of the intrinsic arguments instead of just the intrinsic name.
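For context, a minimal usage sketch (not part of this commit) of the kind of call the fixed macros now honor. It assumes an AVX-512F-capable compiler and CPU; the file name, build line, and mask value are illustrative only.

/* Hypothetical example, not from this commit. Build: clang -O2 -mavx512f round_example.c */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128 a = _mm_set_ss(1.0f);
  __m128 b = _mm_set_ss(2.0f);
  __m128 c = _mm_set_ss(3.0f);
  /* Before this fix, some of these *_round_* scalar macros dropped their rounding
     argument and always passed _MM_FROUND_CUR_DIRECTION to the builtin; after the
     fix, the embedded rounding/SAE request below reaches the intrinsic. */
  __m128 r = _mm_maskz_fmadd_round_ss((__mmask8)0x1, a, b, c,
                                      _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  printf("%f\n", _mm_cvtss_f32(r)); /* expected: 5.000000 (1*2 + 3) */
  return 0;
}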
Modified:
    cfe/trunk/lib/Headers/avx512fintrin.h
    cfe/trunk/test/CodeGen/avx512f-builtins.c

Modified: cfe/trunk/lib/Headers/avx512fintrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/avx512fintrin.h?rev=336498&r1=336497&r2=336498&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/avx512fintrin.h (original)
+++ cfe/trunk/lib/Headers/avx512fintrin.h Sat Jul 7 15:03:16 2018
@@ -6597,7 +6597,7 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m12
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
+                                          (int)(R))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_srai_epi32(__m512i __A, int __B)
@@ -7826,7 +7826,7 @@ _mm_maskz_fmadd_ss (__mmask8 __U, __m128
   (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
                                          (__v4sf)(__m128)(B), \
                                          (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         _MM_FROUND_CUR_DIRECTION)
+                                         (int)(R))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -7988,7 +7988,7 @@ _mm_maskz_fnmsub_ss (__mmask8 __U, __m12
   (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
                                          -(__v4sf)(__m128)(B), \
                                          -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         _MM_FROUND_CUR_DIRECTION)
+                                         (int)(R))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -8042,7 +8042,7 @@ _mm_maskz_fmadd_sd (__mmask8 __U, __m128
   (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
                                           (__v2df)(__m128d)(B), \
                                           (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
+                                          (int)(R))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8205,7 +8205,7 @@ _mm_maskz_fnmsub_sd (__mmask8 __U, __m12
                                           -(__v2df)(__m128d)(B), \
                                           -(__v2df)(__m128d)(C), \
                                           (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
+                                          (int)(R))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)

Modified: cfe/trunk/test/CodeGen/avx512f-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512f-builtins.c?rev=336498&r1=336497&r2=336498&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512f-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512f-builtins.c Sat Jul 7 15:03:16 2018
@@ -5433,8 +5433,8 @@ __m512 test_mm512_maskz_scalef_ps(__mmas
 
 __m128d test_mm_scalef_round_sd(__m128d __A, __m128d __B) {
   // CHECK-LABEL: @test_mm_scalef_round_sd
-  // CHECK: @llvm.x86.avx512.mask.scalef
-  return _mm_scalef_round_sd(__A, __B, _MM_FROUND_CUR_DIRECTION);
+  // CHECK: @llvm.x86.avx512.mask.scalef.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %2, i8 -1, i32 8)
+  return _mm_scalef_round_sd(__A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128d test_mm_scalef_sd(__m128d __A, __m128d __B) {
@@ -5451,8 +5451,8 @@ __m128d test_mm_mask_scalef_sd(__m128d _
 
 __m128d test_mm_mask_scalef_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
   // CHECK-LABEL: @test_mm_mask_scalef_round_sd
-  // CHECK: @llvm.x86.avx512.mask.scalef.sd
-  return _mm_mask_scalef_round_sd(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION);
+  // CHECK: @llvm.x86.avx512.mask.scalef.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
+  return _mm_mask_scalef_round_sd(__W, __U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128d test_mm_maskz_scalef_sd(__mmask8 __U, __m128d __A, __m128d __B){
@@ -5463,14 +5463,14 @@ __m128d test_mm_maskz_scalef_sd(__mmask8
 
 __m128d test_mm_maskz_scalef_round_sd(__mmask8 __U, __m128d __A, __m128d __B){
   // CHECK-LABEL: @test_mm_maskz_scalef_round_sd
-  // CHECK: @llvm.x86.avx512.mask.scalef.sd
-  return _mm_maskz_scalef_round_sd(__U, __A, __B, _MM_FROUND_CUR_DIRECTION);
+  // CHECK: @llvm.x86.avx512.mask.scalef.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
+  return _mm_maskz_scalef_round_sd(__U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128 test_mm_scalef_round_ss(__m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm_scalef_round_ss
-  // CHECK: @llvm.x86.avx512.mask.scalef.ss
-  return _mm_scalef_round_ss(__A, __B, _MM_FROUND_CUR_DIRECTION);
+  // CHECK: @llvm.x86.avx512.mask.scalef.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 -1, i32 8)
+  return _mm_scalef_round_ss(__A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128 test_mm_scalef_ss(__m128 __A, __m128 __B) {
@@ -5487,8 +5487,8 @@ __m128 test_mm_mask_scalef_ss(__m128 __W
 
 __m128 test_mm_mask_scalef_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
   // CHECK-LABEL: @test_mm_mask_scalef_round_ss
-  // CHECK: @llvm.x86.avx512.mask.scalef.ss
-  return _mm_mask_scalef_round_ss(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION);
+  // CHECK: @llvm.x86.avx512.mask.scalef.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
+  return _mm_mask_scalef_round_ss(__W, __U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128 test_mm_maskz_scalef_ss(__mmask8 __U, __m128 __A, __m128 __B){
@@ -5499,8 +5499,8 @@ __m128 test_mm_maskz_scalef_ss(__mmask8
 
 __m128 test_mm_maskz_scalef_round_ss(__mmask8 __U, __m128 __A, __m128 __B){
   // CHECK-LABEL: @test_mm_maskz_scalef_round_ss
-  // CHECK: @llvm.x86.avx512.mask.scalef.ss
-  return _mm_maskz_scalef_round_ss(__U, __A, __B, _MM_FROUND_CUR_DIRECTION);
+  // CHECK: @llvm.x86.avx512.mask.scalef.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
+  return _mm_maskz_scalef_round_ss(__U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m512i test_mm512_srai_epi32(__m512i __A) {
@@ -7232,13 +7232,13 @@ __m128 test_mm_mask_fmadd_ss(__m128 __W,
 
 __m128 test_mm_fmadd_round_ss(__m128 __A, __m128 __B, __m128 __C){
   // CHECK-LABEL: @test_mm_fmadd_round_ss
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 -1, i32 8)
   return _mm_fmadd_round_ss(__A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128 test_mm_mask_fmadd_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
   // CHECK-LABEL: @test_mm_mask_fmadd_round_ss
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask_fmadd_round_ss(__W, __U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7266,7 +7266,7 @@ __m128 test_mm_maskz_fmadd_ss(__mmask8 _
 
 __m128 test_mm_maskz_fmadd_round_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){
   // CHECK-LABEL: @test_mm_maskz_fmadd_round_ss
-  // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_maskz_fmadd_round_ss(__U, __A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7295,7 +7295,7 @@ __m128 test_mm_mask3_fmadd_ss(__m128 __W
 
 __m128 test_mm_mask3_fmadd_round_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){
   // CHECK-LABEL: @test_mm_mask3_fmadd_round_ss
-  // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask3_fmadd_round_ss(__W, __X, __Y, __U, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7325,13 +7325,13 @@ __m128 test_mm_mask_fmsub_ss(__m128 __W,
 
 __m128 test_mm_fmsub_round_ss(__m128 __A, __m128 __B, __m128 __C){
   // CHECK-LABEL: @test_mm_fmsub_round_ss
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 -1, i32 8)
   return _mm_fmsub_round_ss(__A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128 test_mm_mask_fmsub_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
   // CHECK-LABEL: @test_mm_mask_fmsub_round_ss
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask_fmsub_round_ss(__W, __U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7360,7 +7360,7 @@ __m128 test_mm_maskz_fmsub_ss(__mmask8 _
 
 __m128 test_mm_maskz_fmsub_round_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){
   // CHECK-LABEL: @test_mm_maskz_fmsub_round_ss
-  // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_maskz_fmsub_round_ss(__U, __A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7390,7 +7390,7 @@ __m128 test_mm_mask3_fmsub_ss(__m128 __W
 
 __m128 test_mm_mask3_fmsub_round_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){
   // CHECK-LABEL: @test_mm_mask3_fmsub_round_ss
-  // CHECK: @llvm.x86.avx512.mask3.vfmsub.ss
+  // CHECK: @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask3_fmsub_round_ss(__W, __X, __Y, __U, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7420,13 +7420,13 @@ __m128 test_mm_mask_fnmadd_ss(__m128 __W
 
 __m128 test_mm_fnmadd_round_ss(__m128 __A, __m128 __B, __m128 __C){
   // CHECK-LABEL: @test_mm_fnmadd_round_ss
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 -1, i32 8)
   return _mm_fnmadd_round_ss(__A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128 test_mm_mask_fnmadd_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
   // CHECK-LABEL: @test_mm_mask_fnmadd_round_ss
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask_fnmadd_round_ss(__W, __U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7455,7 +7455,7 @@ __m128 test_mm_maskz_fnmadd_ss(__mmask8
 
 __m128 test_mm_maskz_fnmadd_round_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){
   // CHECK-LABEL: @test_mm_maskz_fnmadd_round_ss
-  // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_maskz_fnmadd_round_ss(__U, __A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7485,7 +7485,7 @@ __m128 test_mm_mask3_fnmadd_ss(__m128 __
 
 __m128 test_mm_mask3_fnmadd_round_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){
   // CHECK-LABEL: @test_mm_mask3_fnmadd_round_ss
-  // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask3_fnmadd_round_ss(__W, __X, __Y, __U, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7516,13 +7516,13 @@ __m128 test_mm_mask_fnmsub_ss(__m128 __W
 
 __m128 test_mm_fnmsub_round_ss(__m128 __A, __m128 __B, __m128 __C){
   // CHECK-LABEL: @test_mm_fnmsub_round_ss
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 -1, i32 8)
   return _mm_fnmsub_round_ss(__A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128 test_mm_mask_fnmsub_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
   // CHECK-LABEL: @test_mm_mask_fnmsub_round_ss
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask_fnmsub_round_ss(__W, __U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7552,7 +7552,7 @@ __m128 test_mm_maskz_fnmsub_ss(__mmask8
 
 __m128 test_mm_maskz_fnmsub_round_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){
   // CHECK-LABEL: @test_mm_maskz_fnmsub_round_ss
-  // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss
+  // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_maskz_fnmsub_round_ss(__U, __A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7583,7 +7583,7 @@ __m128 test_mm_mask3_fnmsub_ss(__m128 __
 
 __m128 test_mm_mask3_fnmsub_round_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){
   // CHECK-LABEL: @test_mm_mask3_fnmsub_round_ss
-  // CHECK: @llvm.x86.avx512.mask3.vfmsub.ss
+  // CHECK: @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask3_fnmsub_round_ss(__W, __X, __Y, __U, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7612,13 +7612,13 @@ __m128d test_mm_mask_fmadd_sd(__m128d __
 
 __m128d test_mm_fmadd_round_sd(__m128d __A, __m128d __B, __m128d __C){
   // CHECK-LABEL: @test_mm_fmadd_round_sd
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 -1, i32 8)
   return _mm_fmadd_round_sd(__A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128d test_mm_mask_fmadd_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
   // CHECK-LABEL: @test_mm_mask_fmadd_round_sd
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask_fmadd_round_sd(__W, __U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7646,7 +7646,7 @@ __m128d test_mm_maskz_fmadd_sd(__mmask8
 
 __m128d test_mm_maskz_fmadd_round_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){
   // CHECK-LABEL: @test_mm_maskz_fmadd_round_sd
-  // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_maskz_fmadd_round_sd(__U, __A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7675,7 +7675,7 @@ __m128d test_mm_mask3_fmadd_sd(__m128d _
 
 __m128d test_mm_mask3_fmadd_round_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){
   // CHECK-LABEL: @test_mm_mask3_fmadd_round_sd
-  // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask3_fmadd_round_sd(__W, __X, __Y, __U, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7705,13 +7705,13 @@ __m128d test_mm_mask_fmsub_sd(__m128d __
 
 __m128d test_mm_fmsub_round_sd(__m128d __A, __m128d __B, __m128d __C){
   // CHECK-LABEL: @test_mm_fmsub_round_sd
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 -1, i32 8)
   return _mm_fmsub_round_sd(__A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128d test_mm_mask_fmsub_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
   // CHECK-LABEL: @test_mm_mask_fmsub_round_sd
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask_fmsub_round_sd(__W, __U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7740,7 +7740,7 @@ __m128d test_mm_maskz_fmsub_sd(__mmask8
 
 __m128d test_mm_maskz_fmsub_round_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){
   // CHECK-LABEL: @test_mm_maskz_fmsub_round_sd
-  // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_maskz_fmsub_round_sd(__U, __A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7770,7 +7770,7 @@ __m128d test_mm_mask3_fmsub_sd(__m128d _
 
 __m128d test_mm_mask3_fmsub_round_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){
   // CHECK-LABEL: @test_mm_mask3_fmsub_round_sd
-  // CHECK: @llvm.x86.avx512.mask3.vfmsub.sd
+  // CHECK: @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask3_fmsub_round_sd(__W, __X, __Y, __U, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7800,13 +7800,13 @@ __m128d test_mm_mask_fnmadd_sd(__m128d _
 
 __m128d test_mm_fnmadd_round_sd(__m128d __A, __m128d __B, __m128d __C){
   // CHECK-LABEL: @test_mm_fnmadd_round_sd
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 -1, i32 8)
   return _mm_fnmadd_round_sd(__A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128d test_mm_mask_fnmadd_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
   // CHECK-LABEL: @test_mm_mask_fnmadd_round_sd
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask_fnmadd_round_sd(__W, __U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7835,7 +7835,7 @@ __m128d test_mm_maskz_fnmadd_sd(__mmask8
 
 __m128d test_mm_maskz_fnmadd_round_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){
   // CHECK-LABEL: @test_mm_maskz_fnmadd_round_sd
-  // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_maskz_fnmadd_round_sd(__U, __A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7865,7 +7865,7 @@ __m128d test_mm_mask3_fnmadd_sd(__m128d
 
 __m128d test_mm_mask3_fnmadd_round_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){
   // CHECK-LABEL: @test_mm_mask3_fnmadd_round_sd
-  // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask3_fnmadd_round_sd(__W, __X, __Y, __U, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7896,13 +7896,13 @@ __m128d test_mm_mask_fnmsub_sd(__m128d _
 
 __m128d test_mm_fnmsub_round_sd(__m128d __A, __m128d __B, __m128d __C){
   // CHECK-LABEL: @test_mm_fnmsub_round_sd
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 -1, i32 8)
   return _mm_fnmsub_round_sd(__A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
 __m128d test_mm_mask_fnmsub_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
   // CHECK-LABEL: @test_mm_mask_fnmsub_round_sd
-  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask_fnmsub_round_sd(__W, __U, __A, __B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7932,7 +7932,7 @@ __m128d test_mm_maskz_fnmsub_sd(__mmask8
 
 __m128d test_mm_maskz_fnmsub_round_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){
   // CHECK-LABEL: @test_mm_maskz_fnmsub_round_sd
-  // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd
+  // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_maskz_fnmsub_round_sd(__U, __A, __B, __C, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }
 
@@ -7963,7 +7963,7 @@ __m128d test_mm_mask3_fnmsub_sd(__m128d
 
 __m128d test_mm_mask3_fnmsub_round_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){
   // CHECK-LABEL: @test_mm_mask3_fnmsub_round_sd
-  // CHECK: @llvm.x86.avx512.mask3.vfmsub.sd
+  // CHECK: @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, i8 %{{.*}}, i32 8)
   return _mm_mask3_fnmsub_round_sd(__W, __X, __Y, __U, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
 }

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits