From: "Hu, Lin1" <lin1...@intel.com> gcc/ChangeLog:
	* config/i386/avx10_2roundingintrin.h: New intrins.
	* config/i386/i386-builtin-types.def: Add new DEF_FUNCTION_TYPE.
	* config/i386/i386-builtin.def (BDESC): Add new builtins.
	* config/i386/i386-expand.cc (ix86_expand_round_builtin): Handle
	V16HI_FTYPE_V16HF_V16HI_UHI_INT, V4DF_FTYPE_V4SF_V4DF_UQI_INT,
	V8HF_FTYPE_V8SF_V8HF_UQI_INT.
	* config/i386/sse.md
	(avx512fp16_vcvt<castmode>2ph_<mode><mask_name><round_name>):
	Add round condition check.
	* config/i386/subst.md (round_mode_condition): Add V16HI check for
	256bit.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx-1.c: Add new builtin test.
	* gcc.target/i386/sse-13.c: Ditto.
	* gcc.target/i386/sse-14.c: Ditto.
	* gcc.target/i386/sse-22.c: Add new macro test.
	* gcc.target/i386/sse-23.c: Ditto.
	* gcc.target/i386/avx10_2-rounding-1.c: Add test.
---
 gcc/config/i386/avx10_2roundingintrin.h      | 220 ++++++++++++++++++
 gcc/config/i386/i386-builtin-types.def       |   3 +
 gcc/config/i386/i386-builtin.def             |   4 +
 gcc/config/i386/i386-expand.cc               |   3 +
 gcc/config/i386/sse.md                       |   2 +-
 gcc/config/i386/subst.md                     |   1 +
 gcc/testsuite/gcc.target/i386/avx-1.c        |   4 +
 .../gcc.target/i386/avx10_2-rounding-1.c     |  36 +++
 gcc/testsuite/gcc.target/i386/sse-13.c       |   4 +
 gcc/testsuite/gcc.target/i386/sse-14.c       |  12 +
 gcc/testsuite/gcc.target/i386/sse-22.c       |  12 +
 gcc/testsuite/gcc.target/i386/sse-23.c       |   4 +
 12 files changed, 304 insertions(+), 1 deletion(-)

diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
index 29966f5e1bf..bc3f92a7d1a 100644
--- a/gcc/config/i386/avx10_2roundingintrin.h
+++ b/gcc/config/i386/avx10_2roundingintrin.h
@@ -726,6 +726,143 @@ _mm256_maskz_cvt_roundph_epu64 (__mmask8 __U, __m128h __A, const int __R)
 						    (__mmask8) __U,
 						    __R);
 }
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundph_epu16 (__m256h __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
+						      (__v16hi)
+						      _mm256_undefined_si256 (),
+						      (__mmask16) -1,
+						      __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundph_epu16 (__m256i __W, __mmask16 __U, __m256h __A,
+			       const int __R)
+{
+  return (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
+							   (__v16hi) __W,
+							   (__mmask16) __U,
+							   __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundph_epu16 (__mmask16 __U, __m256h __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) __A,
+						      (__v16hi)
+						      _mm256_setzero_si256 (),
+						      (__mmask16) __U,
+						      __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundph_epi16 (__m256h __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
+						     (__v16hi)
+						     _mm256_undefined_si256 (),
+						     (__mmask16) -1,
+						     __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundph_epi16 (__m256i __W, __mmask16 __U, __m256h __A,
+			       const int __R)
+{
+  return (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
+							  (__v16hi) __W,
+							  (__mmask16) __U,
+							  __R);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R)
+{
+  return
+    (__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) __A,
+						     (__v16hi)
+						     _mm256_setzero_si256 (),
+						     (__mmask16) __U,
+						     __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundps_pd (__m128 __A, const int __R)
+{
+  return
+    (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
+						      (__v4df)
+						      _mm256_undefined_pd (),
+						      (__mmask8) -1,
+						      __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundps_pd (__m256d __W, __mmask8 __U, __m128 __A,
+			    const int __R)
+{
+  return (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
+							   (__v4df) __W,
+							   (__mmask8) __U,
+							   __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundps_pd (__mmask8 __U, __m128 __A, const int __R)
+{
+  return (__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) __A,
+							   (__v4df)
+							   _mm256_setzero_pd (),
+							   (__mmask8) __U,
+							   __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtx_roundps_ph (__m256 __A, const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
+							    (__v8hf)
+							    _mm_setzero_ph (),
+							    (__mmask8) -1,
+							    __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtx_roundps_ph (__m128h __W, __mmask8 __U, __m256 __A,
+			     const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
+							    (__v8hf) __W,
+							    (__mmask8) __U,
+							    __R);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtx_roundps_ph (__mmask8 __U, __m256 __A, const int __R)
+{
+  return (__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) __A,
+							    (__v8hf)
+							    _mm_setzero_ph (),
+							    (__mmask8) __U,
+							    __R);
+}
 #else
 #define _mm256_add_round_pd(A, B, R) \
   ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -1139,6 +1276,89 @@ _mm256_maskz_cvt_roundph_epu64 (__mmask8 __U, __m128h __A, const int __R)
 						   (_mm256_setzero_si256 ()), \
 						   (__mmask8) (U), \
 						   (R)))
+
+#define _mm256_cvt_roundph_epu16(A, R) \
+  ((__m256i) \
+   __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
+					   (__v16hi) \
+					   (_mm256_undefined_si256 ()), \
+					   (__mmask16) (-1), \
+					   (R)))
+
+#define _mm256_mask_cvt_roundph_epu16(W, U, A, R) \
+  ((__m256i) __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
+						     (__v16hi) (W), \
+						     (__mmask16) (U), \
+						     (R)))
+
+#define _mm256_maskz_cvt_roundph_epu16(U, A, R) \
+  ((__m256i) \
+   __builtin_ia32_vcvtph2uw256_mask_round ((__v16hf) (A), \
+					   (__v16hi) \
+					   (_mm256_setzero_si256 ()), \
+					   (__mmask16) (U), \
+					   (R)))
+
+#define _mm256_cvt_roundph_epi16(A, R) \
+  ((__m256i) \
+   __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
+					  (__v16hi) \
+					  (_mm256_undefined_si256 ()), \
+					  (__mmask16) (-1), \
+					  (R)))
+
+#define _mm256_mask_cvt_roundph_epi16(W, U, A, R) \
+  ((__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
+						    (__v16hi) (W), \
+						    (__mmask16) (U), \
+						    (R)))
+
+#define _mm256_maskz_cvt_roundph_epi16(U, A, R) \
+  ((__m256i) __builtin_ia32_vcvtph2w256_mask_round ((__v16hf) (A), \
+						    (__v16hi) \
+						    (_mm256_setzero_si256 ()), \
+						    (__mmask16) (U), \
+						    (R)))
+
+#define _mm256_cvt_roundps_pd(A, R) \
+  ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
+						     (__v4df) \
+						     (_mm256_undefined_pd ()), \
+						     (__mmask8) (-1), \
+						     (R)))
+
+#define _mm256_mask_cvt_roundps_pd(W, U, A, R) \
+  ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
+						     (__v4df) (W), \
+						     (__mmask8) (U), \
+						     (R)))
+
+#define _mm256_maskz_cvt_roundps_pd(U, A, R) \
+  ((__m256d) __builtin_ia32_vcvtps2pd256_mask_round ((__v4sf) (A), \
+						     (__v4df) \
+						     (_mm256_setzero_pd ()), \
+						     (__mmask8) (U), \
+						     (R)))
+
+#define _mm256_cvtx_roundps_ph(A, R) \
+  ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
+						      (__v8hf) \
+						      (_mm_setzero_ph ()), \
+						      (__mmask8) (-1), \
+						      (R)))
+
+#define _mm256_mask_cvtx_roundps_ph(W, U, A, R) \
+  ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
+						      (__v8hf) (W), \
+						      (__mmask8) (U), \
+						      (R)))
+
+#define _mm256_maskz_cvtx_roundps_ph(U, A, R) \
+  ((__m128h) __builtin_ia32_vcvtps2phx256_mask_round ((__v8sf) (A), \
+						      (__v8hf) \
+						      (_mm_setzero_ph ()), \
+						      (__mmask8) (U), \
+						      (R)))
 #endif
 
 #ifdef __DISABLE_AVX10_2_256__
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index a660828228d..b850ee0c2f6 100644
--- a/gcc/config/i386/i386-builtin-types.def
+++ b/gcc/config/i386/i386-builtin-types.def
@@ -1431,3 +1431,6 @@ DEF_FUNCTION_TYPE (V8SI, V8HF, V8SI, UQI, INT)
 DEF_FUNCTION_TYPE (V4DF, V8HF, V4DF, UQI, INT)
 DEF_FUNCTION_TYPE (V8SF, V8HF, V8SF, UQI, INT)
 DEF_FUNCTION_TYPE (V4DI, V8HF, V4DI, UQI, INT)
+DEF_FUNCTION_TYPE (V16HI, V16HF, V16HI, UHI, INT)
+DEF_FUNCTION_TYPE (V4DF, V4SF, V4DF, UQI, INT)
+DEF_FUNCTION_TYPE (V8HF, V8SF, V8HF, UQI, INT)
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index e1979e757b0..2a6c46f17a0 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3340,6 +3340,10 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_float_extend_phv8sf2
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2qq_v4di_mask_round, "__builtin_ia32_vcvtph2qq256_mask_round", IX86_BUILTIN_VCVTPH2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2udq_v8si_mask_round, "__builtin_ia32_vcvtph2udq256_mask_round", IX86_BUILTIN_VCVTPH2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2uqq_v4di_mask_round, "__builtin_ia32_vcvtph2uqq256_mask_round", IX86_BUILTIN_VCVTPH2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2uw_v16hi_mask_round, "__builtin_ia32_vcvtph2uw256_mask_round", IX86_BUILTIN_VCVTPH2UW256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2w_v16hi_mask_round, "__builtin_ia32_vcvtph2w256_mask_round", IX86_BUILTIN_VCVTPH2W256_MASK_ROUND, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_cvtps2pd256_mask_round, "__builtin_ia32_vcvtps2pd256_mask_round", IX86_BUILTIN_VCVTPS2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4SF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtps2ph_v8sf_mask_round, "__builtin_ia32_vcvtps2phx256_mask_round", IX86_BUILTIN_VCVTPS2PHX256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8SF_V8HF_UQI_INT)
 
 BDESC_END (ROUND_ARGS, MULTI_ARG)
 
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index c56842a2902..2ce6a2ceaa1 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -12462,11 +12462,13 @@ ix86_expand_round_builtin (const struct builtin_description *d,
     case V16SI_FTYPE_V16SF_V16SI_HI_INT:
     case V16SI_FTYPE_V16HF_V16SI_UHI_INT:
     case V16HF_FTYPE_V16SI_V16HF_UHI_INT:
+    case V16HI_FTYPE_V16HF_V16HI_UHI_INT:
     case V8DF_FTYPE_V8SF_V8DF_QI_INT:
     case V16SF_FTYPE_V16HI_V16SF_HI_INT:
     case V8SF_FTYPE_V8SI_V8SF_UQI_INT:
     case V8SF_FTYPE_V8HF_V8SF_UQI_INT:
     case V8SI_FTYPE_V8HF_V8SI_UQI_INT:
+    case V4DF_FTYPE_V4SF_V4DF_UQI_INT:
     case V4DF_FTYPE_V8HF_V4DF_UQI_INT:
     case V4DI_FTYPE_V8HF_V4DI_UQI_INT:
     case V4DI_FTYPE_V4DF_V4DI_UQI_INT:
@@ -12476,6 +12478,7 @@ ix86_expand_round_builtin (const struct builtin_description *d,
     case V4SF_FTYPE_V4SF_V4SF_V4SF_INT:
     case V8HF_FTYPE_V8DI_V8HF_UQI_INT:
     case V8HF_FTYPE_V8DF_V8HF_UQI_INT:
+    case V8HF_FTYPE_V8SF_V8HF_UQI_INT:
     case V8HF_FTYPE_V8SI_V8HF_UQI_INT:
     case V8HF_FTYPE_V4DF_V8HF_UQI_INT:
     case V16HF_FTYPE_V16SF_V16HF_UHI_INT:
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 0041eacc0b0..38a690dc2ba 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -7850,7 +7850,7 @@
   [(set (match_operand:<ssePHmode> 0 "register_operand" "=v")
	(float_truncate:<ssePHmode>
	  (match_operand:VF48H_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")))]
-  "TARGET_AVX512FP16"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
   "vcvt<castmode>2ph<ph2pssuffix><round_qq2phsuff>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
diff --git a/gcc/config/i386/subst.md b/gcc/config/i386/subst.md
index 716a47f6cf4..7bb25d19eb5 100644
--- a/gcc/config/i386/subst.md
+++ b/gcc/config/i386/subst.md
@@ -218,6 +218,7 @@
		   || <MODE>mode == V4DFmode
		   || <MODE>mode == V4DImode
		   || <MODE>mode == V8SImode
+		   || <MODE>mode == V16HImode
		   || <MODE>mode == V16HFmode)))")
 
 (define_subst_attr "round_applied" "round" "false" "true")
diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
index bc8a72559bb..5eaad5c5250 100644
--- a/gcc/testsuite/gcc.target/i386/avx-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx-1.c
@@ -864,6 +864,10 @@
 #define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
 
 #include <wmmintrin.h>
 #include <immintrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c
index 9f7ada455df..c35c667569a 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c
@@ -60,6 +60,18 @@
 /* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
 /* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
 /* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
scan-assembler-times "vcvtph2uw\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtph2uw\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtph2uw\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtph2w\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtph2w\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtph2w\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtps2pd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtps2pd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtps2pd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtps2phxy\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtps2phx\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */ +/* { dg-final { scan-assembler-times "vcvtps2phx\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */ #include <immintrin.h> @@ -182,3 +194,27 @@ avx10_2_test_7 (void) x = _mm256_mask_cvtx_roundph_ps (x, m8, hxh, 8); x = _mm256_maskz_cvtx_roundph_ps (m8, hxh, 8); } + +void extern +avx10_2_test_8 (void) +{ + xi = _mm256_cvt_roundph_epu16 (xh, 4); + xi = _mm256_mask_cvt_roundph_epu16 (xi, m16, xh, 8); + xi = _mm256_maskz_cvt_roundph_epu16 (m16, xh, 11); + + xi = _mm256_cvt_roundph_epi16 (xh, 4); + xi = _mm256_mask_cvt_roundph_epi16 (xi, m16, xh, 8); + xi = _mm256_maskz_cvt_roundph_epi16 (m16, xh, 11); +} + +void extern +avx10_2_test_9 (void) +{ + xd = _mm256_cvt_roundps_pd (hx, _MM_FROUND_NO_EXC); + xd = _mm256_mask_cvt_roundps_pd (xd, m8, hx, _MM_FROUND_NO_EXC); + xd = _mm256_maskz_cvt_roundps_pd (m8, hx, _MM_FROUND_NO_EXC); + + hxh = _mm256_cvtx_roundps_ph (x, 4); + hxh = _mm256_mask_cvtx_roundps_ph (hxh, m8, x, 8); + hxh = _mm256_maskz_cvtx_roundps_ph (m8, x, 11); +} diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c index 51c5c89817c..2c91c662688 100644 --- a/gcc/testsuite/gcc.target/i386/sse-13.c +++ b/gcc/testsuite/gcc.target/i386/sse-13.c @@ -871,5 +871,9 @@ #define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8) #define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8) #define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8) +#define __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, 8) +#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8) +#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) 
+#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
 
 #include <x86intrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index 344b7d7d7f1..8f3271048ad 100644
--- a/gcc/testsuite/gcc.target/i386/sse-14.c
+++ b/gcc/testsuite/gcc.target/i386/sse-14.c
@@ -1035,6 +1035,10 @@ test_1 (_mm256_cvt_roundph_ps, __m256, __m128i, 8)
 test_1 (_mm256_cvtx_roundph_ps, __m256, __m128h, 8)
 test_1 (_mm256_cvt_roundph_epi64, __m256i, __m128h, 8)
 test_1 (_mm256_cvt_roundph_epu64, __m256i, __m128h, 8)
+test_1 (_mm256_cvt_roundph_epu16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundph_epi16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundps_pd, __m256d, __m128, 8)
+test_1 (_mm256_cvtx_roundps_ph, __m128h, __m256, 8)
 test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
 test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
 test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1053,6 +1057,10 @@ test_2 (_mm256_maskz_cvtx_roundph_ps, __m256, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epi64, __m256i, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epu16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundps_pd, __m256d, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvtx_roundps_ph, __m128h, __mmask8, __m256, 8)
 test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
 test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
 test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1074,6 +1082,10 @@ test_3 (_mm256_mask_cvtx_roundph_ps, __m256, __m256, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epi64, __m256i, __m256i, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundps_pd, __m256d, __m256d, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvtx_roundps_ph, __m128h, __m128h, __mmask8, __m256, 8)
 test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
 test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
 test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
index 0b51b067821..f51345476d3 100644
--- a/gcc/testsuite/gcc.target/i386/sse-22.c
+++ b/gcc/testsuite/gcc.target/i386/sse-22.c
@@ -1077,6 +1077,10 @@ test_1 (_mm256_cvtx_roundph_ps, __m256, __m128h, 8)
 test_1 (_mm256_cvt_roundph_epi64, __m256i, __m128h, 8)
 test_1 (_mm256_cvt_roundph_epu32, __m256i, __m128h, 8)
 test_1 (_mm256_cvt_roundph_epu64, __m256i, __m128h, 8)
+test_1 (_mm256_cvt_roundph_epu16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundph_epi16, __m256i, __m256h, 8)
+test_1 (_mm256_cvt_roundps_pd, __m256d, __m128, 8)
+test_1 (_mm256_cvtx_roundps_ph, __m128h, __m256, 8)
 test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
 test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
 test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1095,6 +1099,10 @@ test_2 (_mm256_maskz_cvtx_roundph_ps, __m256, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epi64, __m256i, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
 test_2 (_mm256_maskz_cvt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epu16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundph_epi16, __m256i, __mmask16, __m256h, 8)
+test_2 (_mm256_maskz_cvt_roundps_pd, __m256d, __mmask8, __m128, 8)
+test_2 (_mm256_maskz_cvtx_roundps_ph, __m128h, __mmask8, __m256, 8)
 test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
 test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
 test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1116,6 +1124,10 @@ test_3 (_mm256_mask_cvtx_roundph_ps, __m256, __m256, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epi64, __m256i, __m256i, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
 test_3 (_mm256_mask_cvt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
+test_3 (_mm256_mask_cvt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8)
+test_3 (_mm256_mask_cvt_roundps_pd, __m256d, __m256d, __mmask8, __m128, 8)
+test_3 (_mm256_mask_cvtx_roundps_ph, __m128h, __m128h, __mmask8, __m256, 8)
 test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
 test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
 test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index 2c74d651336..93a0904ba28 100644
--- a/gcc/testsuite/gcc.target/i386/sse-23.c
+++ b/gcc/testsuite/gcc.target/i386/sse-23.c
@@ -846,6 +846,10 @@
 #define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2pd256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, D) __builtin_ia32_vcvtps2phx256_mask_round(A, B, C, 8)
 
 #pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")
 
-- 
2.31.1
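
P.S. For reviewers' convenience, a minimal usage sketch of the new intrinsics (not part of the patch). It assumes a compiler with this patch applied and AVX10.2/256 enabled; the helper function names are made up for illustration, and the _MM_FROUND_* constants are the standard ones from <immintrin.h> (the combinations below correspond to the literal 8 and 11 used in the tests):

/* Usage sketch only, not part of the patch.  Assumes this patch is
   applied and AVX10.2/256 is enabled; helper names are hypothetical.  */
#include <immintrin.h>

__m256i
ph_to_epu16 (__m256h a)
{
  /* 16 x FP16 -> 16 x unsigned short, round to nearest with
     exceptions suppressed (= 8, as in avx10_2-rounding-1.c).  */
  return _mm256_cvt_roundph_epu16 (a, _MM_FROUND_TO_NEAREST_INT
				      | _MM_FROUND_NO_EXC);
}

__m128h
ps_to_ph_masked (__m128h src, __mmask8 m, __m256 a)
{
  /* Masked float -> FP16 narrowing; lanes with a clear mask bit are
     taken from SRC.  Truncation, exceptions suppressed (= 11).  */
  return _mm256_mask_cvtx_roundps_ph (src, m, a,
				      _MM_FROUND_TO_ZERO
				      | _MM_FROUND_NO_EXC);
}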