From: "Hu, Lin1" <lin1...@intel.com>

gcc/ChangeLog:

        * config/i386/avx10_2roundingintrin.h: Add new intrinsics.
        * config/i386/i386-builtin-types.def: Add new DEF_FUNCTION_TYPE.
        * config/i386/i386-builtin.def (BDESC): Add new builtins.
        * config/i386/i386-expand.cc (ix86_expand_round_builtin): Handle
        V16HF_FTYPE_V16HI_V16HF_UHI_INT.

gcc/testsuite/ChangeLog:

        * gcc.target/i386/avx-1.c: Add new builtin test.
        * gcc.target/i386/sse-13.c: Ditto.
        * gcc.target/i386/sse-14.c: Ditto.
        * gcc.target/i386/sse-22.c: Add new macro test.
        * gcc.target/i386/sse-23.c: Ditto.
        * gcc.target/i386/avx10_2-rounding-3.c: New test.
---
 gcc/config/i386/avx10_2roundingintrin.h       | 286 ++++++++++++++++++
 gcc/config/i386/i386-builtin-types.def        |   1 +
 gcc/config/i386/i386-builtin.def              |   5 +
 gcc/config/i386/i386-expand.cc                |   1 +
 gcc/testsuite/gcc.target/i386/avx-1.c         |   5 +
 .../gcc.target/i386/avx10_2-rounding-3.c      |  58 ++++
 gcc/testsuite/gcc.target/i386/sse-13.c        |   5 +
 gcc/testsuite/gcc.target/i386/sse-14.c        |  15 +
 gcc/testsuite/gcc.target/i386/sse-22.c        |  15 +
 gcc/testsuite/gcc.target/i386/sse-23.c        |   5 +
 10 files changed, 396 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
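
Not part of the patch, for reviewers: a minimal caller sketch showing how
the new intrinsics compose.  The demo function is hypothetical; it assumes
a compiler with this patch applied and is built with -O2 -mavx10.2.

  #include <immintrin.h>

  __m256h
  demo (__m256i w, __mmask16 m, __m256h num, __m256h den)
  {
    /* Signed 16-bit elements -> _Float16, round-to-nearest with
       exceptions suppressed ({rn-sae}).  */
    __m256h c = _mm256_cvt_roundepi16_ph (w, _MM_FROUND_TO_NEAREST_INT
                                             | _MM_FROUND_NO_EXC);
    /* Merge-masked divide, round-toward-zero ({rz-sae}); lanes whose
       mask bit is clear keep the corresponding element of c.  */
    return _mm256_mask_div_round_ph (c, m, num, den,
                                     _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
  }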

diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
index 384facb424c..15ea46b5983 100644
--- a/gcc/config/i386/avx10_2roundingintrin.h
+++ b/gcc/config/i386/avx10_2roundingintrin.h
@@ -1757,6 +1757,183 @@ _mm256_maskz_cvt_roundepu64_ps (__mmask8 __U, __m256i __A, const int __R)
                                                          (__mmask8) __U,
                                                          __R);
 }
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepu16_ph (__m256i __A, const int __R)
+{
+  return (__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) __A,
+                                                          (__v16hf)
+                                                          _mm256_setzero_ph (),
+                                                          (__mmask16) -1,
+                                                          __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepu16_ph (__m256h __W, __mmask16 __U, __m256i __A,
+                              const int __R)
+{
+  return (__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) __A,
+                                                          (__v16hf) __W,
+                                                          (__mmask16) __U,
+                                                          __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepu16_ph (__mmask16 __U, __m256i __A, const int __R)
+{
+  return (__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) __A,
+                                                          (__v16hf)
+                                                          _mm256_setzero_ph (),
+                                                          (__mmask16) __U,
+                                                          __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvt_roundepi16_ph (__m256i __A, const int __R)
+{
+  return (__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) __A,
+                                                         (__v16hf)
+                                                         _mm256_setzero_ph (),
+                                                         (__mmask16) -1,
+                                                         __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvt_roundepi16_ph (__m256h __W, __mmask16 __U, __m256i __A,
+                              const int __R)
+{
+  return (__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) __A,
+                                                         (__v16hf) __W,
+                                                         (__mmask16) __U,
+                                                         __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvt_roundepi16_ph (__mmask16 __U, __m256i __A, const int __R)
+{
+  return (__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) __A,
+                                                         (__v16hf)
+                                                         _mm256_setzero_ph (),
+                                                         (__mmask16) __U,
+                                                         __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_div_round_pd (__m256d __A, __m256d __B, const int __R)
+{
+  return (__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) __A,
+                                                      (__v4df) __B,
+                                                      (__v4df)
+                                                      _mm256_undefined_pd (),
+                                                      (__mmask8) -1,
+                                                      __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_div_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                         __m256d __B, const int __R)
+{
+  return (__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) __A,
+                                                      (__v4df) __B,
+                                                      (__v4df) __W,
+                                                      (__mmask8) __U,
+                                                      __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_div_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
+                          const int __R)
+{
+  return (__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) __A,
+                                                      (__v4df) __B,
+                                                      (__v4df)
+                                                      _mm256_setzero_pd (),
+                                                      (__mmask8) __U,
+                                                      __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_div_round_ph (__m256h __A, __m256h __B, const int __R)
+{
+  return (__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) __A,
+                                                      (__v16hf) __B,
+                                                      (__v16hf)
+                                                      _mm256_setzero_ph (),
+                                                      (__mmask16) -1,
+                                                      __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_div_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
+                         __m256h __B, const int __R)
+{
+  return (__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) __A,
+                                                      (__v16hf) __B,
+                                                      (__v16hf) __W,
+                                                      (__mmask16) __U,
+                                                      __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_div_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
+                          const int __R)
+{
+  return (__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) __A,
+                                                      (__v16hf) __B,
+                                                      (__v16hf)
+                                                      _mm256_setzero_ph (),
+                                                      (__mmask16) __U,
+                                                      __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_div_round_ps (__m256 __A, __m256 __B, const int __R)
+{
+  return (__m256) __builtin_ia32_divps256_mask_round ((__v8sf) __A,
+                                                     (__v8sf) __B,
+                                                     (__v8sf)
+                                                     _mm256_undefined_ps (),
+                                                     (__mmask8) -1,
+                                                     __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_div_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
+                         const int __R)
+{
+  return (__m256) __builtin_ia32_divps256_mask_round ((__v8sf) __A,
+                                                     (__v8sf) __B,
+                                                     (__v8sf) __W,
+                                                     (__mmask8) __U,
+                                                     __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_div_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
+                          const int __R)
+{
+  return (__m256) __builtin_ia32_divps256_mask_round ((__v8sf) __A,
+                                                     (__v8sf) __B,
+                                                     (__v8sf)
+                                                     _mm256_setzero_ps (),
+                                                     (__mmask8) __U,
+                                                     __R);
+}
 #else
 #define _mm256_add_round_pd(A, B, R) \
   ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -2802,6 +2979,115 @@ _mm256_maskz_cvt_roundepu64_ps (__mmask8 __U, __m256i __A, const int __R)
                                                    (_mm_setzero_ps ()), \
                                                    (__mmask8) (U), \
                                                    (R)))
+
+#define _mm256_cvt_roundepu16_ph(A, R) \
+  ((__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) (A), \
+                                                    (__v16hf) \
+                                                    (_mm256_setzero_ph ()), \
+                                                    (__mmask16) (-1), \
+                                                    (R)))
+
+#define _mm256_mask_cvt_roundepu16_ph(W, U, A, R) \
+  ((__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) (A), \
+                                                    (__v16hf) (W), \
+                                                    (__mmask16) (U), \
+                                                    (R)))
+
+#define _mm256_maskz_cvt_roundepu16_ph(U, A, R) \
+  ((__m256h) __builtin_ia32_vcvtuw2ph256_mask_round ((__v16hi) (A), \
+                                                    (__v16hf) \
+                                                    (_mm256_setzero_ph ()), \
+                                                    (__mmask16) (U), \
+                                                    (R)))
+
+#define _mm256_cvt_roundepi16_ph(A, R) \
+  ((__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) (A), \
+                                                   (__v16hf) \
+                                                   (_mm256_setzero_ph ()), \
+                                                   (__mmask16) (-1), \
+                                                   (R)))
+
+#define _mm256_mask_cvt_roundepi16_ph(W, U, A, R) \
+  ((__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) (A), \
+                                                   (__v16hf) (W), \
+                                                   (__mmask16) (U), \
+                                                   (R)))
+
+#define _mm256_maskz_cvt_roundepi16_ph(U, A, R) \
+  ((__m256h) __builtin_ia32_vcvtw2ph256_mask_round ((__v16hi) (A), \
+                                                   (__v16hf) \
+                                                   (_mm256_setzero_ph ()), \
+                                                   (__mmask16) (U), \
+                                                   (R)))
+
+#define _mm256_div_round_pd(A, B, R) \
+  ((__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) (A), \
+                                                (__v4df) (B), \
+                                                (__v4df) \
+                                                (_mm256_undefined_pd ()), \
+                                                (__mmask8) (-1), \
+                                                (R)))
+
+#define _mm256_mask_div_round_pd(W, U, A, B, R) \
+  ((__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) (A), \
+                                                (__v4df) (B), \
+                                                (__v4df) (W), \
+                                                (__mmask8) (U), \
+                                                (R)))
+
+#define _mm256_maskz_div_round_pd(U, A, B, R) \
+  ((__m256d) __builtin_ia32_divpd256_mask_round ((__v4df) (A), \
+                                                (__v4df) (B), \
+                                                (__v4df) \
+                                                (_mm256_setzero_pd ()), \
+                                                (__mmask8) (U), \
+                                                (R)))
+
+#define _mm256_div_round_ph(A, B, R) \
+  ((__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) (A), \
+                                                (__v16hf) (B), \
+                                                (__v16hf) \
+                                                (_mm256_setzero_ph ()), \
+                                                (__mmask16) (-1), \
+                                                (R)))
+
+#define _mm256_mask_div_round_ph(W, U, A, B, R) \
+  ((__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) (A), \
+                                                (__v16hf) (B), \
+                                                (__v16hf) (W), \
+                                                (__mmask16) (U), \
+                                                (R)))
+
+#define _mm256_maskz_div_round_ph(U, A, B, R) \
+  ((__m256h) __builtin_ia32_divph256_mask_round ((__v16hf) (A), \
+                                                (__v16hf) (B), \
+                                                (__v16hf) \
+                                                (_mm256_setzero_ph ()), \
+                                                (__mmask16) (U), \
+                                                (R)))
+
+#define _mm256_div_round_ps(A, B, R) \
+  ((__m256) __builtin_ia32_divps256_mask_round ((__v8sf) (A), \
+                                               (__v8sf) (B), \
+                                               (__v8sf) \
+                                               (_mm256_undefined_ps ()), \
+                                               (__mmask8) (-1), \
+                                               (R)))
+
+#define _mm256_mask_div_round_ps(W, U, A, B, R) \
+  ((__m256) __builtin_ia32_divps256_mask_round ((__v8sf) (A), \
+                                               (__v8sf) (B), \
+                                               (__v8sf) (W), \
+                                               (__mmask8) (U), \
+                                               (R)))
+
+#define _mm256_maskz_div_round_ps(U, A, B, R) \
+  ((__m256) __builtin_ia32_divps256_mask_round ((__v8sf) (A), \
+                                               (__v8sf) (B), \
+                                               (__v8sf) \
+                                               (_mm256_setzero_ps ()), \
+                                               (__mmask8) (U), \
+                                               (R)))
 #endif
 
 #ifdef __DISABLE_AVX10_2_256__
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index adbc6d22f4c..c89c6e4021b 100644
--- a/gcc/config/i386/i386-builtin-types.def
+++ b/gcc/config/i386/i386-builtin-types.def
@@ -1439,3 +1439,4 @@ DEF_FUNCTION_TYPE (V4DI, V4SF, V4DI, UQI, INT)
 DEF_FUNCTION_TYPE (V4DF, V4DI, V4DF, UQI, INT)
 DEF_FUNCTION_TYPE (V8HF, V4DI, V8HF, UQI, INT)
 DEF_FUNCTION_TYPE (V4SF, V4DI, V4SF, UQI, INT)
+DEF_FUNCTION_TYPE (V16HF, V16HI, V16HF, UHI, INT)
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index 1290ae6d10a..8e655726775 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3370,6 +3370,11 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv8siv8sf2_mask_round, "
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv4div4df2_mask_round, "__builtin_ia32_cvtuqq2pd256_mask_round", IX86_BUILTIN_VCVTUQQ2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DI_V4DF_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtuqq2ph_v4di_mask_round, "__builtin_ia32_vcvtuqq2ph256_mask_round", IX86_BUILTIN_VCVTUQQ2PH256_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V4DI_V8HF_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_floatunsv4div4sf2_mask_round, "__builtin_ia32_cvtuqq2ps256_mask_round", IX86_BUILTIN_VCVTUQQ2PS256_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4DI_V4SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtuw2ph_v16hi_mask_round, "__builtin_ia32_vcvtuw2ph256_mask_round", IX86_BUILTIN_VCVTUW2PH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HI_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtw2ph_v16hi_mask_round, "__builtin_ia32_vcvtw2ph256_mask_round", IX86_BUILTIN_VCVTW2PH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HI_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_divv4df3_mask_round, "__builtin_ia32_divpd256_mask_round", IX86_BUILTIN_VDIVPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_divv16hf3_mask_round, "__builtin_ia32_divph256_mask_round", IX86_BUILTIN_VDIVPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_divv8sf3_mask_round, "__builtin_ia32_divps256_mask_round", IX86_BUILTIN_VDIVPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
 
 BDESC_END (ROUND_ARGS, MULTI_ARG)
 
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index 8c2a7c7e33d..613e3be7ce3 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -12487,6 +12487,7 @@ ix86_expand_round_builtin (const struct builtin_description *d,
     case V8HF_FTYPE_V4DF_V8HF_UQI_INT:
     case V8HF_FTYPE_V4DI_V8HF_UQI_INT:
     case V16HF_FTYPE_V16SF_V16HF_UHI_INT:
+    case V16HF_FTYPE_V16HI_V16HF_UHI_INT:
     case V8HF_FTYPE_V8HF_V8HF_V8HF_INT:
       nargs = 4;
       break;
diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
index 59a62ab2080..a6687d091d2 100644
--- a/gcc/testsuite/gcc.target/i386/avx-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx-1.c
@@ -894,6 +894,11 @@
 #define __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, 8)
 #define __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtuw2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtuw2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtw2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtw2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_divpd256_mask_round(A, B, C, D, E) __builtin_ia32_divpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_divph256_mask_round(A, B, C, D, E) __builtin_ia32_divph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_divps256_mask_round(A, B, C, D, E) __builtin_ia32_divps256_mask_round(A, B, C, D, 8)
 
 #include <wmmintrin.h>
 #include <immintrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
new file mode 100644
index 00000000000..c2313e94d72
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
@@ -0,0 +1,58 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx10.2" } */
+/* { dg-final { scan-assembler-times "vcvtuw2ph\[ 
\\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtuw2ph\[ 
\\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[
 \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtuw2ph\[ 
\\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[
 \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtw2ph\[ 
\\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtw2ph\[ 
\\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[
 \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vcvtw2ph\[ 
\\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[
 \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vdivpd\[ 
\\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vdivpd\[ 
\\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 
 }  } */
+/* { dg-final { scan-assembler-times "vdivpd\[ 
\\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ 
\\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vdivph\[ 
\\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[
 \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vdivph\[ 
\\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[
 \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vdivph\[ 
\\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[
 \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vdivps\[ 
\\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1  }  } */
+/* { dg-final { scan-assembler-times "vdivps\[ 
\\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 
 }  } */
+/* { dg-final { scan-assembler-times "vdivps\[ 
\\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ 
\\t\]+#)" 1  }  } */
+
+#include <immintrin.h>
+
+volatile __m128 hx;
+volatile __m128i hxi;
+volatile __m128h hxh;
+volatile __m256 x;
+volatile __m256d xd;
+volatile __m256h xh;
+volatile __m256i xi;
+volatile __mmask8 m8;
+volatile __mmask16 m16;
+volatile __mmask32 m32;
+
+void extern
+avx10_2_test_1 (void)
+{
+  xh = _mm256_cvt_roundepu16_ph (xi, 4);
+  xh = _mm256_mask_cvt_roundepu16_ph (xh, m16, xi, 8);
+  xh = _mm256_maskz_cvt_roundepu16_ph (m16, xi, 11);
+
+  xh = _mm256_cvt_roundepi16_ph (xi, 4);
+  xh = _mm256_mask_cvt_roundepi16_ph (xh, m16, xi, 8);
+  xh = _mm256_maskz_cvt_roundepi16_ph (m16, xi, 11);
+}
+
+void extern
+avx10_2_test_2 (void)
+{
+  xd = _mm256_div_round_pd (xd, xd, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+  xd = _mm256_mask_div_round_pd (xd, m8, xd, xd, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+  xd = _mm256_maskz_div_round_pd (m8, xd, xd, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+
+  xh = _mm256_div_round_ph (xh, xh, 8);
+  xh = _mm256_mask_div_round_ph (xh, m16, xh, xh, 8);
+  xh = _mm256_maskz_div_round_ph (m16, xh, xh, 11);
+
+  x = _mm256_div_round_ps (x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+  x = _mm256_mask_div_round_ps (x, m8, x, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+  x = _mm256_maskz_div_round_ps (m8, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+}
diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
index 25d028d8a90..3e99f8bd39a 100644
--- a/gcc/testsuite/gcc.target/i386/sse-13.c
+++ b/gcc/testsuite/gcc.target/i386/sse-13.c
@@ -901,5 +901,10 @@
 #define __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, 8)
 #define __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtuw2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtuw2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtw2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtw2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_divpd256_mask_round(A, B, C, D, E) __builtin_ia32_divpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_divph256_mask_round(A, B, C, D, E) __builtin_ia32_divph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_divps256_mask_round(A, B, C, D, E) __builtin_ia32_divps256_mask_round(A, B, C, D, 8)
 
 #include <x86intrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index f89b884498e..21636e856a9 100644
--- a/gcc/testsuite/gcc.target/i386/sse-14.c
+++ b/gcc/testsuite/gcc.target/i386/sse-14.c
@@ -1064,6 +1064,8 @@ test_1 (_mm256_cvt_roundepu32_ps, __m256, __m256i, 9)
 test_1 (_mm256_cvt_roundepu64_pd, __m256d, __m256i, 9)
 test_1 (_mm256_cvt_roundepu64_ph, __m128h, __m256i, 9)
 test_1 (_mm256_cvt_roundepu64_ps, __m128, __m256i, 9)
+test_1 (_mm256_cvt_roundepu16_ph, __m256h, __m256i, 8)
+test_1 (_mm256_cvt_roundepi16_ph, __m256h, __m256i, 8)
 test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
 test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
 test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1112,6 +1114,11 @@ test_2 (_mm256_maskz_cvt_roundepu32_ps, __m256, __mmask8, __m256i, 9)
 test_2 (_mm256_maskz_cvt_roundepu64_pd, __m256d, __mmask8, __m256i, 9)
 test_2 (_mm256_maskz_cvt_roundepu64_ph, __m128h, __mmask8, __m256i, 8)
 test_2 (_mm256_maskz_cvt_roundepu64_ps, __m128, __mmask8, __m256i, 9)
+test_2 (_mm256_maskz_cvt_roundepu16_ph, __m256h, __mmask16, __m256i, 8)
+test_2 (_mm256_maskz_cvt_roundepi16_ph, __m256h, __mmask16, __m256i, 8)
+test_2 (_mm256_div_round_pd, __m256d, __m256d, __m256d, 9)
+test_2 (_mm256_div_round_ph, __m256h, __m256h, __m256h, 9)
+test_2 (_mm256_div_round_ps, __m256, __m256, __m256, 9)
 test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
 test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
 test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1163,9 +1170,17 @@ test_3 (_mm256_mask_cvt_roundepu32_ps, __m256, __m256, __mmask8, __m256i, 9)
 test_3 (_mm256_mask_cvt_roundepu64_pd, __m256d, __m256d, __mmask8, __m256i, 9)
 test_3 (_mm256_mask_cvt_roundepu64_ph, __m128h, __m128h, __mmask8, __m256i, 8)
 test_3 (_mm256_mask_cvt_roundepu64_ps, __m128, __m128, __mmask8, __m256i, 9)
+test_3 (_mm256_mask_cvt_roundepu16_ph, __m256h, __m256h, __mmask16, __m256i, 8)
+test_3 (_mm256_mask_cvt_roundepi16_ph, __m256h, __m256h, __mmask16, __m256i, 8)
+test_3 (_mm256_maskz_div_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
+test_3 (_mm256_maskz_div_round_ph, __m256h, __mmask16, __m256h, __m256h, 9)
+test_3 (_mm256_maskz_div_round_ps, __m256, __mmask8, __m256, __m256, 9)
 test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
 test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
 test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
 test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
 test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
 test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
+test_4 (_mm256_mask_div_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask_div_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 9)
+test_4 (_mm256_mask_div_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
index 1fa699bc47d..61aa49e8e0b 100644
--- a/gcc/testsuite/gcc.target/i386/sse-22.c
+++ b/gcc/testsuite/gcc.target/i386/sse-22.c
@@ -1107,6 +1107,8 @@ test_1 (_mm256_cvt_roundepu32_ps, __m256, __m256i, 9)
 test_1 (_mm256_cvt_roundepu64_pd, __m256d, __m256i, 9)
 test_1 (_mm256_cvt_roundepu64_ph, __m128h, __m256i, 9)
 test_1 (_mm256_cvt_roundepu64_ps, __m128, __m256i, 9)
+test_1 (_mm256_cvt_roundepu16_ph, __m256h, __m256i, 8)
+test_1 (_mm256_cvt_roundepi16_ph, __m256h, __m256i, 8)
 test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
 test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
 test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1155,6 +1157,11 @@ test_2 (_mm256_maskz_cvt_roundepu32_ps, __m256, __mmask8, __m256i, 9)
 test_2 (_mm256_maskz_cvt_roundepu64_pd, __m256d, __mmask8, __m256i, 9)
 test_2 (_mm256_maskz_cvt_roundepu64_ph, __m128h, __mmask8, __m256i, 8)
 test_2 (_mm256_maskz_cvt_roundepu64_ps, __m128, __mmask8, __m256i, 9)
+test_2 (_mm256_maskz_cvt_roundepu16_ph, __m256h, __mmask16, __m256i, 8)
+test_2 (_mm256_maskz_cvt_roundepi16_ph, __m256h, __mmask16, __m256i, 8)
+test_2 (_mm256_div_round_pd, __m256d, __m256d, __m256d, 9)
+test_2 (_mm256_div_round_ph, __m256h, __m256h, __m256h, 9)
+test_2 (_mm256_div_round_ps, __m256, __m256, __m256, 9)
 test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
 test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
 test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1206,9 +1213,17 @@ test_3 (_mm256_mask_cvt_roundepu32_ps, __m256, __m256, __mmask8, __m256i, 9)
 test_3 (_mm256_mask_cvt_roundepu64_pd, __m256d, __m256d, __mmask8, __m256i, 9)
 test_3 (_mm256_mask_cvt_roundepu64_ph, __m128h, __m128h, __mmask8, __m256i, 8)
 test_3 (_mm256_mask_cvt_roundepu64_ps, __m128, __m128, __mmask8, __m256i, 9)
+test_3 (_mm256_mask_cvt_roundepu16_ph, __m256h, __m256h, __mmask16, __m256i, 8)
+test_3 (_mm256_mask_cvt_roundepi16_ph, __m256h, __m256h, __mmask16, __m256i, 8)
+test_3 (_mm256_maskz_div_round_pd, __m256d, __mmask8, __m256d, __m256d, 9)
+test_3 (_mm256_maskz_div_round_ph, __m256h, __mmask16, __m256h, __m256h, 9)
+test_3 (_mm256_maskz_div_round_ps, __m256, __mmask8, __m256, __m256, 9)
 test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
 test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
 test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
 test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
 test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
 test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
+test_4 (_mm256_mask_div_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9)
+test_4 (_mm256_mask_div_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 9)
+test_4 (_mm256_mask_div_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index b18716b83ec..fe7850f23d2 100644
--- a/gcc/testsuite/gcc.target/i386/sse-23.c
+++ b/gcc/testsuite/gcc.target/i386/sse-23.c
@@ -876,6 +876,11 @@
 #define __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2pd256_mask_round(A, B, C, 8)
 #define __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtuqq2ph256_mask_round(A, B, C, 8)
 #define __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, D) __builtin_ia32_cvtuqq2ps256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtuw2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtuw2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtw2ph256_mask_round(A, B, C, D) __builtin_ia32_vcvtw2ph256_mask_round(A, B, C, 8)
+#define __builtin_ia32_divpd256_mask_round(A, B, C, D, E) __builtin_ia32_divpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_divph256_mask_round(A, B, C, D, E) __builtin_ia32_divph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_divps256_mask_round(A, B, C, D, E) __builtin_ia32_divps256_mask_round(A, B, C, D, 8)
 
 #pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")
 
-- 
2.31.1
