gcc/

        PR target/87662
        * config/i386/avx512vlintrin.h (_mm256_or_epi32): New.
        (_mm_or_epi32): Likewise.
        (_mm256_xor_epi32): Likewise.
        (_mm_xor_epi32): Likewise.
        (_mm256_or_epi64): Likewise.
        (_mm_or_epi64): Likewise.
        (_mm256_xor_epi64): Likewise.
        (_mm_xor_epi64): Likewise.

gcc/testsuite/

        PR target/87662
        * gcc.target/i386/pr87662.c: New test.
---
 gcc/config/i386/avx512vlintrin.h        | 48 ++++++++++++++++
 gcc/testsuite/gcc.target/i386/pr87662.c | 76 +++++++++++++++++++++++++
 2 files changed, 124 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/i386/pr87662.c

diff --git a/gcc/config/i386/avx512vlintrin.h b/gcc/config/i386/avx512vlintrin.h
index 68b5537845b..a4fb0b0ac00 100644
--- a/gcc/config/i386/avx512vlintrin.h
+++ b/gcc/config/i386/avx512vlintrin.h
@@ -4855,6 +4855,12 @@ _mm256_maskz_or_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
                                                (__mmask8) __U);
 }
 
+extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_or_epi32 (__m256i __A, __m256i __B)
+{
+  return (__m256i) ((__v8su)__A | (__v8su)__B);
+}
+
 extern __inline __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_or_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
@@ -4876,6 +4882,12 @@ _mm_maskz_or_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
                                                (__mmask8) __U);
 }
 
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_or_epi32 (__m128i __A, __m128i __B)
+{
+  return (__m128i) ((__v4su)__A | (__v4su)__B);
+}
+
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mask_xor_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
@@ -4898,6 +4910,12 @@ _mm256_maskz_xor_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
                                                 (__mmask8) __U);
 }
 
+extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_xor_epi32 (__m256i __A, __m256i __B)
+{
+  return (__m256i) ((__v8su)__A ^ (__v8su)__B);
+}
+
 extern __inline __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_xor_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
@@ -4920,6 +4938,12 @@ _mm_maskz_xor_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
                                                 (__mmask8) __U);
 }
 
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_xor_epi32 (__m128i __A, __m128i __B)
+{
+  return (__m128i) ((__v4su)__A ^ (__v4su)__B);
+}
+
 extern __inline __m128
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A)
@@ -7340,6 +7364,12 @@ _mm256_maskz_or_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
                                                (__mmask8) __U);
 }
 
+extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_or_epi64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) ((__v4du)__A | (__v4du)__B);
+}
+
 extern __inline __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_or_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
@@ -7361,6 +7391,12 @@ _mm_maskz_or_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
                                                (__mmask8) __U);
 }
 
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_or_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) ((__v2du)__A | (__v2du)__B);
+}
+
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mask_xor_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
@@ -7383,6 +7419,12 @@ _mm256_maskz_xor_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
                                                 (__mmask8) __U);
 }
 
+extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_xor_epi64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) ((__v4du)__A ^ (__v4du)__B);
+}
+
 extern __inline __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_xor_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
@@ -7405,6 +7447,12 @@ _mm_maskz_xor_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
                                                 (__mmask8) __U);
 }
 
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_xor_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) ((__v2du)__A ^ (__v2du)__B);
+}
+
 extern __inline __m256d
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mask_max_pd (__m256d __W, __mmask8 __U, __m256d __A,
diff --git a/gcc/testsuite/gcc.target/i386/pr87662.c b/gcc/testsuite/gcc.target/i386/pr87662.c
new file mode 100644
index 00000000000..c9110a04440
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr87662.c
@@ -0,0 +1,76 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -mavx512vl" } */
+/* { dg-final { scan-assembler-times "vpord\[^\n\r\]*ymm16" 1 } } */
+/* { dg-final { scan-assembler-times "vpord\[^\n\r\]*xmm16" 1 } } */
+/* { dg-final { scan-assembler-times "vporq\[^\n\r\]*ymm16" 1 } } */
+/* { dg-final { scan-assembler-times "vporq\[^\n\r\]*xmm16" 1 } } */
+/* { dg-final { scan-assembler-times "vpxord\[^\n\r\]*ymm16" 1 } } */
+/* { dg-final { scan-assembler-times "vpxord\[^\n\r\]*xmm16" 1 } } */
+/* { dg-final { scan-assembler-times "vpxorq\[^\n\r\]*ymm16" 1 } } */
+/* { dg-final { scan-assembler-times "vpxorq\[^\n\r\]*xmm16" 1 } } */
+
+#include <immintrin.h>
+
+__m256i
+foo1 (__m256i x, __m256i y)
+{
+  register __m256i z __asm ("xmm16") = y;
+  asm volatile ("" : "+v" (z));
+  return _mm256_or_epi32 (x, z);
+}
+
+__m256i
+foo2 (__m256i x, __m256i y)
+{
+  register __m256i z __asm ("xmm16") = y;
+  asm volatile ("" : "+v" (z));
+  return _mm256_xor_epi32 (x, z);
+}
+
+__m128i
+foo3 (__m128i x, __m128i y)
+{
+  register __m128i z __asm ("xmm16") = y;
+  asm volatile ("" : "+v" (z));
+  return _mm_or_epi32 (x, z);
+}
+
+__m128i
+foo4 (__m128i x, __m128i y)
+{
+  register __m128i z __asm ("xmm16") = y;
+  asm volatile ("" : "+v" (z));
+  return _mm_xor_epi32 (x, z);
+}
+
+__m256i
+foo5 (__m256i x, __m256i y)
+{
+  register __m256i z __asm ("xmm16") = y;
+  asm volatile ("" : "+v" (z));
+  return _mm256_or_epi64 (x, z);
+}
+
+__m256i
+foo6 (__m256i x, __m256i y)
+{
+  register __m256i z __asm ("xmm16") = y;
+  asm volatile ("" : "+v" (z));
+  return _mm256_xor_epi64 (x, z);
+}
+
+__m128i
+foo7 (__m128i x, __m128i y)
+{
+  register __m128i z __asm ("xmm16") = y;
+  asm volatile ("" : "+v" (z));
+  return _mm_or_epi64 (x, z);
+}
+
+__m128i
+foo8 (__m128i x, __m128i y)
+{
+  register __m128i z __asm ("xmm16") = y;
+  asm volatile ("" : "+v" (z));
+  return _mm_xor_epi64 (x, z);
+}
-- 
2.17.2

Reply via email to