https://github.com/ahmednoursphinx updated 
https://github.com/llvm/llvm-project/pull/168757

>From 2f02de39803ff7ebde3e52ac60dabbb3d062515c Mon Sep 17 00:00:00 2001
From: ahmed <[email protected]>
Date: Wed, 19 Nov 2025 12:46:29 +0200
Subject: [PATCH 1/8] feat: Add support for kunpack builtins

---
 clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp    | 52 ++++++++++-
 .../CIR/CodeGen/X86/avx512-kunpck-builtins.c  | 92 +++++++++++++++++++
 2 files changed, 141 insertions(+), 3 deletions(-)
 create mode 100644 clang/test/CIR/CodeGen/X86/avx512-kunpck-builtins.c

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
index ee6900141647f..7220e76f6c7d7 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -33,6 +33,29 @@ static mlir::Value emitIntrinsicCallOp(CIRGenFunction &cgf, 
const CallExpr *e,
       .getResult();
 }
 
+static mlir::Value getMaskVecValue(CIRGenBuilderTy &builder,
+                                   mlir::Value mask, unsigned numElems) {
+  auto maskIntType = mlir::cast<cir::IntType>(mask.getType());
+  unsigned maskWidth = maskIntType.getWidth();
+
+  // Create a vector of bool type with maskWidth elements
+  auto maskVecTy = cir::VectorType::get(
+      builder.getContext(), cir::BoolType::get(builder.getContext()),
+      maskWidth);
+  mlir::Value maskVec = builder.createBitcast(mask, maskVecTy);
+
+  // If we have less than 8 elements, then the starting mask was an i8 and
+  // we need to extract down to the right number of elements.
+  if (numElems < 8) {
+    llvm::SmallVector<int64_t, 4> indices;
+    for (unsigned i = 0; i != numElems; ++i)
+      indices.push_back(i);
+    maskVec =
+        builder.createVecShuffle(mask.getLoc(), maskVec, indices);
+  }
+  return maskVec;
+}
+
 // OG has unordered comparison as a form of optimization in addition to
 // ordered comparison, while CIR doesn't.
 //
@@ -169,6 +192,32 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned 
builtinID,
   case X86::BI__builtin_ia32_vec_set_v16hi:
   case X86::BI__builtin_ia32_vec_set_v8si:
   case X86::BI__builtin_ia32_vec_set_v4di:
+
+  case X86::BI__builtin_ia32_kunpckdi:
+  case X86::BI__builtin_ia32_kunpcksi:
+  case X86::BI__builtin_ia32_kunpckhi: {
+    auto maskIntType = mlir::cast<cir::IntType>(ops[0].getType());
+    unsigned numElems = maskIntType.getWidth();
+    mlir::Value lhs = getMaskVecValue(builder, ops[0], numElems);
+    mlir::Value rhs = getMaskVecValue(builder, ops[1], numElems);
+    llvm::SmallVector<int64_t, 64> indices;
+    for (unsigned i = 0; i != numElems; ++i)
+      indices.push_back(i);
+
+    // First extract half of each vector. This gives better codegen than
+    // doing it in a single shuffle.
+    mlir::Location loc = getLoc(expr->getExprLoc());
+    lhs = builder.createVecShuffle(loc, lhs,
+                                   llvm::ArrayRef(indices.data(), numElems / 
2));
+    rhs = builder.createVecShuffle(loc, rhs,
+                                   llvm::ArrayRef(indices.data(), numElems / 
2));
+    // Concat the vectors.
+    // NOTE: Operands are swapped to match the intrinsic definition.
+    mlir::Value res = builder.createVecShuffle(
+        loc, rhs, lhs, llvm::ArrayRef(indices.data(), numElems));
+    return builder.createBitcast(res, ops[0].getType());
+  }
+
   case X86::BI_mm_setcsr:
   case X86::BI__builtin_ia32_ldmxcsr:
   case X86::BI_mm_getcsr:
@@ -675,9 +724,6 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned 
builtinID,
   case X86::BI__builtin_ia32_kmovw:
   case X86::BI__builtin_ia32_kmovd:
   case X86::BI__builtin_ia32_kmovq:
-  case X86::BI__builtin_ia32_kunpckdi:
-  case X86::BI__builtin_ia32_kunpcksi:
-  case X86::BI__builtin_ia32_kunpckhi:
   case X86::BI__builtin_ia32_sqrtsh_round_mask:
   case X86::BI__builtin_ia32_sqrtsd_round_mask:
   case X86::BI__builtin_ia32_sqrtss_round_mask:
diff --git a/clang/test/CIR/CodeGen/X86/avx512-kunpck-builtins.c 
b/clang/test/CIR/CodeGen/X86/avx512-kunpck-builtins.c
new file mode 100644
index 0000000000000..197f2570769a5
--- /dev/null
+++ b/clang/test/CIR/CodeGen/X86/avx512-kunpck-builtins.c
@@ -0,0 +1,92 @@
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s 
-triple=x86_64-unknown-linux -target-feature +avx512f -target-feature +avx512bw 
-fclangir -emit-cir -o %t.cir -Wall -Werror
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s 
-triple=x86_64-unknown-linux -target-feature +avx512f -target-feature +avx512bw 
-fclangir -emit-llvm -o %t.ll -Wall -Werror
+// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s
+
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s 
-triple=x86_64-unknown-linux -target-feature +avx512f -target-feature +avx512bw 
-emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG
+
+// This test exercises the kunpck (mask unpack) builtins for AVX-512.
+
+#include <immintrin.h>
+
+__mmask16 test_mm512_kunpackb(__mmask16 __A, __mmask16 __B) {
+  // CIR-LABEL: test_mm512_kunpackb
+  // LLVM-LABEL: test_mm512_kunpackb
+  // OGCG-LABEL: test_mm512_kunpackb
+  return _mm512_kunpackb(__A, __B);
+  // CIR: [[MASK_A:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 16>), 
!cir.vector<!cir.bool x 16>
+  // CIR: [[EXTRACT_A:%.*]] = cir.vec.shuffle([[MASK_A]], {{.*}}) [#cir.int<0> 
: !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, 
#cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<6> : !s32i, #cir.int<7> : 
!s32i]
+  // CIR: [[MASK_B:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 16>), 
!cir.vector<!cir.bool x 16>
+  // CIR: [[EXTRACT_B:%.*]] = cir.vec.shuffle([[MASK_B]], {{.*}}) [#cir.int<0> 
: !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, 
#cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<6> : !s32i, #cir.int<7> : 
!s32i]
+  // CIR: [[CONCAT:%.*]] = cir.vec.shuffle([[EXTRACT_B]], [[EXTRACT_A]]) 
[#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : 
!s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<6> : !s32i, 
#cir.int<7> : !s32i, #cir.int<8> : !s32i, #cir.int<9> : !s32i, #cir.int<10> : 
!s32i, #cir.int<11> : !s32i, #cir.int<12> : !s32i, #cir.int<13> : !s32i, 
#cir.int<14> : !s32i, #cir.int<15> : !s32i]
+  // CIR: {{%.*}} = cir.cast(bitcast, [[CONCAT]] : !cir.vector<!cir.bool x 
16>), !cir.int<u, 16>
+
+  // LLVM: [[A_BITCAST:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+  // LLVM: [[A_SHUFFLE:%.*]] = shufflevector <16 x i1> [[A_BITCAST]], <16 x 
i1> [[A_BITCAST]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7>
+  // LLVM: [[B_BITCAST:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+  // LLVM: [[B_SHUFFLE:%.*]] = shufflevector <16 x i1> [[B_BITCAST]], <16 x 
i1> [[B_BITCAST]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7>
+  // LLVM: [[CONCAT:%.*]] = shufflevector <8 x i1> [[B_SHUFFLE]], <8 x i1> 
[[A_SHUFFLE]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  // LLVM: bitcast <16 x i1> [[CONCAT]] to i16
+
+  // OGCG: [[A_BITCAST:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+  // OGCG: [[A_SHUFFLE:%.*]] = shufflevector <16 x i1> [[A_BITCAST]], <16 x 
i1> [[A_BITCAST]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7>
+  // OGCG: [[B_BITCAST:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+  // OGCG: [[B_SHUFFLE:%.*]] = shufflevector <16 x i1> [[B_BITCAST]], <16 x 
i1> [[B_BITCAST]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7>
+  // OGCG: [[CONCAT:%.*]] = shufflevector <8 x i1> [[B_SHUFFLE]], <8 x i1> 
[[A_SHUFFLE]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  // OGCG: bitcast <16 x i1> [[CONCAT]] to i16
+}
+
+__mmask32 test_mm512_kunpackw(__mmask32 __A, __mmask32 __B) {
+  // CIR-LABEL: test_mm512_kunpackw
+  // LLVM-LABEL: test_mm512_kunpackw
+  // OGCG-LABEL: test_mm512_kunpackw
+  return _mm512_kunpackw(__A, __B);
+  // CIR: [[MASK_A:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 32>), 
!cir.vector<!cir.bool x 32>
+  // CIR: [[EXTRACT_A:%.*]] = cir.vec.shuffle([[MASK_A]], {{.*}})
+  // CIR: [[MASK_B:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 32>), 
!cir.vector<!cir.bool x 32>
+  // CIR: [[EXTRACT_B:%.*]] = cir.vec.shuffle([[MASK_B]], {{.*}})
+  // CIR: [[CONCAT:%.*]] = cir.vec.shuffle([[EXTRACT_B]], [[EXTRACT_A]])
+  // CIR: {{%.*}} = cir.cast(bitcast, [[CONCAT]] : !cir.vector<!cir.bool x 
32>), !cir.int<u, 32>
+
+  // LLVM: [[A_BITCAST:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+  // LLVM: [[A_SHUFFLE:%.*]] = shufflevector <32 x i1> [[A_BITCAST]], <32 x 
i1> [[A_BITCAST]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  // LLVM: [[B_BITCAST:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+  // LLVM: [[B_SHUFFLE:%.*]] = shufflevector <32 x i1> [[B_BITCAST]], <32 x 
i1> [[B_BITCAST]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  // LLVM: [[CONCAT:%.*]] = shufflevector <16 x i1> [[B_SHUFFLE]], <16 x i1> 
[[A_SHUFFLE]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 
17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 
27, i32 28, i32 29, i32 30, i32 31>
+  // LLVM: bitcast <32 x i1> [[CONCAT]] to i32
+
+  // OGCG: [[A_BITCAST:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+  // OGCG: [[A_SHUFFLE:%.*]] = shufflevector <32 x i1> [[A_BITCAST]], <32 x 
i1> [[A_BITCAST]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  // OGCG: [[B_BITCAST:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+  // OGCG: [[B_SHUFFLE:%.*]] = shufflevector <32 x i1> [[B_BITCAST]], <32 x 
i1> [[B_BITCAST]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  // OGCG: [[CONCAT:%.*]] = shufflevector <16 x i1> [[B_SHUFFLE]], <16 x i1> 
[[A_SHUFFLE]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 
17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 
27, i32 28, i32 29, i32 30, i32 31>
+  // OGCG: bitcast <32 x i1> [[CONCAT]] to i32
+}
+
+__mmask64 test_mm512_kunpackd(__mmask64 __A, __mmask64 __B) {
+  // CIR-LABEL: test_mm512_kunpackd
+  // LLVM-LABEL: test_mm512_kunpackd
+  // OGCG-LABEL: test_mm512_kunpackd
+  return _mm512_kunpackd(__A, __B);
+  // CIR: [[MASK_A:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 64>), 
!cir.vector<!cir.bool x 64>
+  // CIR: [[EXTRACT_A:%.*]] = cir.vec.shuffle([[MASK_A]], {{.*}})
+  // CIR: [[MASK_B:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 64>), 
!cir.vector<!cir.bool x 64>
+  // CIR: [[EXTRACT_B:%.*]] = cir.vec.shuffle([[MASK_B]], {{.*}})
+  // CIR: [[CONCAT:%.*]] = cir.vec.shuffle([[EXTRACT_B]], [[EXTRACT_A]])
+  // CIR: {{%.*}} = cir.cast(bitcast, [[CONCAT]] : !cir.vector<!cir.bool x 
64>), !cir.int<u, 64>
+
+  // LLVM: [[A_BITCAST:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+  // LLVM: [[A_SHUFFLE:%.*]] = shufflevector <64 x i1> [[A_BITCAST]], <64 x 
i1> [[A_BITCAST]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, 
i32 27, i32 28, i32 29, i32 30, i32 31>
+  // LLVM: [[B_BITCAST:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+  // LLVM: [[B_SHUFFLE:%.*]] = shufflevector <64 x i1> [[B_BITCAST]], <64 x 
i1> [[B_BITCAST]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, 
i32 27, i32 28, i32 29, i32 30, i32 31>
+  // LLVM: [[CONCAT:%.*]] = shufflevector <32 x i1> [[B_SHUFFLE]], <32 x i1> 
[[A_SHUFFLE]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 
17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 
27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 
37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 
47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 
57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  // LLVM: bitcast <64 x i1> [[CONCAT]] to i64
+
+  // OGCG: [[A_BITCAST:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+  // OGCG: [[A_SHUFFLE:%.*]] = shufflevector <64 x i1> [[A_BITCAST]], <64 x 
i1> [[A_BITCAST]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, 
i32 27, i32 28, i32 29, i32 30, i32 31>
+  // OGCG: [[B_BITCAST:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+  // OGCG: [[B_SHUFFLE:%.*]] = shufflevector <64 x i1> [[B_BITCAST]], <64 x 
i1> [[B_BITCAST]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, 
i32 27, i32 28, i32 29, i32 30, i32 31>
+  // OGCG: [[CONCAT:%.*]] = shufflevector <32 x i1> [[B_SHUFFLE]], <32 x i1> 
[[A_SHUFFLE]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 
17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 
27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 
37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 
47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 
57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  // OGCG: bitcast <64 x i1> [[CONCAT]] to i64
+}
+

>From ad1525eb8257977cc738302e638c5fafc0608df9 Mon Sep 17 00:00:00 2001
From: ahmed <[email protected]>
Date: Wed, 19 Nov 2025 20:52:34 +0200
Subject: [PATCH 2/8] chore: format files

---
 clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
index 7220e76f6c7d7..23310d26a1336 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -33,15 +33,15 @@ static mlir::Value emitIntrinsicCallOp(CIRGenFunction &cgf, 
const CallExpr *e,
       .getResult();
 }
 
-static mlir::Value getMaskVecValue(CIRGenBuilderTy &builder,
-                                   mlir::Value mask, unsigned numElems) {
+static mlir::Value getMaskVecValue(CIRGenBuilderTy &builder, mlir::Value mask,
+                                   unsigned numElems) {
   auto maskIntType = mlir::cast<cir::IntType>(mask.getType());
   unsigned maskWidth = maskIntType.getWidth();
 
   // Create a vector of bool type with maskWidth elements
-  auto maskVecTy = cir::VectorType::get(
-      builder.getContext(), cir::BoolType::get(builder.getContext()),
-      maskWidth);
+  auto maskVecTy =
+      cir::VectorType::get(builder.getContext(),
+                           cir::BoolType::get(builder.getContext()), 
maskWidth);
   mlir::Value maskVec = builder.createBitcast(mask, maskVecTy);
 
   // If we have less than 8 elements, then the starting mask was an i8 and
@@ -50,8 +50,7 @@ static mlir::Value getMaskVecValue(CIRGenBuilderTy &builder,
     llvm::SmallVector<int64_t, 4> indices;
     for (unsigned i = 0; i != numElems; ++i)
       indices.push_back(i);
-    maskVec =
-        builder.createVecShuffle(mask.getLoc(), maskVec, indices);
+    maskVec = builder.createVecShuffle(mask.getLoc(), maskVec, indices);
   }
   return maskVec;
 }
@@ -207,10 +206,10 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned 
builtinID,
     // First extract half of each vector. This gives better codegen than
     // doing it in a single shuffle.
     mlir::Location loc = getLoc(expr->getExprLoc());
-    lhs = builder.createVecShuffle(loc, lhs,
-                                   llvm::ArrayRef(indices.data(), numElems / 
2));
-    rhs = builder.createVecShuffle(loc, rhs,
-                                   llvm::ArrayRef(indices.data(), numElems / 
2));
+    lhs = builder.createVecShuffle(
+        loc, lhs, llvm::ArrayRef(indices.data(), numElems / 2));
+    rhs = builder.createVecShuffle(
+        loc, rhs, llvm::ArrayRef(indices.data(), numElems / 2));
     // Concat the vectors.
     // NOTE: Operands are swapped to match the intrinsic definition.
     mlir::Value res = builder.createVecShuffle(

>From 2b75e0727faa701f0625fef5c35caec12ae16df2 Mon Sep 17 00:00:00 2001
From: ahmed <[email protected]>
Date: Wed, 19 Nov 2025 20:58:14 +0200
Subject: [PATCH 3/8] refactor: rename local variable to spell out full type suffix

---
 clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
index 23310d26a1336..cb3d50bd7a4ad 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -39,10 +39,10 @@ static mlir::Value getMaskVecValue(CIRGenBuilderTy 
&builder, mlir::Value mask,
   unsigned maskWidth = maskIntType.getWidth();
 
   // Create a vector of bool type with maskWidth elements
-  auto maskVecTy =
+  auto maskVecType =
       cir::VectorType::get(builder.getContext(),
                            cir::BoolType::get(builder.getContext()), 
maskWidth);
-  mlir::Value maskVec = builder.createBitcast(mask, maskVecTy);
+  mlir::Value maskVec = builder.createBitcast(mask, maskVecType);
 
   // If we have less than 8 elements, then the starting mask was an i8 and
   // we need to extract down to the right number of elements.

>From 052c48e3b49fba467d854f6c9c402bfe3d98a2b8 Mon Sep 17 00:00:00 2001
From: ahmed <[email protected]>
Date: Thu, 20 Nov 2025 10:40:08 +0200
Subject: [PATCH 4/8] refactor: move logic to helper function and modify
 function name

---
 clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 55 ++++++++++++----------
 1 file changed, 31 insertions(+), 24 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
index cb3d50bd7a4ad..b6370c4e172eb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -33,8 +33,8 @@ static mlir::Value emitIntrinsicCallOp(CIRGenFunction &cgf, 
const CallExpr *e,
       .getResult();
 }
 
-static mlir::Value getMaskVecValue(CIRGenBuilderTy &builder, mlir::Value mask,
-                                   unsigned numElems) {
+static mlir::Value convertMaskToVector(CIRGenBuilderTy &builder,
+                                       mlir::Value mask, unsigned numElems) {
   auto maskIntType = mlir::cast<cir::IntType>(mask.getType());
   unsigned maskWidth = maskIntType.getWidth();
 
@@ -55,6 +55,29 @@ static mlir::Value getMaskVecValue(CIRGenBuilderTy &builder, 
mlir::Value mask,
   return maskVec;
 }
 
+static mlir::Value emitKunpckOp(CIRGenBuilderTy &builder, mlir::Value op0,
+                                mlir::Value op1, mlir::Location loc) {
+  auto maskIntType = mlir::cast<cir::IntType>(op0.getType());
+  unsigned numElems = maskIntType.getWidth();
+  mlir::Value lhs = convertMaskToVector(builder, op0, numElems);
+  mlir::Value rhs = convertMaskToVector(builder, op1, numElems);
+  llvm::SmallVector<int64_t, 64> indices;
+  for (unsigned i = 0; i != numElems; ++i)
+    indices.push_back(i);
+
+  // First extract half of each vector. This gives better codegen than
+  // doing it in a single shuffle.
+  lhs = builder.createVecShuffle(loc, lhs,
+                                 llvm::ArrayRef(indices.data(), numElems / 2));
+  rhs = builder.createVecShuffle(loc, rhs,
+                                 llvm::ArrayRef(indices.data(), numElems / 2));
+  // Concat the vectors.
+  // NOTE: Operands are swapped to match the intrinsic definition.
+  mlir::Value res = builder.createVecShuffle(
+      loc, rhs, lhs, llvm::ArrayRef(indices.data(), numElems));
+  return builder.createBitcast(res, op0.getType());
+}
+
 // OG has unordered comparison as a form of optimization in addition to
 // ordered comparison, while CIR doesn't.
 //
@@ -191,31 +214,15 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned 
builtinID,
   case X86::BI__builtin_ia32_vec_set_v16hi:
   case X86::BI__builtin_ia32_vec_set_v8si:
   case X86::BI__builtin_ia32_vec_set_v4di:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented X86 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return {};
 
   case X86::BI__builtin_ia32_kunpckdi:
   case X86::BI__builtin_ia32_kunpcksi:
-  case X86::BI__builtin_ia32_kunpckhi: {
-    auto maskIntType = mlir::cast<cir::IntType>(ops[0].getType());
-    unsigned numElems = maskIntType.getWidth();
-    mlir::Value lhs = getMaskVecValue(builder, ops[0], numElems);
-    mlir::Value rhs = getMaskVecValue(builder, ops[1], numElems);
-    llvm::SmallVector<int64_t, 64> indices;
-    for (unsigned i = 0; i != numElems; ++i)
-      indices.push_back(i);
-
-    // First extract half of each vector. This gives better codegen than
-    // doing it in a single shuffle.
-    mlir::Location loc = getLoc(expr->getExprLoc());
-    lhs = builder.createVecShuffle(
-        loc, lhs, llvm::ArrayRef(indices.data(), numElems / 2));
-    rhs = builder.createVecShuffle(
-        loc, rhs, llvm::ArrayRef(indices.data(), numElems / 2));
-    // Concat the vectors.
-    // NOTE: Operands are swapped to match the intrinsic definition.
-    mlir::Value res = builder.createVecShuffle(
-        loc, rhs, lhs, llvm::ArrayRef(indices.data(), numElems));
-    return builder.createBitcast(res, ops[0].getType());
-  }
+  case X86::BI__builtin_ia32_kunpckhi:
+    return emitKunpckOp(builder, ops[0], ops[1], getLoc(expr->getExprLoc()));
 
   case X86::BI_mm_setcsr:
   case X86::BI__builtin_ia32_ldmxcsr:

>From dab064526ad64d2ad0fba3ce0acf950d2f79529f Mon Sep 17 00:00:00 2001
From: ahmed <[email protected]>
Date: Thu, 20 Nov 2025 10:50:46 +0200
Subject: [PATCH 5/8] refactor: rearrange funcs

---
 clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 81 +++++++++++-----------
 1 file changed, 39 insertions(+), 42 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
index b6370c4e172eb..4defe0b08f563 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -33,12 +33,44 @@ static mlir::Value emitIntrinsicCallOp(CIRGenFunction &cgf, 
const CallExpr *e,
       .getResult();
 }
 
-static mlir::Value convertMaskToVector(CIRGenBuilderTy &builder,
-                                       mlir::Value mask, unsigned numElems) {
-  auto maskIntType = mlir::cast<cir::IntType>(mask.getType());
-  unsigned maskWidth = maskIntType.getWidth();
+// OG has unordered comparison as a form of optimization in addition to
+//
+// This means that we can't encode the comparison code of UGT (unordered
+// greater than), at least not at the CIR level.
+//
+// The boolean shouldInvert compensates for this.
+// For example: to get to the comparison code UGT, we pass in
+// emitVectorFCmp (OLE, shouldInvert = true) since OLE is the inverse of UGT.
+
+// There are several ways to support this otherwise:
+// - register extra CmpOpKind for unordered comparison types and build the
+// translation code for
+//    to go from CIR -> LLVM dialect. Notice we get this naturally with
+//    shouldInvert, benefiting from existing infrastructure, albeit having to
+//    generate an extra `not` at CIR).
+// - Just add extra comparison code to a new VecCmpOpKind instead of
+// cluttering CmpOpKind.
+// - Add a boolean in VecCmpOp to indicate if it's doing unordered or ordered
+// comparison
+// - Just emit the intrinsics call instead of calling this helper, see how the
+// LLVM lowering handles this.
+static mlir::Value emitVectorFCmp(CIRGenBuilderTy &builder,
+                                  llvm::SmallVector<mlir::Value> &ops,
+                                  mlir::Location loc, cir::CmpOpKind pred,
+                                  bool shouldInvert) {
+  assert(!cir::MissingFeatures::cgFPOptionsRAII());
+  // TODO(cir): Add isSignaling boolean once emitConstrainedFPCall implemented
+  assert(!cir::MissingFeatures::emitConstrainedFPCall());
+  mlir::Value cmp = builder.createVecCompare(loc, pred, ops[0], ops[1]);
+  mlir::Value bitCast = builder.createBitcast(
+      shouldInvert ? builder.createNot(cmp) : cmp, ops[0].getType());
+  return bitCast;
+}
 
-  // Create a vector of bool type with maskWidth elements
+static mlir::Value convertKunpckMaskToVector(CIRGenBuilderTy &builder,
+                                             mlir::Value mask,
+                                             unsigned numElems) {
+  unsigned maskWidth = mlir::cast<cir::IntType>(mask.getType()).getWidth();
   auto maskVecType =
       cir::VectorType::get(builder.getContext(),
                            cir::BoolType::get(builder.getContext()), 
maskWidth);
@@ -59,8 +91,8 @@ static mlir::Value emitKunpckOp(CIRGenBuilderTy &builder, 
mlir::Value op0,
                                 mlir::Value op1, mlir::Location loc) {
   auto maskIntType = mlir::cast<cir::IntType>(op0.getType());
   unsigned numElems = maskIntType.getWidth();
-  mlir::Value lhs = convertMaskToVector(builder, op0, numElems);
-  mlir::Value rhs = convertMaskToVector(builder, op1, numElems);
+  mlir::Value lhs = convertKunpckMaskToVector(builder, op0, numElems);
+  mlir::Value rhs = convertKunpckMaskToVector(builder, op1, numElems);
   llvm::SmallVector<int64_t, 64> indices;
   for (unsigned i = 0; i != numElems; ++i)
     indices.push_back(i);
@@ -78,41 +110,6 @@ static mlir::Value emitKunpckOp(CIRGenBuilderTy &builder, 
mlir::Value op0,
   return builder.createBitcast(res, op0.getType());
 }
 
-// OG has unordered comparison as a form of optimization in addition to
-// ordered comparison, while CIR doesn't.
-//
-// This means that we can't encode the comparison code of UGT (unordered
-// greater than), at least not at the CIR level.
-//
-// The boolean shouldInvert compensates for this.
-// For example: to get to the comparison code UGT, we pass in
-// emitVectorFCmp (OLE, shouldInvert = true) since OLE is the inverse of UGT.
-
-// There are several ways to support this otherwise:
-// - register extra CmpOpKind for unordered comparison types and build the
-// translation code for
-//    to go from CIR -> LLVM dialect. Notice we get this naturally with
-//    shouldInvert, benefiting from existing infrastructure, albeit having to
-//    generate an extra `not` at CIR).
-// - Just add extra comparison code to a new VecCmpOpKind instead of
-// cluttering CmpOpKind.
-// - Add a boolean in VecCmpOp to indicate if it's doing unordered or ordered
-// comparison
-// - Just emit the intrinsics call instead of calling this helper, see how the
-// LLVM lowering handles this.
-static mlir::Value emitVectorFCmp(CIRGenBuilderTy &builder,
-                                  llvm::SmallVector<mlir::Value> &ops,
-                                  mlir::Location loc, cir::CmpOpKind pred,
-                                  bool shouldInvert) {
-  assert(!cir::MissingFeatures::cgFPOptionsRAII());
-  // TODO(cir): Add isSignaling boolean once emitConstrainedFPCall implemented
-  assert(!cir::MissingFeatures::emitConstrainedFPCall());
-  mlir::Value cmp = builder.createVecCompare(loc, pred, ops[0], ops[1]);
-  mlir::Value bitCast = builder.createBitcast(
-      shouldInvert ? builder.createNot(cmp) : cmp, ops[0].getType());
-  return bitCast;
-}
-
 mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
                                                const CallExpr *expr) {
   if (builtinID == Builtin::BI__builtin_cpu_is) {

>From faab7d26f152fd519726e3e94383a48d7eb23eb9 Mon Sep 17 00:00:00 2001
From: ahmed <[email protected]>
Date: Thu, 20 Nov 2025 11:07:26 +0200
Subject: [PATCH 6/8] refactor: build shuffle indices as attributes to avoid redundant conversion

---
 clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
index 4defe0b08f563..192b405b47446 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -34,6 +34,7 @@ static mlir::Value emitIntrinsicCallOp(CIRGenFunction &cgf, 
const CallExpr *e,
 }
 
 // OG has unordered comparison as a form of optimization in addition to
+// ordered comparison, while CIR doesn't.
 //
 // This means that we can't encode the comparison code of UGT (unordered
 // greater than), at least not at the CIR level.
@@ -79,9 +80,10 @@ static mlir::Value convertKunpckMaskToVector(CIRGenBuilderTy 
&builder,
   // If we have less than 8 elements, then the starting mask was an i8 and
   // we need to extract down to the right number of elements.
   if (numElems < 8) {
-    llvm::SmallVector<int64_t, 4> indices;
-    for (unsigned i = 0; i != numElems; ++i)
-      indices.push_back(i);
+    llvm::SmallVector<mlir::Attribute, 4> indices;
+    mlir::Type i32Ty = builder.getSInt32Ty();
+    for (auto i : llvm::seq<unsigned>(0, numElems))
+      indices.push_back(cir::IntAttr::get(i32Ty, i));
     maskVec = builder.createVecShuffle(mask.getLoc(), maskVec, indices);
   }
   return maskVec;
@@ -93,9 +95,12 @@ static mlir::Value emitKunpckOp(CIRGenBuilderTy &builder, 
mlir::Value op0,
   unsigned numElems = maskIntType.getWidth();
   mlir::Value lhs = convertKunpckMaskToVector(builder, op0, numElems);
   mlir::Value rhs = convertKunpckMaskToVector(builder, op1, numElems);
-  llvm::SmallVector<int64_t, 64> indices;
-  for (unsigned i = 0; i != numElems; ++i)
-    indices.push_back(i);
+  
+  // Build shuffle indices as attributes to avoid redundant conversion.
+  llvm::SmallVector<mlir::Attribute, 64> indices;
+  mlir::Type i32Ty = builder.getSInt32Ty();
+  for (auto i : llvm::seq<unsigned>(0, numElems))
+    indices.push_back(cir::IntAttr::get(i32Ty, i));
 
   // First extract half of each vector. This gives better codegen than
   // doing it in a single shuffle.
@@ -105,8 +110,7 @@ static mlir::Value emitKunpckOp(CIRGenBuilderTy &builder, 
mlir::Value op0,
                                  llvm::ArrayRef(indices.data(), numElems / 2));
   // Concat the vectors.
   // NOTE: Operands are swapped to match the intrinsic definition.
-  mlir::Value res = builder.createVecShuffle(
-      loc, rhs, lhs, llvm::ArrayRef(indices.data(), numElems));
+  mlir::Value res = builder.createVecShuffle(loc, rhs, lhs, indices);
   return builder.createBitcast(res, op0.getType());
 }
 

>From bb801bca64c556a9a37402440f338584018922ec Mon Sep 17 00:00:00 2001
From: ahmed <[email protected]>
Date: Thu, 20 Nov 2025 11:12:50 +0200
Subject: [PATCH 7/8] refactor: move test functions

---
 .../CIR/CodeGen/X86/avx512-kunpck-builtins.c  | 92 -------------------
 clang/test/CodeGen/X86/avx512bw-builtins.c    | 22 +++++
 clang/test/CodeGen/X86/avx512f-builtins.c     | 11 +++
 3 files changed, 33 insertions(+), 92 deletions(-)
 delete mode 100644 clang/test/CIR/CodeGen/X86/avx512-kunpck-builtins.c

diff --git a/clang/test/CIR/CodeGen/X86/avx512-kunpck-builtins.c 
b/clang/test/CIR/CodeGen/X86/avx512-kunpck-builtins.c
deleted file mode 100644
index 197f2570769a5..0000000000000
--- a/clang/test/CIR/CodeGen/X86/avx512-kunpck-builtins.c
+++ /dev/null
@@ -1,92 +0,0 @@
-// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s 
-triple=x86_64-unknown-linux -target-feature +avx512f -target-feature +avx512bw 
-fclangir -emit-cir -o %t.cir -Wall -Werror
-// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
-// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s 
-triple=x86_64-unknown-linux -target-feature +avx512f -target-feature +avx512bw 
-fclangir -emit-llvm -o %t.ll -Wall -Werror
-// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s
-
-// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s 
-triple=x86_64-unknown-linux -target-feature +avx512f -target-feature +avx512bw 
-emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG
-
-// This test exercises the kunpck (mask unpack) builtins for AVX-512.
-
-#include <immintrin.h>
-
-__mmask16 test_mm512_kunpackb(__mmask16 __A, __mmask16 __B) {
-  // CIR-LABEL: test_mm512_kunpackb
-  // LLVM-LABEL: test_mm512_kunpackb
-  // OGCG-LABEL: test_mm512_kunpackb
-  return _mm512_kunpackb(__A, __B);
-  // CIR: [[MASK_A:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 16>), 
!cir.vector<!cir.bool x 16>
-  // CIR: [[EXTRACT_A:%.*]] = cir.vec.shuffle([[MASK_A]], {{.*}}) [#cir.int<0> 
: !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, 
#cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<6> : !s32i, #cir.int<7> : 
!s32i]
-  // CIR: [[MASK_B:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 16>), 
!cir.vector<!cir.bool x 16>
-  // CIR: [[EXTRACT_B:%.*]] = cir.vec.shuffle([[MASK_B]], {{.*}}) [#cir.int<0> 
: !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, 
#cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<6> : !s32i, #cir.int<7> : 
!s32i]
-  // CIR: [[CONCAT:%.*]] = cir.vec.shuffle([[EXTRACT_B]], [[EXTRACT_A]]) 
[#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : 
!s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<6> : !s32i, 
#cir.int<7> : !s32i, #cir.int<8> : !s32i, #cir.int<9> : !s32i, #cir.int<10> : 
!s32i, #cir.int<11> : !s32i, #cir.int<12> : !s32i, #cir.int<13> : !s32i, 
#cir.int<14> : !s32i, #cir.int<15> : !s32i]
-  // CIR: {{%.*}} = cir.cast(bitcast, [[CONCAT]] : !cir.vector<!cir.bool x 
16>), !cir.int<u, 16>
-
-  // LLVM: [[A_BITCAST:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
-  // LLVM: [[A_SHUFFLE:%.*]] = shufflevector <16 x i1> [[A_BITCAST]], <16 x 
i1> [[A_BITCAST]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7>
-  // LLVM: [[B_BITCAST:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
-  // LLVM: [[B_SHUFFLE:%.*]] = shufflevector <16 x i1> [[B_BITCAST]], <16 x 
i1> [[B_BITCAST]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7>
-  // LLVM: [[CONCAT:%.*]] = shufflevector <8 x i1> [[B_SHUFFLE]], <8 x i1> 
[[A_SHUFFLE]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  // LLVM: bitcast <16 x i1> [[CONCAT]] to i16
-
-  // OGCG: [[A_BITCAST:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
-  // OGCG: [[A_SHUFFLE:%.*]] = shufflevector <16 x i1> [[A_BITCAST]], <16 x 
i1> [[A_BITCAST]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7>
-  // OGCG: [[B_BITCAST:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
-  // OGCG: [[B_SHUFFLE:%.*]] = shufflevector <16 x i1> [[B_BITCAST]], <16 x 
i1> [[B_BITCAST]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7>
-  // OGCG: [[CONCAT:%.*]] = shufflevector <8 x i1> [[B_SHUFFLE]], <8 x i1> 
[[A_SHUFFLE]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  // OGCG: bitcast <16 x i1> [[CONCAT]] to i16
-}
-
-__mmask32 test_mm512_kunpackw(__mmask32 __A, __mmask32 __B) {
-  // CIR-LABEL: test_mm512_kunpackw
-  // LLVM-LABEL: test_mm512_kunpackw
-  // OGCG-LABEL: test_mm512_kunpackw
-  return _mm512_kunpackw(__A, __B);
-  // CIR: [[MASK_A:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 32>), 
!cir.vector<!cir.bool x 32>
-  // CIR: [[EXTRACT_A:%.*]] = cir.vec.shuffle([[MASK_A]], {{.*}})
-  // CIR: [[MASK_B:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 32>), 
!cir.vector<!cir.bool x 32>
-  // CIR: [[EXTRACT_B:%.*]] = cir.vec.shuffle([[MASK_B]], {{.*}})
-  // CIR: [[CONCAT:%.*]] = cir.vec.shuffle([[EXTRACT_B]], [[EXTRACT_A]])
-  // CIR: {{%.*}} = cir.cast(bitcast, [[CONCAT]] : !cir.vector<!cir.bool x 
32>), !cir.int<u, 32>
-
-  // LLVM: [[A_BITCAST:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
-  // LLVM: [[A_SHUFFLE:%.*]] = shufflevector <32 x i1> [[A_BITCAST]], <32 x 
i1> [[A_BITCAST]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  // LLVM: [[B_BITCAST:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
-  // LLVM: [[B_SHUFFLE:%.*]] = shufflevector <32 x i1> [[B_BITCAST]], <32 x 
i1> [[B_BITCAST]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  // LLVM: [[CONCAT:%.*]] = shufflevector <16 x i1> [[B_SHUFFLE]], <16 x i1> 
[[A_SHUFFLE]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 
17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 
27, i32 28, i32 29, i32 30, i32 31>
-  // LLVM: bitcast <32 x i1> [[CONCAT]] to i32
-
-  // OGCG: [[A_BITCAST:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
-  // OGCG: [[A_SHUFFLE:%.*]] = shufflevector <32 x i1> [[A_BITCAST]], <32 x 
i1> [[A_BITCAST]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  // OGCG: [[B_BITCAST:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
-  // OGCG: [[B_SHUFFLE:%.*]] = shufflevector <32 x i1> [[B_BITCAST]], <32 x 
i1> [[B_BITCAST]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  // OGCG: [[CONCAT:%.*]] = shufflevector <16 x i1> [[B_SHUFFLE]], <16 x i1> 
[[A_SHUFFLE]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 
17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 
27, i32 28, i32 29, i32 30, i32 31>
-  // OGCG: bitcast <32 x i1> [[CONCAT]] to i32
-}
-
-__mmask64 test_mm512_kunpackd(__mmask64 __A, __mmask64 __B) {
-  // CIR-LABEL: test_mm512_kunpackd
-  // LLVM-LABEL: test_mm512_kunpackd
-  // OGCG-LABEL: test_mm512_kunpackd
-  return _mm512_kunpackd(__A, __B);
-  // CIR: [[MASK_A:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 64>), 
!cir.vector<!cir.bool x 64>
-  // CIR: [[EXTRACT_A:%.*]] = cir.vec.shuffle([[MASK_A]], {{.*}})
-  // CIR: [[MASK_B:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.int<u, 64>), 
!cir.vector<!cir.bool x 64>
-  // CIR: [[EXTRACT_B:%.*]] = cir.vec.shuffle([[MASK_B]], {{.*}})
-  // CIR: [[CONCAT:%.*]] = cir.vec.shuffle([[EXTRACT_B]], [[EXTRACT_A]])
-  // CIR: {{%.*}} = cir.cast(bitcast, [[CONCAT]] : !cir.vector<!cir.bool x 
64>), !cir.int<u, 64>
-
-  // LLVM: [[A_BITCAST:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
-  // LLVM: [[A_SHUFFLE:%.*]] = shufflevector <64 x i1> [[A_BITCAST]], <64 x 
i1> [[A_BITCAST]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, 
i32 27, i32 28, i32 29, i32 30, i32 31>
-  // LLVM: [[B_BITCAST:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
-  // LLVM: [[B_SHUFFLE:%.*]] = shufflevector <64 x i1> [[B_BITCAST]], <64 x 
i1> [[B_BITCAST]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, 
i32 27, i32 28, i32 29, i32 30, i32 31>
-  // LLVM: [[CONCAT:%.*]] = shufflevector <32 x i1> [[B_SHUFFLE]], <32 x i1> 
[[A_SHUFFLE]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 
17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 
27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 
37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 
47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 
57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-  // LLVM: bitcast <64 x i1> [[CONCAT]] to i64
-
-  // OGCG: [[A_BITCAST:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
-  // OGCG: [[A_SHUFFLE:%.*]] = shufflevector <64 x i1> [[A_BITCAST]], <64 x 
i1> [[A_BITCAST]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, 
i32 27, i32 28, i32 29, i32 30, i32 31>
-  // OGCG: [[B_BITCAST:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
-  // OGCG: [[B_SHUFFLE:%.*]] = shufflevector <64 x i1> [[B_BITCAST]], <64 x 
i1> [[B_BITCAST]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, 
i32 27, i32 28, i32 29, i32 30, i32 31>
-  // OGCG: [[CONCAT:%.*]] = shufflevector <32 x i1> [[B_SHUFFLE]], <32 x i1> 
[[A_SHUFFLE]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 
17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 
27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 
37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 
47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 
57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-  // OGCG: bitcast <64 x i1> [[CONCAT]] to i64
-}
-
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c 
b/clang/test/CodeGen/X86/avx512bw-builtins.c
index 140a2c0dcbb56..c3a1197d6adf7 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -3152,3 +3152,25 @@ void test_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, 
__mmask32 __M, __m512i
  // CHECK: @llvm.x86.avx512.mask.pmovus.wb.mem.512
  _mm512_mask_cvtusepi16_storeu_epi8 ( __P, __M, __A);
 }
+
+__mmask32 test_mm512_kunpackw(__mmask32 a, __mmask32 b) {
+  // CHECK-LABEL: test_mm512_kunpackw
+  // CHECK: [[A_BITCAST:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+  // CHECK: [[A_SHUFFLE:%.*]] = shufflevector <32 x i1> [[A_BITCAST]], <32 x 
i1> [[A_BITCAST]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  // CHECK: [[B_BITCAST:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+  // CHECK: [[B_SHUFFLE:%.*]] = shufflevector <32 x i1> [[B_BITCAST]], <32 x 
i1> [[B_BITCAST]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  // CHECK: [[CONCAT:%.*]] = shufflevector <16 x i1> [[B_SHUFFLE]], <16 x i1> 
[[A_SHUFFLE]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 
17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 
27, i32 28, i32 29, i32 30, i32 31>
+  // CHECK: bitcast <32 x i1> [[CONCAT]] to i32
+  return _mm512_kunpackw(a, b);
+}
+
+__mmask64 test_mm512_kunpackd(__mmask64 a, __mmask64 b) {
+  // CHECK-LABEL: test_mm512_kunpackd
+  // CHECK: [[A_BITCAST:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+  // CHECK: [[A_SHUFFLE:%.*]] = shufflevector <64 x i1> [[A_BITCAST]], <64 x 
i1> [[A_BITCAST]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, 
i32 27, i32 28, i32 29, i32 30, i32 31>
+  // CHECK: [[B_BITCAST:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+  // CHECK: [[B_SHUFFLE:%.*]] = shufflevector <64 x i1> [[B_BITCAST]], <64 x 
i1> [[B_BITCAST]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, 
i32 27, i32 28, i32 29, i32 30, i32 31>
+  // CHECK: [[CONCAT:%.*]] = shufflevector <32 x i1> [[B_SHUFFLE]], <32 x i1> 
[[A_SHUFFLE]], <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 
17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 
27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 
37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 
47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 
57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  // CHECK: bitcast <64 x i1> [[CONCAT]] to i64
+  return _mm512_kunpackd(a, b);
+}
diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c 
b/clang/test/CodeGen/X86/avx512f-builtins.c
index eb25aa538e9a3..9cea44be13c73 100644
--- a/clang/test/CodeGen/X86/avx512f-builtins.c
+++ b/clang/test/CodeGen/X86/avx512f-builtins.c
@@ -12027,3 +12027,14 @@ TEST_CONSTEXPR(match_v8di(
                                     (__m512i)(__v8di){0, 15, 16, 23, 1, 17, 2, 
18},
                                     (__m512i)(__v8di){101, 102, 103, 104, 105, 
106, 107, 108}),
     -1, 108, -3, -8, -5, -2, -7, -3));
+
+__mmask16 test_mm512_kunpackb(__mmask16 a, __mmask16 b) {
+  // CHECK-LABEL: test_mm512_kunpackb
+  // CHECK: [[A_BITCAST:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+  // CHECK: [[A_SHUFFLE:%.*]] = shufflevector <16 x i1> [[A_BITCAST]], <16 x 
i1> [[A_BITCAST]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7>
+  // CHECK: [[B_BITCAST:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+  // CHECK: [[B_SHUFFLE:%.*]] = shufflevector <16 x i1> [[B_BITCAST]], <16 x 
i1> [[B_BITCAST]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, 
i32 7>
+  // CHECK: [[CONCAT:%.*]] = shufflevector <8 x i1> [[B_SHUFFLE]], <8 x i1> 
[[A_SHUFFLE]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 
7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  // CHECK: bitcast <16 x i1> [[CONCAT]] to i16
+  return _mm512_kunpackb(a, b);
+}

>From 212c5dfc88380231c6ba1a8f1561fa3708255922 Mon Sep 17 00:00:00 2001
From: ahmed <[email protected]>
Date: Thu, 20 Nov 2025 11:13:06 +0200
Subject: [PATCH 8/8] chore: Format files

---
 clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
index 192b405b47446..8b6b2a3fa7bed 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -95,7 +95,7 @@ static mlir::Value emitKunpckOp(CIRGenBuilderTy &builder, 
mlir::Value op0,
   unsigned numElems = maskIntType.getWidth();
   mlir::Value lhs = convertKunpckMaskToVector(builder, op0, numElems);
   mlir::Value rhs = convertKunpckMaskToVector(builder, op1, numElems);
-  
+
   // Build shuffle indices as attributes to avoid redundant conversion.
   llvm::SmallVector<mlir::Attribute, 64> indices;
   mlir::Type i32Ty = builder.getSInt32Ty();

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to