https://github.com/TianYe717 updated 
https://github.com/llvm/llvm-project/pull/162005

>From 28819c6782e4d6b351b617460b79e4436cc2644a Mon Sep 17 00:00:00 2001
From: Ye Tian <[email protected]>
Date: Sun, 2 Nov 2025 21:45:55 +0800
Subject: [PATCH 1/3] [Headers][X86] Improve PALIGNR helper: unify
 align/shuffle logic and add zero-fill support

---
 clang/include/clang/Basic/BuiltinsX86.td     |  8 +--
 clang/lib/AST/ByteCode/InterpBuiltin.cpp     | 58 ++++++++++++++++++++
 clang/lib/AST/ExprConstant.cpp               | 37 +++++++++++++
 clang/test/CodeGen/X86/avx2-builtins.c       |  2 +
 clang/test/CodeGen/X86/avx512bw-builtins.c   |  4 ++
 clang/test/CodeGen/X86/avx512vlbw-builtins.c |  4 ++
 clang/test/CodeGen/X86/mmx-builtins.c        |  2 +
 clang/test/CodeGen/X86/ssse3-builtins.c      |  2 +
 8 files changed, 113 insertions(+), 4 deletions(-)

diff --git a/clang/include/clang/Basic/BuiltinsX86.td 
b/clang/include/clang/Basic/BuiltinsX86.td
index 9e877b92eac68..45a26fd487ec0 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -315,7 +315,7 @@ let Features = "sse3", Attributes = [NoThrow, 
RequiredVectorWidth<128>] in {
   def lddqu : X86Builtin<"_Vector<16, char>(char const *)">;
 }
 
-let Features = "ssse3", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128>] in {
+let Features = "ssse3", Attributes = [NoThrow, Const, Constexpr, 
RequiredVectorWidth<128>] in {
   def palignr128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, 
_Vector<16, char>, _Constant int)">;
 }
 
@@ -609,8 +609,7 @@ let Features = "avx", Attributes = [NoThrow, Const, 
Constexpr, RequiredVectorWid
 
 let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] 
in {
   def mpsadbw256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, 
_Vector<32, char>, _Constant char)">;
-  def palignr256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, "
-                              "_Vector<32, char>, _Constant int)">;
+
   def psadbw256
       : X86Builtin<
             "_Vector<4, long long int>(_Vector<32, char>, _Vector<32, char>)">;
@@ -634,6 +633,7 @@ let Features = "avx2", Attributes = [NoThrow, Const, 
Constexpr, RequiredVectorWi
   def pmovmskb256 : X86Builtin<"int(_Vector<32, char>)">;
   def pavgb256 : X86Builtin<"_Vector<32, unsigned char>(_Vector<32, unsigned 
char>, _Vector<32, unsigned char>)">;
   def pavgw256 : X86Builtin<"_Vector<16, unsigned short>(_Vector<16, unsigned 
short>, _Vector<16, unsigned short>)">;
+  def palignr256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, 
_Vector<32, char>, _Constant int)">;
 
   def pblendd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, 
int>, _Constant int)">;
   def pblendd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, 
int>, _Constant int)">;
@@ -3294,7 +3294,7 @@ let Features = "avx512bw", Attributes = [NoThrow, Const] 
in {
   def kmovq : X86Builtin<"unsigned long long int(unsigned long long int)">;
 }
 
-let Features = "avx512bw", Attributes = [NoThrow, Const, 
RequiredVectorWidth<512>] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, 
RequiredVectorWidth<512>] in {
   def palignr512 : X86Builtin<"_Vector<64, char>(_Vector<64, char>, 
_Vector<64, char>, _Constant int)">;
 }
 
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp 
b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 8b57b963c538f..334b0dc288ef8 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3447,6 +3447,45 @@ static bool interp__builtin_ia32_shuffle_generic(
   return true;
 }
 
+
+static bool interp__builtin_x86_palignr(
+    InterpState &S, CodePtr OpPC, const CallExpr *Call,
+    llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned, unsigned)>
+        GetSourceIndex) {
+
+  assert(Call->getNumArgs() == 3);
+  unsigned Shift = popToAPSInt(S, Call->getArg(2)).getZExtValue() & 0xff;
+
+  QualType Arg0Type = Call->getArg(0)->getType();
+  const auto *VecT = Arg0Type->castAs<VectorType>();
+  PrimType ElemT = *S.getContext().classify(VecT->getElementType());
+  unsigned NumElems = VecT->getNumElements();
+
+  const Pointer &B = S.Stk.pop<Pointer>();
+  const Pointer &A = S.Stk.pop<Pointer>();
+  const Pointer &Dst = S.Stk.peek<Pointer>();
+
+  for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
+    auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, Shift, NumElems);
+
+    if (SrcIdx < 0) {
+      // Zero out this element
+      if (ElemT == PT_Float) {
+        Dst.elem<Floating>(DstIdx) = Floating(
+            S.getASTContext().getFloatTypeSemantics(VecT->getElementType()));
+      } else {
+        INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(DstIdx) = T::from(0); });
+      }
+    } else {
+      const Pointer &Src = (SrcVecIdx == 0) ? A : B;
+      TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
+    }
+  }
+  Dst.initializeAllElements();
+
+  return true;
+}
+
 bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
                       uint32_t BuiltinID) {
   if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
@@ -4636,6 +4675,25 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, 
const CallExpr *Call,
           return APInt(8, 0);
         });
 
+  case X86::BI__builtin_ia32_palignr128:
+  case X86::BI__builtin_ia32_palignr256:
+  case X86::BI__builtin_ia32_palignr512:
+    return interp__builtin_x86_palignr(S, OpPC, Call, [](unsigned DstIdx, unsigned Shift, unsigned NumElems) {
+      // PALIGNR concatenates the corresponding 128-bit lanes of the two
+      // sources (VecB low, VecA high) and shifts right by Shift bytes;
+      // it never crosses a lane boundary.
+      constexpr unsigned LaneElems = 16;
+      unsigned Lane = DstIdx / LaneElems;
+      unsigned Idx = (DstIdx % LaneElems) + Shift;
+
+      // Default to -1 → zero-fill this destination element
+      unsigned VecIdx = 0;
+      int ElemIdx = -1;
+      if (Idx < LaneElems) { // from VecB (low half of the concatenation)
+        VecIdx = 1;
+        ElemIdx = Lane * LaneElems + Idx;
+      } else if (Idx < 2 * LaneElems) { // from VecA (high half)
+        ElemIdx = Lane * LaneElems + Idx - LaneElems;
+      }
+      return std::pair<unsigned, int>{VecIdx, ElemIdx};
+    });
+
   default:
     S.FFDiag(S.Current->getLocation(OpPC),
              diag::note_invalid_subexpr_in_const_expr)
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 97eeba8b9d6cc..f90c6cf386f3b 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -13080,6 +13080,43 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr 
*E) {
 
     return Success(APValue(ResultElements.data(), ResultElements.size()), E);
   }
+
+
+  case X86::BI__builtin_ia32_palignr128:
+  case X86::BI__builtin_ia32_palignr256:
+  case X86::BI__builtin_ia32_palignr512: {
+    assert(E->getNumArgs() == 3);
+
+    APValue VecA, VecB;
+    APSInt Imm;
+    if (!EvaluateAsRValue(Info, E->getArg(0), VecA) ||
+        !EvaluateAsRValue(Info, E->getArg(1), VecB) ||
+        !EvaluateInteger(E->getArg(2), Imm, Info))
+      return false;
+
+    if (!VecA.isVector() || !VecB.isVector())
+      return false;
+
+    unsigned LenA = VecA.getVectorLength();
+    unsigned LenB = VecB.getVectorLength();
+    assert(LenA == LenB && (LenA % 16 == 0));
+
+    unsigned Shift = Imm.getZExtValue() & 0xff;
+    // PALIGNR operates on each 128-bit (16-byte) lane independently: the
+    // matching lanes of B (low) and A (high) are concatenated and shifted
+    // right by Shift bytes; bytes shifted in past the concatenation are zero.
+    constexpr unsigned LaneElems = 16;
+    QualType ElemTy = E->getType()->castAs<VectorType>()->getElementType();
+    SmallVector<APValue> ResultElements;
+    for (unsigned I = 0; I < LenA; ++I) {
+      unsigned Lane = I / LaneElems;
+      unsigned Idx = (I % LaneElems) + Shift;
+      if (Idx < LaneElems) {
+        ResultElements.push_back(VecB.getVectorElt(Lane * LaneElems + Idx));
+      } else if (Idx < 2 * LaneElems) {
+        ResultElements.push_back(
+            VecA.getVectorElt(Lane * LaneElems + Idx - LaneElems));
+      } else {
+        // Zero-fill using the element type's width and signedness so the
+        // APValue matches the non-zero elements.
+        APSInt Zero(Info.Ctx.getIntWidth(ElemTy),
+                    ElemTy->isUnsignedIntegerOrEnumerationType());
+        Zero = 0;
+        ResultElements.push_back(APValue(Zero));
+      }
+    }
+
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
   }
 }
 
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c 
b/clang/test/CodeGen/X86/avx2-builtins.c
index de4cb2fd0b055..e6820ee97015e 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -109,12 +109,14 @@ __m256i test_mm256_alignr_epi8(__m256i a, __m256i b) {
   // CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i32> 
<i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 
12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 18, i32 19, i32 20, i32 21, i32 
22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 
48, i32 49>
   return _mm256_alignr_epi8(a, b, 2);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_alignr_epi8(((__m256i)(__v32qs){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), ((__m256i)(__v32qs){33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}), 2), 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 1, 2, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 17, 18));
 
 __m256i test2_mm256_alignr_epi8(__m256i a, __m256i b) {
   // CHECK-LABEL: test2_mm256_alignr_epi8
   // CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> zeroinitializer, <32 x 
i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, 
i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, 
i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, 
i32 31, i32 48>
   return _mm256_alignr_epi8(a, b, 17);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_alignr_epi8(((__m256i)(__v32qs){1, 2, 3, 4, 
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 
26, 27, 28, 29, 30, 31, 32}), ((__m256i)(__v32qs){33, 34, 35, 36, 37, 38, 39, 
40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 
60, 61, 62, 63, 64}), 64), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
 
 __m256i test_mm256_and_si256(__m256i a, __m256i b) {
   // CHECK-LABEL: test_mm256_and_si256
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c 
b/clang/test/CodeGen/X86/avx512bw-builtins.c
index be2cd480f7558..c84e36bed120f 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -2692,6 +2692,8 @@ __m512i test_mm512_alignr_epi8(__m512i __A,__m512i __B){
     // CHECK: shufflevector <64 x i8> %{{.*}}, <64 x i8> %{{.*}}, <64 x i32> 
<i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 
12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 18, i32 19, i32 20, i32 21, i32 
22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 
80, i32 81, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 
42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 50, i32 51, i32 
52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 
62, i32 63, i32 112, i32 113>
     return _mm512_alignr_epi8(__A, __B, 2); 
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_alignr_epi8(((__m512i)(__v64qs){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}), ((__m512i)(__v64qs){65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 127}), 2), 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 1, 2, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 17, 18, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 33, 34, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 127, 49, 50));
+TEST_CONSTEXPR(match_v64qi(_mm512_alignr_epi8(((__m512i)(__v64qs){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}), ((__m512i)(__v64qs){65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 127}), 64), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
 
 __m512i test_mm512_mask_alignr_epi8(__m512i __W, __mmask64 __U, __m512i 
__A,__m512i __B){
     // CHECK-LABEL: test_mm512_mask_alignr_epi8
@@ -2699,6 +2701,7 @@ __m512i test_mm512_mask_alignr_epi8(__m512i __W, 
__mmask64 __U, __m512i __A,__m5
     // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
     return _mm512_mask_alignr_epi8(__W, __U, __A, __B, 2); 
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_mask_alignr_epi8(((__m512i)(__v64qs){127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127}), 
(__mmask64)0x000000000000000f, ((__m512i)(__v64qs){1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}), 
((__m512i)(__v64qs){65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 
100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 127}), 2), 67, 68, 
69, 70, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127));
 
 __m512i test_mm512_maskz_alignr_epi8(__mmask64 __U, __m512i __A,__m512i __B){
     // CHECK-LABEL: test_mm512_maskz_alignr_epi8
@@ -2706,6 +2709,7 @@ __m512i test_mm512_maskz_alignr_epi8(__mmask64 __U, 
__m512i __A,__m512i __B){
     // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
    return _mm512_maskz_alignr_epi8(__U, __A, __B, 2); 
 }
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_alignr_epi8((__mmask64)0x000000000000000f,
 ((__m512i)(__v64qs){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 
58, 59, 60, 61, 62, 63, 64}), ((__m512i)(__v64qs){65, 66, 67, 68, 69, 70, 71, 
72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 
109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 
125, 126, 127, 127}), 2), 67, 68, 69, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
 
 
 
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c 
b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index febef46458ae9..df85e9627a65f 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -3376,6 +3376,7 @@ __m128i test_mm_mask_alignr_epi8(__m128i __W, __mmask16 
__U, __m128i __A, __m128
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_alignr_epi8(__W, __U, __A, __B, 2); 
 }
+TEST_CONSTEXPR(match_v16qi(_mm_mask_alignr_epi8(((__m128i)(__v16qs){127, 127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127}), 
(__mmask16)0x000f, ((__m128i)(__v16qs){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16}), ((__m128i)(__v16qs){17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 
27, 28, 29, 30, 31, 32}), 2), 19, 20, 21, 22, 127, 127, 127, 127, 127, 127, 
127, 127, 127, 127, 127, 127));
 
 __m128i test_mm_maskz_alignr_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_alignr_epi8
@@ -3383,6 +3384,7 @@ __m128i test_mm_maskz_alignr_epi8(__mmask16 __U, __m128i 
__A, __m128i __B) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_alignr_epi8(__U, __A, __B, 2); 
 }
+TEST_CONSTEXPR(match_v16qi( _mm_maskz_alignr_epi8((__mmask16)0x000f, 
((__m128i)(__v16qs){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), 
((__m128i)(__v16qs){17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 
32}),2), 19, 20, 21, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
 
 __m256i test_mm256_mask_alignr_epi8(__m256i __W, __mmask32 __U, __m256i __A, 
__m256i __B) {
   // CHECK-LABEL: test_mm256_mask_alignr_epi8
@@ -3390,6 +3392,7 @@ __m256i test_mm256_mask_alignr_epi8(__m256i __W, 
__mmask32 __U, __m256i __A, __m
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_alignr_epi8(__W, __U, __A, __B, 2); 
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_alignr_epi8(((__m256i)(__v32qs){127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127}), 
(__mmask32)0x0000000f, ((__m256i)(__v32qs){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 
32}), ((__m256i)(__v32qs){33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}), 
2), 35, 36, 37, 38, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 
127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 
127));
 
 __m256i test_mm256_maskz_alignr_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_alignr_epi8
@@ -3397,6 +3400,7 @@ __m256i test_mm256_maskz_alignr_epi8(__mmask32 __U, 
__m256i __A, __m256i __B) {
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_alignr_epi8(__U, __A, __B, 2); 
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_alignr_epi8((__mmask32)0x0000000f, 
((__m256i)(__v32qs){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 
((__m256i)(__v32qs){33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}), 2), 35, 
36, 37, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0));
 
 __m128i test_mm_dbsad_epu8(__m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_dbsad_epu8
diff --git a/clang/test/CodeGen/X86/mmx-builtins.c 
b/clang/test/CodeGen/X86/mmx-builtins.c
index 273138063a1b1..494262e49724d 100644
--- a/clang/test/CodeGen/X86/mmx-builtins.c
+++ b/clang/test/CodeGen/X86/mmx-builtins.c
@@ -102,6 +102,8 @@ __m64 test_mm_alignr_pi8(__m64 a, __m64 b) {
   // CHECK: shufflevector <16 x i8> {{%.*}}, <16 x i8> zeroinitializer, <16 x 
i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, 
i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
   return _mm_alignr_pi8(a, b, 2);
 }
+TEST_CONSTEXPR(match_v8qi(_mm_alignr_pi8(((__m64)(__v8qs){1, 2, 3, 4, 5, 6, 7, 
8}), ((__m64)(__v8qs){9, 10, 11, 12, 13, 14, 15, 16}), 1), 10, 11, 12, 13, 14, 
15, 16, 1));
+TEST_CONSTEXPR(match_v8qi(_mm_alignr_pi8(((__m64)(__v8qs){1, 2, 3, 4, 5, 6, 7, 
8}), ((__m64)(__v8qs){9, 10, 11, 12, 13, 14, 15, 16}), 16), 0, 0, 0, 0, 0, 0, 
0, 0));
 
 __m64 test_mm_and_si64(__m64 a, __m64 b) {
   // CHECK-LABEL: test_mm_and_si64
diff --git a/clang/test/CodeGen/X86/ssse3-builtins.c 
b/clang/test/CodeGen/X86/ssse3-builtins.c
index b7a4a2fe7ccd7..193fa37f65d14 100644
--- a/clang/test/CodeGen/X86/ssse3-builtins.c
+++ b/clang/test/CodeGen/X86/ssse3-builtins.c
@@ -48,6 +48,8 @@ __m128i test_mm_alignr_epi8(__m128i a, __m128i b) {
   // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> 
<i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 
12, i32 13, i32 14, i32 15, i32 16, i32 17>
   return _mm_alignr_epi8(a, b, 2);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_alignr_epi8(((__m128i)(__v16qi){1, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), ((__m128i)(__v16qi){17, 18, 19, 20, 
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 2), 19, 20, 21, 22, 23, 24, 
25, 26, 27, 28, 29, 30, 31, 32, 1, 2));
+TEST_CONSTEXPR(match_v16qi(_mm_alignr_epi8(((__m128i)(__v16qi){1, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), ((__m128i)(__v16qi){17, 18, 19, 20, 
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 32), 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0));
 
 __m128i test2_mm_alignr_epi8(__m128i a, __m128i b) {
   // CHECK-LABEL: test2_mm_alignr_epi8

>From 4247c75e3d7ee3ac22f783107de7b9b514d483c7 Mon Sep 17 00:00:00 2001
From: Ye Tian <[email protected]>
Date: Mon, 3 Nov 2025 09:25:52 +0800
Subject: [PATCH 2/3] [NFC] Format code

---
 clang/lib/AST/ByteCode/InterpBuiltin.cpp | 32 ++++++++++++------------
 clang/lib/AST/ExprConstant.cpp           |  1 -
 2 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp 
b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 334b0dc288ef8..3bb85a9e2e3df 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3447,7 +3447,6 @@ static bool interp__builtin_ia32_shuffle_generic(
   return true;
 }
 
-
 static bool interp__builtin_x86_palignr(
     InterpState &S, CodePtr OpPC, const CallExpr *Call,
     llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned, unsigned)>
@@ -4678,21 +4677,22 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, 
const CallExpr *Call,
   case X86::BI__builtin_ia32_palignr128:
   case X86::BI__builtin_ia32_palignr256:
   case X86::BI__builtin_ia32_palignr512:
-    return interp__builtin_x86_palignr(S, OpPC, Call, [](unsigned DstIdx, 
unsigned Shift, unsigned NumElems) {
-      // Default to -1 → zero-fill this destination element
-      unsigned VecIdx = 0;
-      int ElemIdx = -1;
-
-      // Elements come from VecB first, then VecA after the shift boundary
-      unsigned ShiftedIdx = DstIdx + Shift;
-      if(ShiftedIdx < NumElems) {   // from VecB
-        VecIdx = 1;                 
-        ElemIdx = DstIdx + Shift;
-      }else if(ShiftedIdx < 2 * NumElems) {  // from VecA
-        ElemIdx = DstIdx + Shift - NumElems;
-      }
-      return std::pair<unsigned, int>{VecIdx,ElemIdx};
-    });
+    return interp__builtin_x86_palignr(
+        S, OpPC, Call, [](unsigned DstIdx, unsigned Shift, unsigned NumElems) {
+          // PALIGNR concatenates the corresponding 128-bit lanes of the two
+          // sources (VecB low, VecA high) and shifts right by Shift bytes;
+          // it never crosses a lane boundary.
+          constexpr unsigned LaneElems = 16;
+          unsigned Lane = DstIdx / LaneElems;
+          unsigned Idx = (DstIdx % LaneElems) + Shift;
+
+          // Default to -1 → zero-fill this destination element
+          unsigned VecIdx = 0;
+          int ElemIdx = -1;
+          if (Idx < LaneElems) { // from VecB (low half)
+            VecIdx = 1;
+            ElemIdx = Lane * LaneElems + Idx;
+          } else if (Idx < 2 * LaneElems) { // from VecA (high half)
+            ElemIdx = Lane * LaneElems + Idx - LaneElems;
+          }
+          return std::pair<unsigned, int>{VecIdx, ElemIdx};
+        });
 
   default:
     S.FFDiag(S.Current->getLocation(OpPC),
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index f90c6cf386f3b..6dc70e4b3b7a2 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -13081,7 +13081,6 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr 
*E) {
     return Success(APValue(ResultElements.data(), ResultElements.size()), E);
   }
 
-
   case X86::BI__builtin_ia32_palignr128:
   case X86::BI__builtin_ia32_palignr256:
   case X86::BI__builtin_ia32_palignr512: {

>From c70e2c7aaeae3407a09d123f559b4f072d855244 Mon Sep 17 00:00:00 2001
From: Ye Tian <[email protected]>
Date: Wed, 5 Nov 2025 17:13:35 +0800
Subject: [PATCH 3/3] [Headers][X86] Refactor and reuse
 interp__builtin_ia32_shuffle_generic for multiple widths

---
 clang/lib/AST/ByteCode/InterpBuiltin.cpp |  52 ++--------
 clang/lib/AST/ExprConstant.cpp           | 119 ++++++++++++-----------
 2 files changed, 70 insertions(+), 101 deletions(-)

diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp 
b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 3bb85a9e2e3df..95907d4269b40 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3410,50 +3410,12 @@ static bool interp__builtin_x86_byteshift(
 }
 
 static bool interp__builtin_ia32_shuffle_generic(
-    InterpState &S, CodePtr OpPC, const CallExpr *Call,
-    llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
-        GetSourceIndex) {
-
-  assert(Call->getNumArgs() == 3);
-  unsigned ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
-
-  QualType Arg0Type = Call->getArg(0)->getType();
-  const auto *VecT = Arg0Type->castAs<VectorType>();
-  PrimType ElemT = *S.getContext().classify(VecT->getElementType());
-  unsigned NumElems = VecT->getNumElements();
-
-  const Pointer &B = S.Stk.pop<Pointer>();
-  const Pointer &A = S.Stk.pop<Pointer>();
-  const Pointer &Dst = S.Stk.peek<Pointer>();
-
-  for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
-    auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
-
-    if (SrcIdx < 0) {
-      // Zero out this element
-      if (ElemT == PT_Float) {
-        Dst.elem<Floating>(DstIdx) = Floating(
-            S.getASTContext().getFloatTypeSemantics(VecT->getElementType()));
-      } else {
-        INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(DstIdx) = T::from(0); });
-      }
-    } else {
-      const Pointer &Src = (SrcVecIdx == 0) ? A : B;
-      TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
-    }
-  }
-  Dst.initializeAllElements();
-
-  return true;
-}
-
-static bool interp__builtin_x86_palignr(
     InterpState &S, CodePtr OpPC, const CallExpr *Call,
     llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned, unsigned)>
         GetSourceIndex) {
 
   assert(Call->getNumArgs() == 3);
-  unsigned Shift = popToAPSInt(S, Call->getArg(2)).getZExtValue() & 0xff;
+  unsigned ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
 
   QualType Arg0Type = Call->getArg(0)->getType();
   const auto *VecT = Arg0Type->castAs<VectorType>();
@@ -3465,7 +3427,7 @@ static bool interp__builtin_x86_palignr(
   const Pointer &Dst = S.Stk.peek<Pointer>();
 
   for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
-    auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, Shift, NumElems);
+    auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask, NumElems);
 
     if (SrcIdx < 0) {
       // Zero out this element
@@ -4419,7 +4381,8 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const 
CallExpr *Call,
   case X86::BI__builtin_ia32_shufps256:
   case X86::BI__builtin_ia32_shufps512:
     return interp__builtin_ia32_shuffle_generic(
-        S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+        S, OpPC, Call,
+        [](unsigned DstIdx, unsigned ShuffleMask, unsigned NumElems) {
           unsigned NumElemPerLane = 4;
           unsigned NumSelectableElems = NumElemPerLane / 2;
           unsigned BitsPerElem = 2;
@@ -4438,7 +4401,8 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const 
CallExpr *Call,
   case X86::BI__builtin_ia32_shufpd256:
   case X86::BI__builtin_ia32_shufpd512:
     return interp__builtin_ia32_shuffle_generic(
-        S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+        S, OpPC, Call,
+        [](unsigned DstIdx, unsigned ShuffleMask, unsigned NumElems) {
           unsigned NumElemPerLane = 2;
           unsigned NumSelectableElems = NumElemPerLane / 2;
           unsigned BitsPerElem = 1;
@@ -4455,7 +4419,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const 
CallExpr *Call,
         });
   case X86::BI__builtin_ia32_insertps128:
     return interp__builtin_ia32_shuffle_generic(
-        S, OpPC, Call, [](unsigned DstIdx, unsigned Mask) {
+        S, OpPC, Call, [](unsigned DstIdx, unsigned Mask, unsigned NumElems) {
           // Bits [3:0]: zero mask - if bit is set, zero this element
           if ((Mask & (1 << DstIdx)) != 0) {
             return std::pair<unsigned, int>{0, -1};
@@ -4677,7 +4641,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const 
CallExpr *Call,
   case X86::BI__builtin_ia32_palignr128:
   case X86::BI__builtin_ia32_palignr256:
   case X86::BI__builtin_ia32_palignr512:
-    return interp__builtin_x86_palignr(
+    return interp__builtin_ia32_shuffle_generic(
         S, OpPC, Call, [](unsigned DstIdx, unsigned Shift, unsigned NumElems) {
           // Default to -1 → zero-fill this destination element
           unsigned VecIdx = 0;
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 6dc70e4b3b7a2..9e36df2b5a05a 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11621,7 +11621,7 @@ static bool evalPackBuiltin(const CallExpr *E, EvalInfo 
&Info, APValue &Result,
 
 static bool evalShuffleGeneric(
     EvalInfo &Info, const CallExpr *Call, APValue &Out,
-    llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
+    llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned, unsigned)>
         GetSourceIndex) {
 
   const auto *VT = Call->getType()->getAs<VectorType>();
@@ -11643,13 +11643,25 @@ static bool evalShuffleGeneric(
   ResultElements.reserve(NumElts);
 
   for (unsigned DstIdx = 0; DstIdx != NumElts; ++DstIdx) {
-    auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
+    auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask, NumElts);
 
     if (SrcIdx < 0) {
       // Zero out this element
       QualType ElemTy = VT->getElementType();
-      ResultElements.push_back(
-          APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy))));
+      if (ElemTy->isRealFloatingType()) {
+        ResultElements.push_back(
+            APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy))));
+      } else if (ElemTy->isIntegerType()) {
+        unsigned BitWidth = Info.Ctx.getTypeSize(ElemTy);
+        bool IsUnsigned = ElemTy->isUnsignedIntegerType();
+        llvm::APSInt ZeroValue(BitWidth, IsUnsigned);
+        ZeroValue = 0;
+        ResultElements.push_back(APValue(ZeroValue));
+      } else {
+        // Fallback for other element types: push an empty APValue.
+        ResultElements.push_back(APValue());
+      }
+
     } else {
       const APValue &Src = (SrcVecIdx == 0) ? A : B;
       ResultElements.push_back(Src.getVectorElt(SrcIdx));
@@ -12445,8 +12457,8 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr 
*E) {
     APValue R;
     if (!evalShuffleGeneric(
             Info, E, R,
-            [](unsigned DstIdx,
-               unsigned ShuffleMask) -> std::pair<unsigned, int> {
+            [](unsigned DstIdx, unsigned ShuffleMask,
+               unsigned NumElems) -> std::pair<unsigned, int> {
               constexpr unsigned LaneBits = 128u;
               unsigned NumElemPerLane = LaneBits / 32;
               unsigned NumSelectableElems = NumElemPerLane / 2;
@@ -12470,8 +12482,8 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr 
*E) {
     APValue R;
     if (!evalShuffleGeneric(
             Info, E, R,
-            [](unsigned DstIdx,
-               unsigned ShuffleMask) -> std::pair<unsigned, int> {
+            [](unsigned DstIdx, unsigned ShuffleMask,
+               unsigned NumElems) -> std::pair<unsigned, int> {
               constexpr unsigned LaneBits = 128u;
               unsigned NumElemPerLane = LaneBits / 64;
               unsigned NumSelectableElems = NumElemPerLane / 2;
@@ -12491,25 +12503,28 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
   }
   case X86::BI__builtin_ia32_insertps128: {
     APValue R;
-    if (!evalShuffleGeneric(
-            Info, E, R,
-            [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
-              // Bits [3:0]: zero mask - if bit is set, zero this element
-              if ((Mask & (1 << DstIdx)) != 0) {
-                return {0, -1};
-              }
-              // Bits [7:6]: select element from source vector Y (0-3)
-              // Bits [5:4]: select destination position (0-3)
-              unsigned SrcElem = (Mask >> 6) & 0x3;
-              unsigned DstElem = (Mask >> 4) & 0x3;
-              if (DstIdx == DstElem) {
-                // Insert element from source vector (B) at this position
-                return {1, static_cast<int>(SrcElem)};
-              } else {
-                // Copy from destination vector (A)
-                return {0, static_cast<int>(DstIdx)};
-              }
-            }))
+    if (!evalShuffleGeneric(Info, E, R,
+                            [](unsigned DstIdx, unsigned Mask,
+                               unsigned NumElems) -> std::pair<unsigned, int> {
+                              // Bits [3:0]: zero mask - if bit is set, zero
+                              // this element
+                              if ((Mask & (1 << DstIdx)) != 0) {
+                                return {0, -1};
+                              }
+                              // Bits [7:6]: select element from source vector Y
+                              // (0-3) Bits [5:4]: select destination position
+                              // (0-3)
+                              unsigned SrcElem = (Mask >> 6) & 0x3;
+                              unsigned DstElem = (Mask >> 4) & 0x3;
+                              if (DstIdx == DstElem) {
+                                // Insert element from source vector (B) at this
+                                // position
+                                return {1, static_cast<int>(SrcElem)};
+                              } else {
+                                // Copy from destination vector (A)
+                                return {0, static_cast<int>(DstIdx)};
+                              }
+                            }))
       return false;
     return Success(R, E);
   }
@@ -13084,37 +13099,27 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
   case X86::BI__builtin_ia32_palignr128:
   case X86::BI__builtin_ia32_palignr256:
   case X86::BI__builtin_ia32_palignr512: {
-    assert(E->getNumArgs() == 3);
-
-    APValue VecA, VecB;
-    APSInt Imm;
-    if (!EvaluateAsRValue(Info, E->getArg(0), VecA) ||
-        !EvaluateAsRValue(Info, E->getArg(1), VecB) ||
-        !EvaluateInteger(E->getArg(2), Imm, Info))
-      return false;
-
-    if (!VecA.isVector() || !VecB.isVector())
+    APValue R;
+    if (!evalShuffleGeneric(Info, E, R,
+                            [](unsigned DstIdx, unsigned Shift,
+                               unsigned NumElems) -> std::pair<unsigned, int> {
+                              unsigned SrcVecIdx = 0;
+                              int ElemIdx = -1;
+
+                              // Elements come from VecB first, then VecA after
+                              // the shift boundary
+                              unsigned ShiftedIdx = DstIdx + Shift;
+                              if (ShiftedIdx < NumElems) { // from VecB
+                                SrcVecIdx = 1;
+                                ElemIdx = ShiftedIdx;
+                              } else if (ShiftedIdx <
+                                         2 * NumElems) { // from VecA
+                                ElemIdx = ShiftedIdx - NumElems;
+                              }
+                              return {SrcVecIdx, ElemIdx};
+                            }))
       return false;
-
-    unsigned LenA = VecA.getVectorLength();
-    unsigned LenB = VecB.getVectorLength();
-    assert(LenA == LenB && (LenA % 16 == 0));
-
-    unsigned Shift = Imm.getZExtValue() & 0xff;
-    SmallVector<APValue> ResultElements;
-    for (unsigned I = 0; I < LenA; ++I) {
-      if (I + Shift < LenA) {
-        ResultElements.push_back(VecB.getVectorElt(I + Shift));
-      } else if (I + Shift < LenA + LenB) {
-        ResultElements.push_back(VecA.getVectorElt(I + Shift - LenA));
-      } else {
-        APSInt Zero(/*BitWidth=*/8, /*isUnsigned=*/true);
-        Zero = 0;
-        ResultElements.push_back(APValue(Zero));
-      }
-    }
-
-    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+    return Success(R, E);
   }
   }
 }

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to