[llvm-branch-commits] [llvm] [LoongArch][DAGCombiner] Combine vxor (vand ..) to vandn (PR #161037)
https://github.com/ylzsx updated
https://github.com/llvm/llvm-project/pull/161037
>From 1c1ee6e018de04d6590bb4e7a6769b01da5582a8 Mon Sep 17 00:00:00 2001
From: yangzhaoxin
Date: Thu, 25 Sep 2025 16:42:24 +0800
Subject: [PATCH 1/3] [LoongArch][DAGCombiner] Combine xor (and ..) to vandn
After this commit, DAGCombiner has more opportunities to perform
vector folding. This patch adds the following folds:
- VANDN(x, NOT(y)) -> AND(NOT(x), NOT(y)) -> NOT(OR(x, y))
- VANDN(x, SplatVector(Imm)) -> AND(NOT(x), NOT(SplatVector(~Imm)))
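As a quick sanity check on these folds, the following standalone snippet (my
illustration, not part of the patch) exhaustively verifies the underlying bit
algebra on 8-bit lanes, modeling the [x]vandn.v semantics as
vandn(x, y) = ~x & y:

  // Checks the two identities the combines rely on:
  //   and(not(a), not(b)) == not(or(a, b))
  //   and(not(a), b)      == vandn(a, b)
  #include <cassert>
  #include <cstdint>

  static uint8_t vandn(uint8_t X, uint8_t Y) {
    return static_cast<uint8_t>(~X & Y); // models [x]vandn.v per lane
  }

  int main() {
    for (unsigned A = 0; A < 256; ++A)
      for (unsigned B = 0; B < 256; ++B) {
        assert(((~A & ~B) & 0xff) == (~(A | B) & 0xff));
        assert(((~A & B) & 0xff) == vandn(static_cast<uint8_t>(A),
                                          static_cast<uint8_t>(B)));
      }
    return 0;
  }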
---
.../LoongArch/LoongArchISelLowering.cpp | 155 ++
.../Target/LoongArch/LoongArchISelLowering.h | 3 +
.../LoongArch/LoongArchLASXInstrInfo.td | 26 +--
.../Target/LoongArch/LoongArchLSXInstrInfo.td | 27 +--
4 files changed, 185 insertions(+), 26 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 94f53d5b85f10..30d4bac25da78 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -4939,6 +4939,96 @@ void LoongArchTargetLowering::ReplaceNodeResults(
}
}
+// Check if all elements in build_vector are the same or undef, and if so,
+// return true and set the splat element in SplatValue.
+static bool isSplatOrUndef(SDNode *N, SDValue &SplatValue) {
+  if (N->getOpcode() != ISD::BUILD_VECTOR)
+    return false;
+  for (SDValue Op : N->ops()) {
+    if (!Op.isUndef() && SplatValue && Op != SplatValue)
+      return false;
+    if (!Op.isUndef())
+      SplatValue = Op;
+  }
+  return true;
+}
+
+// Helper to attempt to return a cheaper, bit-inverted version of \p V.
+static SDValue isNOT(SDValue V, SelectionDAG &DAG) {
+  // TODO: don't always ignore oneuse constraints.
+  V = peekThroughBitcasts(V);
+  EVT VT = V.getValueType();
+
+  // Match not(xor X, -1) -> X.
+  if (V.getOpcode() == ISD::XOR &&
+      (ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()) ||
+       isAllOnesConstant(V.getOperand(1))))
+    return V.getOperand(0);
+
+  // Match not(extract_subvector(not(X))) -> extract_subvector(X).
+  if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+      (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
+    if (SDValue Not = isNOT(V.getOperand(0), DAG)) {
+      Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
+      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), VT, Not,
+                         V.getOperand(1));
+    }
+  }
+
+  // Match not(SplatVector(not(X))) -> SplatVector(X).
+  SDValue SplatValue;
+  if (isSplatOrUndef(V.getNode(), SplatValue) &&
+      V->isOnlyUserOf(SplatValue.getNode())) {
+    if (SDValue Not = isNOT(SplatValue, DAG)) {
+      Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
+      return DAG.getSplat(VT, SDLoc(Not), Not);
+    }
+  }
+
+  // Match not(or(not(X), not(Y))) -> and(X, Y).
+  if (V.getOpcode() == ISD::OR &&
+      DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
+      V.getOperand(0).hasOneUse() && V.getOperand(1).hasOneUse()) {
+    // TODO: Handle cases with single NOT operand -> VANDN
+    if (SDValue Op1 = isNOT(V.getOperand(1), DAG))
+      if (SDValue Op0 = isNOT(V.getOperand(0), DAG))
+        return DAG.getNode(ISD::AND, SDLoc(V), VT, DAG.getBitcast(VT, Op0),
+                           DAG.getBitcast(VT, Op1));
+  }
+
+  // TODO: Add more matching patterns, such as:
+  //   not(concat_vectors(not(X), not(Y))) -> concat_vectors(X, Y)
+  //   not(slt(C, X)) -> slt(X - 1, C)
+
+  return SDValue();
+}
+
+/// Try to fold: (and (xor X, -1), Y) -> (vandn X, Y).
+static SDValue combineAndNotIntoVANDN(SDNode *N, const SDLoc &DL,
+                                      SelectionDAG &DAG) {
+  assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDN");
+
+  MVT VT = N->getSimpleValueType(0);
+  if (!VT.is128BitVector() && !VT.is256BitVector())
+    return SDValue();
+
+  SDValue X, Y;
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+
+  if (SDValue Not = isNOT(N0, DAG)) {
+    X = Not;
+    Y = N1;
+  } else if (SDValue Not = isNOT(N1, DAG)) {
+    X = Not;
+    Y = N0;
+  } else
+    return SDValue();
+
+  X = DAG.getBitcast(VT, X);
+  Y = DAG.getBitcast(VT, Y);
+  return DAG.getNode(LoongArchISD::VANDN, DL, VT, X, Y);
+}
+
 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const LoongArchSubtarget &Subtarget) {
@@ -4960,6 +5050,9 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
   if (!Subtarget.has32S())
     return SDValue();
+  if (SDValue R = combineAndNotIntoVANDN(N, DL, DAG))
+    return R;
+
   // Op's second operand must be a shifted mask.
   if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)) ||
       !isShiftedMask_64(CN->getZExtValue(), SMIdx, SMLen))
@@ -6628,6 +6721,65 @@ performEXTRACT_VECTOR_ELTCombine(SDNode *N, Select
[llvm-branch-commits] [llvm] [DirectX] Introduce `dx.Padding` type (PR #160957)
https://github.com/bogner updated
https://github.com/llvm/llvm-project/pull/160957
>From d8512f795eb6034050044ebbab3a7dfa44b133dc Mon Sep 17 00:00:00 2001
From: Justin Bogner
Date: Thu, 7 Aug 2025 15:02:49 -0700
Subject: [PATCH 1/2] [DirectX] Introduce `dx.Padding` type
This introduces the `dx.Padding` type as an alternative to the
`dx.Layout` types that are currently used for cbuffers. Later we'll
remove the `dx.Layout` types completely, but making the backend handle
either one makes it easier to stage the necessary changes to get there.
See #147352 for details.
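For concreteness, here is a minimal sketch (mine, not from the patch) of how
padding elements can be created and queried through LLVM's generic
TargetExtType API; getPaddingType and getPadBytes are hypothetical helper
names:

  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"
  using namespace llvm;

  // Build target("dx.Padding", PadBytes): no type parameters, one integer
  // parameter holding the byte count.
  static Type *getPaddingType(LLVMContext &Ctx, unsigned PadBytes) {
    return TargetExtType::get(Ctx, "dx.Padding", /*Types=*/{},
                              /*Ints=*/{PadBytes});
  }

  // Recover the byte count from an element type, or 0 if it isn't padding.
  static unsigned getPadBytes(Type *Ty) {
    if (auto *TET = dyn_cast<TargetExtType>(Ty))
      if (TET->getName() == "dx.Padding")
        return TET->getIntParameter(0);
    return 0;
  }

So a type like `<{ float, target("dx.Padding", 4), double }>` spells out the
gap between elements directly, instead of encoding offsets in a separate
`dx.Layout` wrapper.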
---
llvm/include/llvm/Analysis/DXILResource.h | 19 +
 llvm/lib/Analysis/DXILResource.cpp | 47 ++-
llvm/lib/IR/Type.cpp | 4 +
.../DXILResource/buffer-frombinding.ll| 4 +-
.../DirectX/CBufferLoadLegacy-errors.ll | 12 +--
.../test/CodeGen/DirectX/CBufferLoadLegacy.ll | 12 +--
.../ContainerData/PSVResources-order.ll | 2 +-
.../DirectX/ContainerData/PSVResources.ll | 2 +-
.../DirectX/CreateHandleFromBinding.ll| 2 +-
.../ForwardHandleAccesses/cbuffer-access.ll | 20 ++---
...ffer_metadata.ll => cbuffer-layouttype.ll} | 3 +
.../DirectX/Metadata/cbuffer-metadata.ll | 84 +++
.../CodeGen/DirectX/Metadata/cbuffer-only.ll | 2 +-
llvm/unittests/Analysis/DXILResourceTest.cpp | 4 +-
14 files changed, 182 insertions(+), 35 deletions(-)
 rename llvm/test/CodeGen/DirectX/Metadata/{cbuffer_metadata.ll => cbuffer-layouttype.ll} (96%)
create mode 100644 llvm/test/CodeGen/DirectX/Metadata/cbuffer-metadata.ll
diff --git a/llvm/include/llvm/Analysis/DXILResource.h b/llvm/include/llvm/Analysis/DXILResource.h
index 88ac0a11fe5a2..c7aff167324e6 100644
--- a/llvm/include/llvm/Analysis/DXILResource.h
+++ b/llvm/include/llvm/Analysis/DXILResource.h
@@ -243,6 +243,25 @@ class LayoutExtType : public TargetExtType {
}
};
+/// The dx.Padding target extension type
+///
+/// `target("dx.Padding", NumBytes)`
+class PaddingExtType : public TargetExtType {
+public:
+  PaddingExtType() = delete;
+  PaddingExtType(const PaddingExtType &) = delete;
+  PaddingExtType &operator=(const PaddingExtType &) = delete;
+
+  unsigned getNumBytes() const { return getIntParameter(0); }
+
+  static bool classof(const TargetExtType *T) {
+    return T->getName() == "dx.Padding";
+  }
+  static bool classof(const Type *T) {
+    return isa<TargetExtType>(T) && classof(cast<TargetExtType>(T));
+  }
+};
+
//===----------------------------------------------------------------------===//
class ResourceTypeInfo {
diff --git a/llvm/lib/Analysis/DXILResource.cpp b/llvm/lib/Analysis/DXILResource.cpp
index b78cc03e34dbc..f9bf09262dd1f 100644
--- a/llvm/lib/Analysis/DXILResource.cpp
+++ b/llvm/lib/Analysis/DXILResource.cpp
@@ -281,6 +281,38 @@ static StructType *getOrCreateElementStruct(Type *ElemType, StringRef Name) {
   return StructType::create(ElemType, Name);
 }
+static Type *getTypeWithoutPadding(Type *Ty) {
+  // Recursively remove padding from structures.
+  if (auto *ST = dyn_cast<StructType>(Ty)) {
+    LLVMContext &Ctx = Ty->getContext();
+    SmallVector<Type *> ElementTypes;
+    ElementTypes.reserve(ST->getNumElements());
+    for (Type *ElTy : ST->elements()) {
+      if (isa<PaddingExtType>(ElTy))
+        continue;
+      ElementTypes.push_back(getTypeWithoutPadding(ElTy));
+    }
+
+    // Handle explicitly padded cbuffer arrays like { [ n x paddedty ], ty }
+    if (ElementTypes.size() == 2)
+      if (auto *AT = dyn_cast<ArrayType>(ElementTypes[0]))
+        if (ElementTypes[1] == AT->getElementType())
+          return ArrayType::get(ElementTypes[1], AT->getNumElements() + 1);
+
+    // If we only have a single element, don't wrap it in a struct.
+    if (ElementTypes.size() == 1)
+      return ElementTypes[0];
+
+    return StructType::get(Ctx, ElementTypes, /*IsPacked=*/false);
+  }
+  // Arrays just need to have their element type adjusted.
+  if (auto *AT = dyn_cast<ArrayType>(Ty))
+    return ArrayType::get(getTypeWithoutPadding(AT->getElementType()),
+                          AT->getNumElements());
+  // Anything else should be good as is.
+  return Ty;
+}
+
StructType *ResourceTypeInfo::createElementStruct(StringRef CBufferName) {
SmallString<64> TypeName;
@@ -334,14 +366,21 @@ StructType *ResourceTypeInfo::createElementStruct(StringRef CBufferName) {
}
   case ResourceKind::CBuffer: {
     auto *RTy = cast<CBufferExtType>(HandleTy);
-    LayoutExtType *LayoutType = cast<LayoutExtType>(RTy->getResourceType());
-    StructType *Ty = cast<StructType>(LayoutType->getWrappedType());
     SmallString<64> Name = getResourceKindName(Kind);
     if (!CBufferName.empty()) {
       Name.append(".");
       Name.append(CBufferName);
     }
-    return StructType::create(Ty->elements(), Name);
+
+    // TODO: Remove this when we update the frontend to use explicit padding.
+    if (LayoutExtType *LayoutType =
+            dyn_cast<LayoutExtType>(RTy->getResourceType())) {
+      StructType *Ty = cast<StructType>(LayoutType->getWrappedType());
+      return StructType::create(Ty->elements(), N
[llvm-branch-commits] [llvm] [LoongArch] Add patterns to support vector type average instructions generation (PR #161079)
https://github.com/zhaoqi5 updated
https://github.com/llvm/llvm-project/pull/161079
>From dbdb23407c01756a57fe6c6dbf9bc1e9254a81d3 Mon Sep 17 00:00:00 2001
From: Qi Zhao
Date: Sun, 28 Sep 2025 20:19:59 +0800
Subject: [PATCH 1/3] [LoongArch] Add patterns to support vector type average
instructions generation
---
.../LoongArch/LoongArchLASXInstrInfo.td | 50 +++
.../Target/LoongArch/LoongArchLSXInstrInfo.td | 50 +++
2 files changed, 100 insertions(+)
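As context for the patterns below, here is a rough element-wise model in C++
(my illustration, not from the patch) of the DAG shapes being matched; note
that the add wraps at element width, exactly like the vector add in the
patterns:

  #include <cstdint>

  // (sra (add a, b), splat 1) -> [x]vavg.b
  int8_t avg_s(int8_t a, int8_t b) { return static_cast<int8_t>(a + b) >> 1; }
  // (srl (add a, b), splat 1) -> [x]vavg.bu
  uint8_t avg_u(uint8_t a, uint8_t b) { return static_cast<uint8_t>(a + b) >> 1; }
  // (sra (add (add a, b), splat 1), splat 1) -> [x]vavgr.b
  int8_t avgr_s(int8_t a, int8_t b) { return static_cast<int8_t>(a + b + 1) >> 1; }
  // (srl (add (add a, b), splat 1), splat 1) -> [x]vavgr.bu
  uint8_t avgr_u(uint8_t a, uint8_t b) { return static_cast<uint8_t>(a + b + 1) >> 1; }

The same shapes repeat for the 16-, 32-, and 64-bit element types, selecting
the H/W/D (signed) and HU/WU/DU (unsigned) instruction variants.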
diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
index adfe990ba1234..6eb68129d9dba 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
@@ -2015,6 +2015,56 @@ def : Pat<(v4i32(fp_to_uint v4f64:$vj)),
(XVFTINTRZ_LU_D v4f64:$vj)),
sub_128)>;
+// XVAVG_{B/H/W/D/BU/HU/WU/DU}, XVAVGR_{B/H/W/D/BU/HU/WU/DU}
+def : Pat<(sra (v32i8 (add v32i8:$xj, v32i8:$xk)), (v32i8 (vsplat_imm_eq_1))),
+          (XVAVG_B v32i8:$xj, v32i8:$xk)>;
+def : Pat<(sra (v16i16 (add v16i16:$xj, v16i16:$xk)),
+               (v16i16 (vsplat_imm_eq_1))),
+          (XVAVG_H v16i16:$xj, v16i16:$xk)>;
+def : Pat<(sra (v8i32 (add v8i32:$xj, v8i32:$xk)), (v8i32 (vsplat_imm_eq_1))),
+          (XVAVG_W v8i32:$xj, v8i32:$xk)>;
+def : Pat<(sra (v4i64 (add v4i64:$xj, v4i64:$xk)), (v4i64 (vsplat_imm_eq_1))),
+          (XVAVG_D v4i64:$xj, v4i64:$xk)>;
+def : Pat<(srl (v32i8 (add v32i8:$xj, v32i8:$xk)), (v32i8 (vsplat_imm_eq_1))),
+          (XVAVG_BU v32i8:$xj, v32i8:$xk)>;
+def : Pat<(srl (v16i16 (add v16i16:$xj, v16i16:$xk)),
+               (v16i16 (vsplat_imm_eq_1))),
+          (XVAVG_HU v16i16:$xj, v16i16:$xk)>;
+def : Pat<(srl (v8i32 (add v8i32:$xj, v8i32:$xk)), (v8i32 (vsplat_imm_eq_1))),
+          (XVAVG_WU v8i32:$xj, v8i32:$xk)>;
+def : Pat<(srl (v4i64 (add v4i64:$xj, v4i64:$xk)), (v4i64 (vsplat_imm_eq_1))),
+          (XVAVG_DU v4i64:$xj, v4i64:$xk)>;
+def : Pat<(sra (v32i8 (add (v32i8 (add v32i8:$vj, v32i8:$vk)),
+                           (v32i8 (vsplat_imm_eq_1)))),
+               (v32i8 (vsplat_imm_eq_1))),
+          (XVAVGR_B v32i8:$vj, v32i8:$vk)>;
+def : Pat<(sra (v16i16 (add (v16i16 (add v16i16:$vj, v16i16:$vk)),
+                            (v16i16 (vsplat_imm_eq_1)))),
+               (v16i16 (vsplat_imm_eq_1))),
+          (XVAVGR_H v16i16:$vj, v16i16:$vk)>;
+def : Pat<(sra (v8i32 (add (v8i32 (add v8i32:$vj, v8i32:$vk)),
+                           (v8i32 (vsplat_imm_eq_1)))),
+               (v8i32 (vsplat_imm_eq_1))),
+          (XVAVGR_W v8i32:$vj, v8i32:$vk)>;
+def : Pat<(sra (v4i64 (add (v4i64 (add v4i64:$vj, v4i64:$vk)),
+                           (v4i64 (vsplat_imm_eq_1)))),
+               (v4i64 (vsplat_imm_eq_1))),
+          (XVAVGR_D v4i64:$vj, v4i64:$vk)>;
+def : Pat<(srl (v32i8 (add (v32i8 (add v32i8:$vj, v32i8:$vk)),
+                           (v32i8 (vsplat_imm_eq_1)))),
+               (v32i8 (vsplat_imm_eq_1))),
+          (XVAVGR_BU v32i8:$vj, v32i8:$vk)>;
+def : Pat<(srl (v16i16 (add (v16i16 (add v16i16:$vj, v16i16:$vk)),
+                            (v16i16 (vsplat_imm_eq_1)))),
+               (v16i16 (vsplat_imm_eq_1))),
+          (XVAVGR_HU v16i16:$vj, v16i16:$vk)>;
+def : Pat<(srl (v8i32 (add (v8i32 (add v8i32:$vj, v8i32:$vk)),
+                           (v8i32 (vsplat_imm_eq_1)))),
+               (v8i32 (vsplat_imm_eq_1))),
+          (XVAVGR_WU v8i32:$vj, v8i32:$vk)>;
+def : Pat<(srl (v4i64 (add (v4i64 (add v4i64:$vj, v4i64:$vk)),
+                           (v4i64 (vsplat_imm_eq_1)))),
+               (v4i64 (vsplat_imm_eq_1))),
+          (XVAVGR_DU v4i64:$vj, v4i64:$vk)>;
+
 // XVABSD_{B/H/W/D}[U]
 defm : PatXrXr<abds, "XVABSD">;
 defm : PatXrXrU<abdu, "XVABSD">;
diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
index b0eb51a92c6c6..169f0d56c223e 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
@@ -2153,6 +2153,56 @@ def : Pat<(f32 f32imm_vldi:$in),
 def : Pat<(f64 f64imm_vldi:$in),
           (f64 (EXTRACT_SUBREG (VLDI (to_f64imm_vldi f64imm_vldi:$in)),
                 sub_64))>;
+// VAVG_{B/H/W/D/BU/HU/WU/DU}, VAVGR_{B/H/W/D/BU/HU/WU/DU}
+def : Pat<(sra (v16i8 (add v16i8:$vj, v16i8:$vk)), (v16i8 (vsplat_imm_eq_1))),
+          (VAVG_B v16i8:$vj, v16i8:$vk)>;
+def : Pat<(sra (v8i16 (add v8i16:$vj, v8i16:$vk)), (v8i16 (vsplat_imm_eq_1))),
+          (VAVG_H v8i16:$vj, v8i16:$vk)>;
+def : Pat<(sra (v4i32 (add v4i32:$vj, v4i32:$vk)), (v4i32 (vsplat_imm_eq_1))),
+          (VAVG_W v4i32:$vj, v4i32:$vk)>;
+def : Pat<(sra (v2i64 (add v2i64:$vj, v2i64:$vk)), (v2i64 (vsplat_imm_eq_1))),
+          (VAVG_D v2i64:$vj, v2i64:$vk)>;
+def : Pat<(srl (v16i8 (add v16i8:$vj, v16i8:$vk)), (v16i8 (vsplat_imm_eq_1))),
+          (VAVG_BU v16i8:$vj, v16i8:$vk)>;
+def : Pat<(srl (v8i16 (add v8i16:$vj, v8i16:$vk)), (v8i16 (vsplat_imm_eq_1))),
+          (VAVG_HU v8i16:$vj, v8i16:$vk)>;
[llvm-branch-commits] [llvm] [DirectX] Introduce `dx.Padding` type (PR #160957)
@@ -0,0 +1,84 @@
+; RUN: opt -S -dxil-translate-metadata < %s | FileCheck %s
+; RUN: opt -S --passes="dxil-pretty-printer" < %s 2>&1 | FileCheck %s --check-prefix=PRINT
+; RUN: llc %s --filetype=asm -o - < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,PRINT
+
+target triple = "dxil-pc-shadermodel6.6-compute"
+
+%__cblayout_CB1 = type <{ float, i32, double, <2 x i32> }>
+@CB1.cb = global target("dx.CBuffer", %__cblayout_CB1) poison
+@CB1.str = private unnamed_addr constant [4 x i8] c"CB1\00", align 1
+
+%__cblayout_CB2 = type <{ float, [4 x i8], double, float, half, i16, i64, i32 }>
+@CB2.cb = global target("dx.CBuffer", %__cblayout_CB2) poison
+@CB2.str = private unnamed_addr constant [4 x i8] c"CB2\00", align 1
+
+%__cblayout_MyConstants = type <{
+  double, target("dx.Padding", 8),
+  <3 x float>, float,
+  <3 x double>, half, target("dx.Padding", 6),
+  <2 x double>,
+  float, <3 x half>, <3 x half>
+}>
+@MyConstants.cb = global target("dx.CBuffer", %__cblayout_MyConstants) poison
+@MyConstants.str = private unnamed_addr constant [12 x i8] c"MyConstants\00", align 1
+
+; PRINT:; Resource Bindings:
+; PRINT-NEXT:;
+; PRINT-NEXT:; Name                                 Type  Format         Dim      ID      HLSL Bind  Count
+; PRINT-NEXT:; ------------------------------ ---------- ------- ----------- ------- -------------- ------
+; PRINT-NEXT:; CB1                               cbuffer      NA          NA     CB0            cb0     1
+; PRINT-NEXT:; CB2                               cbuffer      NA          NA     CB1            cb1     1
+; PRINT-NEXT:; MyConstants                       cbuffer      NA          NA     CB2    cb5,space15     1
+
+define void @test() #0 {
+
+  ; cbuffer CB1 : register(b0) {
+  ;   float a;
+  ;   int b;
+  ;   double c;
+  ;   int2 d;
+  ; }
+  %CB1.cb_h = call target("dx.CBuffer", %__cblayout_CB1)
+      @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr @CB1.str)
+
+  ; cbuffer CB2 : register(b0) {
+  ;   float a;
+  ;   double b;
+  ;   float c;
+  ;   half d;
+  ;   uint16_t e;
+  ;   int64_t f;
+  ;   int g;
+  ; }
+  %CB2.cb_h = call target("dx.CBuffer", %__cblayout_CB2)
+      @llvm.dx.resource.handlefrombinding(i32 0, i32 1, i32 1, i32 0, ptr @CB2.str)
+
+  ; cbuffer CB3 : register(b5) {
+  ;   double B0;
+  ;   float3 B1;
+  ;   float B2;
+  ;   double3 B3;
+  ;   half B4;
+  ;   double2 B5;
+  ;   float B6;
+  ;   half3 B7;
+  ;   half3 B8;
+  ; }
+  %CB3.cb_h = call target("dx.CBuffer", %__cblayout_MyConstants)
+      @llvm.dx.resource.handlefrombinding(i32 15, i32 5, i32 1, i32 0, ptr @MyConstants.str)
+
+  ret void
+}
+
+attributes #0 = { noinline nounwind "hlsl.shader"="compute" }
+
+; CHECK: @CB1 = external constant %CBuffer.CB1
+; CHECK: @CB2 = external constant %CBuffer.CB2
+; CHECK: @MyConstants = external constant %CBuffer.MyConstants
bogner wrote:
In doing this I also noticed one of the types wasn't using the explicit padding
type, so I updated that as well.
https://github.com/llvm/llvm-project/pull/160957
___
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
[llvm-branch-commits] [llvm] [LoongArch] Override cost hooks to expose more DAG combine opportunities (PR #157824)
https://github.com/heiher approved this pull request.

LGTM.

https://github.com/llvm/llvm-project/pull/157824
___
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
[llvm-branch-commits] [llvm] [llvm][mustache] Refactor template rendering (PR #159189)
https://github.com/evelez7 approved this pull request.

https://github.com/llvm/llvm-project/pull/159189
___
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
[llvm-branch-commits] [llvm] [llvm][mustache] Introduce MustacheContext to simplify mustache APIs (PR #159191)
https://github.com/evelez7 approved this pull request.

https://github.com/llvm/llvm-project/pull/159191
___
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
