Author: Zakk Chen
Date: 2022-03-22T02:32:31-07:00
New Revision: 9ab18cc535379c3442bf52e21fbe21c92eb0fd60
URL: https://github.com/llvm/llvm-project/commit/9ab18cc535379c3442bf52e21fbe21c92eb0fd60
DIFF: https://github.com/llvm/llvm-project/commit/9ab18cc535379c3442bf52e21fbe21c92eb0fd60.diff

LOG: [RISCV] Add policy operand for masked vid and viota IR intrinsics.

Reviewed By: rogfer01

Differential Revision: https://reviews.llvm.org/D120227

Added:


Modified:
    clang/include/clang/Basic/riscv_vector.td
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
    llvm/test/CodeGen/RISCV/rvv/vid.ll
    llvm/test/CodeGen/RISCV/rvv/viota.ll

Removed:


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 556f6c244cee5..6e4dff801dafb 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2007,6 +2007,7 @@ def vmsif : RVVMaskUnaryBuiltin;
 
 // 16.6. vmsof.m set-only-first mask bit
 def vmsof : RVVMaskUnaryBuiltin;
+}
 
 let NoMaskPolicy = HasPassthruOperand, HasNoMaskedOverloaded = false in {
   // 16.8. Vector Iota Instruction
@@ -2016,7 +2017,6 @@ let NoMaskPolicy = HasPassthruOperand, HasNoMaskedOverloaded = false in {
   defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"],
                                               ["v", "Uv", "Uv"]]>;
 }
-}
 
 // 17. Vector Permutation Instructions
 // 17.1. Integer Scalar Move Instructions

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 9c8a320d11023..7ac157d48bb85 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -816,7 +816,7 @@ let TargetPrefix = "riscv" in {
   }
   // Output: (vector)
   // Input: (passthru, vl)
-  class RISCVNullaryIntrinsicTU
+  class RISCVID
     : Intrinsic<[llvm_anyvector_ty],
                 [LLVMMatchType<0>, llvm_anyint_ty],
                 [IntrNoMem]>, RISCVVIntrinsic {
@@ -1460,26 +1460,26 @@ let TargetPrefix = "riscv" in {
     let VLOperand = 2;
   }
   // Output: (vector)
-  // Input: (maskedoff, mask type vector_in, mask, vl)
+  // Input: (maskedoff, mask type vector_in, mask, vl, policy)
   def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
                                        [LLVMMatchType<0>,
                                         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                                        llvm_anyint_ty],
-                                       [IntrNoMem]>, RISCVVIntrinsic {
+                                        llvm_anyint_ty, LLVMMatchType<1>],
+                                       [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
   }
   // Output: (vector)
   // Input: (passthru, vl)
-  def int_riscv_vid : RISCVNullaryIntrinsicTU;
+  def int_riscv_vid : RISCVID;
   // Output: (vector)
-  // Input: (maskedoff, mask, vl)
+  // Input: (maskedoff, mask, vl, policy)
   def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
                                      [LLVMMatchType<0>,
                                       LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                                      llvm_anyint_ty],
-                                     [IntrNoMem]>, RISCVVIntrinsic {
+                                      llvm_anyint_ty, LLVMMatchType<1>],
+                                     [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 2;
   }
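With this change both masked intrinsics take a trailing XLen-sized immediate
policy operand after vl. A minimal sketch of the new call shape in LLVM IR
(the nxv2i32/i64 instantiation and the value names here are illustrative and
not taken from the patch; policy 0 is the value the updated regression tests
below pass for the pre-existing checks):

  ; vid.mask: (maskedoff, mask, vl, policy)
  %v = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
         <vscale x 2 x i32> %maskedoff,
         <vscale x 2 x i1> %mask,
         i64 %vl, i64 0)

  ; viota.mask: (maskedoff, mask type vector_in, mask, vl, policy)
  %w = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
         <vscale x 2 x i32> %maskedoff,
         <vscale x 2 x i1> %in,
         <vscale x 2 x i1> %mask,
         i64 %vl, i64 0)

Because of the ImmArg<ArgIndex<...>> attribute added above, the policy
argument must be a compile-time constant at every call site; a non-constant
policy should be rejected by the IR verifier.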
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 406d30ac73360..60a09db4efd5e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -907,7 +907,7 @@ class VPseudoNullaryNoMaskTU<VReg RegClass>:
 class VPseudoNullaryMask<VReg RegClass>:
   Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
          (ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl,
-              ixlenimm:$sew), []>, RISCVVPseudo {
+              ixlenimm:$sew, ixlenimm:$policy), []>, RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -916,6 +916,7 @@ class VPseudoNullaryMask<VReg RegClass>:
   let HasSEWOp = 1;
   let HasMergeOp = 1;
   let UsesMaskPolicy = 1;
+  let HasVecPolicyOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -1730,7 +1731,7 @@ multiclass VPseudoVIOT_M {
                              Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
     def "_" # m.MX # "_TU" : VPseudoUnaryNoMaskTU<m.vrclass, VR, constraint>,
                              Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
-    def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>,
+    def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, VR, constraint>,
                                Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
   }
 }
@@ -3307,8 +3308,8 @@ multiclass VPatUnaryV_M<string intrinsic, string instruction>
                              vti.Log2SEW, vti.LMul, VR>;
     def : VPatUnaryNoMaskTU<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                             vti.Log2SEW, vti.LMul, vti.RegClass,VR>;
-    def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
-                        vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
+    def : VPatUnaryMaskTA<intrinsic, instruction, "M", vti.Vector, vti.Mask,
+                          vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
   }
 }
@@ -3361,10 +3362,10 @@ multiclass VPatNullaryV<string intrinsic, string instruction>
                               vti.RegClass:$merge, GPR:$vl, vti.Log2SEW)>;
     def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
                           (vti.Vector vti.RegClass:$merge),
-                          (vti.Mask V0), VLOpFrag)),
+                          (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
               (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
                    vti.RegClass:$merge, (vti.Mask V0),
-                   GPR:$vl, vti.Log2SEW)>;
+                   GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
   }
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index c0284f152ffc1..bb63081f94c61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -1267,3 +1267,47 @@ entry:
 
   ret <vscale x 1 x i8> %a
 }
+
+declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vid.v v8, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i1> %0,
+    iXLen %1, iXLen 3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    viota.m v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    iXLen %2, iXLen 3)
+
+  ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
index 396cd81898fbc..fa8f97614dd5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
@@ -1196,3 +1196,45 @@ entry:
 
   ret <vscale x 1 x i8> %a
 }
+
+declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vid.v v8, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    iXLen %2, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    viota.m v8, v0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %1,
+    iXLen %2, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
index c9ca9cd0ca2f0..2cb0d42321d5c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
@@ -1196,3 +1196,45 @@ entry:
 
   ret <vscale x 1 x i8> %a
 }
+
+declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vid.v v8, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    viota.m v8, v0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 1 x i8> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
index bd0f24aa7862a..da88b324e1d15 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
@@ -1196,3 +1196,45 @@ entry:
 
   ret <vscale x 1 x i8> %a
 }
+
+declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vid.v v8, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    viota.m v8, v0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %1,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 1 x i8> %a
+}
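Taken together, the four files above pin down how the new immediate maps onto
the vsetvli tail/mask policy flags; each file exercises exactly one value. The
bit interpretation (bit 0 for tail agnostic, bit 1 for mask agnostic) is
inferred from these tests rather than spelled out in the patch:

  iXLen 0  ->  tu, mu   (tail undisturbed, mask undisturbed)
  iXLen 1  ->  ta, mu   (tail agnostic, mask undisturbed)
  iXLen 2  ->  tu, ma   (tail undisturbed, mask agnostic)
  iXLen 3  ->  ta, ma   (tail agnostic, mask agnostic)

The vid.ll and viota.ll updates below pass iXLen 0, and their CHECK lines are
unchanged, so the existing masked tests keep the tu, mu behavior they had
before this patch.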
diff --git a/llvm/test/CodeGen/RISCV/rvv/vid.ll b/llvm/test/CodeGen/RISCV/rvv/vid.ll
index 815b37625187c..8d5dfd874ac15 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vid.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vid.ll
@@ -24,7 +24,7 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
@@ -36,7 +36,7 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -62,7 +62,7 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8:
@@ -74,7 +74,7 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -100,7 +100,7 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8:
@@ -112,7 +112,7 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -138,7 +138,7 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8:
@@ -150,7 +150,7 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -176,7 +176,7 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8:
@@ -188,7 +188,7 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -214,7 +214,7 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8:
@@ -226,7 +226,7 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -252,7 +252,7 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16:
@@ -264,7 +264,7 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -290,7 +290,7 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16:
@@ -302,7 +302,7 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -328,7 +328,7 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16:
@@ -340,7 +340,7 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -366,7 +366,7 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16:
@@ -378,7 +378,7 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -404,7 +404,7 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16:
@@ -416,7 +416,7 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -442,7 +442,7 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16:
@@ -454,7 +454,7 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -480,7 +480,7 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32:
@@ -492,7 +492,7 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -518,7 +518,7 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32:
@@ -530,7 +530,7 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -556,7 +556,7 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32:
@@ -568,7 +568,7 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -594,7 +594,7 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32:
@@ -606,7 +606,7 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -632,7 +632,7 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32:
@@ -644,7 +644,7 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -670,7 +670,7 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64:
@@ -682,7 +682,7 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -708,7 +708,7 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64:
@@ -720,7 +720,7 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -746,7 +746,7 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64:
@@ -758,7 +758,7 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -784,7 +784,7 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64:
@@ -796,7 +796,7 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/viota.ll b/llvm/test/CodeGen/RISCV/rvv/viota.ll
index a170b5fcbe50b..7c276a42d8c30 100644
--- a/llvm/test/CodeGen/RISCV/rvv/viota.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/viota.ll
@@ -27,7 +27,7 @@ declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
@@ -40,7 +40,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -69,7 +69,7 @@ declare <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1:
@@ -82,7 +82,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i1> %1,
     <vscale x 2 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -111,7 +111,7 @@ declare <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1:
@@ -124,7 +124,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i1> %1,
     <vscale x 4 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -153,7 +153,7 @@ declare <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1:
@@ -166,7 +166,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i1> %1,
     <vscale x 8 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -195,7 +195,7 @@ declare <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1:
@@ -208,7 +208,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i1> %1,
     <vscale x 16 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -237,7 +237,7 @@ declare <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1:
@@ -250,7 +250,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i1> %1,
     <vscale x 32 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -279,7 +279,7 @@ declare <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1:
@@ -292,7 +292,7 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i1> %1,
     <vscale x 64 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 64 x i8> %a
 }
@@ -321,7 +321,7 @@ declare <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1:
@@ -334,7 +334,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -363,7 +363,7 @@ declare <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1:
@@ -376,7 +376,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i1> %1,
     <vscale x 2 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,7 +405,7 @@ declare <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1:
@@ -418,7 +418,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i1> %1,
     <vscale x 4 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -447,7 +447,7 @@ declare <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1:
@@ -460,7 +460,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i1> %1,
     <vscale x 8 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -489,7 +489,7 @@ declare <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i1> %1,
     <vscale x 16 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -531,7 +531,7 @@ declare <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1:
@@ -544,7 +544,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i1> %1,
     <vscale x 32 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -573,7 +573,7 @@ declare <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1:
@@ -586,7 +586,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -615,7 +615,7 @@ declare <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1:
@@ -628,7 +628,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i1> %1,
     <vscale x 2 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -657,7 +657,7 @@ declare <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1:
@@ -670,7 +670,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i1> %1,
     <vscale x 4 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -699,7 +699,7 @@ declare <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1:
@@ -712,7 +712,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i1> %1,
     <vscale x 8 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -741,7 +741,7 @@ declare <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1:
@@ -754,7 +754,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i1> %1,
     <vscale x 16 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -783,7 +783,7 @@ declare <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1:
@@ -796,7 +796,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -825,7 +825,7 @@ declare <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1:
@@ -838,7 +838,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i1> %1,
     <vscale x 2 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -867,7 +867,7 @@ declare <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1:
@@ -880,7 +880,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i1> %1,
     <vscale x 4 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -909,7 +909,7 @@ declare <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1:
@@ -922,7 +922,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i1> %1,
     <vscale x 8 x i1> %1,
-    iXLen %2)
+    iXLen %2, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits