Author: ShihPo Hung
Date: 2021-01-21T18:38:49-08:00
New Revision: bea661d9a52f9abb4fef7cf195092e912c165d34
URL: https://github.com/llvm/llvm-project/commit/bea661d9a52f9abb4fef7cf195092e912c165d34
DIFF: https://github.com/llvm/llvm-project/commit/bea661d9a52f9abb4fef7cf195092e912c165d34.diff

LOG: [RISCV] Add intrinsics for RVV 1.0 vrgatherei16

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D95014

Added:
    llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll

Modified:
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed:


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 659010399977..cda0c86f0048 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -885,6 +885,7 @@ let TargetPrefix = "riscv" in {
   defm vfslide1down : RISCVBinaryAAX;
 
   defm vrgather : RISCVBinaryAAX;
+  defm vrgatherei16 : RISCVBinaryAAX;
 
   def "int_riscv_vcompress" : RISCVBinaryAAAMask;
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4650c75b77fc..fd563f010cae 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1266,11 +1266,40 @@ multiclass VPseudoBinary<VReg RetClass,
   }
 }
 
+multiclass VPseudoBinaryEmul<VReg RetClass,
+                             VReg Op1Class,
+                             DAGOperand Op2Class,
+                             LMULInfo lmul,
+                             LMULInfo emul,
+                             string Constraint = ""> {
+  let VLMul = lmul.value in {
+    def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
+                                                            Constraint>;
+    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class,
+                                                                    Constraint>;
+  }
+}
+
 multiclass VPseudoBinaryV_VV<string Constraint = ""> {
   foreach m = MxList.m in
     defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
 }
 
+multiclass VPseudoBinaryV_VV_EEW<int eew, string Constraint = ""> {
+  foreach m = MxList.m in {
+    foreach sew = EEWList in {
+      defvar octuple_lmul = octuple_from_str<m.MX>.ret;
+      // emul = lmul * eew / sew
+      defvar octuple_emul = !srl(!mul(octuple_lmul, eew), shift_amount<sew>.val);
+      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
+        defvar emulMX = octuple_to_str<octuple_emul>.ret;
+        defvar emul = !cast<LMULInfo>("V_" # emulMX);
+        defm _VV : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul, Constraint>;
+      }
+    }
+  }
+}
+
 multiclass VPseudoBinaryV_VX<bit IsFloat, string Constraint = ""> {
   foreach m = MxList.m in
     defm !if(IsFloat, "_VF", "_VX") : VPseudoBinary<m.vrclass, m.vrclass,
@@ -2236,6 +2265,25 @@ multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
   }
 }
 
+multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
+                                  int eew, list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in {
+    // emul = lmul * eew / sew
+    defvar vlmul = vti.LMul;
+    defvar octuple_lmul = octuple_from_str<vlmul.MX>.ret;
+    defvar octuple_emul = !srl(!mul(octuple_lmul, eew), shift_amount<vti.SEW>.val);
+    if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
+      defvar emul_str = octuple_to_str<octuple_emul>.ret;
+      defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
+      defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str;
+      defm : VPatBinary<intrinsic, inst,
+                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
+                        vti.SEW, vti.RegClass,
+                        vti.RegClass, ivti.RegClass>;
+    }
+  }
+}
+
 multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in {
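Both new multiclasses gate instantiation on the same computation: the index operand's EMUL is derived from the data type's LMUL as EMUL = (EEW/SEW) * LMUL, carried out in eighths of a register ("octuple" form) so fractional LMULs stay integral, and a pseudo is only created when 1/8 <= EMUL <= 8. A standalone C++ sketch of that check, not LLVM code, with the octuple encoding (MF8=1 ... M8=64) assumed from the TableGen helpers above:

    #include <cstdio>
    #include <initializer_list>

    int main() {
      const int eew = 16; // vrgatherei16 indices are always 16-bit
      for (int octuple_lmul : {1, 2, 4, 8, 16, 32, 64}) { // MF8 .. M8, scaled by 8
        for (int sew : {8, 16, 32, 64}) {
          // emul = lmul * eew / sew; the TableGen code shifts right by log2(sew)
          int octuple_emul = octuple_lmul * eew / sew;
          if (octuple_emul >= 1 && octuple_emul <= 64) // 1/8 <= EMUL <= 8
            printf("lmul=%d/8 sew=%d -> index emul=%d/8\n",
                   octuple_lmul, sew, octuple_emul);
          else
            printf("lmul=%d/8 sew=%d -> no pseudo\n", octuple_lmul, sew);
        }
      }
      return 0;
    }

For example, SEW=8 data at LMUL=8 is skipped (the 16-bit indices would need EMUL=16), as is SEW=64 data at LMUL=1/8 (EMUL would be 1/32).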
@@ -3403,6 +3451,7 @@ let Predicates = [HasStdExtV, HasStdExtF] in {
 // 17.4. Vector Register Gather Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVRGATHER : VPseudoBinaryV_VV_VX_VI<uimm5, "@earlyclobber $rd">;
+defm PseudoVRGATHEREI16 : VPseudoBinaryV_VV_EEW</* eew */ 16, "@earlyclobber $rd">;
 
 //===----------------------------------------------------------------------===//
 // 17.5. Vector Compress Instruction
@@ -4081,11 +4130,15 @@ let Predicates = [HasStdExtV, HasStdExtF] in {
 let Predicates = [HasStdExtV] in {
   defm "" : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                      AllIntegerVectors, uimm5>;
+  defm "" : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16", "PseudoVRGATHEREI16",
+                                   /* eew */ 16, AllIntegerVectors>;
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, HasStdExtF] in {
   defm "" : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                      AllFloatVectors, uimm5>;
+  defm "" : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16", "PseudoVRGATHEREI16",
+                                   /* eew */ 16, AllFloatVectors>;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
new file mode 100644
index 000000000000..50dcdded0287
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
@@ -0,0 +1,1272 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.mask.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.mask.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
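The tests above pin down the expected semantics. As a scalar model (illustrative only, not part of the patch) of what vrgatherei16.vv computes for the active elements, per the V spec: an out-of-range 16-bit index yields 0, otherwise it selects the indexed source element:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // res[i] = (idx[i] >= vlmax) ? 0 : src[idx[i]]
    template <typename T>
    std::vector<T> vrgatherei16_model(const std::vector<T> &src,
                                      const std::vector<uint16_t> &idx,
                                      size_t vlmax) {
      std::vector<T> res(idx.size());
      for (size_t i = 0; i < idx.size(); ++i)
        res[i] = (idx[i] >= vlmax) ? T(0) : src[idx[i]];
      return res;
    }

In the masked tests, the extra leading vector operand is the merge value: elements whose mask bit is clear (and the tail) keep that operand's contents, which is why the masked form is selected with the tu,mu policy while the unmasked form uses ta. The @earlyclobber $rd constraint is also visible here: vrgatherei16.vv may not write a register group that overlaps its sources, so the unmasked checks gather into a scratch register (v25) and copy back to v8 with vmv1r.v.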
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgatherei16.nxv2i8.nxv2i16(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.nxv2i8.nxv2i16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgatherei16.mask.nxv2i8.nxv2i16(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.mask.nxv2i8.nxv2i16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgatherei16.nxv4i8.nxv4i16(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.nxv4i8.nxv4i16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgatherei16.mask.nxv4i8.nxv4i16(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.mask.nxv4i8.nxv4i16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.nxv8i8.nxv8i16(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.nxv8i8.nxv8i16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.mask.nxv8i8.nxv8i16(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8_nxv8i16(<vscale x 
8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.mask.nxv8i8.nxv8i16( + <vscale x 8 x i8> %0, + <vscale x 8 x i8> %1, + <vscale x 8 x i16> %2, + <vscale x 8 x i1> %3, + i32 %4) + + ret <vscale x 8 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vrgatherei16.nxv16i8.nxv16i16( + <vscale x 16 x i8>, + <vscale x 16 x i16>, + i32); + +define <vscale x 16 x i8> @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v26, v8, v12 +; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.nxv16i8.nxv16i16( + <vscale x 16 x i8> %0, + <vscale x 16 x i16> %1, + i32 %2) + + ret <vscale x 16 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vrgatherei16.mask.nxv16i8.nxv16i16( + <vscale x 16 x i8>, + <vscale x 16 x i8>, + <vscale x 16 x i16>, + <vscale x 16 x i1>, + i32); + +define <vscale x 16 x i8> @intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.mask.nxv16i8.nxv16i16( + <vscale x 16 x i8> %0, + <vscale x 16 x i8> %1, + <vscale x 16 x i16> %2, + <vscale x 16 x i1> %3, + i32 %4) + + ret <vscale x 16 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vrgatherei16.nxv32i8.nxv32i16( + <vscale x 32 x i8>, + <vscale x 32 x i16>, + i32); + +define <vscale x 32 x i8> @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vrgatherei16.vv v28, v8, v16 +; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.nxv32i8.nxv32i16( + <vscale x 32 x i8> %0, + <vscale x 32 x i16> %1, + i32 %2) + + ret <vscale x 32 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vrgatherei16.mask.nxv32i8.nxv32i16( + <vscale x 32 x i8>, + <vscale x 32 x i8>, + <vscale x 32 x i16>, + <vscale x 32 x i1>, + i32); + +define <vscale x 32 x i8> @intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.mask.nxv32i8.nxv32i16( + <vscale x 32 x i8> %0, + <vscale x 32 x i8> %1, + <vscale x 32 x i16> %2, + <vscale x 32 x i1> %3, + 
i32 %4) + + ret <vscale x 32 x i8> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vrgatherei16.nxv1i16.nxv1i16( + <vscale x 1 x i16>, + <vscale x 1 x i16>, + i32); + +define <vscale x 1 x i16> @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.nxv1i16.nxv1i16( + <vscale x 1 x i16> %0, + <vscale x 1 x i16> %1, + i32 %2) + + ret <vscale x 1 x i16> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vrgatherei16.mask.nxv1i16.nxv1i16( + <vscale x 1 x i16>, + <vscale x 1 x i16>, + <vscale x 1 x i16>, + <vscale x 1 x i1>, + i32); + +define <vscale x 1 x i16> @intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.mask.nxv1i16.nxv1i16( + <vscale x 1 x i16> %0, + <vscale x 1 x i16> %1, + <vscale x 1 x i16> %2, + <vscale x 1 x i1> %3, + i32 %4) + + ret <vscale x 1 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vrgatherei16.nxv2i16.nxv2i16( + <vscale x 2 x i16>, + <vscale x 2 x i16>, + i32); + +define <vscale x 2 x i16> @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.nxv2i16.nxv2i16( + <vscale x 2 x i16> %0, + <vscale x 2 x i16> %1, + i32 %2) + + ret <vscale x 2 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vrgatherei16.mask.nxv2i16.nxv2i16( + <vscale x 2 x i16>, + <vscale x 2 x i16>, + <vscale x 2 x i16>, + <vscale x 2 x i1>, + i32); + +define <vscale x 2 x i16> @intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.mask.nxv2i16.nxv2i16( + <vscale x 2 x i16> %0, + <vscale x 2 x i16> %1, + <vscale x 2 x i16> %2, + <vscale x 2 x i1> %3, + i32 %4) + + ret <vscale x 2 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vrgatherei16.nxv4i16.nxv4i16( + <vscale x 4 x i16>, + <vscale x 4 x i16>, + i32); + +define <vscale x 4 x i16> @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.nxv4i16.nxv4i16( + <vscale x 4 x i16> %0, + <vscale x 4 x i16> %1, + i32 %2) + + ret <vscale x 4 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vrgatherei16.mask.nxv4i16.nxv4i16( + <vscale x 4 x i16>, + <vscale x 4 x i16>, + <vscale x 4 x i16>, + <vscale x 4 x i1>, + i32); + +define <vscale x 4 x i16> @intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.mask.nxv4i16.nxv4i16( + <vscale x 4 x i16> %0, + <vscale x 4 x i16> %1, + <vscale x 4 x i16> %2, + <vscale x 4 x i1> %3, + i32 %4) + + ret <vscale x 4 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vrgatherei16.nxv8i16.nxv8i16( + <vscale x 8 x i16>, + <vscale x 8 x i16>, + i32); + +define <vscale x 8 x i16> @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 +; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.nxv8i16.nxv8i16( + <vscale x 8 x i16> %0, + <vscale x 8 x i16> %1, + i32 %2) + + ret <vscale x 8 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vrgatherei16.mask.nxv8i16.nxv8i16( + <vscale x 8 x i16>, + <vscale x 8 x i16>, + <vscale x 8 x i16>, + <vscale x 8 x i1>, + i32); + +define <vscale x 8 x i16> @intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.mask.nxv8i16.nxv8i16( + <vscale x 8 x i16> %0, + <vscale x 8 x i16> %1, + <vscale x 8 x i16> %2, + <vscale x 8 x i1> %3, + i32 %4) + + ret <vscale x 8 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vrgatherei16.nxv16i16.nxv16i16( + <vscale x 16 x i16>, + <vscale x 16 x i16>, + i32); + +define <vscale x 16 x i16> @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 +; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.nxv16i16.nxv16i16( + <vscale x 16 x i16> %0, + <vscale x 16 x i16> %1, + i32 %2) + + ret <vscale x 16 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vrgatherei16.mask.nxv16i16.nxv16i16( + <vscale x 16 x i16>, + <vscale x 16 x i16>, + <vscale x 16 x i16>, + <vscale x 16 x i1>, + i32); + +define <vscale x 16 x i16> @intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> 
%1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.mask.nxv16i16.nxv16i16( + <vscale x 16 x i16> %0, + <vscale x 16 x i16> %1, + <vscale x 16 x i16> %2, + <vscale x 16 x i1> %3, + i32 %4) + + ret <vscale x 16 x i16> %a +} + +declare <vscale x 32 x i16> @llvm.riscv.vrgatherei16.nxv32i16.nxv32i16( + <vscale x 32 x i16>, + <vscale x 32 x i16>, + i32); + +define <vscale x 32 x i16> @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 +; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.nxv32i16.nxv32i16( + <vscale x 32 x i16> %0, + <vscale x 32 x i16> %1, + i32 %2) + + ret <vscale x 32 x i16> %a +} + +declare <vscale x 32 x i16> @llvm.riscv.vrgatherei16.mask.nxv32i16.nxv32i16( + <vscale x 32 x i16>, + <vscale x 32 x i16>, + <vscale x 32 x i16>, + <vscale x 32 x i1>, + i32); + +define <vscale x 32 x i16> @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.mask.nxv32i16.nxv32i16( + <vscale x 32 x i16> %0, + <vscale x 32 x i16> %1, + <vscale x 32 x i16> %2, + <vscale x 32 x i1> %3, + i32 %4) + + ret <vscale x 32 x i16> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vrgatherei16.nxv1i32.nxv1i16( + <vscale x 1 x i32>, + <vscale x 1 x i16>, + i32); + +define <vscale x 1 x i32> @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.nxv1i32.nxv1i16( + <vscale x 1 x i32> %0, + <vscale x 1 x i16> %1, + i32 %2) + + ret <vscale x 1 x i32> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vrgatherei16.mask.nxv1i32.nxv1i16( + <vscale x 1 x i32>, + <vscale x 1 x i32>, + <vscale x 1 x i16>, + <vscale x 1 x i1>, + i32); + +define <vscale x 1 x i32> @intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.mask.nxv1i32.nxv1i16( + <vscale x 1 x 
i32> %0, + <vscale x 1 x i32> %1, + <vscale x 1 x i16> %2, + <vscale x 1 x i1> %3, + i32 %4) + + ret <vscale x 1 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vrgatherei16.nxv4i32.nxv4i16( + <vscale x 4 x i32>, + <vscale x 4 x i16>, + i32); + +define <vscale x 4 x i32> @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 +; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.nxv4i32.nxv4i16( + <vscale x 4 x i32> %0, + <vscale x 4 x i16> %1, + i32 %2) + + ret <vscale x 4 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vrgatherei16.mask.nxv4i32.nxv4i16( + <vscale x 4 x i32>, + <vscale x 4 x i32>, + <vscale x 4 x i16>, + <vscale x 4 x i1>, + i32); + +define <vscale x 4 x i32> @intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.mask.nxv4i32.nxv4i16( + <vscale x 4 x i32> %0, + <vscale x 4 x i32> %1, + <vscale x 4 x i16> %2, + <vscale x 4 x i1> %3, + i32 %4) + + ret <vscale x 4 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vrgatherei16.nxv8i32.nxv8i16( + <vscale x 8 x i32>, + <vscale x 8 x i16>, + i32); + +define <vscale x 8 x i32> @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 +; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.nxv8i32.nxv8i16( + <vscale x 8 x i32> %0, + <vscale x 8 x i16> %1, + i32 %2) + + ret <vscale x 8 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vrgatherei16.mask.nxv8i32.nxv8i16( + <vscale x 8 x i32>, + <vscale x 8 x i32>, + <vscale x 8 x i16>, + <vscale x 8 x i1>, + i32); + +define <vscale x 8 x i32> @intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.mask.nxv8i32.nxv8i16( + <vscale x 8 x i32> %0, + <vscale x 8 x i32> %1, + <vscale x 8 x i16> %2, + <vscale x 8 x i1> %3, + i32 %4) + + ret <vscale x 8 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vrgatherei16.nxv16i32.nxv16i16( + <vscale x 16 x i32>, + <vscale x 16 x i16>, + i32); + +define <vscale x 16 x i32> @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, 
e32,m8,ta,mu +; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 +; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.nxv16i32.nxv16i16( + <vscale x 16 x i32> %0, + <vscale x 16 x i16> %1, + i32 %2) + + ret <vscale x 16 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vrgatherei16.mask.nxv16i32.nxv16i16( + <vscale x 16 x i32>, + <vscale x 16 x i32>, + <vscale x 16 x i16>, + <vscale x 16 x i1>, + i32); + +define <vscale x 16 x i32> @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.mask.nxv16i32.nxv16i16( + <vscale x 16 x i32> %0, + <vscale x 16 x i32> %1, + <vscale x 16 x i16> %2, + <vscale x 16 x i1> %3, + i32 %4) + + ret <vscale x 16 x i32> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vrgatherei16.nxv1f16.nxv1i16( + <vscale x 1 x half>, + <vscale x 1 x i16>, + i32); + +define <vscale x 1 x half> @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.nxv1f16.nxv1i16( + <vscale x 1 x half> %0, + <vscale x 1 x i16> %1, + i32 %2) + + ret <vscale x 1 x half> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vrgatherei16.mask.nxv1f16.nxv1i16( + <vscale x 1 x half>, + <vscale x 1 x half>, + <vscale x 1 x i16>, + <vscale x 1 x i1>, + i32); + +define <vscale x 1 x half> @intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.mask.nxv1f16.nxv1i16( + <vscale x 1 x half> %0, + <vscale x 1 x half> %1, + <vscale x 1 x i16> %2, + <vscale x 1 x i1> %3, + i32 %4) + + ret <vscale x 1 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vrgatherei16.nxv2f16.nxv2i16( + <vscale x 2 x half>, + <vscale x 2 x i16>, + i32); + +define <vscale x 2 x half> @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.nxv2f16.nxv2i16( + <vscale x 2 x half> %0, + <vscale x 2 x i16> %1, + i32 %2) + + ret <vscale x 2 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vrgatherei16.mask.nxv2f16.nxv2i16( + <vscale x 2 x half>, + 
<vscale x 2 x half>, + <vscale x 2 x i16>, + <vscale x 2 x i1>, + i32); + +define <vscale x 2 x half> @intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.mask.nxv2f16.nxv2i16( + <vscale x 2 x half> %0, + <vscale x 2 x half> %1, + <vscale x 2 x i16> %2, + <vscale x 2 x i1> %3, + i32 %4) + + ret <vscale x 2 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vrgatherei16.nxv4f16.nxv4i16( + <vscale x 4 x half>, + <vscale x 4 x i16>, + i32); + +define <vscale x 4 x half> @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.nxv4f16.nxv4i16( + <vscale x 4 x half> %0, + <vscale x 4 x i16> %1, + i32 %2) + + ret <vscale x 4 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vrgatherei16.mask.nxv4f16.nxv4i16( + <vscale x 4 x half>, + <vscale x 4 x half>, + <vscale x 4 x i16>, + <vscale x 4 x i1>, + i32); + +define <vscale x 4 x half> @intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.mask.nxv4f16.nxv4i16( + <vscale x 4 x half> %0, + <vscale x 4 x half> %1, + <vscale x 4 x i16> %2, + <vscale x 4 x i1> %3, + i32 %4) + + ret <vscale x 4 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vrgatherei16.nxv8f16.nxv8i16( + <vscale x 8 x half>, + <vscale x 8 x i16>, + i32); + +define <vscale x 8 x half> @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 +; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.nxv8f16.nxv8i16( + <vscale x 8 x half> %0, + <vscale x 8 x i16> %1, + i32 %2) + + ret <vscale x 8 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vrgatherei16.mask.nxv8f16.nxv8i16( + <vscale x 8 x half>, + <vscale x 8 x half>, + <vscale x 8 x i16>, + <vscale x 8 x i1>, + i32); + +define <vscale x 8 x half> @intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.mask.nxv8f16.nxv8i16( + <vscale x 8 x half> %0, + <vscale x 8 x half> %1, + <vscale x 8 x i16> %2, + <vscale x 8 x i1> %3, + i32 %4) + + ret <vscale x 8 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vrgatherei16.nxv16f16.nxv16i16( + <vscale x 16 x half>, + <vscale x 16 x i16>, + i32); + +define <vscale x 16 x half> @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 +; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.nxv16f16.nxv16i16( + <vscale x 16 x half> %0, + <vscale x 16 x i16> %1, + i32 %2) + + ret <vscale x 16 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vrgatherei16.mask.nxv16f16.nxv16i16( + <vscale x 16 x half>, + <vscale x 16 x half>, + <vscale x 16 x i16>, + <vscale x 16 x i1>, + i32); + +define <vscale x 16 x half> @intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.mask.nxv16f16.nxv16i16( + <vscale x 16 x half> %0, + <vscale x 16 x half> %1, + <vscale x 16 x i16> %2, + <vscale x 16 x i1> %3, + i32 %4) + + ret <vscale x 16 x half> %a +} + +declare <vscale x 32 x half> @llvm.riscv.vrgatherei16.nxv32f16.nxv32i16( + <vscale x 32 x half>, + <vscale x 32 x i16>, + i32); + +define <vscale x 32 x half> @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 +; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.nxv32f16.nxv32i16( + <vscale x 32 x half> %0, + <vscale x 32 x i16> %1, + i32 %2) + + ret <vscale x 32 x half> %a +} + +declare <vscale x 32 x half> @llvm.riscv.vrgatherei16.mask.nxv32f16.nxv32i16( + <vscale x 32 x half>, + <vscale x 32 x half>, + <vscale x 32 x i16>, + <vscale x 32 x i1>, + i32); + +define <vscale x 32 x half> @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.mask.nxv32f16.nxv32i16( + <vscale x 32 x half> %0, + <vscale x 32 x half> %1, + <vscale x 32 x i16> %2, + <vscale x 32 x i1> %3, + i32 %4) + + ret <vscale x 32 x half> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vrgatherei16.nxv1f32.nxv1i16( + <vscale x 1 x float>, 
+ <vscale x 1 x i16>, + i32); + +define <vscale x 1 x float> @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.nxv1f32.nxv1i16( + <vscale x 1 x float> %0, + <vscale x 1 x i16> %1, + i32 %2) + + ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vrgatherei16.mask.nxv1f32.nxv1i16( + <vscale x 1 x float>, + <vscale x 1 x float>, + <vscale x 1 x i16>, + <vscale x 1 x i1>, + i32); + +define <vscale x 1 x float> @intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.mask.nxv1f32.nxv1i16( + <vscale x 1 x float> %0, + <vscale x 1 x float> %1, + <vscale x 1 x i16> %2, + <vscale x 1 x i1> %3, + i32 %4) + + ret <vscale x 1 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vrgatherei16.nxv4f32.nxv4i16( + <vscale x 4 x float>, + <vscale x 4 x i16>, + i32); + +define <vscale x 4 x float> @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 +; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.nxv4f32.nxv4i16( + <vscale x 4 x float> %0, + <vscale x 4 x i16> %1, + i32 %2) + + ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vrgatherei16.mask.nxv4f32.nxv4i16( + <vscale x 4 x float>, + <vscale x 4 x float>, + <vscale x 4 x i16>, + <vscale x 4 x i1>, + i32); + +define <vscale x 4 x float> @intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.mask.nxv4f32.nxv4i16( + <vscale x 4 x float> %0, + <vscale x 4 x float> %1, + <vscale x 4 x i16> %2, + <vscale x 4 x i1> %3, + i32 %4) + + ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vrgatherei16.nxv8f32.nxv8i16( + <vscale x 8 x float>, + <vscale x 8 x i16>, + i32); + +define <vscale x 8 x float> @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 +; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x float> 
@llvm.riscv.vrgatherei16.nxv8f32.nxv8i16( + <vscale x 8 x float> %0, + <vscale x 8 x i16> %1, + i32 %2) + + ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vrgatherei16.mask.nxv8f32.nxv8i16( + <vscale x 8 x float>, + <vscale x 8 x float>, + <vscale x 8 x i16>, + <vscale x 8 x i1>, + i32); + +define <vscale x 8 x float> @intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.mask.nxv8f32.nxv8i16( + <vscale x 8 x float> %0, + <vscale x 8 x float> %1, + <vscale x 8 x i16> %2, + <vscale x 8 x i1> %3, + i32 %4) + + ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vrgatherei16.nxv16f32.nxv16i16( + <vscale x 16 x float>, + <vscale x 16 x i16>, + i32); + +define <vscale x 16 x float> @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 +; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.nxv16f32.nxv16i16( + <vscale x 16 x float> %0, + <vscale x 16 x i16> %1, + i32 %2) + + ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vrgatherei16.mask.nxv16f32.nxv16i16( + <vscale x 16 x float>, + <vscale x 16 x float>, + <vscale x 16 x i16>, + <vscale x 16 x i1>, + i32); + +define <vscale x 16 x float> @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.mask.nxv16f32.nxv16i16( + <vscale x 16 x float> %0, + <vscale x 16 x float> %1, + <vscale x 16 x i16> %2, + <vscale x 16 x i1> %3, + i32 %4) + + ret <vscale x 16 x float> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vrgatherei16.nxv4f64.nxv4i16( + <vscale x 4 x double>, + <vscale x 4 x i16>, + i32); + +define <vscale x 4 x double> @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 +; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.nxv4f64.nxv4i16( + <vscale x 4 x double> %0, + <vscale x 4 x i16> %1, + i32 %2) + + ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vrgatherei16.mask.nxv4f64.nxv4i16( + <vscale x 4 x double>, + <vscale x 4 x double>, + <vscale x 4 x i16>, + <vscale x 4 x i1>, + i32); + +define 
<vscale x 4 x double> @intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.mask.nxv4f64.nxv4i16( + <vscale x 4 x double> %0, + <vscale x 4 x double> %1, + <vscale x 4 x i16> %2, + <vscale x 4 x i1> %3, + i32 %4) + + ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vrgatherei16.nxv8f64.nxv8i16( + <vscale x 8 x double>, + <vscale x 8 x i16>, + i32); + +define <vscale x 8 x double> @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x i16> %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu +; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 +; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.nxv8f64.nxv8i16( + <vscale x 8 x double> %0, + <vscale x 8 x i16> %1, + i32 %2) + + ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vrgatherei16.mask.nxv8f64.nxv8i16( + <vscale x 8 x double>, + <vscale x 8 x double>, + <vscale x 8 x i16>, + <vscale x 8 x i1>, + i32); + +define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.mask.nxv8f64.nxv8i16( + <vscale x 8 x double> %0, + <vscale x 8 x double> %1, + <vscale x 8 x i16> %2, + <vscale x 8 x i1> %3, + i32 %4) + + ret <vscale x 8 x double> %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll new file mode 100644 index 000000000000..db5e0efadfbb --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll @@ -0,0 +1,1364 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.nxv1i8.nxv1i16( + <vscale x 1 x i8>, + <vscale x 1 x i16>, + i64); + +define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.nxv1i8.nxv1i16( + <vscale x 1 x i8> %0, + <vscale x 1 x i16> %1, + i64 %2) + + ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.mask.nxv1i8.nxv1i16( + <vscale x 1 x i8>, + 
<vscale x 1 x i8>, + <vscale x 1 x i16>, + <vscale x 1 x i1>, + i64); + +define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.mask.nxv1i8.nxv1i16( + <vscale x 1 x i8> %0, + <vscale x 1 x i8> %1, + <vscale x 1 x i16> %2, + <vscale x 1 x i1> %3, + i64 %4) + + ret <vscale x 1 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vrgatherei16.nxv2i8.nxv2i16( + <vscale x 2 x i8>, + <vscale x 2 x i16>, + i64); + +define <vscale x 2 x i8> @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.nxv2i8.nxv2i16( + <vscale x 2 x i8> %0, + <vscale x 2 x i16> %1, + i64 %2) + + ret <vscale x 2 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vrgatherei16.mask.nxv2i8.nxv2i16( + <vscale x 2 x i8>, + <vscale x 2 x i8>, + <vscale x 2 x i16>, + <vscale x 2 x i1>, + i64); + +define <vscale x 2 x i8> @intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.mask.nxv2i8.nxv2i16( + <vscale x 2 x i8> %0, + <vscale x 2 x i8> %1, + <vscale x 2 x i16> %2, + <vscale x 2 x i1> %3, + i64 %4) + + ret <vscale x 2 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vrgatherei16.nxv4i8.nxv4i16( + <vscale x 4 x i8>, + <vscale x 4 x i16>, + i64); + +define <vscale x 4 x i8> @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.nxv4i8.nxv4i16( + <vscale x 4 x i8> %0, + <vscale x 4 x i16> %1, + i64 %2) + + ret <vscale x 4 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vrgatherei16.mask.nxv4i8.nxv4i16( + <vscale x 4 x i8>, + <vscale x 4 x i8>, + <vscale x 4 x i16>, + <vscale x 4 x i1>, + i64); + +define <vscale x 4 x i8> @intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.mask.nxv4i8.nxv4i16( + <vscale x 4 x i8> %0, + 
<vscale x 4 x i8> %1, + <vscale x 4 x i16> %2, + <vscale x 4 x i1> %3, + i64 %4) + + ret <vscale x 4 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.nxv8i8.nxv8i16( + <vscale x 8 x i8>, + <vscale x 8 x i16>, + i64); + +define <vscale x 8 x i8> @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vrgatherei16.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.nxv8i8.nxv8i16( + <vscale x 8 x i8> %0, + <vscale x 8 x i16> %1, + i64 %2) + + ret <vscale x 8 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.mask.nxv8i8.nxv8i16( + <vscale x 8 x i8>, + <vscale x 8 x i8>, + <vscale x 8 x i16>, + <vscale x 8 x i1>, + i64); + +define <vscale x 8 x i8> @intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.mask.nxv8i8.nxv8i16( + <vscale x 8 x i8> %0, + <vscale x 8 x i8> %1, + <vscale x 8 x i16> %2, + <vscale x 8 x i1> %3, + i64 %4) + + ret <vscale x 8 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vrgatherei16.nxv16i8.nxv16i16( + <vscale x 16 x i8>, + <vscale x 16 x i16>, + i64); + +define <vscale x 16 x i8> @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vrgatherei16.vv v26, v8, v12 +; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.nxv16i8.nxv16i16( + <vscale x 16 x i8> %0, + <vscale x 16 x i16> %1, + i64 %2) + + ret <vscale x 16 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vrgatherei16.mask.nxv16i8.nxv16i16( + <vscale x 16 x i8>, + <vscale x 16 x i8>, + <vscale x 16 x i16>, + <vscale x 16 x i1>, + i64); + +define <vscale x 16 x i8> @intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.mask.nxv16i8.nxv16i16( + <vscale x 16 x i8> %0, + <vscale x 16 x i8> %1, + <vscale x 16 x i16> %2, + <vscale x 16 x i1> %3, + i64 %4) + + ret <vscale x 16 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vrgatherei16.nxv32i8.nxv32i16( + <vscale x 32 x i8>, + <vscale x 32 x i16>, + i64); + +define <vscale x 32 x i8> @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: 
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.nxv32i8.nxv32i16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgatherei16.mask.nxv32i8.nxv32i16(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.mask.nxv32i8.nxv32i16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgatherei16.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgatherei16.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgatherei16.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgatherei16.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgatherei16.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgatherei16.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrgatherei16.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrgatherei16.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrgatherei16.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrgatherei16.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrgatherei16.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrgatherei16.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrgatherei16.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrgatherei16.mask.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrgatherei16.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrgatherei16.mask.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrgatherei16.nxv8i32.nxv8i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrgatherei16.mask.nxv8i32.nxv8i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.mask.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrgatherei16.nxv16i32.nxv16i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrgatherei16.mask.nxv16i32.nxv16i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrgatherei16.nxv4i64.nxv4i16(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.nxv4i64.nxv4i16(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrgatherei16.mask.nxv4i64.nxv4i16(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.mask.nxv4i64.nxv4i16(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrgatherei16.nxv8i64.nxv8i16(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.nxv8i64.nxv8i16(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrgatherei16.mask.nxv8i64.nxv8i16(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vle16.v v26, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.mask.nxv8i64.nxv8i16(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vrgatherei16.nxv1f16.nxv1i16(
+  <vscale x 1 x half>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.nxv1f16.nxv1i16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vrgatherei16.mask.nxv1f16.nxv1i16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.mask.nxv1f16.nxv1i16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vrgatherei16.nxv2f16.nxv2i16(
+  <vscale x 2 x half>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.nxv2f16.nxv2i16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vrgatherei16.mask.nxv2f16.nxv2i16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.mask.nxv2f16.nxv2i16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vrgatherei16.nxv4f16.nxv4i16(
+  <vscale x 4 x half>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.nxv4f16.nxv4i16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vrgatherei16.mask.nxv4f16.nxv4i16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.mask.nxv4f16.nxv4i16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vrgatherei16.nxv8f16.nxv8i16(
+  <vscale x 8 x half>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.nxv8f16.nxv8i16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vrgatherei16.mask.nxv8f16.nxv8i16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.mask.nxv8f16.nxv8i16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vrgatherei16.nxv16f16.nxv16i16(
+  <vscale x 16 x half>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.nxv16f16.nxv16i16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vrgatherei16.mask.nxv16f16.nxv16i16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.mask.nxv16f16.nxv16i16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vrgatherei16.nxv32f16.nxv32i16(
+  <vscale x 32 x half>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x half> @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.nxv32f16.nxv32i16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vrgatherei16.mask.nxv32f16.nxv32i16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x half> @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.mask.nxv32f16.nxv32i16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vrgatherei16.nxv1f32.nxv1i16(
+  <vscale x 1 x float>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.nxv1f32.nxv1i16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vrgatherei16.mask.nxv1f32.nxv1i16(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.mask.nxv1f32.nxv1i16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vrgatherei16.nxv4f32.nxv4i16(
+  <vscale x 4 x float>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.nxv4f32.nxv4i16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vrgatherei16.mask.nxv4f32.nxv4i16(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.mask.nxv4f32.nxv4i16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vrgatherei16.nxv8f32.nxv8i16(
+  <vscale x 8 x float>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.nxv8f32.nxv8i16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vrgatherei16.mask.nxv8f32.nxv8i16(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.mask.nxv8f32.nxv8i16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vrgatherei16.nxv16f32.nxv16i16(
+  <vscale x 16 x float>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x float> @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.nxv16f32.nxv16i16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vrgatherei16.mask.nxv16f32.nxv16i16(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x float> @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.mask.nxv16f32.nxv16i16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vrgatherei16.nxv4f64.nxv4i16(
+  <vscale x 4 x double>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.nxv4f64.nxv4i16(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vrgatherei16.mask.nxv4f64.nxv4i16(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.mask.nxv4f64.nxv4i16(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vrgatherei16.nxv8f64.nxv8i16(
+  <vscale x 8 x double>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.nxv8f64.nxv8i16(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vrgatherei16.mask.nxv8f64.nxv8i16(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vle16.v v26, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.mask.nxv8f64.nxv8i16(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x double> %a
+}

_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits