khchen created this revision.
khchen added reviewers: craig.topper, rogfer01, frasercrmck, kito-cheng, arcbbb, monkchiang, eopXD.
Herald added subscribers: VincentWu, luke957, achieveartificialintelligence, vkmr, evandro, luismarques, apazos, sameer.abuasal, s.egerton, Jim, benna, psnobl, jocewei, PkmX, the_o, brucehoult, MartinMosbeck, edward-jones, zzheng, jrtc27, niosHD, sabuasal, simoncook, johnrusso, rbar, asb, hiraditya.
khchen requested review of this revision.
Herald added subscribers: llvm-commits, cfe-commits, pcwang-thead, MaskRay.
Herald added projects: clang, LLVM.
The masked vid and viota operations are missing the policy operand.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D120227

Files:
  clang/include/clang/Basic/riscv_vector.td
  llvm/include/llvm/IR/IntrinsicsRISCV.td
  llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
  llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
  llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
  llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
  llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
  llvm/test/CodeGen/RISCV/rvv/vid.ll
  llvm/test/CodeGen/RISCV/rvv/viota.ll
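For context, the updated tests pass the new trailing policy operand as an immediate: 0 in masked-tumu.ll (tu, mu), 1 in masked-tamu.ll (ta, mu), 2 in masked-tuma.ll (tu, ma), and 3 in masked-tama.ll (ta, ma). The sketch below is not part of the patch; it only illustrates a call to the masked vid intrinsic with the added operand. iXLen is the XLEN-sized integer placeholder the tests substitute via sed, and the function name is made up for the example.

declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
  <vscale x 1 x i8>,   ; merge (maskedoff)
  <vscale x 1 x i1>,   ; mask
  iXLen,               ; vl
  iXLen);              ; policy (immediate)

define <vscale x 1 x i8> @example_vid_tama(<vscale x 1 x i1> %mask, iXLen %vl) nounwind {
entry:
  ; Policy 3 corresponds to the tail-agnostic, mask-agnostic case
  ; (masked-tama.ll), where the merge operand may be undef.
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i1> %mask,
    iXLen %vl, iXLen 3)
  ret <vscale x 1 x i8> %a
}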
Index: llvm/test/CodeGen/RISCV/rvv/viota.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/viota.ll +++ llvm/test/CodeGen/RISCV/rvv/viota.ll @@ -27,7 +27,7 @@ <vscale x 1 x i8>, <vscale x 1 x i1>, <vscale x 1 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: @@ -40,7 +40,7 @@ <vscale x 1 x i8> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 1 x i8> %a } @@ -69,7 +69,7 @@ <vscale x 2 x i8>, <vscale x 2 x i1>, <vscale x 2 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1: @@ -82,7 +82,7 @@ <vscale x 2 x i8> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 2 x i8> %a } @@ -111,7 +111,7 @@ <vscale x 4 x i8>, <vscale x 4 x i1>, <vscale x 4 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1: @@ -124,7 +124,7 @@ <vscale x 4 x i8> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 4 x i8> %a } @@ -153,7 +153,7 @@ <vscale x 8 x i8>, <vscale x 8 x i1>, <vscale x 8 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1: @@ -166,7 +166,7 @@ <vscale x 8 x i8> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 8 x i8> %a } @@ -195,7 +195,7 @@ <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1: @@ -208,7 +208,7 @@ <vscale x 16 x i8> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 16 x i8> %a } @@ -237,7 +237,7 @@ <vscale x 32 x i8>, <vscale x 32 x i1>, <vscale x 32 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1: @@ -250,7 +250,7 @@ <vscale x 32 x i8> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 32 x i8> %a } @@ -279,7 +279,7 @@ <vscale x 64 x i8>, <vscale x 64 x i1>, <vscale x 64 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1: @@ -292,7 +292,7 @@ <vscale x 64 x i8> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 64 x i8> %a } @@ -321,7 +321,7 @@ <vscale x 1 x i16>, <vscale x 1 x i1>, <vscale x 1 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1: @@ -334,7 +334,7 @@ 
<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 1 x i16> %a } @@ -363,7 +363,7 @@ <vscale x 2 x i16>, <vscale x 2 x i1>, <vscale x 2 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1: @@ -376,7 +376,7 @@ <vscale x 2 x i16> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 2 x i16> %a } @@ -405,7 +405,7 @@ <vscale x 4 x i16>, <vscale x 4 x i1>, <vscale x 4 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1: @@ -418,7 +418,7 @@ <vscale x 4 x i16> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 4 x i16> %a } @@ -447,7 +447,7 @@ <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1: @@ -460,7 +460,7 @@ <vscale x 8 x i16> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 8 x i16> %a } @@ -489,7 +489,7 @@ <vscale x 16 x i16>, <vscale x 16 x i1>, <vscale x 16 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1: @@ -502,7 +502,7 @@ <vscale x 16 x i16> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 16 x i16> %a } @@ -531,7 +531,7 @@ <vscale x 32 x i16>, <vscale x 32 x i1>, <vscale x 32 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1: @@ -544,7 +544,7 @@ <vscale x 32 x i16> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 32 x i16> %a } @@ -573,7 +573,7 @@ <vscale x 1 x i32>, <vscale x 1 x i1>, <vscale x 1 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1: @@ -586,7 +586,7 @@ <vscale x 1 x i32> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 1 x i32> %a } @@ -615,7 +615,7 @@ <vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1: @@ -628,7 +628,7 @@ <vscale x 2 x i32> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 2 x i32> %a } @@ -657,7 +657,7 @@ <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1: @@ -670,7 +670,7 @@ <vscale x 4 x i32> %0, 
<vscale x 4 x i1> %1, <vscale x 4 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 4 x i32> %a } @@ -699,7 +699,7 @@ <vscale x 8 x i32>, <vscale x 8 x i1>, <vscale x 8 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1: @@ -712,7 +712,7 @@ <vscale x 8 x i32> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 8 x i32> %a } @@ -741,7 +741,7 @@ <vscale x 16 x i32>, <vscale x 16 x i1>, <vscale x 16 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1: @@ -754,7 +754,7 @@ <vscale x 16 x i32> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 16 x i32> %a } @@ -783,7 +783,7 @@ <vscale x 1 x i64>, <vscale x 1 x i1>, <vscale x 1 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1: @@ -796,7 +796,7 @@ <vscale x 1 x i64> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 1 x i64> %a } @@ -825,7 +825,7 @@ <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1: @@ -838,7 +838,7 @@ <vscale x 2 x i64> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 2 x i64> %a } @@ -867,7 +867,7 @@ <vscale x 4 x i64>, <vscale x 4 x i1>, <vscale x 4 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1: @@ -880,7 +880,7 @@ <vscale x 4 x i64> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 4 x i64> %a } @@ -909,7 +909,7 @@ <vscale x 8 x i64>, <vscale x 8 x i1>, <vscale x 8 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1: @@ -922,7 +922,7 @@ <vscale x 8 x i64> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 8 x i64> %a } Index: llvm/test/CodeGen/RISCV/rvv/vid.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vid.ll +++ llvm/test/CodeGen/RISCV/rvv/vid.ll @@ -24,7 +24,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8( <vscale x 1 x i8>, <vscale x 1 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: @@ -36,7 +36,7 @@ %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8( <vscale x 1 x i8> %0, <vscale x 1 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 1 x i8> %a } @@ -62,7 +62,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8( <vscale x 2 x i8>, <vscale x 2 x i1>, - iXLen); + iXLen, iXLen); 
define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8: @@ -74,7 +74,7 @@ %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8( <vscale x 2 x i8> %0, <vscale x 2 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 2 x i8> %a } @@ -100,7 +100,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8( <vscale x 4 x i8>, <vscale x 4 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8: @@ -112,7 +112,7 @@ %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8( <vscale x 4 x i8> %0, <vscale x 4 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 4 x i8> %a } @@ -138,7 +138,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8( <vscale x 8 x i8>, <vscale x 8 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8: @@ -150,7 +150,7 @@ %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8( <vscale x 8 x i8> %0, <vscale x 8 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 8 x i8> %a } @@ -176,7 +176,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8( <vscale x 16 x i8>, <vscale x 16 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8: @@ -188,7 +188,7 @@ %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8( <vscale x 16 x i8> %0, <vscale x 16 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 16 x i8> %a } @@ -214,7 +214,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8( <vscale x 32 x i8>, <vscale x 32 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8: @@ -226,7 +226,7 @@ %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8( <vscale x 32 x i8> %0, <vscale x 32 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 32 x i8> %a } @@ -252,7 +252,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16( <vscale x 1 x i16>, <vscale x 1 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16: @@ -264,7 +264,7 @@ %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16( <vscale x 1 x i16> %0, <vscale x 1 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 1 x i16> %a } @@ -290,7 +290,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16( <vscale x 2 x i16>, <vscale x 2 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16: @@ -302,7 +302,7 @@ %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16( <vscale x 2 x i16> %0, <vscale x 2 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 2 x i16> %a } @@ -328,7 +328,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16( <vscale x 4 x i16>, <vscale x 4 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16: @@ -340,7 +340,7 @@ %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16( <vscale x 4 x i16> %0, <vscale x 4 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 4 x i16> %a } @@ -366,7 +366,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16( <vscale x 8 x i16>, <vscale x 8 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16: @@ -378,7 +378,7 @@ %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16( <vscale x 8 x i16> %0, <vscale x 8 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 8 x i16> %a } @@ -404,7 +404,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16( <vscale x 16 x i16>, <vscale x 16 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16: @@ -416,7 +416,7 @@ %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16( <vscale x 16 x i16> %0, <vscale x 16 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 16 x i16> %a } @@ -442,7 +442,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16( <vscale x 32 x i16>, <vscale x 32 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16: @@ -454,7 +454,7 @@ %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16( <vscale x 32 x i16> %0, <vscale x 32 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 32 x i16> %a } @@ -480,7 +480,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32( <vscale x 1 x i32>, <vscale x 1 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32: @@ -492,7 +492,7 @@ %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32( <vscale x 1 x i32> %0, <vscale x 1 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 1 x i32> %a } @@ -518,7 +518,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32( <vscale x 2 x i32>, <vscale x 2 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32: @@ -530,7 +530,7 @@ %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32( <vscale x 2 x i32> %0, <vscale x 2 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 2 x i32> %a } @@ -556,7 +556,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32( <vscale x 4 x i32>, <vscale x 4 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32: @@ -568,7 +568,7 @@ %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32( <vscale x 4 x i32> %0, <vscale x 4 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 4 x i32> %a } @@ -594,7 +594,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32( <vscale x 8 x i32>, <vscale x 8 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32: @@ 
-606,7 +606,7 @@ %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32( <vscale x 8 x i32> %0, <vscale x 8 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 8 x i32> %a } @@ -632,7 +632,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32( <vscale x 16 x i32>, <vscale x 16 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32: @@ -644,7 +644,7 @@ %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32( <vscale x 16 x i32> %0, <vscale x 16 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 16 x i32> %a } @@ -670,7 +670,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64( <vscale x 1 x i64>, <vscale x 1 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64: @@ -682,7 +682,7 @@ %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64( <vscale x 1 x i64> %0, <vscale x 1 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 1 x i64> %a } @@ -708,7 +708,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64( <vscale x 2 x i64>, <vscale x 2 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64: @@ -720,7 +720,7 @@ %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64( <vscale x 2 x i64> %0, <vscale x 2 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 2 x i64> %a } @@ -746,7 +746,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64( <vscale x 4 x i64>, <vscale x 4 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64: @@ -758,7 +758,7 @@ %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64( <vscale x 4 x i64> %0, <vscale x 4 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 4 x i64> %a } @@ -784,7 +784,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64( <vscale x 8 x i64>, <vscale x 8 x i1>, - iXLen); + iXLen, iXLen); define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64: @@ -796,7 +796,7 @@ %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64( <vscale x 8 x i64> %0, <vscale x 8 x i1> %1, - iXLen %2) + iXLen %2, iXLen 0) ret <vscale x 8 x i64> %a } Index: llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll +++ llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll @@ -1542,3 +1542,57 @@ ret <vscale x 1 x i8> %a } + +declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8( + <vscale x 1 x i8>, + <vscale x 1 x i1>, + iXLen, iXLen); + +define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vid_mask_v_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vid.v v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vid_mask_v_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vid.v v8, v0.t +; RV64-NEXT: ret +entry: + %a = call <vscale x 1 x i8> 
@llvm.riscv.vid.mask.nxv1i8( + <vscale x 1 x i8> %0, + <vscale x 1 x i1> %1, + iXLen %2, iXLen 0) + + ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8( + <vscale x 1 x i8>, + <vscale x 1 x i1>, + <vscale x 1 x i1>, + iXLen, iXLen); + +define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: viota.m v8, v0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: viota.m v8, v0, v0.t +; RV64-NEXT: ret +entry: + %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8( + <vscale x 1 x i8> %0, + <vscale x 1 x i1> %1, + <vscale x 1 x i1> %1, + iXLen %2, iXLen 0) + + ret <vscale x 1 x i8> %a +} Index: llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll +++ llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll @@ -1542,3 +1542,57 @@ ret <vscale x 1 x i8> %a } + +declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8( + <vscale x 1 x i8>, + <vscale x 1 x i1>, + iXLen, iXLen); + +define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vid_mask_v_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; RV32-NEXT: vid.v v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vid_mask_v_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; RV64-NEXT: vid.v v8, v0.t +; RV64-NEXT: ret +entry: + %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8( + <vscale x 1 x i8> %0, + <vscale x 1 x i1> %1, + iXLen %2, iXLen 2) + + ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8( + <vscale x 1 x i8>, + <vscale x 1 x i1>, + <vscale x 1 x i1>, + iXLen, iXLen); + +define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; RV32-NEXT: viota.m v8, v0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; RV64-NEXT: viota.m v8, v0, v0.t +; RV64-NEXT: ret +entry: + %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8( + <vscale x 1 x i8> %0, + <vscale x 1 x i1> %1, + <vscale x 1 x i1> %1, + iXLen %2, iXLen 2) + + ret <vscale x 1 x i8> %a +} Index: llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll +++ llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll @@ -1542,3 +1542,57 @@ ret <vscale x 1 x i8> %a } + +declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8( + <vscale x 1 x i8>, + <vscale x 1 x i1>, + iXLen, iXLen); + +define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vid_mask_v_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vid.v v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vid_mask_v_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: 
vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vid.v v8, v0.t +; RV64-NEXT: ret +entry: + %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8( + <vscale x 1 x i8> %0, + <vscale x 1 x i1> %1, + iXLen %2, iXLen 1) + + ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8( + <vscale x 1 x i8>, + <vscale x 1 x i1>, + <vscale x 1 x i1>, + iXLen, iXLen); + +define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: viota.m v8, v0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: viota.m v8, v0, v0.t +; RV64-NEXT: ret +entry: + %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8( + <vscale x 1 x i8> %0, + <vscale x 1 x i1> %1, + <vscale x 1 x i1> %1, + iXLen %2, iXLen 1) + + ret <vscale x 1 x i8> %a +} Index: llvm/test/CodeGen/RISCV/rvv/masked-tama.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/masked-tama.ll +++ llvm/test/CodeGen/RISCV/rvv/masked-tama.ll @@ -1581,3 +1581,61 @@ ret <vscale x 1 x i8> %a } + +declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8( + <vscale x 1 x i8>, + <vscale x 1 x i1>, + iXLen, iXLen); + +define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i1> %0, iXLen %1) nounwind { +; RV32-LABEL: intrinsic_vid_mask_v_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; RV32-NEXT: vid.v v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vid_mask_v_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; RV64-NEXT: vid.v v8, v0.t +; RV64-NEXT: ret +entry: + %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8( + <vscale x 1 x i8> undef, + <vscale x 1 x i1> %0, + iXLen %1, iXLen 3) + + ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8( + <vscale x 1 x i8>, + <vscale x 1 x i1>, + <vscale x 1 x i1>, + iXLen, iXLen); + +define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vmv1r.v v9, v0 +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; RV32-NEXT: vmv1r.v v0, v8 +; RV32-NEXT: viota.m v8, v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v9, v0 +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; RV64-NEXT: vmv1r.v v0, v8 +; RV64-NEXT: viota.m v8, v9, v0.t +; RV64-NEXT: ret +entry: + %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8( + <vscale x 1 x i8> undef, + <vscale x 1 x i1> %0, + <vscale x 1 x i1> %1, + iXLen %2, iXLen 3) + + ret <vscale x 1 x i8> %a +} Index: llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td =================================================================== --- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -909,7 +909,7 @@ class VPseudoNullaryMask<VReg RegClass>: Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd), (ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl, - ixlenimm:$sew), []>, RISCVVPseudo { + ixlenimm:$sew, ixlenimm:$policy), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 
0; @@ -918,6 +918,7 @@ let HasSEWOp = 1; let HasMergeOp = 1; let IsValidMaskPolicy = 1; + let HasVecPolicyOp = 1; let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst); } @@ -1733,7 +1734,7 @@ Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>; def "_" # m.MX # "_TU" : VPseudoUnaryNoMaskTU<m.vrclass, VR, constraint>, Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>; - def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>, + def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, VR, constraint>, Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>; } } @@ -3310,8 +3311,8 @@ vti.Log2SEW, vti.LMul, VR>; def : VPatUnaryNoMaskTU<intrinsic, instruction, "M", vti.Vector, vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass,VR>; - def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask, - vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>; + def : VPatUnaryMaskTA<intrinsic, instruction, "M", vti.Vector, vti.Mask, + vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>; } } @@ -3364,10 +3365,10 @@ vti.RegClass:$merge, GPR:$vl, vti.Log2SEW)>; def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask") (vti.Vector vti.RegClass:$merge), - (vti.Mask V0), VLOpFrag)), + (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))), (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK") vti.RegClass:$merge, (vti.Mask V0), - GPR:$vl, vti.Log2SEW)>; + GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; } } Index: llvm/include/llvm/IR/IntrinsicsRISCV.td =================================================================== --- llvm/include/llvm/IR/IntrinsicsRISCV.td +++ llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -816,7 +816,7 @@ } // Output: (vector) // Input: (passthru, vl) - class RISCVNullaryIntrinsicTU + class RISCVID : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { @@ -1460,26 +1460,26 @@ let VLOperand = 2; } // Output: (vector) - // Input: (maskedoff, mask type vector_in, mask, vl) + // Input: (maskedoff, mask type vector_in, mask, vl, policy) def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, - llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic { + llvm_anyint_ty, LLVMMatchType<1>], + [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 3; } // Output: (vector) // Input: (passthru, vl) - def int_riscv_vid : RISCVNullaryIntrinsicTU; + def int_riscv_vid : RISCVID; // Output: (vector) - // Input: (maskedoff, mask, vl) + // Input: (maskedoff, mask, vl, policy) def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, - llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic { + llvm_anyint_ty, LLVMMatchType<1>], + [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 2; } Index: clang/include/clang/Basic/riscv_vector.td =================================================================== --- clang/include/clang/Basic/riscv_vector.td +++ clang/include/clang/Basic/riscv_vector.td @@ -2007,6 +2007,7 @@ // 16.6. vmsof.m set-only-first mask bit def vmsof : RVVMaskUnaryBuiltin; +} let NoMaskPolicy = HasPassthruOperand, HasNoMaskedOverloaded = false in { // 16.8. Vector Iota Instruction @@ -2016,7 +2017,6 @@ defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"], ["v", "Uv", "Uv"]]>; } -} // 17. Vector Permutation Instructions // 17.1. Integer Scalar Move Instructions