Author: Fraser Cormack
Date: 2021-01-19T15:46:56Z
New Revision: 15fd6bae0e4938ebbd4751a4ba1c85b4145894b5
URL: https://github.com/llvm/llvm-project/commit/15fd6bae0e4938ebbd4751a4ba1c85b4145894b5
DIFF: https://github.com/llvm/llvm-project/commit/15fd6bae0e4938ebbd4751a4ba1c85b4145894b5.diff

LOG: [RISCV] Extend RVV VType info with the type's AVL (NFC)

This patch factors out the "VLMax" operand passed to most scalable-vector
ISel patterns into a property of each VType.

This is seen as a preparatory change to allow RVV in the future to more
easily support fixed-length vector types with constrained vector lengths,
with the AVL operand set to the length of the fixed-length vector. It has
no effect on the scalable code generation path.

Reviewed By: HsiangKai

Differential Revision: https://reviews.llvm.org/D94594

Added:


Modified:
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Removed:



################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 1406c8645f73..85826b26eedf 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -137,6 +137,9 @@ class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
   LMULInfo LMul = M;
   ValueType Scalar = Scal;
   RegisterClass ScalarRegClass = ScalarReg;
+  // The pattern fragment which produces the AVL operand, representing the
+  // "natural" vector length for this type. For scalable vectors this is VLMax.
+  OutPatFrag AVL = VLMax;
 }
 
 class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
@@ -234,6 +237,10 @@ class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
   int SEW = 8;
   LMULInfo LMul = M;
   string BX = Bx; // Appendix of mask operations.
+  // The pattern fragment which produces the AVL operand, representing the
+  // "natural" vector length for this mask type. For scalable masks this is
+  // VLMax.
+  OutPatFrag AVL = VLMax;
 }
 
 defset list<MTypeInfo> AllMasks = {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 5b4051c534a3..4a6571010ec1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -47,6 +47,7 @@ multiclass VPatUSLoadStoreSDNode<LLVMType type,
                                  LLVMType mask_type,
                                  int sew,
                                  LMULInfo vlmul,
+                                 OutPatFrag avl,
                                  RegisterClass reg_rs1,
                                  VReg reg_class>
 {
@@ -54,16 +55,16 @@ multiclass VPatUSLoadStoreSDNode<LLVMType type,
   defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
   // Load
   def : Pat<(type (load reg_rs1:$rs1)),
-            (load_instr reg_rs1:$rs1, VLMax, sew)>;
+            (load_instr reg_rs1:$rs1, avl, sew)>;
   // Store
   def : Pat<(store type:$rs2, reg_rs1:$rs1),
-            (store_instr reg_class:$rs2, reg_rs1:$rs1, VLMax, sew)>;
+            (store_instr reg_class:$rs2, reg_rs1:$rs1, avl, sew)>;
 }
 
 multiclass VPatUSLoadStoreSDNodes<RegisterClass reg_rs1> {
   foreach vti = AllVectors in
     defm "" : VPatUSLoadStoreSDNode<vti.Vector, vti.Mask, vti.SEW, vti.LMul,
-                                    reg_rs1, vti.RegClass>;
+                                    vti.AVL, reg_rs1, vti.RegClass>;
 }
 
 class VPatBinarySDNode_VV<SDNode vop,
@@ -73,6 +74,7 @@ class VPatBinarySDNode_VV<SDNode vop,
                           ValueType mask_type,
                           int sew,
                           LMULInfo vlmul,
+                          OutPatFrag avl,
                           VReg RetClass,
                           VReg op_reg_class> :
     Pat<(result_type (vop
@@ -81,7 +83,7 @@ class VPatBinarySDNode_VV<SDNode vop,
         (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
                      op_reg_class:$rs1,
                      op_reg_class:$rs2,
-                     VLMax, sew)>;
+                     avl, sew)>;
 
 class VPatBinarySDNode_XI<SDNode vop,
                           string instruction_name,
@@ -92,6 +94,7 @@ class VPatBinarySDNode_XI<SDNode vop,
                           ValueType mask_type,
                           int sew,
                           LMULInfo vlmul,
+                          OutPatFrag avl,
                           VReg RetClass,
                           VReg vop_reg_class,
                           ComplexPattern SplatPatKind,
@@ -102,17 +105,17 @@ class VPatBinarySDNode_XI<SDNode vop,
         (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX)
                      vop_reg_class:$rs1,
                      xop_kind:$rs2,
-                     VLMax, sew)>;
+                     avl, sew)>;
 
 multiclass VPatBinarySDNode_VV_VX<SDNode vop, string instruction_name> {
   foreach vti = AllIntegerVectors in {
     def : VPatBinarySDNode_VV<vop, instruction_name,
                               vti.Vector, vti.Vector, vti.Mask, vti.SEW,
-                              vti.LMul, vti.RegClass, vti.RegClass>;
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
     def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
                               vti.Vector, vti.Vector, XLenVT, vti.Mask, vti.SEW,
-                              vti.LMul, vti.RegClass, vti.RegClass,
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
                               SplatPat, GPR>;
   }
 }
@@ -123,14 +126,14 @@ multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
   foreach vti = AllIntegerVectors in {
     def : VPatBinarySDNode_VV<vop, instruction_name,
                               vti.Vector, vti.Vector, vti.Mask, vti.SEW,
-                              vti.LMul, vti.RegClass, vti.RegClass>;
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
     def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
                               vti.Vector, vti.Vector, XLenVT, vti.Mask, vti.SEW,
-                              vti.LMul, vti.RegClass, vti.RegClass,
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
                               SplatPat, GPR>;
     def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
                               vti.Vector, vti.Vector, XLenVT, vti.Mask, vti.SEW,
-                              vti.LMul, vti.RegClass, vti.RegClass,
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
                               !cast<ComplexPattern>(SplatPat#_#ImmType),
                               ImmType>;
   }
@@ -144,6 +147,7 @@ class VPatBinarySDNode_VF<SDNode vop,
                           ValueType mask_type,
                           int sew,
                           LMULInfo vlmul,
+                          OutPatFrag avl,
                           VReg RetClass,
                           VReg vop_reg_class,
                           DAGOperand xop_kind> :
@@ -152,16 +156,16 @@ class VPatBinarySDNode_VF<SDNode vop,
         (!cast<Instruction>(instruction_name#"_VF_"#vlmul.MX)
                      vop_reg_class:$rs1,
                      ToFPR32<xop_type, xop_kind, "rs2">.ret,
-                     VLMax, sew)>;
+                     avl, sew)>;
 
 multiclass VPatBinaryFPSDNode_VV_VF<SDNode vop, string instruction_name> {
   foreach vti = AllFloatVectors in {
     def : VPatBinarySDNode_VV<vop, instruction_name,
                               vti.Vector, vti.Vector, vti.Mask, vti.SEW,
-                              vti.LMul, vti.RegClass, vti.RegClass>;
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
     def : VPatBinarySDNode_VF<vop, instruction_name,
                               vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
-                              vti.SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                              vti.SEW, vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
                               vti.ScalarRegClass>;
   }
 }
@@ -173,7 +177,7 @@ multiclass VPatBinaryFPSDNode_R_VF<SDNode vop, string instruction_name> {
               (!cast<Instruction>(instruction_name#"_VF_"#fvti.LMul.MX)
                            fvti.RegClass:$rs1,
                            ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
-                           VLMax, fvti.SEW)>;
+                           fvti.AVL, fvti.SEW)>;
 }
 
 multiclass VPatIntegerSetCCSDNode_VV<CondCode cc,
@@ -186,7 +190,7 @@ multiclass VPatIntegerSetCCSDNode_VV<CondCode cc,
               SwapHelper<(instruction),
                          (instruction vti.RegClass:$rs1),
                          (instruction vti.RegClass:$rs2),
-                         (instruction VLMax, vti.SEW),
+                         (instruction vti.AVL, vti.SEW),
                          swap>.Value>;
   }
 }
@@ -204,7 +208,7 @@ multiclass VPatIntegerSetCCSDNode_XI<CondCode cc,
               SwapHelper<(instruction),
                          (instruction vti.RegClass:$rs1),
                          (instruction xop_kind:$rs2),
-                         (instruction VLMax, vti.SEW),
+                         (instruction vti.AVL, vti.SEW),
                          swap>.Value>;
   }
 }
@@ -242,7 +246,7 @@ multiclass VPatFPSetCCSDNode_VV<CondCode cc, string instruction_name> {
                                 (fvti.Vector fvti.RegClass:$rs2),
                                 cc)),
               (!cast<Instruction>(instruction_name#"_VV_"#fvti.LMul.MX)
-                  fvti.RegClass:$rs1, fvti.RegClass:$rs2, VLMax, fvti.SEW)>;
+                  fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.SEW)>;
 }
 
 multiclass VPatFPSetCCSDNode_VF<CondCode cc, string instruction_name> {
@@ -253,7 +257,7 @@ multiclass VPatFPSetCCSDNode_VF<CondCode cc, string instruction_name> {
               (!cast<Instruction>(instruction_name#"_VF_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1,
                   ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
-                  VLMax, fvti.SEW)>;
+                  fvti.AVL, fvti.SEW)>;
 }
 
 multiclass VPatFPSetCCSDNode_FV<CondCode cc, string swapped_op_instruction_name> {
@@ -264,7 +268,7 @@ multiclass VPatFPSetCCSDNode_FV<CondCode cc, string swapped_op_instruction_name>
               (!cast<Instruction>(swapped_op_instruction_name#"_VF_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1,
                   ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
-                  VLMax, fvti.SEW)>;
+                  fvti.AVL, fvti.SEW)>;
 }
 
 multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
@@ -283,7 +287,7 @@ multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
     foreach op = ops in
       def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
                 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
-                    fti.RegClass:$rs2, VLMax, vti.SEW)>;
+                    fti.RegClass:$rs2, fti.AVL, vti.SEW)>;
   }
 }
 
@@ -306,11 +310,11 @@ foreach vti = AllIntegerVectors in {
   def : Pat<(sub (vti.Vector (SplatPat XLenVT:$rs2)),
                  (vti.Vector vti.RegClass:$rs1)),
             (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
-                 vti.RegClass:$rs1, GPR:$rs2, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.SEW)>;
   def : Pat<(sub (vti.Vector (SplatPat_simm5 XLenVT:$rs2)),
                  (vti.Vector vti.RegClass:$rs1)),
             (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
-                 vti.RegClass:$rs1, simm5:$rs2, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.SEW)>;
 }
 
 // 12.3. Vector Integer Extension
@@ -343,7 +347,7 @@ foreach vtiTofti = AllFractionableVF2IntVectors in {
   defvar fti = vtiTofti.Fti;
   def : Pat<(fti.Vector (riscv_trunc_vector (vti.Vector vti.RegClass:$rs1))),
             (!cast<Instruction>("PseudoVNSRL_WI_"#fti.LMul.MX)
-                vti.RegClass:$rs1, 0, VLMax, fti.SEW)>;
+                vti.RegClass:$rs1, 0, fti.AVL, fti.SEW)>;
 }
 
 // 12.8. Vector Integer Comparison Instructions
@@ -390,47 +394,48 @@ foreach vti = AllIntegerVectors in {
   def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), vti.RegClass:$rs1,
                                                       vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
+                 vti.AVL, vti.SEW)>;
 
   def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat XLenVT:$rs1),
                                                       vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
 
   def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat_simm5 simm5:$rs1),
                                                       vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
 }
 
 // 16.1. Vector Mask-Register Logical Instructions
 foreach mti = AllMasks in {
   def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
             (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
-                VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
             (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
-                VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
             (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
-                VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
 
   def : Pat<(mti.Mask (vnot (and VR:$rs1, VR:$rs2))),
             (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
-                VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (vnot (or VR:$rs1, VR:$rs2))),
             (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
-                VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (vnot (xor VR:$rs1, VR:$rs2))),
             (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
-                VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
 
   def : Pat<(mti.Mask (and VR:$rs1, (vnot VR:$rs2))),
             (!cast<Instruction>("PseudoVMANDNOT_MM_"#mti.LMul.MX)
-                VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (or VR:$rs1, (vnot VR:$rs2))),
             (!cast<Instruction>("PseudoVMORNOT_MM_"#mti.LMul.MX)
-                VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
 }
 
 } // Predicates = [HasStdExtV]
@@ -468,7 +473,7 @@ foreach fvti = AllFloatVectors in {
                                                         fvti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                  fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
-                 VLMax, fvti.SEW)>;
+                 fvti.AVL, fvti.SEW)>;
 
   def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
                                   (splat_vector fvti.ScalarRegClass:$rs1),
@@ -476,13 +481,13 @@ foreach fvti = AllFloatVectors in {
             (!cast<Instruction>("PseudoVFMERGE_VFM_"#fvti.LMul.MX)
                  fvti.RegClass:$rs2,
                  ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs1">.ret,
-                 VMV0:$vm, VLMax, fvti.SEW)>;
+                 VMV0:$vm, fvti.AVL, fvti.SEW)>;
 
   def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
                                   (splat_vector (fvti.Scalar fpimm0)),
                                   fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX) - fvti.RegClass:$rs2, 0, VMV0:$vm, VLMax, fvti.SEW)>; + fvti.RegClass:$rs2, 0, VMV0:$vm, fvti.AVL, fvti.SEW)>; } } // Predicates = [HasStdExtV, HasStdExtF] @@ -494,17 +499,17 @@ let Predicates = [HasStdExtV] in { foreach vti = AllIntegerVectors in { def : Pat<(vti.Vector (splat_vector GPR:$rs1)), (!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX) - GPR:$rs1, VLMax, vti.SEW)>; + GPR:$rs1, vti.AVL, vti.SEW)>; def : Pat<(vti.Vector (splat_vector simm5:$rs1)), (!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX) - simm5:$rs1, VLMax, vti.SEW)>; + simm5:$rs1, vti.AVL, vti.SEW)>; } foreach mti = AllMasks in { def : Pat<(mti.Mask immAllOnesV), - (!cast<Instruction>("PseudoVMSET_M_"#mti.BX) VLMax, mti.SEW)>; + (!cast<Instruction>("PseudoVMSET_M_"#mti.BX) mti.AVL, mti.SEW)>; def : Pat<(mti.Mask immAllZerosV), - (!cast<Instruction>("PseudoVMCLR_M_"#mti.BX) VLMax, mti.SEW)>; + (!cast<Instruction>("PseudoVMCLR_M_"#mti.BX) mti.AVL, mti.SEW)>; } } // Predicates = [HasStdExtV] @@ -513,10 +518,10 @@ foreach vti = AllIntegerVectors in { if !eq(vti.SEW, 64) then { def : Pat<(vti.Vector (rv32_splat_i64 GPR:$rs1)), (!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX) - GPR:$rs1, VLMax, vti.SEW)>; + GPR:$rs1, vti.AVL, vti.SEW)>; def : Pat<(vti.Vector (rv32_splat_i64 simm5:$rs1)), (!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX) - simm5:$rs1, VLMax, vti.SEW)>; + simm5:$rs1, vti.AVL, vti.SEW)>; } } } // Predicates = [HasStdExtV, IsRV32] @@ -526,10 +531,10 @@ foreach fvti = AllFloatVectors in { def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)), (!cast<Instruction>("PseudoVFMV_V_F_"#fvti.LMul.MX) ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs1">.ret, - VLMax, fvti.SEW)>; + fvti.AVL, fvti.SEW)>; def : Pat<(fvti.Vector (splat_vector (fvti.Scalar fpimm0))), (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX) - 0, VLMax, fvti.SEW)>; + 0, fvti.AVL, fvti.SEW)>; } } // Predicates = [HasStdExtV, HasStdExtF] _______________________________________________ llvm-branch-commits mailing list llvm-branch-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits