HsiangKai updated this revision to Diff 358205.
HsiangKai added a comment.

Add the tail policy (TA) argument to most of the masked intrinsics.
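
For example (a minimal C sketch based on the tests in this diff; the function
names keep_tail and drop_tail are illustrative): most masked intrinsics gain a
_mt variant whose last argument selects the tail policy, and the existing _m
spelling becomes a macro that forwards to _mt with VE_TAIL_AGNOSTIC.

  #include <riscv_vector.h>

  vint8m1_t keep_tail(vbool8_t mask, vint8m1_t maskedoff,
                      vint8m1_t op1, vint8m1_t op2, size_t vl) {
    // Tail elements keep their previous values from maskedoff.
    return vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
  }

  vint8m1_t drop_tail(vbool8_t mask, vint8m1_t maskedoff,
                      vint8m1_t op1, vint8m1_t op2, size_t vl) {
    // Expands to vadd_vv_i8m1_mt(..., VE_TAIL_AGNOSTIC); the tail elements
    // are left agnostic.
    return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
  }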


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D105092/new/

https://reviews.llvm.org/D105092

Files:
  clang/include/clang/Basic/riscv_vector.td
  clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c
  clang/utils/TableGen/RISCVVEmitter.cpp
  llvm/include/llvm/IR/IntrinsicsRISCV.td
  llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
  llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
  llvm/lib/Target/RISCV/RISCVInstrFormats.td
  llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
  llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
  llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll

Index: llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64, i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_tu(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_tu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 3, e8, m1, tu, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4, i64 0)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_ta(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_ta:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 3, e8, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
Index: llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -22,6 +22,9 @@
 // Helpers to define the VL patterns.
 //===----------------------------------------------------------------------===//
 
+defvar TAIL_AGNOSTIC = 0;
+defvar TAIL_UNDISTURBED = 1;
+
 def SDT_RISCVVLE_VL : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>,
                                            SDTCisVT<2, XLenVT>]>;
 def SDT_RISCVVSE_VL : SDTypeProfile<0, 3, [SDTCisVec<0>, SDTCisPtrTy<1>,
@@ -266,7 +269,7 @@
                      (result_type (IMPLICIT_DEF)),
                      op_reg_class:$rs1,
                      op_reg_class:$rs2,
-                     VMV0:$vm, GPR:$vl, sew)>;
+                     VMV0:$vm, GPR:$vl, sew, TAIL_AGNOSTIC)>;
 }
 
 multiclass VPatBinaryVL_XI<SDNode vop,
@@ -299,7 +302,7 @@
                      (result_type (IMPLICIT_DEF)),
                      vop_reg_class:$rs1,
                      xop_kind:$rs2,
-                     VMV0:$vm, GPR:$vl, sew)>;
+                     VMV0:$vm, GPR:$vl, sew, TAIL_AGNOSTIC)>;
 }
 
 multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> {
@@ -604,7 +607,7 @@
                           VLOpFrag),
             (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
-                 VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
+                 VMV0:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
   def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                           (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask),
                           VLOpFrag),
@@ -615,7 +618,7 @@
                           VLOpFrag),
             (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, simm5:$rs2,
-                 VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
+                 VMV0:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 }
 
 // 12.3. Vector Integer Extension
@@ -1210,7 +1213,7 @@
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
                  vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
-                 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
+                 vti.Mask:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 
   // emul = lmul * 16 / sew
   defvar vlmul = vti.LMul;
@@ -1237,7 +1240,7 @@
                                             VLOpFrag)),
               (!cast<Instruction>(inst#"_MASK")
                    vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
-                   vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
+                   vti.Mask:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
 
@@ -1281,7 +1284,7 @@
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
                  vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
-                 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
+                 vti.Mask:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 
   defvar vlmul = vti.LMul;
   defvar octuple_lmul = vlmul.octuple;
@@ -1307,7 +1310,7 @@
                                             VLOpFrag)),
               (!cast<Instruction>(inst#"_MASK")
                    vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
-                   vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
+                   vti.Mask:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
 
Index: llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -642,7 +642,7 @@
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge,
                    GPR:$rs1,
-                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
+                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, uimm5:$policy),[]>,
       RISCVVPseudo,
       RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
@@ -652,6 +652,7 @@
   let HasVLOp = 1;
   let HasSEWOp = 1;
   let HasMergeOp = 1;
+  let HasPolicyOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -673,7 +674,7 @@
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge,
                    GPR:$rs1, GPR:$rs2,
-                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
+                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, uimm5:$policy),[]>,
       RISCVVPseudo,
       RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
@@ -683,6 +684,7 @@
   let HasVLOp = 1;
   let HasSEWOp = 1;
   let HasMergeOp = 1;
+  let HasPolicyOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -707,7 +709,7 @@
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge,
                    GPR:$rs1, IdxClass:$rs2,
-                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
+                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, uimm5:$policy),[]>,
       RISCVVPseudo,
       RISCVVLX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
   let mayLoad = 1;
@@ -717,6 +719,7 @@
   let HasVLOp = 1;
   let HasSEWOp = 1;
   let HasMergeOp = 1;
+  let HasPolicyOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -860,6 +863,22 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoUnaryMaskTA<VReg RetClass, VReg OpClass, string Constraint = ""> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, uimm5:$policy), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasPolicyOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 // mask unary operation without maskedoff
 class VPseudoMaskUnarySOutMask:
         Pseudo<(outs GPR:$rd),
@@ -975,6 +994,26 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoBinaryMaskTA<VReg RetClass,
+                          RegisterClass Op1Class,
+                          DAGOperand Op2Class,
+                          string Constraint> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+                (ins GetVRegNoV0<RetClass>.R:$merge,
+                     Op1Class:$rs2, Op2Class:$rs1,
+                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, uimm5:$policy), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasPolicyOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 // Like VPseudoBinaryMask, but output can be V0.
 class VPseudoBinaryMOutMask<VReg RetClass,
                             RegisterClass Op1Class,
@@ -1004,7 +1043,7 @@
         Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                 (ins GetVRegNoV0<RetClass>.R:$merge,
                      Op2Class:$rs1,
-                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
+                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, uimm5:$policy), []>,
         RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1013,6 +1052,7 @@
   let HasVLOp = 1;
   let HasSEWOp = 1;
   let HasMergeOp = 0; // Merge is also rs2.
+  let HasPolicyOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -1491,8 +1531,8 @@
   let VLMul = MInfo.value in {
     def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                              Constraint>;
-    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class,
-                                                     Constraint>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskTA<RetClass, Op1Class, Op2Class,
+                                                       Constraint>;
   }
 }
 
@@ -1519,8 +1559,8 @@
   let VLMul = lmul.value in {
     def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                                             Constraint>;
-    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class,
-                                                                    Constraint>;
+    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskTA<RetClass, Op1Class, Op2Class,
+                                                                      Constraint>;
   }
 }
 
@@ -1712,6 +1752,15 @@
   }
 }
 
+multiclass VPseudoUnaryTAV_V {
+  foreach m = MxList.m in {
+    let VLMul = m.value in {
+      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
+      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>;
+    }
+  }
+}
+
 multiclass VPseudoUnaryV_V {
   foreach m = MxList.m in {
     let VLMul = m.value in {
@@ -1727,8 +1776,8 @@
   {
     let VLMul = m.value in {
       def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>;
-      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f2vrclass,
-                                                  constraints>;
+      def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.f2vrclass,
+                                                    constraints>;
     }
   }
 }
@@ -1739,8 +1788,8 @@
   {
     let VLMul = m.value in {
       def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>;
-      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f4vrclass,
-                                                  constraints>;
+      def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.f4vrclass,
+                                                    constraints>;
     }
   }
 }
@@ -1751,8 +1800,8 @@
   {
     let VLMul = m.value in {
       def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>;
-      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f8vrclass,
-                                                  constraints>;
+      def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.f8vrclass,
+                                                    constraints>;
     }
   }
 }
@@ -2024,8 +2073,8 @@
                              string Constraint = ""> {
   let VLMul = MInfo.value in {
     def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>;
-    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask<RetClass, Op1Class,
-                                                    Constraint>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA<RetClass, Op1Class,
+                                                      Constraint>;
   }
 }
 
@@ -2208,6 +2257,26 @@
                    (op2_type op2_reg_class:$rs2),
                    (mask_type V0), GPR:$vl, sew)>;
 
+class VPatUnaryMaskTA<string intrinsic_name,
+                      string inst,
+                      string kind,
+                      ValueType result_type,
+                      ValueType op2_type,
+                      ValueType mask_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      VReg result_reg_class,
+                      VReg op2_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_reg_class:$rs2),
+                   (mask_type V0),
+                   VLOpFrag, (XLenVT uimm5:$policy))),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_reg_class:$rs2),
+                   (mask_type V0), GPR:$vl, sew, (XLenVT uimm5:$policy))>;
+
 class VPatMaskUnaryNoMask<string intrinsic_name,
                           string inst,
                           MTypeInfo mti> :
@@ -2309,6 +2378,28 @@
                    (op2_type op2_kind:$rs2),
                    (mask_type V0), GPR:$vl, sew)>;
 
+class VPatBinaryMaskTA<string intrinsic_name,
+                       string inst,
+                       ValueType result_type,
+                       ValueType op1_type,
+                       ValueType op2_type,
+                       ValueType mask_type,
+                       int sew,
+                       VReg result_reg_class,
+                       VReg op1_reg_class,
+                       DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (mask_type V0),
+                   VLOpFrag, (XLenVT uimm5:$policy))),
+                   (!cast<Instruction>(inst#"_MASK")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (mask_type V0), GPR:$vl, sew, (XLenVT uimm5:$policy))>;
+
 // Same as above but source operands are swapped.
 class VPatBinaryMaskSwapped<string intrinsic_name,
                             string inst,
@@ -2361,11 +2452,11 @@
                    (result_type result_reg_class:$merge),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
-                   VLOpFrag)),
+                   VLOpFrag, (XLenVT uimm5:$policy))),
                    (!cast<Instruction>(inst#"_MASK_TIED")
                    (result_type result_reg_class:$merge),
                    (op2_type op2_kind:$rs2),
-                   (mask_type V0), GPR:$vl, sew)>;
+                   (mask_type V0), GPR:$vl, sew, (XLenVT uimm5:$policy))>;
 
 class VPatTernaryNoMask<string intrinsic,
                         string inst,
@@ -2505,9 +2596,9 @@
       def : VPatUnaryNoMask<intrinsic, instruction, suffix,
                             vti.Vector, fti.Vector,
                             vti.Log2SEW, vti.LMul, fti.RegClass>;
-      def : VPatUnaryMask<intrinsic, instruction, suffix,
-                          vti.Vector, fti.Vector, vti.Mask,
-                          vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
+      def : VPatUnaryMaskTA<intrinsic, instruction, suffix,
+                            vti.Vector, fti.Vector, vti.Mask,
+                            vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
    }
 }
 
@@ -2517,9 +2608,9 @@
     def : VPatUnaryNoMask<intrinsic, instruction, "V",
                           vti.Vector, vti.Vector,
                           vti.Log2SEW, vti.LMul, vti.RegClass>;
-    def : VPatUnaryMask<intrinsic, instruction, "V",
-                        vti.Vector, vti.Vector, vti.Mask,
-                        vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
+    def : VPatUnaryMaskTA<intrinsic, instruction, "V",
+                          vti.Vector, vti.Vector, vti.Mask,
+                          vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
   }
 }
 
@@ -2565,6 +2656,24 @@
                        op2_kind>;
 }
 
+multiclass VPatBinaryTA<string intrinsic,
+                        string inst,
+                        ValueType result_type,
+                        ValueType op1_type,
+                        ValueType op2_type,
+                        ValueType mask_type,
+                        int sew,
+                        VReg result_reg_class,
+                        VReg op1_reg_class,
+                        DAGOperand op2_kind>
+{
+  def : VPatBinaryNoMask<intrinsic, inst, result_type, op1_type, op2_type,
+                         sew, op1_reg_class, op2_kind>;
+  def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
+                         mask_type, sew, result_reg_class, op1_reg_class,
+                         op2_kind>;
+}
+
 multiclass VPatBinarySwapped<string intrinsic,
                       string inst,
                       ValueType result_type,
@@ -2644,23 +2753,40 @@
                       mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
 }
 
+multiclass VPatConversionTA<string intrinsic,
+                            string inst,
+                            string kind,
+                            ValueType result_type,
+                            ValueType op1_type,
+                            ValueType mask_type,
+                            int sew,
+                            LMULInfo vlmul,
+                            VReg result_reg_class,
+                            VReg op1_reg_class>
+{
+  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
+                        sew, vlmul, op1_reg_class>;
+  def : VPatUnaryMaskTA<intrinsic, inst, kind, result_type, op1_type,
+                        mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
+}
+
 multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
-    defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
-                      vti.Vector, vti.Vector, vti.Vector,vti.Mask,
-                      vti.Log2SEW, vti.RegClass,
-                      vti.RegClass, vti.RegClass>;
+    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # vti.LMul.MX,
+                        vti.Vector, vti.Vector, vti.Vector,vti.Mask,
+                        vti.Log2SEW, vti.RegClass,
+                        vti.RegClass, vti.RegClass>;
 }
 
 multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in {
     defvar ivti = GetIntVTypeInfo<vti>.Vti;
-    defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
-                      vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
-                      vti.Log2SEW, vti.RegClass,
-                      vti.RegClass, vti.RegClass>;
+    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # vti.LMul.MX,
+                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
+                        vti.Log2SEW, vti.RegClass,
+                        vti.RegClass, vti.RegClass>;
   }
 }
 
@@ -2675,10 +2801,10 @@
       defvar emul_str = octuple_to_str<octuple_emul>.ret;
       defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
       defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str;
-      defm : VPatBinary<intrinsic, inst,
-                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
-                        vti.Log2SEW, vti.RegClass,
-                        vti.RegClass, ivti.RegClass>;
+      defm : VPatBinaryTA<intrinsic, inst,
+                          vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
+                          vti.Log2SEW, vti.RegClass,
+                          vti.RegClass, ivti.RegClass>;
     }
   }
 }
@@ -2687,29 +2813,29 @@
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in {
     defvar kind = "V"#vti.ScalarSuffix;
-    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
-                      vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
-                      vti.Log2SEW, vti.RegClass,
-                      vti.RegClass, vti.ScalarRegClass>;
+    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
+                        vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
+                        vti.Log2SEW, vti.RegClass,
+                        vti.RegClass, vti.ScalarRegClass>;
   }
 }
 
 multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
-    defm : VPatBinary<intrinsic, instruction # "_VX_" # vti.LMul.MX,
-                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
-                      vti.Log2SEW, vti.RegClass,
-                      vti.RegClass, GPR>;
+    defm : VPatBinaryTA<intrinsic, instruction # "_VX_" # vti.LMul.MX,
+                        vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                        vti.Log2SEW, vti.RegClass,
+                        vti.RegClass, GPR>;
 }
 
 multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist, Operand imm_type> {
   foreach vti = vtilist in
-    defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
-                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
-                      vti.Log2SEW, vti.RegClass,
-                      vti.RegClass, imm_type>;
+    defm : VPatBinaryTA<intrinsic, instruction # "_VI_" # vti.LMul.MX,
+                        vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                        vti.Log2SEW, vti.RegClass,
+                        vti.RegClass, imm_type>;
 }
 
 multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
@@ -2724,10 +2850,10 @@
   foreach VtiToWti = vtilist in {
     defvar Vti = VtiToWti.Vti;
     defvar Wti = VtiToWti.Wti;
-    defm : VPatBinary<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
-                      Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
-                      Vti.Log2SEW, Wti.RegClass,
-                      Vti.RegClass, Vti.RegClass>;
+    defm : VPatBinaryTA<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
+                        Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
+                        Vti.Log2SEW, Wti.RegClass,
+                        Vti.RegClass, Vti.RegClass>;
   }
 }
 
@@ -2737,10 +2863,10 @@
     defvar Vti = VtiToWti.Vti;
     defvar Wti = VtiToWti.Wti;
     defvar kind = "V"#Vti.ScalarSuffix;
-    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
-                      Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
-                      Vti.Log2SEW, Wti.RegClass,
-                      Vti.RegClass, Vti.ScalarRegClass>;
+    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
+                        Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
+                        Vti.Log2SEW, Wti.RegClass,
+                        Vti.RegClass, Vti.ScalarRegClass>;
   }
 }
 
@@ -2756,10 +2882,10 @@
     def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                              Wti.Vector, Vti.Vector, Vti.Mask,
                              Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
-    def : VPatBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
-                         Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
-                         Vti.Log2SEW, Wti.RegClass,
-                         Wti.RegClass, Vti.RegClass>;
+    def : VPatBinaryMaskTA<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
+                           Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
+                           Vti.Log2SEW, Wti.RegClass,
+                           Wti.RegClass, Vti.RegClass>;
   }
 }
 
@@ -2769,10 +2895,10 @@
     defvar Vti = VtiToWti.Vti;
     defvar Wti = VtiToWti.Wti;
     defvar kind = "W"#Vti.ScalarSuffix;
-    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
-                      Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
-                      Vti.Log2SEW, Wti.RegClass,
-                      Wti.RegClass, Vti.ScalarRegClass>;
+    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
+                        Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
+                        Vti.Log2SEW, Wti.RegClass,
+                        Wti.RegClass, Vti.ScalarRegClass>;
   }
 }
 
@@ -2781,10 +2907,10 @@
   foreach VtiToWti = vtilist in {
     defvar Vti = VtiToWti.Vti;
     defvar Wti = VtiToWti.Wti;
-    defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
-                      Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
-                      Vti.Log2SEW, Vti.RegClass,
-                      Wti.RegClass, Vti.RegClass>;
+    defm : VPatBinaryTA<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
+                        Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
+                        Vti.Log2SEW, Vti.RegClass,
+                        Wti.RegClass, Vti.RegClass>;
   }
 }
 
@@ -2794,10 +2920,10 @@
     defvar Vti = VtiToWti.Vti;
     defvar Wti = VtiToWti.Wti;
     defvar kind = "W"#Vti.ScalarSuffix;
-    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
-                      Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
-                      Vti.Log2SEW, Vti.RegClass,
-                      Wti.RegClass, Vti.ScalarRegClass>;
+    defm : VPatBinaryTA<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
+                        Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
+                        Vti.Log2SEW, Vti.RegClass,
+                        Wti.RegClass, Vti.ScalarRegClass>;
   }
 }
 
@@ -2806,10 +2932,10 @@
   foreach VtiToWti = vtilist in {
     defvar Vti = VtiToWti.Vti;
     defvar Wti = VtiToWti.Wti;
-    defm : VPatBinary<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
-                      Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
-                      Vti.Log2SEW, Vti.RegClass,
-                      Wti.RegClass, uimm5>;
+    defm : VPatBinaryTA<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
+                        Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
+                        Vti.Log2SEW, Vti.RegClass,
+                        Wti.RegClass, uimm5>;
   }
 }
 
@@ -3122,8 +3248,8 @@
   }
 }
 
-multiclass VPatConversionVI_VF<string intrinsic,
-                               string instruction>
+multiclass VPatClassifyVI_VF<string intrinsic,
+                             string instruction>
 {
   foreach fvti = AllFloatVectors in
   {
@@ -3135,6 +3261,19 @@
   }
 }
 
+multiclass VPatConversionVI_VF<string intrinsic,
+                               string instruction>
+{
+  foreach fvti = AllFloatVectors in
+  {
+    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
+
+    defm : VPatConversionTA<intrinsic, instruction, "V",
+                            ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
+                            fvti.LMul, ivti.RegClass, fvti.RegClass>;
+  }
+}
+
 multiclass VPatConversionVF_VI<string intrinsic,
                                string instruction>
 {
@@ -3142,9 +3281,9 @@
   {
     defvar ivti = GetIntVTypeInfo<fvti>.Vti;
 
-    defm : VPatConversion<intrinsic, instruction, "V",
-                          fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
-                          ivti.LMul, fvti.RegClass, ivti.RegClass>;
+    defm : VPatConversionTA<intrinsic, instruction, "V",
+                            fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
+                            ivti.LMul, fvti.RegClass, ivti.RegClass>;
   }
 }
 
@@ -3154,9 +3293,9 @@
     defvar fvti = fvtiToFWti.Vti;
     defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
 
-    defm : VPatConversion<intrinsic, instruction, "V",
-                          iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
-                          fvti.LMul, iwti.RegClass, fvti.RegClass>;
+    defm : VPatConversionTA<intrinsic, instruction, "V",
+                            iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
+                            fvti.LMul, iwti.RegClass, fvti.RegClass>;
   }
 }
 
@@ -3166,9 +3305,9 @@
     defvar vti = vtiToWti.Vti;
     defvar fwti = vtiToWti.Wti;
 
-    defm : VPatConversion<intrinsic, instruction, "V",
-                          fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
-                          vti.LMul, fwti.RegClass, vti.RegClass>;
+    defm : VPatConversionTA<intrinsic, instruction, "V",
+                            fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
+                            vti.LMul, fwti.RegClass, vti.RegClass>;
   }
 }
 
@@ -3178,9 +3317,9 @@
     defvar fvti = fvtiToFWti.Vti;
     defvar fwti = fvtiToFWti.Wti;
 
-    defm : VPatConversion<intrinsic, instruction, "V",
-                          fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
-                          fvti.LMul, fwti.RegClass, fvti.RegClass>;
+    defm : VPatConversionTA<intrinsic, instruction, "V",
+                            fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
+                            fvti.LMul, fwti.RegClass, fvti.RegClass>;
   }
 }
 
@@ -3190,9 +3329,9 @@
     defvar vti = vtiToWti.Vti;
     defvar fwti = vtiToWti.Wti;
 
-    defm : VPatConversion<intrinsic, instruction, "W",
-                          vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
-                          vti.LMul, vti.RegClass, fwti.RegClass>;
+    defm : VPatConversionTA<intrinsic, instruction, "W",
+                            vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
+                            vti.LMul, vti.RegClass, fwti.RegClass>;
   }
 }
 
@@ -3202,9 +3341,9 @@
     defvar fvti = fvtiToFWti.Vti;
     defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
 
-    defm : VPatConversion<intrinsic, instruction, "W",
-                          fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
-                          fvti.LMul, fvti.RegClass, iwti.RegClass>;
+    defm : VPatConversionTA<intrinsic, instruction, "W",
+                            fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
+                            fvti.LMul, fvti.RegClass, iwti.RegClass>;
   }
 }
 
@@ -3214,9 +3353,9 @@
     defvar fvti = fvtiToFWti.Vti;
     defvar fwti = fvtiToFWti.Wti;
 
-    defm : VPatConversion<intrinsic, instruction, "W",
-                          fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
-                          fvti.LMul, fvti.RegClass, fwti.RegClass>;
+    defm : VPatConversionTA<intrinsic, instruction, "W",
+                            fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
+                            fvti.LMul, fvti.RegClass, fwti.RegClass>;
   }
 }
 
@@ -3418,14 +3557,16 @@
                                               (vti.Vector vti.RegClass:$rs2),
                                               (vti.Vector vti.RegClass:$rs1),
                                               (vti.Mask V0),
-                                              VLOpFrag)),
+                                              VLOpFrag,
+                                              (XLenVT uimm5:$policy))),
             (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
                                                       vti.RegClass:$merge,
                                                       vti.RegClass:$rs1,
                                                       vti.RegClass:$rs2,
                                                       (vti.Mask V0),
                                                       GPR:$vl,
-                                                      vti.Log2SEW)>;
+                                                      vti.Log2SEW,
+                                                      (XLenVT uimm5:$policy))>;
 
   // Match VSUB with a small immediate to vadd.vi by negating the immediate.
   def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1),
@@ -3439,14 +3580,16 @@
                                              (vti.Vector vti.RegClass:$rs1),
                                              (vti.Scalar simm5_plus1:$rs2),
                                              (vti.Mask V0),
-                                             VLOpFrag)),
+                                             VLOpFrag,
+                                             (XLenVT uimm5:$policy))),
             (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
                                                       vti.RegClass:$merge,
                                                       vti.RegClass:$rs1,
                                                       (NegImm simm5_plus1:$rs2),
                                                       (vti.Mask V0),
                                                       GPR:$vl,
-                                                      vti.Log2SEW)>;
+                                                      vti.Log2SEW,
+                                                      (XLenVT uimm5:$policy))>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -3667,17 +3810,17 @@
 //===----------------------------------------------------------------------===//
 // 14.8. Vector Floating-Point Square-Root Instruction
 //===----------------------------------------------------------------------===//
-defm PseudoVFSQRT      : VPseudoUnaryV_V;
+defm PseudoVFSQRT      : VPseudoUnaryTAV_V;
 
 //===----------------------------------------------------------------------===//
 // 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
 //===----------------------------------------------------------------------===//
-defm PseudoVFRSQRT7    : VPseudoUnaryV_V;
+defm PseudoVFRSQRT7    : VPseudoUnaryTAV_V;
 
 //===----------------------------------------------------------------------===//
 // 14.10. Vector Floating-Point Reciprocal Estimate Instruction
 //===----------------------------------------------------------------------===//
-defm PseudoVFREC7      : VPseudoUnaryV_V;
+defm PseudoVFREC7      : VPseudoUnaryTAV_V;
 
 //===----------------------------------------------------------------------===//
 // 14.11. Vector Floating-Point Min/Max Instructions
@@ -4363,7 +4506,7 @@
 //===----------------------------------------------------------------------===//
 // 14.14. Vector Floating-Point Classify Instruction
 //===----------------------------------------------------------------------===//
-defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
+defm : VPatClassifyVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
 
 //===----------------------------------------------------------------------===//
 // 14.15. Vector Floating-Point Merge Instruction
Index: llvm/lib/Target/RISCV/RISCVInstrFormats.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -178,6 +178,9 @@
 
   bit HasVLOp = 0;
   let TSFlags{15} = HasVLOp;
+
+  bit HasPolicyOp = 0;
+  let TSFlags{16} = HasPolicyOp;
 }
 
 // Pseudo instructions
Index: llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -365,7 +365,9 @@
 
   RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
 
-  unsigned Log2SEW = MI.getOperand(NumOperands - 1).getImm();
+  unsigned Log2SEWIndex =
+      RISCVII::hasPolicyOp(TSFlags) ? NumOperands - 2 : NumOperands - 1;
+  unsigned Log2SEW = MI.getOperand(Log2SEWIndex).getImm();
   // A Log2SEW of 0 is an operation on mask registers only.
   bool MaskRegOp = Log2SEW == 0;
   unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
@@ -393,6 +395,12 @@
     }
   }
 
+  // If the instruction has a policy operand, use it to set TailAgnostic.
+  if (RISCVII::hasPolicyOp(TSFlags)) {
+    const MachineOperand &Op = MI.getOperand(NumOperands - 1);
+    TailAgnostic = Op.getImm() & 0x1;
+  }
+
   if (RISCVII::hasVLOp(TSFlags)) {
     const MachineOperand &VLOp = MI.getOperand(MI.getNumExplicitOperands() - 2);
     if (VLOp.isImm())
Index: llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
===================================================================
--- llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -76,6 +76,9 @@
   // explicit operand. Used by RVV Pseudos.
   HasVLOpShift = HasSEWOpShift + 1,
   HasVLOpMask = 1 << HasVLOpShift,
+
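+  // Does this instruction have a policy operand? It must be the last
+  // explicit operand. Used by RVV Pseudos.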
+  HasPolicyOpShift = HasVLOpShift + 1,
+  HasPolicyOpMask = 1 << HasPolicyOpShift,
 };
 
 // Match with the definitions in RISCVInstrFormatsV.td
@@ -132,6 +135,10 @@
   return TSFlags & HasVLOpMask;
 }
 
+static inline bool hasPolicyOp(uint64_t TSFlags) {
+  return TSFlags & HasPolicyOpMask;
+}
+
 // RISC-V Specific Machine Operand Flags
 enum {
   MO_None = 0,
Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -267,6 +267,12 @@
  // The destination vector type matches the first source vector (with mask).
   // Input: (vector_in, mask, vl)
   class RISCVUnaryAAMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                     LLVMMatchType<1>],
+                    [IntrNoMem]>, RISCVVIntrinsic;
+  class RISCVUnaryAAMaskNoTA
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
@@ -288,7 +294,8 @@
   class RISCVRGatherVVMask
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                     LLVMMatchType<1>],
                     [IntrNoMem]>, RISCVVIntrinsic;
   // Input: (vector_in, int16_vector_in, vl)
   class RISCVRGatherEI16VVNoMask
@@ -302,7 +309,8 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                     LLVMMatchType<1>],
                     [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type matches the first source vector, and the
  // second operand is XLen.
@@ -318,7 +326,8 @@
   class RISCVGatherVXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
-                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
+                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
+                    LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
   }
  // The destination vector type matches the first source vector.
@@ -334,7 +343,8 @@
   class RISCVBinaryAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
-                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                    LLVMMatchType<2>],
                    [IntrNoMem]>, RISCVVIntrinsic {
     let SplatOperand = 3;
   }
@@ -351,7 +361,8 @@
   class RISCVBinaryAAShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
-                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                    LLVMMatchType<2>],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type does NOT match the first source vector.
   // Input: (vector_in, vector_in/scalar_in, vl)
@@ -366,7 +377,8 @@
   class RISCVBinaryABXMask
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                     LLVMMatchType<3>],
                     [IntrNoMem]>, RISCVVIntrinsic {
     let SplatOperand = 3;
   }
@@ -383,7 +395,8 @@
   class RISCVBinaryABShiftMask
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                     LLVMMatchType<3>],
                     [IntrNoMem]>, RISCVVIntrinsic;
   // For binary operations with V0 as input.
   // Input: (vector_in, vector_in/scalar_in, V0, vl)
@@ -465,7 +478,8 @@
   class RISCVSaturatingBinaryAAXMask
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                     LLVMMatchType<2>],
                     [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
     let SplatOperand = 3;
   }
@@ -484,7 +498,8 @@
   class RISCVSaturatingBinaryAAShiftMask
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                     LLVMMatchType<2>],
                     [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
   // For Saturating binary operations.
  // The destination vector type does NOT match the first source vector.
@@ -501,7 +516,8 @@
   class RISCVSaturatingBinaryABShiftMask
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                     LLVMMatchType<3>],
                     [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
   class RISCVTernaryAAAXNoMask
         : Intrinsic<[llvm_anyvector_ty],
@@ -584,7 +600,7 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
-                     llvm_anyint_ty],
+                     llvm_anyint_ty, LLVMMatchType<2>],
                     [IntrNoMem]>, RISCVVIntrinsic;
   // For unary operations with the same vector type in/out without mask
   // Output: (vector)
@@ -618,7 +634,8 @@
   class RISCVConversionMask
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_anyvector_ty,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                     LLVMMatchType<2>],
                     [IntrNoMem]>, RISCVVIntrinsic;
   // For atomic operations without mask
   // Input: (base, index, value, vl)
@@ -1124,7 +1141,7 @@
   defm vrgather_vx : RISCVRGatherVX;
   defm vrgatherei16_vv : RISCVRGatherEI16VV;
 
-  def "int_riscv_vcompress" : RISCVUnaryAAMask;
+  def "int_riscv_vcompress" : RISCVUnaryAAMaskNoTA;
 
   defm vaaddu : RISCVSaturatingBinaryAAX;
   defm vaadd : RISCVSaturatingBinaryAAX;
Index: clang/utils/TableGen/RISCVVEmitter.cpp
===================================================================
--- clang/utils/TableGen/RISCVVEmitter.cpp
+++ clang/utils/TableGen/RISCVVEmitter.cpp
@@ -156,6 +156,7 @@
   bool IsMask;
   bool HasMaskedOffOperand;
   bool HasVL;
+  bool HasPolicy;
   bool HasNoMaskedOverloaded;
  bool HasAutoDef; // There is an automatic definition in the header
   std::string ManualCodegen;
@@ -169,8 +170,9 @@
 public:
   RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
                StringRef IRName, bool HasSideEffects, bool IsMask,
-               bool HasMaskedOffOperand, bool HasVL, bool HasNoMaskedOverloaded,
-               bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
+               bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
+               bool HasNoMaskedOverloaded, bool HasAutoDef,
+               StringRef ManualCodegen, const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
                StringRef RequiredExtension);
   ~RVVIntrinsic() = default;
@@ -180,6 +182,7 @@
   bool hasSideEffects() const { return HasSideEffects; }
   bool hasMaskedOffOperand() const { return HasMaskedOffOperand; }
   bool hasVL() const { return HasVL; }
+  bool hasPolicy() const { return HasPolicy; }
   bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; }
   bool hasManualCodegen() const { return !ManualCodegen.empty(); }
   bool hasAutoDef() const { return HasAutoDef; }
@@ -195,6 +198,9 @@
   // init the RVVIntrinsic ID and IntrinsicTypes.
   void emitCodeGenSwitchBody(raw_ostream &o) const;
 
+  // Emit the macros defining the _m intrinsics in terms of the _mt intrinsics.
+  void emitIntrinsicMaskMacro(raw_ostream &o) const;
+
   // Emit the macros for mapping C/C++ intrinsic function to builtin functions.
   void emitIntrinsicMacro(raw_ostream &o) const;
 
@@ -227,6 +233,8 @@
 private:
   /// Create all intrinsics and add them to \p Out
   void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out);
+  /// Emit the header code of all RVVHeader records to \p OS
+  void createRVVHeaders(raw_ostream &OS);
   /// Compute output and input types by applying different config (basic type
  /// and LMUL with type transformers). It also records the result type in the
  /// legal or illegal set to avoid computing the same config again. The result maybe
@@ -631,7 +639,7 @@
     ScalarType = ScalarTypeKind::SignedLong;
     break;
   default:
-    PrintFatalError("Illegal primitive type transformers!");
+    PrintFatalError("Illegal primitive type transformers: " + PType);
   }
   Transformer = Transformer.drop_back();
 
@@ -745,15 +753,15 @@
 RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
                            StringRef NewMangledName, StringRef IRName,
                            bool HasSideEffects, bool IsMask,
-                           bool HasMaskedOffOperand, bool HasVL,
+                           bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
                            bool HasNoMaskedOverloaded, bool HasAutoDef,
                            StringRef ManualCodegen, const RVVTypes &OutInTypes,
                            const std::vector<int64_t> &NewIntrinsicTypes,
                            StringRef RequiredExtension)
     : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
-      HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
-      ManualCodegen(ManualCodegen.str()) {
+      HasPolicy(HasPolicy), HasNoMaskedOverloaded(HasNoMaskedOverloaded),
+      HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()) {
 
   // Init Name and MangledName
   Name = NewName.str();
@@ -765,6 +773,8 @@
     Name += "_" + Suffix.str();
   if (IsMask) {
     Name += "_m";
+    if (HasPolicy)
+      Name += "t";
   }
   // Init RISC-V extensions
   for (const auto &T : OutInTypes) {
@@ -813,7 +823,10 @@
 
   if (isMask()) {
     if (hasVL()) {
-      OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
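+      // Move the mask operand from the front to just before vl, leaving vl
+      // (and the policy operand, when present) at the end.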
+      if (hasPolicy())
+        OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);\n";
+      else
+        OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
     } else {
       OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
     }
@@ -853,6 +866,24 @@
   OS << ")\n";
 }
 
+void RVVIntrinsic::emitIntrinsicMaskMacro(raw_ostream &OS) const {
+  OS << "#define " << getName().drop_back() << "(";
+  if (!InputTypes.empty()) {
+    ListSeparator LS;
+    for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i)
+      OS << LS << "op" << i;
+  }
+  OS << ") \\\n";
+  OS << "__builtin_rvv_" << getName() << "(";
+  ListSeparator LS;
+  if (!InputTypes.empty()) {
+    for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i)
+      OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")";
+  }
+  OS << LS << "(size_t)VE_TAIL_AGNOSTIC";
+  OS << ")\n";
+}
+
 void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const {
   OS << "__attribute__((clang_builtin_alias(";
   OS << "__builtin_rvv_" << getName() << ")))\n";
@@ -898,6 +929,8 @@
   OS << "extern \"C\" {\n";
   OS << "#endif\n\n";
 
+  createRVVHeaders(OS);
+
   std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
   createRVVIntrinsics(Defs);
 
@@ -965,6 +998,12 @@
     Inst.emitIntrinsicMacro(OS);
   });
 
+  // Use _mt to implement _m intrinsics.
+  emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
+    if (Inst.isMask() && Inst.hasPolicy())
+      Inst.emitIntrinsicMaskMacro(OS);
+  });
+
   OS << "#define __riscv_v_intrinsic_overloading 1\n";
 
   // Print Overloaded APIs
@@ -1066,6 +1105,7 @@
     bool HasMask = R->getValueAsBit("HasMask");
     bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
     bool HasVL = R->getValueAsBit("HasVL");
+    bool HasPolicy = R->getValueAsBit("HasPolicy");
     bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
     bool HasSideEffects = R->getValueAsBit("HasSideEffects");
     std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
@@ -1104,6 +1144,10 @@
       ProtoMaskSeq.push_back("z");
     }
 
+    if (HasPolicy) {
+      ProtoMaskSeq.push_back("Kz");
+    }
+
     // Create Intrinsics for each type and LMUL.
     for (char I : TypeRange) {
       for (int Log2LMUL : Log2LMULList) {
@@ -1116,7 +1160,7 @@
         // Create a non-mask intrinsic
         Out.push_back(std::make_unique<RVVIntrinsic>(
             Name, SuffixStr, MangledName, IRName, HasSideEffects,
-            /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
+            /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy,
             HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(),
             IntrinsicTypes, RequiredExtension));
         if (HasMask) {
@@ -1125,7 +1169,7 @@
               computeTypes(I, Log2LMUL, ProtoMaskSeq);
           Out.push_back(std::make_unique<RVVIntrinsic>(
               Name, SuffixStr, MangledName, IRNameMask, HasSideEffects,
-              /*IsMask=*/true, HasMaskedOffOperand, HasVL,
+              /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy,
               HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
               MaskTypes.getValue(), IntrinsicTypes, RequiredExtension));
         }
@@ -1134,6 +1178,15 @@
   }
 }
 
+void RVVEmitter::createRVVHeaders(raw_ostream &OS) {
+  std::vector<Record *> RVVHeaders =
+      Records.getAllDerivedDefinitions("RVVHeader");
+  for (auto *R : RVVHeaders) {
+    StringRef HeaderCodeStr = R->getValueAsString("HeaderCode");
+    OS << HeaderCodeStr.str();
+  }
+}
+
 Optional<RVVTypes>
 RVVEmitter::computeTypes(BasicType BT, int Log2LMUL,
                          ArrayRef<std::string> PrototypeSeq) {
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c
@@ -0,0 +1,44 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_tu (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_ta (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
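For readers unfamiliar with the tail policies: the two macros select what happens to result elements past vl. A minimal usage sketch, assuming the generated _mt overload and macros from this patch (function and variable names are illustrative):

  #include <riscv_vector.h>

  vint8m1_t add_prefix(vbool8_t m, vint8m1_t dest, vint8m1_t a, vint8m1_t b,
                       size_t vl) {
    // Tail-undisturbed: result elements in [vl, VLMAX) keep the values of
    // dest, so the unprocessed suffix of dest survives the operation.
    return vadd_vv_i8m1_mt(m, dest, a, b, vl, VE_TAIL_UNDISTURBED);
  }

With VE_TAIL_AGNOSTIC instead, the tail elements are unspecified (per the V spec, the hardware may leave them undisturbed or write all ones), which gives the backend more freedom when emitting vsetvli.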
Index: clang/include/clang/Basic/riscv_vector.td
===================================================================
--- clang/include/clang/Basic/riscv_vector.td
+++ clang/include/clang/Basic/riscv_vector.td
@@ -169,6 +169,13 @@
   // This builtin has a granted vector length parameter in the last position.
   bit HasVL = true;
 
+  // Normally, an intrinsic takes a policy argument when it is masked and no
+  // policy argument when it is unmasked. When HasPolicy is false, the
+  // intrinsic takes no policy argument regardless of whether it is masked or
+  // unmasked. For example, when the result is a mask type or a scalar type,
+  // there is no need to specify a tail policy.
+  bit HasPolicy = true;
+
   // This builtin supports non-masked function overloading api.
   // All masked operations support overloading api.
   bit HasNoMaskedOverloaded = true;
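Concretely, the new bit only affects the masked variants. A sketch of the resulting C-level signatures (the intrinsic names and the policy parameter type are illustrative; exact spellings follow the existing rvv naming scheme):

  // HasPolicy = true (the default): the masked form gains a trailing
  // policy operand, exercised by the vadd-policy.c test above.
  vint8m1_t vadd_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff,
                            vint8m1_t op1, vint8m1_t op2,
                            size_t vl, size_t policy);

  // HasPolicy = false (e.g. the comparisons below, whose result is a
  // mask): no policy operand even in the masked form.
  vbool8_t vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
                              vint8m1_t op1, vint8m1_t op2, size_t vl);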
@@ -400,6 +407,7 @@
 
 class RVVMaskUnaryBuiltin : RVVOutBuiltin<"m", "mm", "c"> {
   let Name = NAME # "_m";
+  let HasPolicy = false;
 }
 
 class RVVMaskNullaryBuiltin : RVVOutBuiltin<"m", "m", "c"> {
@@ -978,6 +986,7 @@
 let HasVL = false,
     HasMask = false,
     HasSideEffects = true,
+    HasPolicy = false,
     Log2LMUL = [0],
     ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
 {
@@ -1138,7 +1147,7 @@
 }
 
 // 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
   defm vadc : RVVCarryinBuiltinSet;
   defm vmadc : RVVCarryOutInBuiltinSet<"vmadc_carry_in">;
   defm vmadc : RVVIntMaskOutBuiltinSet;
@@ -1166,6 +1175,7 @@
                                           ["Uv", "UvUw"]]>;
 
 // 12.8. Vector Integer Comparison Instructions
+let HasPolicy = false in {
 defm vmseq : RVVIntMaskOutBuiltinSet;
 defm vmsne : RVVIntMaskOutBuiltinSet;
 defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
@@ -1176,6 +1186,7 @@
 defm vmsgt : RVVSignedMaskOutBuiltinSet;
 defm vmsgeu : RVVUnsignedMaskOutBuiltinSet;
 defm vmsge : RVVSignedMaskOutBuiltinSet;
+}
 
 // 12.9. Vector Integer Min/Max Instructions
 defm vminu : RVVUnsignedBinBuiltinSet;
@@ -1234,7 +1245,7 @@
 
 // 12.15. Vector Integer Merge Instructions
 // C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
-let HasMask = false,
+let HasMask = false, HasPolicy = false,
     ManualCodegen = [{
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
       IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
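The std::rotate in the merge ManualCodegen is easy to misread: it moves the mask operand from the front (its position in the C/C++ builtin) to third position (its position in the IR intrinsic), leaving vl in place. A standalone illustration of that operand shuffle (plain C++; the strings are stand-ins for the Value* operands):

  #include <algorithm>
  #include <cassert>
  #include <string>
  #include <vector>

  int main() {
    std::vector<std::string> Ops = {"mask", "op1", "op2", "vl"};
    // Rotate [begin, begin + 3) so the element at begin + 1 comes first.
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
    assert(Ops == (std::vector<std::string>{"op1", "op2", "mask", "vl"}));
  }

The same rotation appears in the floating-point merge and vcompress blocks further down.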
@@ -1247,7 +1258,7 @@
 }
 
 // 12.16. Vector Integer Move Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
   let MangledName = "vmv_v" in {
     defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
                                    [["v", "Uv", "UvUv"]]>;
@@ -1347,20 +1358,22 @@
 defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "fd">;
 
 // 14.13. Vector Floating-Point Compare Instructions
+let HasPolicy = false in {
 defm vmfeq : RVVFloatingMaskOutBuiltinSet;
 defm vmfne : RVVFloatingMaskOutBuiltinSet;
 defm vmflt : RVVFloatingMaskOutBuiltinSet;
 defm vmfle : RVVFloatingMaskOutBuiltinSet;
 defm vmfgt : RVVFloatingMaskOutBuiltinSet;
 defm vmfge : RVVFloatingMaskOutBuiltinSet;
+}
 
 // 14.14. Vector Floating-Point Classify Instruction
-let Name = "vfclass_v" in
+let Name = "vfclass_v", HasPolicy = false in
   def vfclass : RVVOp0Builtin<"Uv", "Uvv", "fd">;
 
 // 14.15. Vector Floating-Point Merge Instruction
 // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
-let HasMask = false,
+let HasMask = false, HasPolicy = false,
     ManualCodegen = [{
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
       IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
@@ -1468,7 +1481,7 @@
 // 16.6. vmsof.m set-only-first mask bit
 def vmsof : RVVMaskUnaryBuiltin;
 
-let HasNoMaskedOverloaded = false in {
+let HasNoMaskedOverloaded = false, HasPolicy = false in {
   // 16.8. Vector Iota Instruction
   defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;
 
@@ -1479,7 +1492,7 @@
 
 // 17. Vector Permutation Instructions
 // 17.1. Integer Scalar Move Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
   let HasVL = false, MangledName = "vmv_x" in
     defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil",
                                    [["s", "ve", "ev"],
@@ -1491,7 +1504,7 @@
 }
 
 // 17.2. Floating-Point Scalar Move Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
   let HasVL = false, MangledName = "vfmv_f" in
     defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "fd",
                                      [["s", "ve", "ev"]]>;
@@ -1532,7 +1545,7 @@
                                      [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
 
 // 17.5. Vector Compress Instruction
-let HasMask = false,
+let HasMask = false, HasPolicy = false,
     ManualCodegen = [{
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
       IntrinsicTypes = {ResultType, Ops[3]->getType()};
@@ -1548,7 +1561,7 @@
 // Miscellaneous
 let HasMask = false, HasVL = false, HasNoMaskedOverloaded = false,
     IRName = "" in {
-  let Name = "vreinterpret_v",
+  let Name = "vreinterpret_v", HasPolicy = false,
       ManualCodegen = [{
         return Builder.CreateBitCast(Ops[0], ResultType);
       }] in {
@@ -1568,7 +1581,7 @@
     }
   }
 
-  let Name = "vundefined",
+  let Name = "vundefined", HasPolicy = false,
       ManualCodegen = [{
         return llvm::UndefValue::get(ResultType);
       }] in {
@@ -1578,7 +1591,7 @@
 
   // LMUL truncation
   // C/C++ Operand: VecTy, IR Operand: VecTy, Index
-  let Name = "vlmul_trunc_v",
+  let Name = "vlmul_trunc_v", HasPolicy = false,
       ManualCodegen = [{ {
         ID = Intrinsic::experimental_vector_extract;
         IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -1594,7 +1607,7 @@
 
   // LMUL extension
   // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
-  let Name = "vlmul_ext_v",
+  let Name = "vlmul_ext_v", HasPolicy = false,
       ManualCodegen = [{
         ID = Intrinsic::experimental_vector_insert;
         IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -1610,7 +1623,7 @@
     }
   }
 
-  let Name = "vget_v",
+  let Name = "vget_v", HasPolicy = false,
       ManualCodegen = [{
       {
         ID = Intrinsic::experimental_vector_extract;
@@ -1628,7 +1641,7 @@
     }
   }
 
-  let Name = "vset_v", Log2LMUL = [0, 1, 2],
+  let Name = "vset_v", Log2LMUL = [0, 1, 2], HasPolicy = false,
       ManualCodegen = [{
       {
         ID = Intrinsic::experimental_vector_insert;
@@ -1647,3 +1660,14 @@
     }
   }
 }
+
+class RVVHeader
+{
+  code HeaderCode;
+}
+
+let HeaderCode = [{
+#define VE_TAIL_UNDISTURBED 0
+#define VE_TAIL_AGNOSTIC 1
+}] in
+def policy : RVVHeader;