This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rG9caf2cc05c02: [RISCV][Clang] Support policy functions for Vector Comparison (authored by khchen).

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D126746/new/

https://reviews.llvm.org/D126746

Files:
  clang/include/clang/Basic/riscv_vector.td
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
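
For context, a minimal usage sketch of the two policy variants this change adds. The intrinsic names and signatures are taken verbatim from the tests below; the wrapper functions and the "_ma = mask agnostic / _mu = mask undisturbed" reading of the suffixes are illustrative assumptions, not part of the commit itself.

  #include <riscv_vector.h>

  // _ma variant: no maskedoff operand is passed; in the tests below the
  // underlying IR intrinsic receives undef for the inactive elements.
  vbool64_t cmp_ma(vbool64_t mask, vint32mf2_t a, vint32mf2_t b, size_t vl) {
    return vmseq_vv_i32mf2_b64_ma(mask, a, b, vl);
  }

  // _mu variant: inactive elements of the result keep the value of the extra
  // 'merge' operand, which is forwarded as maskedoff in the generated IR.
  vbool64_t cmp_mu(vbool64_t mask, vbool64_t merge, vint32mf2_t a,
                   vint32mf2_t b, size_t vl) {
    return vmseq_vv_i32mf2_b64_mu(mask, merge, a, b, vl);
  }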

Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
@@ -1699,3 +1699,75 @@
                                   vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsne_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsne_vv_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsne_vx_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsne_vv_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsne_vx_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsne_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsne_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsne_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsne_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
@@ -1714,3 +1714,75 @@
                                    vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsltu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmslt_vv_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmslt_vx_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsltu_vv_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsltu_vx_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmslt_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmslt_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsltu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsltu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
@@ -1714,3 +1714,75 @@
                                    vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsleu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsle_vv_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsle_vx_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsleu_vv_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsleu_vx_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsle_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsle_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsleu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsleu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
@@ -1588,3 +1588,75 @@
   return vmsgtu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
+
+// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsgt_vv_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsgt_vx_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsgtu_vv_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsgtu_vx_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsgt_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsgt_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsgtu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsgtu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
@@ -1588,3 +1588,75 @@
   return vmsgeu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
+
+// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsge_vv_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsge_vx_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsgeu_vv_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgeu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsgeu_vx_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsge_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsge_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsge_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsge_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgeu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsgeu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgeu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsgeu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
@@ -1699,3 +1699,75 @@
                                   vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmseq_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmseq_vv_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmseq_vx_i32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmseq_vv_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmseq_vx_u32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmseq_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmseq_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmseq_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmseq_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
@@ -578,3 +578,39 @@
 vbool2_t test_vmfne_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
   return vmfne_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfne_vv_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfne_vf_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfne_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfne_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
@@ -578,3 +578,39 @@
 vbool2_t test_vmflt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
   return vmflt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmflt_vv_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmflt_vf_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmflt_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmflt_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
@@ -578,3 +578,39 @@
 vbool2_t test_vmfle_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
   return vmfle_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfle_vv_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfle_vf_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfle_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfle_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
@@ -545,3 +545,39 @@
 vbool2_t test_vmfgt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
   return vmfgt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfgt_vv_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfgt_vf_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfgt_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfgt_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
@@ -545,3 +545,39 @@
 vbool2_t test_vmfge_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
   return vmfge_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfge_vv_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfge_vf_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfge_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfge_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
@@ -578,3 +578,39 @@
 vbool2_t test_vmfeq_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
   return vmfeq_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfeq_vv_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfeq_vf_f32mf2_b64_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfeq_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfeq_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
@@ -1699,3 +1699,75 @@
                                   vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsne_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsne_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsne_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsne_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsne_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsne_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsne_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsne_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
@@ -1714,3 +1714,75 @@
                                    vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmslt_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmslt_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsltu_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsltu_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmslt_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmslt_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsltu_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsltu_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
@@ -1714,3 +1714,75 @@
                                    vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsle_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsle_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsleu_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsleu_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsle_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsle_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsleu_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsleu_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
@@ -1588,3 +1588,75 @@
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
+
+// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsgt_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsgt_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsgtu_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsgtu_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsgt_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsgt_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsgtu_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsgtu_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
@@ -1588,3 +1588,75 @@
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
+
+// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsge_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsge_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsgeu_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgeu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsgeu_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsge_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmsge_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsge_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmsge_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgeu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmsgeu_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgeu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmsgeu_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
@@ -1699,3 +1699,75 @@
                                   vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmseq_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmseq_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmseq_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmseq_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmseq_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmseq_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmseq_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmseq_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
@@ -361,3 +361,39 @@
                                   vfloat64m8_t op1, double op2, size_t vl) {
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfne_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfne_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfne_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfne_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
@@ -361,3 +361,39 @@
                                   vfloat64m8_t op1, double op2, size_t vl) {
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmflt_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmflt_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmflt_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmflt_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
@@ -361,3 +361,39 @@
                                   vfloat64m8_t op1, double op2, size_t vl) {
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfle_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfle_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfle_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfle_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
@@ -329,3 +329,39 @@
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfgt_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfgt_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfgt_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfgt_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
@@ -329,3 +329,39 @@
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfge_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfge_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfge_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfge_mu(mask, merge, op1, op2, vl);
+}
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
@@ -361,3 +361,39 @@
                                   vfloat64m8_t op1, double op2, size_t vl) {
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfeq_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfeq_ma(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfeq_mu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfeq_mu(mask, merge, op1, op2, vl);
+}
Index: clang/include/clang/Basic/riscv_vector.td
===================================================================
--- clang/include/clang/Basic/riscv_vector.td
+++ clang/include/clang/Basic/riscv_vector.td
@@ -1745,7 +1745,8 @@
                                           ["Uv", "UvUw"]]>;
 
 // 12.8. Vector Integer Comparison Instructions
-let MaskedPolicyScheme = NonePolicy in {
+let MaskedPolicyScheme = HasPassthruOperand,
+    HasTailPolicy = false in {
 defm vmseq : RVVIntMaskOutBuiltinSet;
 defm vmsne : RVVIntMaskOutBuiltinSet;
 defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
@@ -1950,7 +1951,8 @@
 defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
 
 // 14.13. Vector Floating-Point Compare Instructions
-let MaskedPolicyScheme = NonePolicy in {
+let MaskedPolicyScheme = HasPassthruOperand,
+    HasTailPolicy = false in {
 defm vmfeq : RVVFloatingMaskOutBuiltinSet;
 defm vmfne : RVVFloatingMaskOutBuiltinSet;
 defm vmflt : RVVFloatingMaskOutBuiltinSet;
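
For reference, a minimal usage sketch of the mask-agnostic (_ma) and mask-undisturbed (_mu) overloaded comparison intrinsics exercised by the tests above. The wrapper functions and data flow are illustrative assumptions; only the intrinsic names and argument orders come from the test cases in this patch.

#include <stddef.h>
#include <riscv_vector.h>

// Mask-agnostic policy: inactive bits of the result mask are left
// undefined (the merge operand in the generated IR is undef).
vbool64_t example_ma(vbool64_t mask, vint32mf2_t a, vint32mf2_t b,
                     size_t vl) {
  return vmseq_ma(mask, a, b, vl);
}

// Mask-undisturbed policy: inactive bits of the result mask keep the
// value of the merge operand, which is passed through to the IR intrinsic.
vbool64_t example_mu(vbool64_t mask, vbool64_t merge, vint32mf2_t a,
                     vint32mf2_t b, size_t vl) {
  return vmseq_mu(mask, merge, a, b, vl);
}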