From 0a7fe5fcfa5141bab46b4eb90f4d67265f86a20d Mon Sep 17 00:00:00 2001
From: Kuan-Lin Chen <rufus@andestech.com>
Date: Fri, 23 May 2025 11:02:28 +0800
Subject: [PATCH 7/7] RISC-V: Add support for the XAndesvdot ISA extension.

This extension defines vector instructions that calculate the signed/unsigned
dot product of four SEW/4-bit elements and accumulate the result into the
corresponding SEW-bit element, for all elements in a vector register.
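
For SEW=32, for instance, each destination element accumulates the dot
product of four 8-bit lanes.  A rough per-element sketch of
nds.vd4dots.vv (the lane accessor .s8[] is illustrative only, not the
normative pseudocode):

  /* For each 32-bit element i, sum four signed 8x8 products into vd[i].  */
  for (size_t i = 0; i < vl; i++)
    vd[i] += (int32_t) vs1[i].s8[0] * vs2[i].s8[0]
	   + (int32_t) vs1[i].s8[1] * vs2[i].s8[1]
	   + (int32_t) vs1[i].s8[2] * vs2[i].s8[2]
	   + (int32_t) vs1[i].s8[3] * vs2[i].s8[3];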

gcc/ChangeLog:

	* config/riscv/andes-vector-builtins-bases.cc (nds_vd4dot): New class.
	(nds_vd4dotsu): Ditto.
	* config/riscv/andes-vector-builtins-bases.h (nds_vd4dots)
	(nds_vd4dotu, nds_vd4dotsu): New.
	* config/riscv/andes-vector-builtins-functions.def (nds_vd4dots): Ditto.
	(nds_vd4dotsu): Ditto.
	(nds_vd4dotu): Ditto.
	* config/riscv/andes-vector.md
	(@pred_nds_vd4dot<su><mode>): New pattern.
	(@pred_nds_vd4dotsu<mode>): New pattern.
	* config/riscv/genrvv-type-indexer.cc (main): Use SEW / 4 as the
	element width for QUAD_FIX, QUAD_FIX_SIGNED and QUAD_FIX_UNSIGNED.
	* config/riscv/riscv-vector-builtins.cc (vqq_args): New args list.
	(su_vqq_args): Ditto.
	(qexti_vvvv_ops): New operand information.
	(qexti_su_vvvv_ops): Ditto.
	(qextu_vvvv_ops): Ditto.
	* config/riscv/riscv-vector-builtins.h (XANDESVDOT_EXT): New def.
	(required_ext_to_isa_name): Add case XANDESVDOT_EXT.
	(required_extensions_specified): Ditto.
	(struct function_group_info): Ditto.
	* config/riscv/vector-iterators.md (NDS_QUAD_FIX): New iterator.

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dots.c: New test.
	* gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dotsu.c: New test.
	* gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dotu.c: New test.
	* gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dots.c: New test.
	* gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dotsu.c: New test.
	* gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dotu.c: New test.
	* gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dots.c: New test.
	* gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dotsu.c: New test.
	* gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dotu.c: New test.
	* gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dots.c: New test.
	* gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dotsu.c: New test.
	* gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dotu.c: New test.
---
 .../riscv/andes-vector-builtins-bases.cc      |  33 +++
 .../riscv/andes-vector-builtins-bases.h       |   3 +
 .../riscv/andes-vector-builtins-functions.def |   7 +
 gcc/config/riscv/andes-vector.md              |  53 ++++
 gcc/config/riscv/genrvv-type-indexer.cc       |   6 +-
 gcc/config/riscv/riscv-vector-builtins.cc     |  31 +++
 gcc/config/riscv/riscv-vector-builtins.h      |   5 +
 gcc/config/riscv/vector-iterators.md          |  13 +
 .../non-policy/non-overloaded/nds_vd4dots.c   | 132 +++++++++
 .../non-policy/non-overloaded/nds_vd4dotsu.c  | 132 +++++++++
 .../non-policy/non-overloaded/nds_vd4dotu.c   | 132 +++++++++
 .../non-policy/overloaded/nds_vd4dots.c       | 132 +++++++++
 .../non-policy/overloaded/nds_vd4dotsu.c      | 132 +++++++++
 .../non-policy/overloaded/nds_vd4dotu.c       | 133 +++++++++
 .../policy/non-overloaded/nds_vd4dots.c       | 258 ++++++++++++++++++
 .../policy/non-overloaded/nds_vd4dotsu.c      | 258 ++++++++++++++++++
 .../policy/non-overloaded/nds_vd4dotu.c       | 258 ++++++++++++++++++
 .../policy/overloaded/nds_vd4dots.c           | 258 ++++++++++++++++++
 .../policy/overloaded/nds_vd4dotsu.c          | 258 ++++++++++++++++++
 .../policy/overloaded/nds_vd4dotu.c           | 258 ++++++++++++++++++
 20 files changed, 2489 insertions(+), 3 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dots.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dotsu.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dotu.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dots.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dotsu.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dotu.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dots.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dotsu.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dotu.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dots.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dotsu.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dotu.c

diff --git a/gcc/config/riscv/andes-vector-builtins-bases.cc b/gcc/config/riscv/andes-vector-builtins-bases.cc
index 1bf8b9dc088e..5b19eaddbde2 100644
--- a/gcc/config/riscv/andes-vector-builtins-bases.cc
+++ b/gcc/config/riscv/andes-vector-builtins-bases.cc
@@ -131,6 +131,33 @@ public:
   }
 };
 
+/* Implements nds.vd4dots/nds.vd4dotu.  */
+template<rtx_code EXTEND>
+class nds_vd4dot : public function_base
+{
+public:
+  bool has_merge_operand_p () const override { return false; }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_widen_ternop_insn
+      (code_for_pred_nds_vd4dot (EXTEND, e.vector_mode ()));
+  }
+};
+
+/* Implements nds.vd4dotsu.  */
+class nds_vd4dotsu : public function_base
+{
+public:
+  bool has_merge_operand_p () const override { return false; }
+
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_widen_ternop_insn
+      (code_for_pred_nds_vd4dotsu (e.vector_mode ()));
+  }
+};
+
 static CONSTEXPR const nds_vfwcvt nds_vfwcvt_obj;
 static CONSTEXPR const nds_vfncvt nds_vfncvt_obj;
 static CONSTEXPR const nds_nibbleload<true> nds_vln8_obj;
@@ -139,6 +166,9 @@ static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADT, NO_FRM> nds_vfpmadt_obj;
 static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADB, NO_FRM> nds_vfpmadb_obj;
 static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADT, HAS_FRM> nds_vfpmadt_frm_obj;
 static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADB, HAS_FRM> nds_vfpmadb_frm_obj;
+static CONSTEXPR const nds_vd4dot<SIGN_EXTEND> nds_vd4dots_obj;
+static CONSTEXPR const nds_vd4dot<ZERO_EXTEND> nds_vd4dotu_obj;
+static CONSTEXPR const nds_vd4dotsu nds_vd4dotsu_obj;
 
 /* Declare the function base NAME, pointing it to an instance
    of class <NAME>_obj.  */
@@ -153,4 +183,7 @@ BASE (nds_vfpmadt)
 BASE (nds_vfpmadb)
 BASE (nds_vfpmadt_frm)
 BASE (nds_vfpmadb_frm)
+BASE (nds_vd4dots)
+BASE (nds_vd4dotu)
+BASE (nds_vd4dotsu)
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/andes-vector-builtins-bases.h b/gcc/config/riscv/andes-vector-builtins-bases.h
index 50c7496dbd33..27d5650e17ec 100644
--- a/gcc/config/riscv/andes-vector-builtins-bases.h
+++ b/gcc/config/riscv/andes-vector-builtins-bases.h
@@ -32,6 +32,9 @@ extern const function_base *const nds_vfpmadt;
 extern const function_base *const nds_vfpmadb;
 extern const function_base *const nds_vfpmadt_frm;
 extern const function_base *const nds_vfpmadb_frm;
+extern const function_base *const nds_vd4dots;
+extern const function_base *const nds_vd4dotu;
+extern const function_base *const nds_vd4dotsu;
 }
 
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/andes-vector-builtins-functions.def b/gcc/config/riscv/andes-vector-builtins-functions.def
index 5c3300751fdf..03164f6eb9ee 100644
--- a/gcc/config/riscv/andes-vector-builtins-functions.def
+++ b/gcc/config/riscv/andes-vector-builtins-functions.def
@@ -55,4 +55,11 @@ DEF_RVV_FUNCTION (nds_vfpmadt_frm, alu_frm, full_preds, f16_vvw_ops)
 DEF_RVV_FUNCTION (nds_vfpmadb_frm, alu_frm, full_preds, f16_vvw_ops)
 #undef REQUIRED_EXTENSIONS
 
+/* Andes Vector Dot Product Extension (XAndesVDot).  */
+#define REQUIRED_EXTENSIONS XANDESVDOT_EXT
+DEF_RVV_FUNCTION (nds_vd4dots, alu, full_preds, qexti_vvvv_ops)
+DEF_RVV_FUNCTION (nds_vd4dotsu, alu, full_preds, qexti_su_vvvv_ops)
+DEF_RVV_FUNCTION (nds_vd4dotu, alu, full_preds, qextu_vvvv_ops)
+#undef REQUIRED_EXTENSIONS
+
 #undef DEF_RVV_FUNCTION
diff --git a/gcc/config/riscv/andes-vector.md b/gcc/config/riscv/andes-vector.md
index 656e5ce2101a..ad8994705461 100644
--- a/gcc/config/riscv/andes-vector.md
+++ b/gcc/config/riscv/andes-vector.md
@@ -23,6 +23,7 @@
   UNSPEC_NDS_INTLOAD
   UNSPEC_NDS_VFPMADT
   UNSPEC_NDS_VFPMADB
+  UNSPEC_NDS_VD4DOT
 ])
 
 (define_int_iterator NDS_VFPMAD [UNSPEC_NDS_VFPMADT UNSPEC_NDS_VFPMADB])
@@ -108,3 +109,55 @@
    (set_attr "enabled" "yes")
    (set (attr "frm_mode")
 	(symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
+
+;; Vector Dot Product Extension
+
+(define_insn "@pred_nds_vd4dot<su><mode>"
+  [(set (match_operand:VQEXTI 0 "register_operand"                    "=&vr")
+	(if_then_else:VQEXTI
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"             "vmWc1")
+	     (match_operand 5 "vector_length_operand"                "   rK")
+	     (match_operand 6 "const_int_operand"                    "    i")
+	     (match_operand 7 "const_int_operand"                    "    i")
+	     (match_operand 8 "const_int_operand"                    "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (unspec:VQEXTI
+	    [(any_extend:VQEXTI
+	       (match_operand:<NDS_QUAD_FIX> 3 "register_operand" " vr"))
+	     (any_extend:VQEXTI
+	       (match_operand:<NDS_QUAD_FIX> 4 "register_operand" " vr"))
+	     (any_extend:VQEXTI
+	       (match_operand:VQEXTI 2 "register_operand" " 0"))]
+	     UNSPEC_NDS_VD4DOT)
+	  (match_dup 2)))]
+  "TARGET_VECTOR && TARGET_XANDESVDOT"
+  "nds.vd4dot<su>.vv\t%0,%3,%4%p1"
+  [(set_attr "type" "viwmuladd")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_nds_vd4dotsu<mode>"
+  [(set (match_operand:VQEXTI 0 "register_operand"                    "=&vr")
+	(if_then_else:VQEXTI
+	  (unspec:<VM>
+	    [(match_operand:<VM> 1 "vector_mask_operand"             "vmWc1")
+	     (match_operand 5 "vector_length_operand"                "   rK")
+	     (match_operand 6 "const_int_operand"                    "    i")
+	     (match_operand 7 "const_int_operand"                    "    i")
+	     (match_operand 8 "const_int_operand"                    "    i")
+	     (reg:SI VL_REGNUM)
+	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+	  (unspec:VQEXTI
+	    [(sign_extend:VQEXTI
+	       (match_operand:<NDS_QUAD_FIX> 3 "register_operand" " vr"))
+	     (zero_extend:VQEXTI
+	       (match_operand:<NDS_QUAD_FIX> 4 "register_operand" " vr"))
+	     (sign_extend:VQEXTI
+	       (match_operand:VQEXTI 2 "register_operand" " 0"))]
+	    UNSPEC_NDS_VD4DOT)
+	  (match_dup 2)))]
+  "TARGET_VECTOR && TARGET_XANDESVDOT"
+  "nds.vd4dotsu.vv\t%0,%3,%4%p1"
+  [(set_attr "type" "viwmuladd")
+   (set_attr "mode" "<MODE>")])
diff --git a/gcc/config/riscv/genrvv-type-indexer.cc b/gcc/config/riscv/genrvv-type-indexer.cc
index f296089fbfe7..d04dec536e36 100644
--- a/gcc/config/riscv/genrvv-type-indexer.cc
+++ b/gcc/config/riscv/genrvv-type-indexer.cc
@@ -364,11 +364,11 @@ main (int argc, const char **argv)
 	    fprintf (fp, "  /*QUAD_EMUL_UNSIGNED*/ %s,\n",
 		     inttype (8, lmul_log2 - 1, true).c_str ());
 	    fprintf (fp, "  /*QUAD_FIX*/ %s,\n",
-		     inttype (8, lmul_log2, unsigned_p).c_str ());
+		     inttype (sew / 4, lmul_log2, unsigned_p).c_str ());
 	    fprintf (fp, "  /*QUAD_FIX_SIGNED*/ %s,\n",
-		     inttype (8, lmul_log2, false).c_str ());
+		     inttype (sew / 4, lmul_log2, false).c_str ());
 	    fprintf (fp, "  /*QUAD_FIX_UNSIGNED*/ %s,\n",
-		     inttype (8, lmul_log2, true).c_str ());
+		     inttype (sew / 4, lmul_log2, true).c_str ());
 	    fprintf (fp, "  /*OCT_TRUNC*/ %s,\n",
 		     same_ratio_eew_type (sew, lmul_log2, sew / 8, unsigned_p,
 					  false)
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc
index bd774c4b2732..170a89c1e49f 100644
--- a/gcc/config/riscv/riscv-vector-builtins.cc
+++ b/gcc/config/riscv/riscv-vector-builtins.cc
@@ -695,6 +695,19 @@ static CONSTEXPR const rvv_arg_type_info vvv_args[]
   = {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info (RVV_BASE_vector),
      rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
 
+/* A list of args for vector_type func (vector_type, quad_fixed_vector_type,
+ * quad_fixed_vector_type) function.  */
+static CONSTEXPR const rvv_arg_type_info vqq_args[]
+  = {rvv_arg_type_info (RVV_BASE_vector),
+     rvv_arg_type_info (RVV_BASE_quad_fixed_vector),
+     rvv_arg_type_info (RVV_BASE_quad_fixed_vector), rvv_arg_type_info_end};
+
+static CONSTEXPR const rvv_arg_type_info su_vqq_args[]
+  = {rvv_arg_type_info (RVV_BASE_vector),
+     rvv_arg_type_info (RVV_BASE_quad_fixed_vector),
+     rvv_arg_type_info (RVV_BASE_quad_fixed_unsigned_vector),
+     rvv_arg_type_info_end};
+
 /* A list of args for vector_type func (vector_type, vector_type, vector_type)
  * function.  */
 static CONSTEXPR const rvv_arg_type_info vxv_args[]
@@ -3147,6 +3160,24 @@ static CONSTEXPR const rvv_op_info f16_vvw_ops
      rvv_arg_type_info (RVV_BASE_vector), /* Return type */
      vw_args /* Args */};
 
+static CONSTEXPR const rvv_op_info qexti_vvvv_ops
+  = {qexti_ops,				  /* Types */
+     OP_TYPE_vv,			  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vqq_args /* Args */};
+
+static CONSTEXPR const rvv_op_info qexti_su_vvvv_ops
+  = {qexti_ops,				  /* Types */
+     OP_TYPE_vv,			  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     su_vqq_args /* Args */};
+
+static CONSTEXPR const rvv_op_info qextu_vvvv_ops
+  = {qextu_ops,				  /* Types */
+     OP_TYPE_vv,			  /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vqq_args /* Args */};
+
 /* A static operand information for vector_type func (vector_type).
    Some insns just supports SEW=32, such as the crypto vector Zvkg extension.
  * function registration.  */
diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h
index 978a58b43812..c0d34665b04b 100644
--- a/gcc/config/riscv/riscv-vector-builtins.h
+++ b/gcc/config/riscv/riscv-vector-builtins.h
@@ -134,6 +134,7 @@ enum required_ext
   XANDESVBFHCVT_EXT,    /* XANDESVBFHCVT extension */
   XANDESVSINTLOAD_EXT,  /* XANDESVSINTLOAD extension */
   XANDESVPACKFPH_EXT,   /* XANDESVPACKFPH extension */
+  XANDESVDOT_EXT,       /* XANDESVDOT extension */
   /* Please update below to isa_name func when add or remove enum type(s).  */
 };
 
@@ -181,6 +182,8 @@ static inline const char * required_ext_to_isa_name (enum required_ext required)
       return "xandesvsintload";
     case XANDESVPACKFPH_EXT:
       return "xandesvpackfph";
+    case XANDESVDOT_EXT:
+      return "xandesvdot";
     default:
       gcc_unreachable ();
   }
@@ -232,6 +235,8 @@ static inline bool required_extensions_specified (enum required_ext required)
       return TARGET_XANDESVSINTLOAD;
     case XANDESVPACKFPH_EXT:
       return TARGET_XANDESVPACKFPH;
+    case XANDESVDOT_EXT:
+      return TARGET_XANDESVDOT;
     default:
       gcc_unreachable ();
   }
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index 7dbb7f7094d3..270785ea2208 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -4952,3 +4952,16 @@
   RVVM8HF RVVM4HF RVVM2HF RVVM1HF RVVMF2HF
   (RVVMF4HF "TARGET_MIN_VLEN > 32")
 ])
+
+(define_mode_attr NDS_QUAD_FIX [
+  (RVVM8SI "RVVM8QI") (RVVM4SI "RVVM4QI") (RVVM2SI "RVVM2QI")
+  (RVVM1SI "RVVM1QI") (RVVMF2SI "RVVMF2QI") (RVVM8DI "RVVM8HI")
+  (RVVM4DI "RVVM4HI") (RVVM2DI "RVVM2HI") (RVVM1DI "RVVM1HI")
+
+  (V1SI "V1QI") (V2SI "V2QI") (V4SI "V4QI") (V8SI "V8QI") (V16SI "V16QI")
+  (V32SI "V32QI") (V64SI "V64QI") (V128SI "V128QI") (V256SI "V256QI")
+  (V512SI "V512QI") (V1024SI "V1024QI")
+  (V1DI "V1HI") (V2DI "V2HI") (V4DI "V4HI") (V8DI "V8HI") (V16DI "V16HI")
+  (V32DI "V32HI") (V64DI "V64HI") (V128DI "V128HI") (V256DI "V256HI")
+  (V512DI "V512HI")
+])
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dots.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dots.c
new file mode 100644
index 000000000000..d76c41132007
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dots.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2 (vint32mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32mf2(vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1 (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m1(vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2 (vint32m2_t vd, vint8m2_t vs1, vint8m2_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m2(vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4 (vint32m4_t vd, vint8m4_t vs1, vint8m4_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m4(vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8 (vint32m8_t vd, vint8m8_t vs1, vint8m8_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m8(vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m1(vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2 (vint64m2_t vd, vint16m2_t vs1, vint16m2_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m2(vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4 (vint64m4_t vd, vint16m4_t vs1, vint16m4_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m4(vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8 (vint64m8_t vd, vint16m8_t vs1, vint16m8_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m8(vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2_m (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+			      vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32mf2_m (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1_m (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+			     vint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m1_m (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2_m (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+			     vint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m2_m (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4_m (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+			     vint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m4_m (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8_m (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+			     vint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m8_m (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1_m (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+			     vint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m1_m (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2_m (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+			     vint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m2_m (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4_m (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+			     vint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m4_m (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8_m (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+			     vint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m8_m (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dots[ivxfswum.]*\s+} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dotsu.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dotsu.c
new file mode 100644
index 000000000000..aa2c4754e423
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dotsu.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2 (vint32mf2_t vd, vint8mf2_t vs1, vuint8mf2_t vs2,
+			     size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32mf2(vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1 (vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m1(vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2 (vint32m2_t vd, vint8m2_t vs1, vuint8m2_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m2(vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4 (vint32m4_t vd, vint8m4_t vs1, vuint8m4_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m4(vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8 (vint32m8_t vd, vint8m8_t vs1, vuint8m8_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m8(vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1 (vint64m1_t vd, vint16m1_t vs1, vuint16m1_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m1(vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2 (vint64m2_t vd, vint16m2_t vs1, vuint16m2_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m2(vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4 (vint64m4_t vd, vint16m4_t vs1, vuint16m4_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m4(vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8 (vint64m8_t vd, vint16m8_t vs1, vuint16m8_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m8(vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2_m (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+			       vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32mf2_m (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1_m (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+			      vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m1_m (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2_m (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+			      vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m2_m (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4_m (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+			      vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m4_m (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8_m (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+			      vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m8_m (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1_m (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+			      vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m1_m (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2_m (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+			      vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m2_m (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4_m (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+			      vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m4_m (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8_m (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+			      vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m8_m (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dotsu[ivxfswum.]*\s+} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dotu.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dotu.c
new file mode 100644
index 000000000000..788f70ad6d3f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vd4dotu.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vuint32mf2_t
+test_nds_vd4dotu_vv_u32mf2 (vuint32mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32mf2(vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_nds_vd4dotu_vv_u32m1 (vuint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m1(vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_nds_vd4dotu_vv_u32m2 (vuint32m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m2(vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_nds_vd4dotu_vv_u32m4 (vuint32m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m4(vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_nds_vd4dotu_vv_u32m8 (vuint32m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m8(vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_nds_vd4dotu_vv_u64m1 (vuint64m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m1(vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_nds_vd4dotu_vv_u64m2 (vuint64m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m2(vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_nds_vd4dotu_vv_u64m4 (vuint64m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m4(vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_nds_vd4dotu_vv_u64m8 (vuint64m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m8(vd, vs1, vs2, vl);
+}
+
+vuint32mf2_t
+test_nds_vd4dotu_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs1,
+			      vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32mf2_m (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_nds_vd4dotu_vv_u32m1_m (vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs1,
+			     vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m1_m (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_nds_vd4dotu_vv_u32m2_m (vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs1,
+			     vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m2_m (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_nds_vd4dotu_vv_u32m4_m (vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs1,
+			     vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m4_m (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_nds_vd4dotu_vv_u32m8_m (vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs1,
+			     vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m8_m (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_nds_vd4dotu_vv_u64m1_m (vbool64_t vm, vuint64m1_t vd, vuint16m1_t vs1,
+			     vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m1_m (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_nds_vd4dotu_vv_u64m2_m (vbool32_t vm, vuint64m2_t vd, vuint16m2_t vs1,
+			     vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m2_m (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_nds_vd4dotu_vv_u64m4_m (vbool16_t vm, vuint64m4_t vd, vuint16m4_t vs1,
+			     vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m4_m (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_nds_vd4dotu_vv_u64m8_m (vbool8_t vm, vuint64m8_t vd, vuint16m8_t vs1,
+			     vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m8_m (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dotu[ivxfswum.]*\s+} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dots.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dots.c
new file mode 100644
index 000000000000..03770c0c177f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dots.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2 (vint32mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dots(vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1 (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots(vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2 (vint32m2_t vd, vint8m2_t vs1, vint8m2_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots(vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4 (vint32m4_t vd, vint8m4_t vs1, vint8m4_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots(vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8 (vint32m8_t vd, vint8m8_t vs1, vint8m8_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots(vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots(vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2 (vint64m2_t vd, vint16m2_t vs1, vint16m2_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots(vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4 (vint64m4_t vd, vint16m4_t vs1, vint16m4_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots(vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8 (vint64m8_t vd, vint16m8_t vs1, vint16m8_t vs2,
+			   size_t vl)
+{
+  return __riscv_nds_vd4dots(vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2_m (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+			      vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1_m (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+			     vint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2_m (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+			     vint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4_m (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+			     vint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8_m (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+			     vint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1_m (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+			     vint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2_m (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+			     vint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4_m (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+			     vint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8_m (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+			     vint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dots[ivxfswum.]*\s+} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dotsu.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dotsu.c
new file mode 100644
index 000000000000..7fe0ef42bffc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dotsu.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2 (vint32mf2_t vd, vint8mf2_t vs1, vuint8mf2_t vs2,
+			     size_t vl)
+{
+  return __riscv_nds_vd4dotsu(vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1 (vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu(vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2 (vint32m2_t vd, vint8m2_t vs1, vuint8m2_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu(vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4 (vint32m4_t vd, vint8m4_t vs1, vuint8m4_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu(vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8 (vint32m8_t vd, vint8m8_t vs1, vuint8m8_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu(vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1 (vint64m1_t vd, vint16m1_t vs1, vuint16m1_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu(vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2 (vint64m2_t vd, vint16m2_t vs1, vuint16m2_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu(vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4 (vint64m4_t vd, vint16m4_t vs1, vuint16m4_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu(vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8 (vint64m8_t vd, vint16m8_t vs1, vuint16m8_t vs2,
+			    size_t vl)
+{
+  return __riscv_nds_vd4dotsu(vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2_m (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+			       vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1_m (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+			      vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2_m (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+			      vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4_m (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+			      vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8_m (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+			      vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1_m (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+			      vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2_m (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+			      vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4_m (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+			      vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8_m (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+			      vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dotsu[ivxfswum.]*\s+} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dotu.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dotu.c
new file mode 100644
index 000000000000..1b89b813af78
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vd4dotu.c
@@ -0,0 +1,133 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+/* { dg-prune-output "warning: '.*' will be deprecated in next 2" } */
+
+#include "andes_vector.h"
+
+vuint32mf2_t
+test_vd4dotu_vv_u32mf2 (vuint32mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2,
+			size_t vl)
+{
+  return __riscv_nds_vd4dotu (vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_vd4dotu_vv_u32m1 (vuint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2,
+		       size_t vl)
+{
+  return __riscv_nds_vd4dotu (vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_vd4dotu_vv_u32m2 (vuint32m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2,
+		       size_t vl)
+{
+  return __riscv_nds_vd4dotu (vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_vd4dotu_vv_u32m4 (vuint32m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2,
+		       size_t vl)
+{
+  return __riscv_nds_vd4dotu (vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_vd4dotu_vv_u32m8 (vuint32m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2,
+		       size_t vl)
+{
+  return __riscv_nds_vd4dotu (vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_vd4dotu_vv_u64m1 (vuint64m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2,
+		       size_t vl)
+{
+  return __riscv_nds_vd4dotu (vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_vd4dotu_vv_u64m2 (vuint64m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2,
+		       size_t vl)
+{
+  return __riscv_nds_vd4dotu (vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_vd4dotu_vv_u64m4 (vuint64m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2,
+		       size_t vl)
+{
+  return __riscv_nds_vd4dotu (vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_vd4dotu_vv_u64m8 (vuint64m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2,
+		       size_t vl)
+{
+  return __riscv_nds_vd4dotu (vd, vs1, vs2, vl);
+}
+
+vuint32mf2_t
+test_vd4dotu_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs1,
+			  vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_vd4dotu_vv_u32m1_m (vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs1,
+			 vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_vd4dotu_vv_u32m2_m (vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs1,
+			 vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_vd4dotu_vv_u32m4_m (vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs1,
+			 vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_vd4dotu_vv_u32m8_m (vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs1,
+			 vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_vd4dotu_vv_u64m1_m (vbool64_t vm, vuint64m1_t vd, vuint16m1_t vs1,
+			 vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_vd4dotu_vv_u64m2_m (vbool32_t vm, vuint64m2_t vd, vuint16m2_t vs1,
+			 vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_vd4dotu_vv_u64m4_m (vbool16_t vm, vuint64m4_t vd, vuint16m4_t vs1,
+			 vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_vd4dotu_vv_u64m8_m (vbool8_t vm, vuint64m8_t vd, vuint16m8_t vs1,
+			 vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dotu[ivxfswum.]*\s+} 18 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dots.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dots.c
new file mode 100644
index 000000000000..0163f4c5650a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dots.c
@@ -0,0 +1,258 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2_tu (vint32mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32mf2_tu(vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1_tu (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m1_tu(vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2_tu (vint32m2_t vd, vint8m2_t vs1, vint8m2_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m2_tu(vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4_tu (vint32m4_t vd, vint8m4_t vs1, vint8m4_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m4_tu(vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8_tu (vint32m8_t vd, vint8m8_t vs1, vint8m8_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m8_tu(vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1_tu (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m1_tu(vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2_tu (vint64m2_t vd, vint16m2_t vs1, vint16m2_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m2_tu(vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4_tu (vint64m4_t vd, vint16m4_t vs1, vint16m4_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m4_tu(vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8_tu (vint64m8_t vd, vint16m8_t vs1, vint16m8_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m8_tu(vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2_tum (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+				vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32mf2_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1_tum (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+			       vint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m1_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2_tum (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+			       vint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m2_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4_tum (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+			       vint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m4_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8_tum (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+			       vint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m8_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1_tum (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+			       vint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m1_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2_tum (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+			       vint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m2_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4_tum (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+			       vint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m4_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8_tum (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+			       vint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m8_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2_tumu (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+				 vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32mf2_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1_tumu (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+				vint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m1_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2_tumu (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+				vint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m2_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4_tumu (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+				vint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m4_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8_tumu (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+				vint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m8_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1_tumu (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+				vint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m1_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2_tumu (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+				vint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m2_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4_tumu (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+				vint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m4_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8_tumu (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+				vint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m8_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2_mu (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+			       vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32mf2_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1_mu (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+			      vint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m1_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2_mu (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+			      vint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m2_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4_mu (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+			      vint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m4_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8_mu (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+			      vint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i32m8_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1_mu (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+			      vint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m1_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2_mu (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+			      vint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m2_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4_mu (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+			      vint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m4_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8_mu (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+			      vint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_vv_i64m8_mu (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dots[ivxfswum.]*\s+} 36 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dotsu.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dotsu.c
new file mode 100644
index 000000000000..ec63c0abe5d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dotsu.c
@@ -0,0 +1,258 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2_tu (vint32mf2_t vd, vint8mf2_t vs1, vuint8mf2_t vs2,
+				size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32mf2_tu(vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1_tu (vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m1_tu(vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2_tu (vint32m2_t vd, vint8m2_t vs1, vuint8m2_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m2_tu(vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4_tu (vint32m4_t vd, vint8m4_t vs1, vuint8m4_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m4_tu(vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8_tu (vint32m8_t vd, vint8m8_t vs1, vuint8m8_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m8_tu(vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1_tu (vint64m1_t vd, vint16m1_t vs1, vuint16m1_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m1_tu(vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2_tu (vint64m2_t vd, vint16m2_t vs1, vuint16m2_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m2_tu(vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4_tu (vint64m4_t vd, vint16m4_t vs1, vuint16m4_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m4_tu(vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8_tu (vint64m8_t vd, vint16m8_t vs1, vuint16m8_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m8_tu(vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2_tum (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+				 vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32mf2_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1_tum (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+				vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m1_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2_tum (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+				vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m2_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4_tum (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+				vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m4_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8_tum (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+				vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m8_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1_tum (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+				vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m1_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2_tum (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+				vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m2_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4_tum (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+				vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m4_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8_tum (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+				vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m8_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2_tumu (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+				  vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32mf2_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1_tumu (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+				 vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m1_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2_tumu (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+				 vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m2_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4_tumu (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+				 vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m4_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8_tumu (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+				 vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m8_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1_tumu (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+				 vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m1_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2_tumu (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+				 vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m2_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4_tumu (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+				 vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m4_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8_tumu (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+				 vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m8_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2_mu (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+				vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32mf2_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1_mu (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+			       vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m1_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2_mu (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+			       vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m2_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4_mu (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+			       vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m4_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8_mu (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+			       vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i32m8_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1_mu (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+			       vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m1_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2_mu (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+			       vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m2_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4_mu (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+			       vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m4_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8_mu (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+			       vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_vv_i64m8_mu (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dotsu[ivxfswum.]*\s+} 36 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dotu.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dotu.c
new file mode 100644
index 000000000000..9f48ca4b5001
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vd4dotu.c
@@ -0,0 +1,258 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vuint32mf2_t
+test_nds_vd4dotu_vv_u32mf2_tu (vuint32mf2_t vd, vuint8mf2_t vs1,
+			       vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32mf2_tu(vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_nds_vd4dotu_vv_u32m1_tu (vuint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m1_tu(vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_nds_vd4dotu_vv_u32m2_tu (vuint32m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m2_tu(vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_nds_vd4dotu_vv_u32m4_tu (vuint32m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m4_tu (vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_nds_vd4dotu_vv_u32m8_tu (vuint32m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m8_tu (vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_nds_vd4dotu_vv_u64m1_tu (vuint64m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m1_tu (vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_nds_vd4dotu_vv_u64m2_tu (vuint64m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m2_tu (vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_nds_vd4dotu_vv_u64m4_tu (vuint64m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m4_tu (vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_nds_vd4dotu_vv_u64m8_tu (vuint64m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m8_tu (vd, vs1, vs2, vl);
+}
+
+vuint32mf2_t
+test_nds_vd4dotu_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs1,
+				vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32mf2_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_nds_vd4dotu_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs1,
+			       vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m1_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_nds_vd4dotu_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs1,
+			       vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m2_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_nds_vd4dotu_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs1,
+			       vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m4_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_nds_vd4dotu_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs1,
+			       vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m8_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_nds_vd4dotu_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint16m1_t vs1,
+			       vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m1_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_nds_vd4dotu_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint16m2_t vs1,
+			       vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m2_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_nds_vd4dotu_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint16m4_t vs1,
+			       vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m4_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_nds_vd4dotu_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint16m8_t vs1,
+			       vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m8_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint32mf2_t
+test_nds_vd4dotu_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs1,
+				 vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32mf2_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_nds_vd4dotu_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs1,
+				vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m1_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_nds_vd4dotu_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs1,
+				vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m2_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_nds_vd4dotu_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs1,
+				vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m4_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_nds_vd4dotu_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs1,
+				vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m8_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_nds_vd4dotu_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint16m1_t vs1,
+				vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m1_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_nds_vd4dotu_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint16m2_t vs1,
+				vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m2_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_nds_vd4dotu_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint16m4_t vs1,
+				vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m4_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_nds_vd4dotu_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint16m8_t vs1,
+				vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m8_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32mf2_t
+test_nds_vd4dotu_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs1,
+			       vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32mf2_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_nds_vd4dotu_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs1,
+			      vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m1_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_nds_vd4dotu_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs1,
+			      vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m2_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_nds_vd4dotu_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs1,
+			      vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m4_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_nds_vd4dotu_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs1,
+			      vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u32m8_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_nds_vd4dotu_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint16m1_t vs1,
+			      vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m1_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_nds_vd4dotu_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint16m2_t vs1,
+			      vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m2_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_nds_vd4dotu_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint16m4_t vs1,
+			      vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m4_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_nds_vd4dotu_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint16m8_t vs1,
+			      vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_vv_u64m8_mu (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dotu[ivxfswum.]*\s+} 36 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dots.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dots.c
new file mode 100644
index 000000000000..ade40b944025
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dots.c
@@ -0,0 +1,258 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
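+/* These tests use the overloaded policy spellings (__riscv_nds_vd4dots_tu
+   and friends); the compiler resolves the element type and LMUL from the
+   argument types.  */
+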
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2_tu (vint32mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dots_tu (vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1_tu (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_tu (vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2_tu (vint32m2_t vd, vint8m2_t vs1, vint8m2_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_tu (vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4_tu (vint32m4_t vd, vint8m4_t vs1, vint8m4_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_tu (vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8_tu (vint32m8_t vd, vint8m8_t vs1, vint8m8_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_tu (vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1_tu (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_tu (vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2_tu (vint64m2_t vd, vint16m2_t vs1, vint16m2_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_tu (vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4_tu (vint64m4_t vd, vint16m4_t vs1, vint16m4_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_tu (vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8_tu (vint64m8_t vd, vint16m8_t vs1, vint16m8_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dots_tu (vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2_tum (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+				vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1_tum (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+			       vint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2_tum (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+			       vint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4_tum (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+			       vint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8_tum (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+			       vint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1_tum (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+			       vint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2_tum (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+			       vint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4_tum (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+			       vint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8_tum (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+			       vint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2_tumu (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+				 vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1_tumu (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+				vint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2_tumu (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+				vint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4_tumu (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+				vint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8_tumu (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+				vint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1_tumu (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+				vint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2_tumu (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+				vint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4_tumu (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+				vint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8_tumu (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+				vint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dots_vv_i32mf2_mu (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+			       vint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dots_vv_i32m1_mu (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+			      vint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dots_vv_i32m2_mu (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+			      vint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dots_vv_i32m4_mu (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+			      vint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dots_vv_i32m8_mu (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+			      vint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dots_vv_i64m1_mu (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+			      vint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dots_vv_i64m2_mu (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+			      vint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dots_vv_i64m4_mu (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+			      vint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dots_vv_i64m8_mu (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+			      vint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dots_mu (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dots[ivxfswum.]*\s+} 36 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dotsu.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dotsu.c
new file mode 100644
index 000000000000..0c44aa450920
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dotsu.c
@@ -0,0 +1,258 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
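+/* Overloaded policy tests for the signed-by-unsigned dot product: vs1 is
+   signed, vs2 is unsigned, and the accumulator vd is signed.  */
+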
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2_tu (vint32mf2_t vd, vint8mf2_t vs1, vuint8mf2_t vs2,
+				size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tu (vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1_tu (vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tu (vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2_tu (vint32m2_t vd, vint8m2_t vs1, vuint8m2_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tu (vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4_tu (vint32m4_t vd, vint8m4_t vs1, vuint8m4_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tu (vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8_tu (vint32m8_t vd, vint8m8_t vs1, vuint8m8_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tu (vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1_tu (vint64m1_t vd, vint16m1_t vs1, vuint16m1_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tu (vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2_tu (vint64m2_t vd, vint16m2_t vs1, vuint16m2_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tu (vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4_tu (vint64m4_t vd, vint16m4_t vs1, vuint16m4_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tu (vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8_tu (vint64m8_t vd, vint16m8_t vs1, vuint16m8_t vs2,
+			       size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tu (vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2_tum (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+				 vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1_tum (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+				vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2_tum (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+				vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4_tum (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+				vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8_tum (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+				vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1_tum (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+				vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2_tum (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+				vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4_tum (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+				vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8_tum (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+				vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2_tumu (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+				  vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1_tumu (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+				 vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2_tumu (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+				 vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4_tumu (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+				 vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8_tumu (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+				 vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1_tumu (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+				 vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2_tumu (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+				 vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4_tumu (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+				 vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8_tumu (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+				 vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vint32mf2_t
+test_nds_vd4dotsu_vv_i32mf2_mu (vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs1,
+				vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m1_t
+test_nds_vd4dotsu_vv_i32m1_mu (vbool32_t vm, vint32m1_t vd, vint8m1_t vs1,
+			       vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m2_t
+test_nds_vd4dotsu_vv_i32m2_mu (vbool16_t vm, vint32m2_t vd, vint8m2_t vs1,
+			       vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m4_t
+test_nds_vd4dotsu_vv_i32m4_mu (vbool8_t vm, vint32m4_t vd, vint8m4_t vs1,
+			       vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint32m8_t
+test_nds_vd4dotsu_vv_i32m8_mu (vbool4_t vm, vint32m8_t vd, vint8m8_t vs1,
+			       vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m1_t
+test_nds_vd4dotsu_vv_i64m1_mu (vbool64_t vm, vint64m1_t vd, vint16m1_t vs1,
+			       vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m2_t
+test_nds_vd4dotsu_vv_i64m2_mu (vbool32_t vm, vint64m2_t vd, vint16m2_t vs1,
+			       vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m4_t
+test_nds_vd4dotsu_vv_i64m4_mu (vbool16_t vm, vint64m4_t vd, vint16m4_t vs1,
+			       vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vint64m8_t
+test_nds_vd4dotsu_vv_i64m8_mu (vbool8_t vm, vint64m8_t vd, vint16m8_t vs1,
+			       vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotsu_mu (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dotsu[ivxfswum.]*\s+} 36 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dotu.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dotu.c
new file mode 100644
index 000000000000..a62d93c2b59b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vd4dotu.c
@@ -0,0 +1,258 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_xandesvdot -O3" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_xandesvdot -O3" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
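+/* Overloaded policy tests for the unsigned dot product; all operands and
+   the accumulator are unsigned.  */
+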
+vuint32mf2_t
+test_nds_vd4dotu_vv_u32mf2_tu (vuint32mf2_t vd, vuint8mf2_t vs1,
+			       vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tu (vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_nds_vd4dotu_vv_u32m1_tu (vuint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_tu (vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_nds_vd4dotu_vv_u32m2_tu (vuint32m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_tu (vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_nds_vd4dotu_vv_u32m4_tu (vuint32m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_tu (vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_nds_vd4dotu_vv_u32m8_tu (vuint32m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_tu (vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_nds_vd4dotu_vv_u64m1_tu (vuint64m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_tu (vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_nds_vd4dotu_vv_u64m2_tu (vuint64m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_tu (vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_nds_vd4dotu_vv_u64m4_tu (vuint64m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_tu (vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_nds_vd4dotu_vv_u64m8_tu (vuint64m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2,
+			      size_t vl)
+{
+  return __riscv_nds_vd4dotu_tu (vd, vs1, vs2, vl);
+}
+
+vuint32mf2_t
+test_nds_vd4dotu_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs1,
+				vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_nds_vd4dotu_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs1,
+			       vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_nds_vd4dotu_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs1,
+			       vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_nds_vd4dotu_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs1,
+			       vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_nds_vd4dotu_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs1,
+			       vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_nds_vd4dotu_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint16m1_t vs1,
+			       vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_nds_vd4dotu_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint16m2_t vs1,
+			       vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_nds_vd4dotu_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint16m4_t vs1,
+			       vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_nds_vd4dotu_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint16m8_t vs1,
+			       vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tum (vm, vd, vs1, vs2, vl);
+}
+
+vuint32mf2_t
+test_nds_vd4dotu_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs1,
+				 vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_nds_vd4dotu_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs1,
+				vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_nds_vd4dotu_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs1,
+				vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_nds_vd4dotu_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs1,
+				vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_nds_vd4dotu_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs1,
+				vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_nds_vd4dotu_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint16m1_t vs1,
+				vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_nds_vd4dotu_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint16m2_t vs1,
+				vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_nds_vd4dotu_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint16m4_t vs1,
+				vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_nds_vd4dotu_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint16m8_t vs1,
+				vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_tumu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32mf2_t
+test_nds_vd4dotu_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs1,
+			       vuint8mf2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m1_t
+test_nds_vd4dotu_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs1,
+			      vuint8m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m2_t
+test_nds_vd4dotu_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs1,
+			      vuint8m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m4_t
+test_nds_vd4dotu_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs1,
+			      vuint8m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint32m8_t
+test_nds_vd4dotu_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs1,
+			      vuint8m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m1_t
+test_nds_vd4dotu_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint16m1_t vs1,
+			      vuint16m1_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m2_t
+test_nds_vd4dotu_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint16m2_t vs1,
+			      vuint16m2_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m4_t
+test_nds_vd4dotu_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint16m4_t vs1,
+			      vuint16m4_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_mu (vm, vd, vs1, vs2, vl);
+}
+
+vuint64m8_t
+test_nds_vd4dotu_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint16m8_t vs1,
+			      vuint16m8_t vs2, size_t vl)
+{
+  return __riscv_nds_vd4dotu_mu (vm, vd, vs1, vs2, vl);
+}
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vd4dotu[ivxfswum.]*\s+} 36 } } */
-- 
2.34.1
