This extension defines vector load instructions to move sign-extended or zero-extended INT4 data into 8-bit vector register elements.
gcc/ChangeLog: * config/riscv/andes-vector-builtins-bases.cc (nds_nibbleload): New class. * config/riscv/andes-vector-builtins-bases.h (nds_vln8): New def. (nds_vlnu8): Ditto. * config/riscv/andes-vector-builtins-functions.def (nds_vln8): Ditto. (nds_vlnu8): Ditto. * config/riscv/andes-vector.md (@pred_intload_mov<su><mode>): New pattern. * config/riscv/riscv-vector-builtins-types.def (DEF_RVV_Q_OPS): New def. (DEF_RVV_QU_OPS): Ditto. * config/riscv/riscv-vector-builtins.cc (q_v_void_const_ptr_ops): New operand information. (qu_v_void_const_ptr_ops): Ditto. * config/riscv/riscv-vector-builtins.def (void_const_ptr): New def. * config/riscv/riscv-vector-builtins.h (enum required_ext): Add XANDESVSINTLOAD_EXT. (required_ext_to_isa_name): Add case XANDESVSINTLOAD_EXT. (required_extensions_specified): Ditto. * config/riscv/vector-iterators.md (NDS_QVI): New iterator. gcc/testsuite/ChangeLog: * gcc.target/riscv/rvv/rvv.exp: Add regression for xandesvector. * gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vln8.c: New test. * gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vln8.c: New test. * gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vln8.c: New test. * gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vln8.c: New test. 
--- .../riscv/andes-vector-builtins-bases.cc | 30 ++++- .../riscv/andes-vector-builtins-bases.h | 2 + .../riscv/andes-vector-builtins-functions.def | 5 + gcc/config/riscv/andes-vector.md | 27 ++++ .../riscv/riscv-vector-builtins-types.def | 30 +++++ gcc/config/riscv/riscv-vector-builtins.cc | 32 +++++ gcc/config/riscv/riscv-vector-builtins.def | 1 + gcc/config/riscv/riscv-vector-builtins.h | 5 + gcc/config/riscv/vector-iterators.md | 5 + gcc/testsuite/gcc.target/riscv/rvv/rvv.exp | 10 ++ .../non-policy/non-overloaded/nds_vln8.c | 62 +++++++++ .../non-policy/overloaded/nds_vln8.c | 34 +++++ .../policy/non-overloaded/nds_vln8.c | 118 ++++++++++++++++++ .../xandesvector/policy/overloaded/nds_vln8.c | 118 ++++++++++++++++++ 14 files changed, 478 insertions(+), 1 deletion(-) create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vln8.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vln8.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vln8.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vln8.c diff --git a/gcc/config/riscv/andes-vector-builtins-bases.cc b/gcc/config/riscv/andes-vector-builtins-bases.cc index 69e16fd94543..2c19f32225b9 100644 --- a/gcc/config/riscv/andes-vector-builtins-bases.cc +++ b/gcc/config/riscv/andes-vector-builtins-bases.cc @@ -89,8 +89,35 @@ public: } }; +/* Implements Andes vln8.v/vlnu8.v. 
*/ +template <bool SIGN> +class nds_nibbleload : public function_base +{ +public: + unsigned int call_properties (const function_instance &) const override + { + return CP_READ_MEMORY; + } + + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred != PRED_TYPE_none; + } + + rtx expand (function_expander &e) const override + { + if (SIGN) + return e.use_contiguous_load_insn ( + code_for_pred_intload_mov (SIGN_EXTEND, e.vector_mode ())); + return e.use_contiguous_load_insn ( + code_for_pred_intload_mov (ZERO_EXTEND, e.vector_mode ())); + } +}; + static CONSTEXPR const nds_vfwcvt nds_vfwcvt_obj; static CONSTEXPR const nds_vfncvt nds_vfncvt_obj; +static CONSTEXPR const nds_nibbleload<true> nds_vln8_obj; +static CONSTEXPR const nds_nibbleload<false> nds_vlnu8_obj; /* Declare the function base NAME, pointing it to an instance of class <NAME>_obj. */ @@ -99,5 +126,6 @@ static CONSTEXPR const nds_vfncvt nds_vfncvt_obj; BASE (nds_vfwcvt) BASE (nds_vfncvt) - +BASE (nds_vln8) +BASE (nds_vlnu8) } // end namespace riscv_vector diff --git a/gcc/config/riscv/andes-vector-builtins-bases.h b/gcc/config/riscv/andes-vector-builtins-bases.h index 7d11761d8f6e..d983b44d2e9d 100644 --- a/gcc/config/riscv/andes-vector-builtins-bases.h +++ b/gcc/config/riscv/andes-vector-builtins-bases.h @@ -26,6 +26,8 @@ namespace riscv_vector { namespace bases { extern const function_base *const nds_vfwcvt; extern const function_base *const nds_vfncvt; +extern const function_base *const nds_vln8; +extern const function_base *const nds_vlnu8; } } // end namespace riscv_vector diff --git a/gcc/config/riscv/andes-vector-builtins-functions.def b/gcc/config/riscv/andes-vector-builtins-functions.def index 989db8c71bab..ebb0de3217ea 100644 --- a/gcc/config/riscv/andes-vector-builtins-functions.def +++ b/gcc/config/riscv/andes-vector-builtins-functions.def @@ -42,4 +42,9 @@ DEF_RVV_FUNCTION (nds_vfwcvt, alu, full_preds, bf16_to_f32_wf_v_ops) DEF_RVV_FUNCTION (nds_vfncvt, 
narrow_alu, full_preds, f32_to_bf16_nf_w_ops) #undef REQUIRED_EXTENSIONS +#define REQUIRED_EXTENSIONS XANDESVSINTLOAD_EXT +DEF_RVV_FUNCTION (nds_vln8, alu, full_preds, q_v_void_const_ptr_ops) +DEF_RVV_FUNCTION (nds_vlnu8, alu, full_preds, qu_v_void_const_ptr_ops) +#undef REQUIRED_EXTENSIONS + #undef DEF_RVV_FUNCTION diff --git a/gcc/config/riscv/andes-vector.md b/gcc/config/riscv/andes-vector.md index 31498ca9aab0..1e064f0b4fa6 100644 --- a/gcc/config/riscv/andes-vector.md +++ b/gcc/config/riscv/andes-vector.md @@ -20,6 +20,7 @@ (define_c_enum "unspec" [ UNSPEC_NDS_VFWCVTBF16 UNSPEC_NDS_VFNCVTBF16 + UNSPEC_NDS_INTLOAD ]) ;; .................... @@ -49,3 +50,29 @@ "nds.vfncvt.bf16.s\t%0,%1" [(set_attr "type" "fcvt") (set_attr "mode" "<NDS_V_DOUBLE_TRUNC_BF>")]) + +;; Vector INT4 Load Extension. + +(define_insn "@pred_intload_mov<su><mode>" + [(set (match_operand:NDS_QVI 0 "nonimmediate_operand" "=vr, vr, vd") + (if_then_else:NDS_QVI + (unspec:<VM> + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, Wc1, vm") + (match_operand 4 "vector_length_operand" " rK, rK, rK") + (match_operand 5 "const_int_operand" " i, i, i") + (match_operand 6 "const_int_operand" " i, i, i") + (match_operand 7 "const_int_operand" " i, i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:NDS_QVI + [(any_extend:NDS_QVI (match_operand:VOID 3 "memory_operand" "m, m, m"))] + UNSPEC_NDS_INTLOAD) + (match_operand:NDS_QVI 2 "vector_merge_operand" "0, vu, vu")))] + "(TARGET_VECTOR && TARGET_XANDESVSINTLOAD + && register_operand (operands[0], <MODE>mode))" + "@ + nds.vln<u>8.v\t%0,%3%p1 + nds.vln<u>8.v\t%0,%3 + nds.vln<u>8.v\t%0,%3,%1.t" + [(set_attr "type" "vlde,vlde,vlde") + (set_attr "mode" "<MODE>")]) diff --git a/gcc/config/riscv/riscv-vector-builtins-types.def b/gcc/config/riscv/riscv-vector-builtins-types.def index ade6644b56ee..d07a0baddb48 100644 --- a/gcc/config/riscv/riscv-vector-builtins-types.def +++ b/gcc/config/riscv/riscv-vector-builtins-types.def @@ 
-381,6 +381,18 @@ along with GCC; see the file COPYING3. If not see #define DEF_RVV_X2_WU_OPS(TYPE, REQUIRE) #endif +/* Use "DEF_RVV_Q_OPS" macro include all quad signed integer which will be + iterated and registered as intrinsic functions. */ +#ifndef DEF_RVV_Q_OPS +#define DEF_RVV_Q_OPS(TYPE, REQUIRE) +#endif + +/* Use "DEF_RVV_QU_OPS" macro include all quad unsigned integer which will be + iterated and registered as intrinsic functions. */ +#ifndef DEF_RVV_QU_OPS +#define DEF_RVV_QU_OPS(TYPE, REQUIRE) +#endif + DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64) DEF_RVV_I_OPS (vint8mf4_t, 0) DEF_RVV_I_OPS (vint8mf2_t, 0) @@ -1501,6 +1513,22 @@ DEF_RVV_X2_WU_OPS (vuint32m1_t, 0) DEF_RVV_X2_WU_OPS (vuint32m2_t, 0) DEF_RVV_X2_WU_OPS (vuint32m4_t, 0) +DEF_RVV_Q_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64) +DEF_RVV_Q_OPS (vint8mf4_t, 0) +DEF_RVV_Q_OPS (vint8mf2_t, 0) +DEF_RVV_Q_OPS (vint8m1_t, 0) +DEF_RVV_Q_OPS (vint8m2_t, 0) +DEF_RVV_Q_OPS (vint8m4_t, 0) +DEF_RVV_Q_OPS (vint8m8_t, 0) + +DEF_RVV_QU_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64) +DEF_RVV_QU_OPS (vuint8mf4_t, 0) +DEF_RVV_QU_OPS (vuint8mf2_t, 0) +DEF_RVV_QU_OPS (vuint8m1_t, 0) +DEF_RVV_QU_OPS (vuint8m2_t, 0) +DEF_RVV_QU_OPS (vuint8m4_t, 0) +DEF_RVV_QU_OPS (vuint8m8_t, 0) + #undef DEF_RVV_I_OPS #undef DEF_RVV_U_OPS #undef DEF_RVV_F_OPS @@ -1559,3 +1587,5 @@ DEF_RVV_X2_WU_OPS (vuint32m4_t, 0) #undef DEF_RVV_XFQF_OPS #undef DEF_RVV_X2_U_OPS #undef DEF_RVV_X2_WU_OPS +#undef DEF_RVV_Q_OPS +#undef DEF_RVV_QU_OPS diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc index 0da6e57cdd17..9d5475ec2f75 100644 --- a/gcc/config/riscv/riscv-vector-builtins.cc +++ b/gcc/config/riscv/riscv-vector-builtins.cc @@ -572,6 +572,18 @@ static const rvv_type_info xfqf_ops[] = { #include "riscv-vector-builtins-types.def" {NUM_VECTOR_TYPES, 0}}; +/* A list of all vint8m_t will be registered for intrinsic functions. 
*/ +static const rvv_type_info q_ops[] = { +#define DEF_RVV_Q_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE}, +#include "riscv-vector-builtins-types.def" + {NUM_VECTOR_TYPES, 0}}; + +/* A list of all vuint8m_t will be registered for intrinsic functions. */ +static const rvv_type_info qu_ops[] = { +#define DEF_RVV_QU_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE}, +#include "riscv-vector-builtins-types.def" + {NUM_VECTOR_TYPES, 0}}; + static CONSTEXPR const rvv_arg_type_info rvv_arg_type_info_end = rvv_arg_type_info (NUM_BASE_TYPES); @@ -1225,6 +1237,10 @@ static CONSTEXPR const rvv_arg_type_info sf_vc_fvw_args[] rvv_arg_type_info (RVV_BASE_scalar_float), rvv_arg_type_info_end}; +/* A list of args for vector_type func (const void_type *) function. */ +static CONSTEXPR const rvv_arg_type_info void_const_ptr_args[] + = {rvv_arg_type_info (RVV_BASE_void_const_ptr), rvv_arg_type_info_end}; + /* A list of none preds that will be registered for intrinsic functions. */ static CONSTEXPR const predication_type_index none_preds[] = {PRED_TYPE_none, NUM_PRED_TYPES}; @@ -3097,6 +3113,22 @@ static CONSTEXPR const rvv_op_info bf16_to_f32_wf_v_ops rvv_arg_type_info (RVV_BASE_vector), /* Return type */ bf_w_v_args /* Args */}; +/* A static operand information for vector_type func (const void_type *) + * function registration. */ +static CONSTEXPR const rvv_op_info q_v_void_const_ptr_ops + = {q_ops, /* Types */ + OP_TYPE_v, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + void_const_ptr_args /* Args */}; + +/* A static operand information for vector_type func (const void_type *) + * function registration. */ +static CONSTEXPR const rvv_op_info qu_v_void_const_ptr_ops + = {qu_ops, /* Types */ + OP_TYPE_v, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + void_const_ptr_args /* Args */}; + /* A static operand information for vector_type func (vector_type). Some insns just supports SEW=32, such as the crypto vector Zvkg extension. 
* function registration. */ diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def index c9859dc65f1b..a0a69015128b 100644 --- a/gcc/config/riscv/riscv-vector-builtins.def +++ b/gcc/config/riscv/riscv-vector-builtins.def @@ -759,6 +759,7 @@ DEF_RVV_BASE_TYPE (size_ptr, build_pointer_type (size_type_node)) DEF_RVV_BASE_TYPE (tuple_subpart, get_tuple_subpart_type (type_idx)) DEF_RVV_BASE_TYPE (xfqf_float, get_xfqf_float_type (type_idx)) DEF_RVV_BASE_TYPE (scalar_float, get_scalar_float_type (type_idx)) +DEF_RVV_BASE_TYPE (void_const_ptr, const_ptr_type_node) DEF_RVV_VXRM_ENUM (RNU, VXRM_RNU) DEF_RVV_VXRM_ENUM (RNE, VXRM_RNE) diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h index 130ecd7eb537..db9a1b91363a 100644 --- a/gcc/config/riscv/riscv-vector-builtins.h +++ b/gcc/config/riscv/riscv-vector-builtins.h @@ -132,6 +132,7 @@ enum required_ext XSFVFNRCLIPXFQF_EXT, /* XSFVFNRCLIPXFQF extension */ XSFVCP_EXT, /* XSFVCP extension*/ XANDESVBFHCVT_EXT, /* XANDESVBFHCVT extension */ + XANDESVSINTLOAD_EXT, /* XANDESVSINTLOAD extension */ /* Please update below to isa_name func when add or remove enum type(s). 
*/ }; @@ -175,6 +176,8 @@ static inline const char * required_ext_to_isa_name (enum required_ext required) return "xsfvcp"; case XANDESVBFHCVT_EXT: return "xandesvbfhcvt"; + case XANDESVSINTLOAD_EXT: + return "xandesvsintload"; default: gcc_unreachable (); } @@ -222,6 +225,8 @@ static inline bool required_extensions_specified (enum required_ext required) return TARGET_XSFVCP; case XANDESVBFHCVT_EXT: return TARGET_XANDESVBFHCVT; + case XANDESVSINTLOAD_EXT: + return TARGET_XANDESVSINTLOAD; default: gcc_unreachable (); } diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md index 4b4ceefa27a7..34b818bb07a1 100644 --- a/gcc/config/riscv/vector-iterators.md +++ b/gcc/config/riscv/vector-iterators.md @@ -4943,3 +4943,8 @@ (RVVM8SF "RVVM4BF") (RVVM4SF "RVVM2BF") (RVVM2SF "RVVM1BF") (RVVM1SF "RVVMF2BF") (RVVMF2SF "RVVMF4BF") ]) + +(define_mode_iterator NDS_QVI [ + RVVM8QI RVVM4QI RVVM2QI RVVM1QI + RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32") +]) diff --git a/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp b/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp index 720cea97d77b..877cc55bb88e 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp +++ b/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp @@ -151,5 +151,15 @@ foreach op $AUTOVEC_TEST_OPTS { "$op" "" } +set POLICY [list {policy} {non-policy} ] +set OVERLOAD [list {overloaded} {non-overloaded} ] +foreach po $POLICY { + foreach ov $OVERLOAD { + # For Andes Vector feature tests + dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/xandesvector/$po/$ov/*.\[cS\]]] \ + "" $CFLAGS + } +} + # All done. 
dg-finish diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vln8.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vln8.c new file mode 100644 index 000000000000..b5a4c57931b1 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vln8.c @@ -0,0 +1,62 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gv_xandesvsintload -O3" { target { rv32 } } } */ +/* { dg-options "-march=rv64gv_xandesvsintload -O3" { target { rv64 } } } */ + +#include "andes_vector.h" + +vint8mf8_t test_vln8_v_i8mf8(const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf8(rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4(const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf4(rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2(const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf2(rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1(const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m1(rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2(const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m2(rs1, vl); +} + +vint8m4_t test_vln8_v_i8m4(const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m4(rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8(const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m8(rs1, vl); +} + +vint8mf8_t test_vln8_v_i8mf8_m(vbool64_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf8_m(vm, rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4_m(vbool32_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf4_m(vm, rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2_m(vbool16_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf2_m(vm, rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1_m(vbool8_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m1_m(vm, rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2_m(vbool4_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m2_m(vm, rs1, vl); +} + +vint8m4_t 
test_vln8_v_i8m4_m(vbool2_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m4_m(vm, rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8_m(vbool1_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m8_m(vm, rs1, vl); +} +/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vln8\.v\s+} 14 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vln8.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vln8.c new file mode 100644 index 000000000000..d632432127bd --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vln8.c @@ -0,0 +1,34 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gv_xandesvsintload -O3" { target { rv32 } } } */ +/* { dg-options "-march=rv64gv_xandesvsintload -O3" { target { rv64 } } } */ + +#include "andes_vector.h" + +vint8mf8_t test_vln8_v_i8mf8_m(vbool64_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8(vm, rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4_m(vbool32_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8(vm, rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2_m(vbool16_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8(vm, rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1_m(vbool8_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8(vm, rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2_m(vbool4_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8(vm, rs1, vl); +} + +vint8m4_t test_vln8_v_i8m4_m(vbool2_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8(vm, rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8_m(vbool1_t vm, const void *rs1, size_t vl) { + return __riscv_nds_vln8(vm, rs1, vl); +} +/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vln8\.v\s+} 7 } } */ diff --git 
a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vln8.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vln8.c new file mode 100644 index 000000000000..9874b9aa6d50 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vln8.c @@ -0,0 +1,118 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gv_xandesvsintload -O3" { target { rv32 } } } */ +/* { dg-options "-march=rv64gv_xandesvsintload -O3" { target { rv64 } } } */ + +#include "andes_vector.h" + +vint8mf8_t test_vln8_v_i8mf8_tu(vint8mf8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf8_tu(vd, rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4_tu(vint8mf4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf4_tu(vd, rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2_tu(vint8mf2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf2_tu(vd, rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1_tu(vint8m1_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m1_tu(vd, rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2_tu(vint8m2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m2_tu(vd, rs1, vl); +} + +vint8m4_t test_vln8_v_i8m4_tu(vint8m4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m4_tu(vd, rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8_tu(vint8m8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m8_tu(vd, rs1, vl); +} + +vint8mf8_t test_vln8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf8_tum(vm, vd, rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf4_tum(vm, vd, rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf2_tum(vm, vd, rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const 
void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m1_tum(vm, vd, rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m2_tum(vm, vd, rs1, vl); +} + +vint8m4_t test_vln8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m4_tum(vm, vd, rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m8_tum(vm, vd, rs1, vl); +} + +vint8mf8_t test_vln8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf8_tumu(vm, vd, rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf4_tumu(vm, vd, rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf2_tumu(vm, vd, rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m1_tumu(vm, vd, rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m2_tumu(vm, vd, rs1, vl); +} + +vint8m4_t test_vln8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m4_tumu(vm, vd, rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m8_tumu(vm, vd, rs1, vl); +} + +vint8mf8_t test_vln8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf8_mu(vm, vd, rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8mf4_mu(vm, vd, rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) { 
+ return __riscv_nds_vln8_v_i8mf2_mu(vm, vd, rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m1_mu(vm, vd, rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m2_mu(vm, vd, rs1, vl); +} + +vint8m4_t test_vln8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m4_mu(vm, vd, rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_v_i8m8_mu(vm, vd, rs1, vl); +} +/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vln8\.v\s+} 28 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vln8.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vln8.c new file mode 100644 index 000000000000..a360d1f557d4 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vln8.c @@ -0,0 +1,118 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gv_xandesvsintload -O3" { target { rv32 } } } */ +/* { dg-options "-march=rv64gv_xandesvsintload -O3" { target { rv64 } } } */ + +#include "andes_vector.h" + +vint8mf8_t test_vln8_v_i8mf8_tu(vint8mf8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tu(vd, rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4_tu(vint8mf4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tu(vd, rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2_tu(vint8mf2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tu(vd, rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1_tu(vint8m1_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tu(vd, rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2_tu(vint8m2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tu(vd, rs1, vl); +} + +vint8m4_t 
test_vln8_v_i8m4_tu(vint8m4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tu(vd, rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8_tu(vint8m8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tu(vd, rs1, vl); +} + +vint8mf8_t test_vln8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tum(vm, vd, rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tum(vm, vd, rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tum(vm, vd, rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tum(vm, vd, rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tum(vm, vd, rs1, vl); +} + +vint8m4_t test_vln8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tum(vm, vd, rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tum(vm, vd, rs1, vl); +} + +vint8mf8_t test_vln8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tumu(vm, vd, rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tumu(vm, vd, rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tumu(vm, vd, rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tumu(vm, vd, rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tumu(vm, vd, rs1, vl); +} + +vint8m4_t 
test_vln8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tumu(vm, vd, rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_tumu(vm, vd, rs1, vl); +} + +vint8mf8_t test_vln8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_mu(vm, vd, rs1, vl); +} + +vint8mf4_t test_vln8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_mu(vm, vd, rs1, vl); +} + +vint8mf2_t test_vln8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_mu(vm, vd, rs1, vl); +} + +vint8m1_t test_vln8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_mu(vm, vd, rs1, vl); +} + +vint8m2_t test_vln8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_mu(vm, vd, rs1, vl); +} + +vint8m4_t test_vln8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_mu(vm, vd, rs1, vl); +} + +vint8m8_t test_vln8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const void *rs1, size_t vl) { + return __riscv_nds_vln8_mu(vm, vd, rs1, vl); +} +/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vln8\.v\s+} 28 } } */ -- 2.34.1