cc Robin and Ju-Zhe

On Tue, Feb 4, 2025 at 3:16 PM Monk Chiang <monk.chi...@sifive.com> wrote:
>
> According to Section 3.4.2, Vector Register Grouping, in the RISC-V
> Vector Specification, LMUL must satisfy the rule LMUL >= SEW/ELEN.
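>
> For example, on a Zve32x target ELEN is 32, so an e8 element requires
> LMUL >= 8/32: the smallest legal fraction is mf4, and mf8 only becomes
> legal once a Zve64* extension raises ELEN to 64.  Likewise e32 needs at
> least m1 under Zve32*, which is why pr111391-2.c below now expects
> e32,m1 instead of e32,mf2.  The cap can be checked in isolation with
> the standalone sketch below (max_fraction is a hypothetical helper for
> illustration only, not a function in the GCC sources):
>
>   /* Largest legal fractional-LMUL denominator for a given SEW/ELEN.
>      LMUL >= SEW/ELEN with LMUL = 1/factor implies factor <= ELEN/SEW.  */
>   static int
>   max_fraction (int sew, int elen)
>   {
>     return elen / sew;
>   }
>
>   /* max_fraction (8, 32) == 4  -> e8  is capped at mf4 on Zve32*,
>      max_fraction (8, 64) == 8  -> e8  may still use mf8 on Zve64*,
>      max_fraction (32, 32) == 1 -> e32 must use at least m1 on Zve32*.  */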
> ---
>  gcc/config/riscv/riscv-v.cc                   |   8 +-
>  gcc/config/riscv/riscv-vector-switch.def      |  84 ++++++-------
>  .../gcc.target/riscv/rvv/autovec/pr111391-2.c |   2 +-
>  .../gcc.target/riscv/rvv/base/abi-14.c        |  84 ++++++-------
>  .../gcc.target/riscv/rvv/base/abi-16.c        |  98 +++++++--------
>  .../gcc.target/riscv/rvv/base/abi-18.c        | 112 +++++++++---------
>  .../gcc.target/riscv/rvv/base/vsetvl_zve32f.c |  73 ++++++++++++
>  7 files changed, 268 insertions(+), 193 deletions(-)
>  create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32f.c
>
> diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
> index 9847439ca77..24f3127e71d 100644
> --- a/gcc/config/riscv/riscv-v.cc
> +++ b/gcc/config/riscv/riscv-v.cc
> @@ -1730,13 +1730,15 @@ get_vlmul (machine_mode mode)
>        int inner_size = GET_MODE_BITSIZE (GET_MODE_INNER (mode));
>        if (size < TARGET_MIN_VLEN)
>         {
> +         /* Follow rule LMUL >= SEW / ELEN.  */
> +         int elen = TARGET_VECTOR_ELEN_64 ? 1 : 2;
>           int factor = TARGET_MIN_VLEN / size;
>           if (inner_size == 8)
> -           factor = MIN (factor, 8);
> +           factor = MIN (factor, 8 / elen);
>           else if (inner_size == 16)
> -           factor = MIN (factor, 4);
> +           factor = MIN (factor, 4 / elen);
>           else if (inner_size == 32)
> -           factor = MIN (factor, 2);
> +           factor = MIN (factor, 2 / elen);
>           else if (inner_size == 64)
>             factor = MIN (factor, 1);
>           else
> diff --git a/gcc/config/riscv/riscv-vector-switch.def b/gcc/config/riscv/riscv-vector-switch.def
> index 23744d076f9..1b0d61940a6 100644
> --- a/gcc/config/riscv/riscv-vector-switch.def
> +++ b/gcc/config/riscv/riscv-vector-switch.def
> @@ -64,13 +64,13 @@ Encode the ratio of SEW/LMUL into the mask types.
>   |BI   |RVVM1BI|RVVMF2BI|RVVMF4BI|RVVMF8BI|RVVMF16BI|RVVMF32BI|RVVMF64BI|  */
>
>  /* Return 'REQUIREMENT' for machine_mode 'MODE'.
> -   For example: 'MODE' = RVVMF64BImode needs TARGET_MIN_VLEN > 32.  */
> +   For example: 'MODE' = RVVMF64BImode needs TARGET_VECTOR_ELEN_64.  */
>  #ifndef ENTRY
>  #define ENTRY(MODE, REQUIREMENT, VLMUL, RATIO)
>  #endif
>
>  /* Disable modes if TARGET_MIN_VLEN == 32.  */
> -ENTRY (RVVMF64BI, TARGET_MIN_VLEN > 32, TARGET_XTHEADVECTOR ? LMUL_1 :LMUL_F8, 64)
> +ENTRY (RVVMF64BI, TARGET_VECTOR_ELEN_64, TARGET_XTHEADVECTOR ? LMUL_1 :LMUL_F8, 64)
>  ENTRY (RVVMF32BI, true, TARGET_XTHEADVECTOR ? LMUL_1 :LMUL_F4, 32)
>  ENTRY (RVVMF16BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F2 , 16)
>  ENTRY (RVVMF8BI, true, LMUL_1, 8)
> @@ -85,7 +85,7 @@ ENTRY (RVVM2QI, true, LMUL_2, 4)
>  ENTRY (RVVM1QI, true, LMUL_1, 8)
>  ENTRY (RVVMF2QI, !TARGET_XTHEADVECTOR, LMUL_F2, 16)
>  ENTRY (RVVMF4QI, !TARGET_XTHEADVECTOR, LMUL_F4, 32)
> -ENTRY (RVVMF8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F8, 64)
> +ENTRY (RVVMF8QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F8, 64)
>
>  /* Disable modes if TARGET_MIN_VLEN == 32.  */
>  ENTRY (RVVM8HI, true, LMUL_8, 2)
> @@ -93,7 +93,7 @@ ENTRY (RVVM4HI, true, LMUL_4, 4)
>  ENTRY (RVVM2HI, true, LMUL_2, 8)
>  ENTRY (RVVM1HI, true, LMUL_1, 16)
>  ENTRY (RVVMF2HI, !TARGET_XTHEADVECTOR, LMUL_F2, 32)
> -ENTRY (RVVMF4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
> +ENTRY (RVVMF4HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
>
>  /* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_BF_16.  */
>  ENTRY (RVVM8BF, TARGET_VECTOR_ELEN_BF_16, LMUL_8, 2)
> @@ -109,21 +109,21 @@ ENTRY (RVVM4HF, TARGET_VECTOR_ELEN_FP_16, LMUL_4, 4)
>  ENTRY (RVVM2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_2, 8)
>  ENTRY (RVVM1HF, TARGET_VECTOR_ELEN_FP_16, LMUL_1, 16)
>  ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, LMUL_F2, 32)
> -ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
> +ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
>
>  /* Disable modes if TARGET_MIN_VLEN == 32.  */
>  ENTRY (RVVM8SI, true, LMUL_8, 4)
>  ENTRY (RVVM4SI, true, LMUL_4, 8)
>  ENTRY (RVVM2SI, true, LMUL_2, 16)
>  ENTRY (RVVM1SI, true, LMUL_1, 32)
> -ENTRY (RVVMF2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
> +ENTRY (RVVMF2SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
>
>  /* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_FP_32.  */
>  ENTRY (RVVM8SF, TARGET_VECTOR_ELEN_FP_32, LMUL_8, 4)
>  ENTRY (RVVM4SF, TARGET_VECTOR_ELEN_FP_32, LMUL_4, 8)
>  ENTRY (RVVM2SF, TARGET_VECTOR_ELEN_FP_32, LMUL_2, 16)
>  ENTRY (RVVM1SF, TARGET_VECTOR_ELEN_FP_32, LMUL_1, 32)
> -ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
> +ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
>
>  /* Disable modes if !TARGET_VECTOR_ELEN_64.  */
>  ENTRY (RVVM8DI, TARGET_VECTOR_ELEN_64, LMUL_8, 8)
> @@ -152,61 +152,61 @@ ENTRY (RVVM1DF, TARGET_VECTOR_ELEN_FP_64, LMUL_1, 64)
>  TUPLE_ENTRY (RVVM1x8QI, true, RVVM1QI, 8, LMUL_1, 8)
>  TUPLE_ENTRY (RVVMF2x8QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 8, LMUL_F2, 16)
>  TUPLE_ENTRY (RVVMF4x8QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 8, LMUL_F4, 32)
> -TUPLE_ENTRY (RVVMF8x8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 8, LMUL_F8, 64)
> +TUPLE_ENTRY (RVVMF8x8QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 8, LMUL_F8, 64)
>  TUPLE_ENTRY (RVVM1x7QI, true, RVVM1QI, 7, LMUL_1, 8)
>  TUPLE_ENTRY (RVVMF2x7QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 7, LMUL_F2, 16)
>  TUPLE_ENTRY (RVVMF4x7QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 7, LMUL_F4, 32)
> -TUPLE_ENTRY (RVVMF8x7QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 7, LMUL_F8, 64)
> +TUPLE_ENTRY (RVVMF8x7QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 7, LMUL_F8, 64)
>  TUPLE_ENTRY (RVVM1x6QI, true, RVVM1QI, 6, LMUL_1, 8)
>  TUPLE_ENTRY (RVVMF2x6QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 6, LMUL_F2, 16)
>  TUPLE_ENTRY (RVVMF4x6QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 6, LMUL_F4, 32)
> -TUPLE_ENTRY (RVVMF8x6QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 6, LMUL_F8, 64)
> +TUPLE_ENTRY (RVVMF8x6QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 6, LMUL_F8, 64)
>  TUPLE_ENTRY (RVVM1x5QI, true, RVVM1QI, 5, LMUL_1, 8)
>  TUPLE_ENTRY (RVVMF2x5QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 5, LMUL_F2, 16)
>  TUPLE_ENTRY (RVVMF4x5QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 5, LMUL_F4, 32)
> -TUPLE_ENTRY (RVVMF8x5QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 5, LMUL_F8, 64)
> +TUPLE_ENTRY (RVVMF8x5QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 5, LMUL_F8, 64)
>  TUPLE_ENTRY (RVVM2x4QI, true, RVVM2QI, 4, LMUL_2, 4)
>  TUPLE_ENTRY (RVVM1x4QI, true, RVVM1QI, 4, LMUL_1, 8)
>  TUPLE_ENTRY (RVVMF2x4QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 4, LMUL_F2, 16)
>  TUPLE_ENTRY (RVVMF4x4QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 4, LMUL_F4, 32)
> -TUPLE_ENTRY (RVVMF8x4QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 4, LMUL_F8, 64)
> +TUPLE_ENTRY (RVVMF8x4QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 4, LMUL_F8, 64)
>  TUPLE_ENTRY (RVVM2x3QI, true, RVVM2QI, 3, LMUL_2, 4)
>  TUPLE_ENTRY (RVVM1x3QI, true, RVVM1QI, 3, LMUL_1, 8)
>  TUPLE_ENTRY (RVVMF2x3QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 3, LMUL_F2, 16)
>  TUPLE_ENTRY (RVVMF4x3QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 3, LMUL_F4, 32)
> -TUPLE_ENTRY (RVVMF8x3QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 3, LMUL_F8, 64)
> +TUPLE_ENTRY (RVVMF8x3QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 3, LMUL_F8, 64)
>  TUPLE_ENTRY (RVVM4x2QI, true, RVVM4QI, 2, LMUL_4, 2)
>  TUPLE_ENTRY (RVVM2x2QI, true, RVVM2QI, 2, LMUL_2, 4)
>  TUPLE_ENTRY (RVVM1x2QI, true, RVVM1QI, 2, LMUL_1, 8)
>  TUPLE_ENTRY (RVVMF2x2QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 2, LMUL_F2, 16)
>  TUPLE_ENTRY (RVVMF4x2QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 2, LMUL_F4, 32)
> -TUPLE_ENTRY (RVVMF8x2QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 2, LMUL_F8, 64)
> +TUPLE_ENTRY (RVVMF8x2QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 2, LMUL_F8, 64)
>
>  TUPLE_ENTRY (RVVM1x8HI, true, RVVM1HI, 8, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x8HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 8, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x8HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 8, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x8HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 8, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM1x7HI, true, RVVM1HI, 7, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x7HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 7, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x7HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 7, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x7HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 7, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM1x6HI, true, RVVM1HI, 6, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x6HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 6, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x6HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 6, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x6HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 6, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM1x5HI, true, RVVM1HI, 5, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x5HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 5, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x5HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 5, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x5HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 5, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM2x4HI, true, RVVM2HI, 4, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x4HI, true, RVVM1HI, 4, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x4HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 4, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 4, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x4HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 4, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM2x3HI, true, RVVM2HI, 3, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x3HI, true, RVVM1HI, 3, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x3HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 3, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x3HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 3, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x3HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 3, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM4x2HI, true, RVVM4HI, 2, LMUL_4, 4)
>  TUPLE_ENTRY (RVVM2x2HI, true, RVVM2HI, 2, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x2HI, true, RVVM1HI, 2, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x2HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 2, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x2HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 2, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x2HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 2, LMUL_F4, 64)
>
>  TUPLE_ENTRY (RVVM1x8BF, TARGET_VECTOR_ELEN_BF_16, RVVM1BF, 8, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x8BF, TARGET_VECTOR_ELEN_BF_16, RVVMF2BF, 8, LMUL_F2, 32)
> @@ -236,67 +236,67 @@ TUPLE_ENTRY (RVVMF4x2BF, TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32, RVVMF
>
>  TUPLE_ENTRY (RVVM1x8HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 8, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 8, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 8, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 8, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM1x7HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 7, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 7, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 7, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 7, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM1x6HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 6, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 6, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 6, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 6, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM1x5HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 5, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 5, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 5, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 5, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 4, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 4, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 4, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 4, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 4, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 3, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 3, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 3, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 3, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 3, LMUL_F4, 64)
>  TUPLE_ENTRY (RVVM4x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM4HF, 2, LMUL_4, 4)
>  TUPLE_ENTRY (RVVM2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 2, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 2, LMUL_1, 16)
>  TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 2, LMUL_F2, 32)
> -TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 2, LMUL_F4, 64)
> +TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 2, LMUL_F4, 64)
>
>  TUPLE_ENTRY (RVVM1x8SI, true, RVVM1SI, 8, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x8SI, (TARGET_MIN_VLEN > 32) && !TARGET_XTHEADVECTOR, RVVMF2SI, 8, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x8SI, (TARGET_VECTOR_ELEN_64) && !TARGET_XTHEADVECTOR, RVVMF2SI, 8, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM1x7SI, true, RVVM1SI, 7, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x7SI, (TARGET_MIN_VLEN > 32) && !TARGET_XTHEADVECTOR, RVVMF2SI, 7, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x7SI, (TARGET_VECTOR_ELEN_64) && !TARGET_XTHEADVECTOR, RVVMF2SI, 7, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM1x6SI, true, RVVM1SI, 6, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x6SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 6, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x6SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 6, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM1x5SI, true, RVVM1SI, 5, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x5SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 5, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x5SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 5, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM2x4SI, true, RVVM2SI, 4, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x4SI, true, RVVM1SI, 4, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x4SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 4, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x4SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 4, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM2x3SI, true, RVVM2SI, 3, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x3SI, true, RVVM1SI, 3, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x3SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 3, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x3SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 3, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM4x2SI, true, RVVM4SI, 2, LMUL_4, 4)
>  TUPLE_ENTRY (RVVM2x2SI, true, RVVM2SI, 2, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x2SI, true, RVVM1SI, 2, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 2, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x2SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 2, LMUL_F2, 32)
>
>  TUPLE_ENTRY (RVVM1x8SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 8, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 8, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 8, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM1x7SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 7, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 7, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 7, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM1x6SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 6, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 6, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 6, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM1x5SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 5, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 5, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 5, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM2x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 4, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 4, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 4, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 4, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM2x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 3, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 3, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 3, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 3, LMUL_F2, 32)
>  TUPLE_ENTRY (RVVM4x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM4SF, 2, LMUL_4, 4)
>  TUPLE_ENTRY (RVVM2x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 2, LMUL_2, 8)
>  TUPLE_ENTRY (RVVM1x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 2, LMUL_1, 16)
> -TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 2, LMUL_F2, 32)
> +TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 2, LMUL_F2, 32)
>
>  TUPLE_ENTRY (RVVM1x8DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 8, LMUL_1, 16)
>  TUPLE_ENTRY (RVVM1x7DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 7, LMUL_1, 16)
> diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111391-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111391-2.c
> index 1f170c962e1..32db3a68fd3 100644
> --- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111391-2.c
> +++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111391-2.c
> @@ -3,7 +3,7 @@
>
>  #include "pr111391-1.c"
>
> -/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*2,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 1 } }
> +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*2,\s*e32,\s*m1,\s*t[au],\s*m[au]} 1 } } */
>  /* { dg-final { scan-assembler-times {vmv\.x\.s} 2 } } */
>  /* { dg-final { scan-assembler-times {vslidedown.vi\s+v[0-9]+,\s*v[0-9]+,\s*1} 1 } } */
>  /* { dg-final { scan-assembler-times {slli\s+[a-x0-9]+,[a-x0-9]+,32} 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-14.c b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-14.c
> index 163152ae923..222d8c233ab 100644
> --- a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-14.c
> +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-14.c
> @@ -1,20 +1,20 @@
>  /* { dg-do compile } */
>  /* { dg-options "-O3 -march=rv32gc_zve32x_zvl64b -mabi=ilp32d" } */
>
> -void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;}
> -void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;}
> -void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;}
> -void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;}
> -void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;}
> -void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;}
> -void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;}
> -void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;}
> -void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;}
> -void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;}
> -void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;}
> -void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;}
> -void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;}
> -void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;}
> +void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x2_t'} } */
> +void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x2_t'} } */
> +void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x3_t'} } */
> +void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x3_t'} } */
> +void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x4_t'} } */
> +void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x4_t'} } */
> +void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x5_t'} } */
> +void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x5_t'} } */
> +void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x6_t'} } */
> +void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x6_t'} } */
> +void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x7_t'} } */
> +void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x7_t'} } */
> +void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x8_t'} } */
> +void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x8_t'} } */
>  void f___rvv_int8mf4x2_t () {__rvv_int8mf4x2_t t;}
>  void f___rvv_uint8mf4x2_t () {__rvv_uint8mf4x2_t t;}
>  void f___rvv_int8mf4x3_t () {__rvv_int8mf4x3_t t;}
> @@ -65,20 +65,20 @@ void f___rvv_int8m2x4_t () {__rvv_int8m2x4_t t;}
>  void f___rvv_uint8m2x4_t () {__rvv_uint8m2x4_t t;}
>  void f___rvv_int8m4x2_t () {__rvv_int8m4x2_t t;}
>  void f___rvv_uint8m4x2_t () {__rvv_uint8m4x2_t t;}
> -void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;}
> -void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;}
> -void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;}
> -void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;}
> -void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;}
> -void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;}
> -void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;}
> -void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;}
> -void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;}
> -void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;}
> -void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;}
> -void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;}
> -void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;}
> -void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;}
> +void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x2_t'} } */
> +void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x2_t'} } */
> +void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x3_t'} } */
> +void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x3_t'} } */
> +void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x4_t'} } */
> +void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x4_t'} } */
> +void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x5_t'} } */
> +void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x5_t'} } */
> +void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x6_t'} } */
> +void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x6_t'} } */
> +void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x7_t'} } */
> +void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x7_t'} } */
> +void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x8_t'} } */
> +void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x8_t'} } */
>  void f___rvv_int16mf2x2_t () {__rvv_int16mf2x2_t t;}
>  void f___rvv_uint16mf2x2_t () {__rvv_uint16mf2x2_t t;}
>  void f___rvv_int16mf2x3_t () {__rvv_int16mf2x3_t t;}
> @@ -115,20 +115,20 @@ void f___rvv_int16m2x4_t () {__rvv_int16m2x4_t t;}
>  void f___rvv_uint16m2x4_t () {__rvv_uint16m2x4_t t;}
>  void f___rvv_int16m4x2_t () {__rvv_int16m4x2_t t;}
>  void f___rvv_uint16m4x2_t () {__rvv_uint16m4x2_t t;}
> -void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;}
> -void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;}
> -void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;}
> -void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;}
> -void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;}
> -void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;}
> -void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;}
> -void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;}
> -void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;}
> -void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;}
> -void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;}
> -void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;}
> -void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;}
> -void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;}
> +void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x2_t'} } */
> +void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x2_t'} } */
> +void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x3_t'} } */
> +void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x3_t'} } */
> +void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x4_t'} } */
> +void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x4_t'} } */
> +void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x5_t'} } */
> +void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x5_t'} } */
> +void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x6_t'} } */
> +void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x6_t'} } */
> +void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x7_t'} } */
> +void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x7_t'} } */
> +void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x8_t'} } */
> +void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x8_t'} } */
>  void f___rvv_int32m1x2_t () {__rvv_int32m1x2_t t;}
>  void f___rvv_uint32m1x2_t () {__rvv_uint32m1x2_t t;}
>  void f___rvv_int32m1x3_t () {__rvv_int32m1x3_t t;}
> diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-16.c b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-16.c
> index 9e962a70acf..2762b7a0e30 100644
> --- a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-16.c
> +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-16.c
> @@ -1,20 +1,20 @@
>  /* { dg-do compile } */
>  /* { dg-options "-O3 -march=rv32gc_zve32f_zvl64b -mabi=ilp32d" } */
>
> -void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;}
> -void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;}
> -void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;}
> -void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;}
> -void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;}
> -void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;}
> -void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;}
> -void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;}
> -void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;}
> -void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;}
> -void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;}
> -void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;}
> -void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;}
> -void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;}
> +void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x2_t'} } */
> +void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x2_t'} } */
> +void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x3_t'} } */
> +void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x3_t'} } */
> +void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x4_t'} } */
> +void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x4_t'} } */
> +void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x5_t'} } */
> +void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x5_t'} } */
> +void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x6_t'} } */
> +void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x6_t'} } */
> +void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x7_t'} } */
> +void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x7_t'} } */
> +void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x8_t'} } */
> +void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x8_t'} } */
>  void f___rvv_int8mf4x2_t () {__rvv_int8mf4x2_t t;}
>  void f___rvv_uint8mf4x2_t () {__rvv_uint8mf4x2_t t;}
>  void f___rvv_int8mf4x3_t () {__rvv_int8mf4x3_t t;}
> @@ -65,20 +65,20 @@ void f___rvv_int8m2x4_t () {__rvv_int8m2x4_t t;}
>  void f___rvv_uint8m2x4_t () {__rvv_uint8m2x4_t t;}
>  void f___rvv_int8m4x2_t () {__rvv_int8m4x2_t t;}
>  void f___rvv_uint8m4x2_t () {__rvv_uint8m4x2_t t;}
> -void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;}
> -void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;}
> -void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;}
> -void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;}
> -void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;}
> -void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;}
> -void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;}
> -void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;}
> -void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;}
> -void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;}
> -void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;}
> -void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;}
> -void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;}
> -void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;}
> +void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x2_t'} } */
> +void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x2_t'} } */
> +void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x3_t'} } */
> +void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x3_t'} } */
> +void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x4_t'} } */
> +void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x4_t'} } */
> +void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x5_t'} } */
> +void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x5_t'} } */
> +void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x6_t'} } */
> +void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x6_t'} } */
> +void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x7_t'} } */
> +void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x7_t'} } */
> +void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x8_t'} } */
> +void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x8_t'} } */
>  void f___rvv_int16mf2x2_t () {__rvv_int16mf2x2_t t;}
>  void f___rvv_uint16mf2x2_t () {__rvv_uint16mf2x2_t t;}
>  void f___rvv_int16mf2x3_t () {__rvv_int16mf2x3_t t;}
> @@ -115,20 +115,20 @@ void f___rvv_int16m2x4_t () {__rvv_int16m2x4_t t;}
>  void f___rvv_uint16m2x4_t () {__rvv_uint16m2x4_t t;}
>  void f___rvv_int16m4x2_t () {__rvv_int16m4x2_t t;}
>  void f___rvv_uint16m4x2_t () {__rvv_uint16m4x2_t t;}
> -void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;}
> -void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;}
> -void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;}
> -void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;}
> -void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;}
> -void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;}
> -void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;}
> -void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;}
> -void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;}
> -void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;}
> -void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;}
> -void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;}
> -void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;}
> -void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;}
> +void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x2_t'} } */
> +void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x2_t'} } */
> +void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x3_t'} } */
> +void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x3_t'} } */
> +void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x4_t'} } */
> +void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x4_t'} } */
> +void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x5_t'} } */
> +void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x5_t'} } */
> +void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x6_t'} } */
> +void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x6_t'} } */
> +void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x7_t'} } */
> +void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x7_t'} } */
> +void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x8_t'} } */
> +void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x8_t'} } */
>  void f___rvv_int32m1x2_t () {__rvv_int32m1x2_t t;}
>  void f___rvv_uint32m1x2_t () {__rvv_uint32m1x2_t t;}
>  void f___rvv_int32m1x3_t () {__rvv_int32m1x3_t t;}
> @@ -179,13 +179,13 @@ void f___rvv_float16m1_t () {__rvv_float16m1_t t;} /* { dg-error {unknown type n
>  void f___rvv_float16m2_t () {__rvv_float16m2_t t;} /* { dg-error {unknown type name '__rvv_float16m2_t'} } */
>  void f___rvv_float16m4_t () {__rvv_float16m4_t t;} /* { dg-error {unknown type name '__rvv_float16m4_t'} } */
>  void f___rvv_float16m8_t () {__rvv_float16m8_t t;} /* { dg-error {unknown type name '__rvv_float16m8_t'} } */
> -void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;}
> -void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;}
> -void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;}
> -void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;}
> -void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;}
> -void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;}
> -void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;}
> +void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x2_t'} } */
> +void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x3_t'} } */
> +void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x4_t'} } */
> +void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x5_t'} } */
> +void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x6_t'} } */
> +void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x7_t'} } */
> +void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x8_t'} } */
>  void f___rvv_float32m1x2_t () {__rvv_float32m1x2_t t;}
>  void f___rvv_float32m1x3_t () {__rvv_float32m1x3_t t;}
>  void f___rvv_float32m1x4_t () {__rvv_float32m1x4_t t;}
> diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-18.c b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-18.c
> index 402e8f6ba22..95b760fa4e6 100644
> --- a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-18.c
> +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-18.c
> @@ -1,20 +1,20 @@
>  /* { dg-do compile } */
>  /* { dg-options "-O3 -march=rv32gc_zve32x_zvl64b_zvfhmin -mabi=ilp32d" } */
>
> -void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;}
> -void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;}
> -void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;}
> -void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;}
> -void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;}
> -void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;}
> -void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;}
> -void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;}
> -void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;}
> -void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;}
> -void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;}
> -void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;}
> -void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;}
> -void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;}
> +void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x2_t'} } */
> +void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x2_t'} } */
> +void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x3_t'} } */
> +void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x3_t'} } */
> +void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x4_t'} } */
> +void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x4_t'} } */
> +void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x5_t'} } */
> +void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x5_t'} } */
> +void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x6_t'} } */
> +void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x6_t'} } */
> +void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x7_t'} } */
> +void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x7_t'} } */
> +void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x8_t'} } */
> +void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x8_t'} } */
>  void f___rvv_int8mf4x2_t () {__rvv_int8mf4x2_t t;}
>  void f___rvv_uint8mf4x2_t () {__rvv_uint8mf4x2_t t;}
>  void f___rvv_int8mf4x3_t () {__rvv_int8mf4x3_t t;}
> @@ -65,20 +65,20 @@ void f___rvv_int8m2x4_t () {__rvv_int8m2x4_t t;}
>  void f___rvv_uint8m2x4_t () {__rvv_uint8m2x4_t t;}
>  void f___rvv_int8m4x2_t () {__rvv_int8m4x2_t t;}
>  void f___rvv_uint8m4x2_t () {__rvv_uint8m4x2_t t;}
> -void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;}
> -void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;}
> -void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;}
> -void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;}
> -void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;}
> -void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;}
> -void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;}
> -void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;}
> -void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;}
> -void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;}
> -void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;}
> -void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;}
> -void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;}
> -void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;}
> +void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x2_t'} } */
> +void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x2_t'} } */
> +void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x3_t'} } */
> +void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x3_t'} } */
> +void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x4_t'} } */
> +void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x4_t'} } */
> +void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x5_t'} } */
> +void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x5_t'} } */
> +void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x6_t'} } */
> +void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x6_t'} } */
> +void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x7_t'} } */
> +void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x7_t'} } */
> +void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x8_t'} } */
> +void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x8_t'} } */
>  void f___rvv_int16mf2x2_t () {__rvv_int16mf2x2_t t;}
>  void f___rvv_uint16mf2x2_t () {__rvv_uint16mf2x2_t t;}
>  void f___rvv_int16mf2x3_t () {__rvv_int16mf2x3_t t;}
> @@ -115,20 +115,20 @@ void f___rvv_int16m2x4_t () {__rvv_int16m2x4_t t;}
>  void f___rvv_uint16m2x4_t () {__rvv_uint16m2x4_t t;}
>  void f___rvv_int16m4x2_t () {__rvv_int16m4x2_t t;}
>  void f___rvv_uint16m4x2_t () {__rvv_uint16m4x2_t t;}
> -void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;}
> -void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;}
> -void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;}
> -void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;}
> -void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;}
> -void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;}
> -void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;}
> -void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;}
> -void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;}
> -void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;}
> -void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;}
> -void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;}
> -void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;}
> -void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;}
> +void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x2_t'} } */
> +void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x2_t'} } */
> +void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x3_t'} } */
> +void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x3_t'} } */
> +void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x4_t'} } */
> +void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x4_t'} } */
> +void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x5_t'} } */
> +void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x5_t'} } */
> +void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x6_t'} } */
> +void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x6_t'} } */
> +void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x7_t'} } */
> +void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x7_t'} } */
> +void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x8_t'} } */
> +void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x8_t'} } */
>  void f___rvv_int32m1x2_t () {__rvv_int32m1x2_t t;}
>  void f___rvv_uint32m1x2_t () {__rvv_uint32m1x2_t t;}
>  void f___rvv_int32m1x3_t () {__rvv_int32m1x3_t t;}
> @@ -173,13 +173,13 @@ void f___rvv_int64m2x4_t () {__rvv_int64m2x4_t t;} /* { dg-error {unknown type n
>  void f___rvv_uint64m2x4_t () {__rvv_uint64m2x4_t t;} /* { dg-error {unknown type name '__rvv_uint64m2x4_t'} } */
>  void f___rvv_int64m4x2_t () {__rvv_int64m4x2_t t;} /* { dg-error {unknown type name '__rvv_int64m4x2_t'} } */
>  void f___rvv_uint64m4x2_t () {__rvv_uint64m4x2_t t;} /* { dg-error {unknown type name '__rvv_uint64m4x2_t'} } */
> -void f___rvv_float16mf4x2_t () {__rvv_float16mf4x2_t t;}
> -void f___rvv_float16mf4x3_t () {__rvv_float16mf4x3_t t;}
> -void f___rvv_float16mf4x4_t () {__rvv_float16mf4x4_t t;}
> -void f___rvv_float16mf4x5_t () {__rvv_float16mf4x5_t t;}
> -void f___rvv_float16mf4x6_t () {__rvv_float16mf4x6_t t;}
> -void f___rvv_float16mf4x7_t () {__rvv_float16mf4x7_t t;}
> -void f___rvv_float16mf4x8_t () {__rvv_float16mf4x8_t t;}
> +void f___rvv_float16mf4x2_t () {__rvv_float16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x2_t'} } */
> +void f___rvv_float16mf4x3_t () {__rvv_float16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x3_t'} } */
> +void f___rvv_float16mf4x4_t () {__rvv_float16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x4_t'} } */
> +void f___rvv_float16mf4x5_t () {__rvv_float16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x5_t'} } */
> +void f___rvv_float16mf4x6_t () {__rvv_float16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x6_t'} } */
> +void f___rvv_float16mf4x7_t () {__rvv_float16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x7_t'} } */
> +void f___rvv_float16mf4x8_t () {__rvv_float16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x8_t'} } */
>  void f___rvv_float16mf2x2_t () {__rvv_float16mf2x2_t t;}
>  void f___rvv_float16mf2x3_t () {__rvv_float16mf2x3_t t;}
>  void f___rvv_float16mf2x4_t () {__rvv_float16mf2x4_t t;}
> @@ -198,13 +198,13 @@ void f___rvv_float16m2x2_t () {__rvv_float16m2x2_t t;}
>  void f___rvv_float16m2x3_t () {__rvv_float16m2x3_t t;}
>  void f___rvv_float16m2x4_t () {__rvv_float16m2x4_t t;}
>  void f___rvv_float16m4x2_t () {__rvv_float16m4x2_t t;}
> -void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;}
> -void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;}
> -void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;}
> -void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;}
> -void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;}
> -void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;}
> -void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;}
> +void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x2_t'} } */
> +void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x3_t'} } */
> +void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x4_t'} } */
> +void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x5_t'} } */
> +void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x6_t'} } */
> +void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x7_t'} } */
> +void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x8_t'} } */
>  void f___rvv_float32m1x2_t () {__rvv_float32m1x2_t t;}
>  void f___rvv_float32m1x3_t () {__rvv_float32m1x3_t t;}
>  void f___rvv_float32m1x4_t () {__rvv_float32m1x4_t t;}
> diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32f.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32f.c
> new file mode 100644
> index 00000000000..f6899c3bc2f
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32f.c
> @@ -0,0 +1,73 @@
> +/* { dg-do compile } */
> +/* { dg-options "-march=rv32imafc_zve32f_zvl128b -mabi=ilp32 -O2" } */
> +
> +struct S0
> +{
> +  unsigned a : 15;
> +  int b;
> +  int c;
> +};
> +
> +struct S1
> +{
> +  struct S0 s0;
> +  int e;
> +};
> +
> +struct Z
> +{
> +  char c;
> +  int z;
> +} __attribute__((packed));
> +
> +union U
> +{
> +  struct S1 s1;
> +  struct Z z;
> +};
> +
> +int __attribute__((noinline, noclone))
> +return_zero (void)
> +{
> +  return 0;
> +}
> +
> +volatile union U gu;
> +struct S0 gs;
> +
> +int __attribute__((noinline, noclone))
> +check_outcome ()
> +{
> +  if (gs.a != 6
> +      || gs.b != 80000)
> +    __builtin_abort ();
> +}
> +
> +int
> +main (int argc, char *argv[])
> +{
> +  union U u;
> +  struct S1 m;
> +  struct S0 l;
> +
> +  if (return_zero ())
> +    u.z.z = 20000;
> +  else
> +    {
> +      u.s1.s0.a = 6;
> +      u.s1.s0.b = 80000;
> +      u.s1.e = 2;
> +
> +      m = u.s1;
> +      m.s0.c = 0;
> +      l = m.s0;
> +      gs = l;
> +    }
> +
> +  gu = u;
> +  check_outcome ();
> +  return 0;
> +}
> +
> +/* { dg-final { scan-assembler {vsetivli\s+zero,\s*2,\s*e32,\s*m1,\s*t[au],\s*m[au]} } } */
> +/* { dg-final { scan-assembler {vsetivli\s+zero,\s*4,\s*e32,\s*m1,\s*t[au],\s*m[au]} } } */
> --
> 2.47.1
>
