On Wed, Mar 04, 2026 at 09:25:07PM +0800, Max Chou wrote:
> According to the Zvfbfa ISA spec v0.1, the vtype CSR adds a new field:
> altfmt for BF16 support.
> This update changes the layout of the vtype CSR fields.
> 
> - Removed the VEDIV field (bits 8-9), since the EDIV extension is not
>   planned to be part of the base V extension
> - Added ALTFMT field at bit 8
> - Changed RESERVED field to start from bit 9 instead of bit 10
> 
> When Zvfbfa is disabled, bits 8+ are treated as reserved (preserving the
> existing behavior for the altfmt bit). When Zvfbfa is enabled, only bits
> 9+ are reserved.
> 
> Reference:
> - https://github.com/riscvarchive/riscv-v-spec/blob/master/ediv.adoc
> 
> Reviewed-by: Daniel Henrique Barboza <[email protected]>
> Signed-off-by: Max Chou <[email protected]>
> ---
>  target/riscv/cpu.h           |  4 ++--
>  target/riscv/vector_helper.c | 39 +++++++++++++++++++++++++++++++-----
>  2 files changed, 36 insertions(+), 7 deletions(-)
> 
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 35d1f6362c..962cc45073 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -191,8 +191,8 @@ FIELD(VTYPE, VLMUL, 0, 3)
>  FIELD(VTYPE, VSEW, 3, 3)
>  FIELD(VTYPE, VTA, 6, 1)
>  FIELD(VTYPE, VMA, 7, 1)
> -FIELD(VTYPE, VEDIV, 8, 2)
> -FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
> +FIELD(VTYPE, ALTFMT, 8, 1)
> +FIELD(VTYPE, RESERVED, 9, sizeof(target_ulong) * 8 - 10)
>  
>  typedef struct PMUCTRState {
>      /* Current value of a counter */
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index caa8dd9c12..7575e24084 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -33,6 +33,22 @@
>  #include "vector_internals.h"
>  #include <math.h>
>  
> +static target_ulong vtype_reserved(CPURISCVState *env, target_ulong vtype)
> +{
> +    int xlen = riscv_cpu_xlen(env);
> +    target_ulong reserved = 0;
> +
> +    if (riscv_cpu_cfg(env)->ext_zvfbfa) {
> +        reserved = vtype & MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT,
> +                                           xlen - 1 - 
> R_VTYPE_RESERVED_SHIFT);
> +    } else {
> +        reserved = vtype & MAKE_64BIT_MASK(R_VTYPE_ALTFMT_SHIFT,
> +                                           xlen - 1 - R_VTYPE_ALTFMT_SHIFT);
> +    }
> +
> +    return reserved;
> +}
> +
Good -- when Zvfbfa is disabled, the altfmt bit (bit 8) is treated as
reserved, which preserves backward compatibility with the previous
layout. The ill_altfmt switch/case logic also looks correct, although
the MO_8 and MO_16 cases are identical, so they could be merged into a
single fallthrough case for brevity.

Reviewed-by: Chao Liu <[email protected]>

Best regards,
Chao Liu
>  target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
>                              target_ulong s2, target_ulong x0)
>  {
> @@ -41,12 +57,10 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, 
> target_ulong s1,
>      uint64_t vlmul = FIELD_EX64(s2, VTYPE, VLMUL);
>      uint8_t vsew = FIELD_EX64(s2, VTYPE, VSEW);
>      uint16_t sew = 8 << vsew;
> -    uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
> +    uint8_t altfmt = FIELD_EX64(s2, VTYPE, ALTFMT);
> +    bool ill_altfmt = true;
>      int xlen = riscv_cpu_xlen(env);
>      bool vill = (s2 >> (xlen - 1)) & 0x1;
> -    target_ulong reserved = s2 &
> -                            MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT,
> -                                            xlen - 1 - 
> R_VTYPE_RESERVED_SHIFT);
>      uint16_t vlen = cpu->cfg.vlenb << 3;
>      int8_t lmul;
>  
> @@ -63,7 +77,22 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, 
> target_ulong s1,
>          }
>      }
>  
> -    if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) {
> +    switch (vsew) {
> +    case MO_8:
> +        ill_altfmt &= !(cpu->cfg.ext_zvfbfa);
> +        break;
> +    case MO_16:
> +        ill_altfmt &= !(cpu->cfg.ext_zvfbfa);
> +        break;
> +    default:
> +        break;
> +    }
> +
> +    if (altfmt && ill_altfmt) {
> +        vill = true;
> +    }
> +
> +    if ((sew > cpu->cfg.elen) || vill || (vtype_reserved(env, s2) != 0)) {
>          /* only set vill bit. */
>          env->vill = 1;
>          env->vtype = 0;
> -- 
> 2.52.0
> 

Reply via email to