> +/* Implement TARGET_ESTIMATED_POLY_VALUE.
> + Look into the tuning structure for an estimate.
> + KIND specifies the type of requested estimate: min, max or likely.
> + For cores with a known RVV width all three estimates are the same.
> + For generic RVV tuning we want to distinguish the maximum estimate from
> + the minimum and likely ones.
> + The likely estimate is the same as the minimum in that case to give a
> + conservative behavior of auto-vectorizing with RVV when it is a win
> + even for 128-bit RVV.
> + When RVV width information is available VAL.coeffs[1] is multiplied by
> + the number of VQ chunks over the initial Advanced SIMD 128 bits. */
> +
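(As a concrete illustration of what the code below computes, not something
taken from the patch: for a poly-int size of 16 + 16x bytes, the min and
likely estimates come out to 16 bytes, while the max estimate is
16 + 16 * 15 = 256 bytes, i.e. 2048 bits.)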
> +static HOST_WIDE_INT
> +riscv_estimated_poly_value (poly_int64 val,
> + poly_value_estimate_kind kind = POLY_VALUE_LIKELY)
> +{
> + unsigned int width_source = BITS_PER_RISCV_VECTOR.is_constant ()
> + ? (unsigned int) BITS_PER_RISCV_VECTOR.to_constant ()
> + : (unsigned int) RVV_SCALABLE;
BITS_PER_RISCV_VECTOR can only be RVV_SCALABLE for now, so I would prefer
to drop this conditional, keep just the switch below, and add an
assert (!BITS_PER_RISCV_VECTOR.is_constant ());
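Something along these lines is what I mean (just an untested sketch;
gcc_assert/gcc_unreachable and the unchanged POLY_VALUE_MAX multiplier are
my choices here, see the comment about the maximum further down):

  static HOST_WIDE_INT
  riscv_estimated_poly_value (poly_int64 val,
                              poly_value_estimate_kind kind = POLY_VALUE_LIKELY)
  {
    /* Only the scalable case exists for now; the switch below then
       covers every estimate kind.  */
    gcc_assert (!BITS_PER_RISCV_VECTOR.is_constant ());

    switch (kind)
      {
      case POLY_VALUE_MIN:
      case POLY_VALUE_LIKELY:
        return val.coeffs[0];

      case POLY_VALUE_MAX:
        return val.coeffs[0] + val.coeffs[1] * 15;

      default:
        gcc_unreachable ();
      }
  }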
> +
> + /* If there is no core-specific information then the minimum and likely
> + values are based on 128-bit vectors and the maximum is based on
> + the architectural maximum of 2048 bits. */
The architectural maximum is 65,536 bits per the vector spec, not 2048.
> + if (width_source == RVV_SCALABLE)
> + switch (kind)
> + {
> + case POLY_VALUE_MIN:
> + case POLY_VALUE_LIKELY:
> + return val.coeffs[0];
> +
> + case POLY_VALUE_MAX:
> + return val.coeffs[0] + val.coeffs[1] * 15;
> + }
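Following up on the 65,536-bit maximum mentioned above: if coeffs[1] keeps
scaling in 128-bit steps as in the patch, that ceiling would make the
POLY_VALUE_MAX multiplier 65536 / 128 - 1 = 511 instead of 15, i.e. roughly:

      case POLY_VALUE_MAX:
        return val.coeffs[0] + val.coeffs[1] * 511;

Whether the cost estimate should really assume that large a maximum is a
separate question, of course.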
> +
> + /* Allow BITS_PER_RISCV_VECTOR to be a bitmask of different VL, treating the
> + lowest as likely. This could be made more general if future -mtune
> + options need it to be. */
> + if (kind == POLY_VALUE_MAX)
> + width_source = 1 << floor_log2 (width_source);
> + else
> + width_source = least_bit_hwi (width_source);
> +
> + /* If the core provides width information, use that. */
> + HOST_WIDE_INT over_128 = width_source - 128;
> + return val.coeffs[0] + val.coeffs[1] * over_128 / 128;
> +}
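(Concretely, if width_source were ever a mask like 128 | 256 = 0x180, the
code above would use 1 << floor_log2 (0x180) = 256 for POLY_VALUE_MAX and
least_bit_hwi (0x180) = 128 for the min/likely estimates. But since only
RVV_SCALABLE exists today, this path is effectively dead for now, which is
another argument for the simplification above.)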
> +
> +/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
> +
> +static machine_mode
> +riscv_preferred_simd_mode (scalar_mode mode)
> +{
> + machine_mode vmode =
> + riscv_vector::riscv_vector_preferred_simd_mode (mode,
> + riscv_vectorization_factor);
> + if (VECTOR_MODE_P (vmode))
> + return vmode;
> +
> + return word_mode;
> +}
> +
> +/* Implement TARGET_AUTOVECTORIZE_VECTOR_MODES for RVV. */
> +static unsigned int
> +riscv_autovectorize_vector_modes (vector_modes *modes, bool)
> +{
> + if (!TARGET_VECTOR)
> + return 0;
> +
> + if (riscv_vectorization_factor == RVV_LMUL1)
> + {
> + modes->safe_push (VNx16QImode);
> + modes->safe_push (VNx8QImode);
> + modes->safe_push (VNx4QImode);
> + modes->safe_push (VNx2QImode);
> + }
Please keep only the LMUL1 case for the moment.