On 6/20/2025 5:27 PM, Zhao Liu wrote:
> As preparation for merging cache_info_cpuid4 and cache_info_amd in
> X86CPUState, set the legacy cache model based on the vendor in the
> CPUID 0x4 leaf. For AMD CPUs, select the legacy AMD cache model (in
> cache_info_amd) as the default cache model; otherwise, select the
> legacy Intel cache model (in cache_info_cpuid4) as before.
>
> To ensure compatibility is not broken, add an enable_legacy_vendor_cache
> flag based on x-vendor-cpuid-only-v2 to indicate the cases where the
> legacy cache model should be used regardless of the vendor. For the
> CPUID 0x4 leaf, the enable_legacy_vendor_cache flag means the legacy
> Intel cache model is picked, for compatibility with the behavior of PC
> machines v10.0 and older.
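
If I read the series right, the flag boils down to roughly the following
at realize time. This is only a sketch: the real hook is in an earlier
patch, and cpu->legacy_cache / cpu->vendor_cpuid_only_v2 are the field
names I assume back the legacy-cache and x-vendor-cpuid-only-v2
properties.

    /*
     * Illustrative only: when the CPU model falls back to the legacy
     * cache model and the machine is v10.0 or older (the v2 compat
     * property is off), keep the old vendor-independent behavior for
     * leaf 0x4.
     */
    env->enable_legacy_vendor_cache = cpu->legacy_cache &&
                                      !cpu->vendor_cpuid_only_v2;
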
>
> The following explains how the current vendor-based default legacy
> cache model ensures correctness without breaking compatibility.
>
> * For PC machines v6.0 and older, vendor_cpuid_only=false and
>   vendor_cpuid_only_v2=false.
>
>   - If the named CPU model has its own cache model and doesn't use the
>     legacy cache model (legacy_cache=false), then cache_info_cpuid4 and
>     cache_info_amd are the same, so the 0x4 leaf uses the model's own
>     cache info regardless of the vendor.
>
>   - For max/host/named CPU models (without their own cache model), the
>     enable_legacy_vendor_cache flag is true, so they use the legacy
>     Intel cache model, just as they did before.
>
> * For PC machines v6.1 to v10.0, vendor_cpuid_only=true and
>   vendor_cpuid_only_v2=false.
>
>   - If the named CPU model has its own cache model (legacy_cache=false),
>     then cache_info_cpuid4 and cache_info_amd both equal its own cache
>     model, so the 0x4 leaf uses the model's own cache info regardless of
>     the vendor. Only AMD CPUs get an all-0 leaf, due to
>     vendor_cpuid_only=true, and this is exactly the behavior of these
>     old machines.
>
>   - For max/host/named CPU models (without their own cache model), the
>     enable_legacy_vendor_cache flag is true, so they use the legacy
>     Intel cache model. Similarly, only AMD CPUs get an all-0 leaf, and
>     this is exactly the behavior of these old machines.
>
> * For PC machines v10.1 and newer, vendor_cpuid_only=true and
>   vendor_cpuid_only_v2=true.
>
>   - If the named CPU model has its own cache model (legacy_cache=false),
>     then cache_info_cpuid4 and cache_info_amd both equal its own cache
>     model, so the 0x4 leaf uses the model's own cache info regardless of
>     the vendor, and AMD CPUs get an all-0 leaf. Nothing changes.
>
>   - For max/host/named CPU models (without their own cache model), the
>     enable_legacy_vendor_cache flag is false, so the legacy cache model
>     is selected based on the vendor.
>
>     AMD CPUs use the legacy AMD cache model but still get an all-0 leaf
>     due to vendor_cpuid_only=true.
>
>     Non-AMD (Intel/Zhaoxin) CPUs use the legacy Intel cache model, as
>     expected.
>
>     Here, selecting the legacy cache model based on the vendor does not
>     change the previous (pre-change) behavior.
>
> The above analysis shows that, with the help of the
> enable_legacy_vendor_cache flag, it is acceptable to select the default
> legacy cache model based on the vendor.
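
To put the three cases above in one place, the leaf 0x4 selection after
this patch effectively reads as follows. This is an illustrative
condensation, not a quote of the code; the all-zero AMD behavior comes
from the pre-existing vendor_cpuid_only check in this leaf.

    if (env->enable_legacy_vendor_cache) {
        caches = &legacy_intel_cache_info;   /* PC machines v10.0 and older */
    } else if (IS_AMD_CPU(env)) {
        caches = &env->cache_info_amd;       /* v10.1+, AMD */
    } else {
        caches = &env->cache_info_cpuid4;    /* v10.1+, Intel/Zhaoxin */
    }
    /*
     * AMD guests still see all zeroes in this leaf when
     * vendor_cpuid_only=true, whichever cache model was selected.
     */
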
>
> For the CPUID 0x4 leaf, a single unified cache_info in X86CPUState is
> enough; it only needs to be initialized and configured with the
> corresponding legacy cache model based on the vendor.
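
For the eventual merge, a minimal sketch of what the unified
initialization could look like. This is purely an assumption about the
follow-up: a single env->cache_info field and a legacy_amd_cache_info
counterpart to legacy_intel_cache_info are hypothetical names, not part
of this patch.

    /*
     * Hypothetical follow-up: one cache_info, filled from the
     * vendor-appropriate legacy model when the CPU model defines none.
     */
    if (IS_AMD_CPU(env)) {
        env->cache_info = legacy_amd_cache_info;
    } else {
        env->cache_info = legacy_intel_cache_info;
    }
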
>
> Signed-off-by: Zhao Liu <zhao1....@intel.com>
> ---
>  target/i386/cpu.c | 43 ++++++++++++++++++++++++++++++++++---------
>  1 file changed, 34 insertions(+), 9 deletions(-)
>
> diff --git a/target/i386/cpu.c b/target/i386/cpu.c
> index 524d39de9ace..afbf11569ab4 100644
> --- a/target/i386/cpu.c
> +++ b/target/i386/cpu.c
> @@ -7517,7 +7517,35 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
>          encode_cache_cpuid2(cpu, caches, eax, ebx, ecx, edx);
>          break;
>      }
> -    case 4:
> +    case 4: {
> +        const CPUCaches *caches;
> +
> +        if (env->enable_legacy_vendor_cache) {
> +            caches = &legacy_intel_cache_info;
> +        } else {
> +            /*
> +             * FIXME: Temporarily select cache info model here based on
> +             * vendor, and merge these 2 cache info models later.
> +             *
> +             * This condition covers the following cases (with
> +             * enable_legacy_vendor_cache=false):
> +             *  - When CPU model has its own cache model and doesn't use legacy
> +             *    cache model (legacy_model=off). Then cache_info_amd and
> +             *    cache_info_cpuid4 are the same.
> +             *
> +             *  - For v10.1 and newer machines, when CPU model uses legacy cache
> +             *    model. Non-AMD CPUs use cache_info_cpuid4 like before and AMD
> +             *    CPU will use cache_info_amd. But this doesn't matter for AMD
> +             *    CPU, because this leaf encodes all-0 for AMD whatever its cache
> +             *    model is.
> +             */
> +            if (IS_AMD_CPU(env)) {
> +                caches = &env->cache_info_amd;
> +            } else {
> +                caches = &env->cache_info_cpuid4;
> +            }
> +        }
> +
>          /* cache info: needed for Core compatibility */
>          if (cpu->cache_info_passthrough) {
>              x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx);
> @@ -7545,30 +7573,26 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
>  
>              switch (count) {
>              case 0: /* L1 dcache info */
> -                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
> -                                    topo_info,
> +                encode_cache_cpuid4(caches->l1d_cache, topo_info,
>                                      eax, ebx, ecx, edx);
>                  if (!cpu->l1_cache_per_core) {
>                      *eax &= ~MAKE_64BIT_MASK(14, 12);
>                  }
>                  break;
>              case 1: /* L1 icache info */
> -                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
> -                                    topo_info,
> +                encode_cache_cpuid4(caches->l1i_cache, topo_info,
>                                      eax, ebx, ecx, edx);
>                  if (!cpu->l1_cache_per_core) {
>                      *eax &= ~MAKE_64BIT_MASK(14, 12);
>                  }
>                  break;
>              case 2: /* L2 cache info */
> -                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
> -                                    topo_info,
> +                encode_cache_cpuid4(caches->l2_cache, topo_info,
>                                      eax, ebx, ecx, edx);
>                  break;
>              case 3: /* L3 cache info */
>                  if (cpu->enable_l3_cache) {
> -                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
> -                                        topo_info,
> +                    encode_cache_cpuid4(caches->l3_cache, topo_info,
>                                          eax, ebx, ecx, edx);
>                      break;
>                  }
> @@ -7579,6 +7603,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
>              }
>          }
>          break;
> +    }
>      case 5:
>          /* MONITOR/MWAIT Leaf */
>          *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */

LGTM. Thanks.

Reviewed-by: Dapeng Mi <dapeng1...@linux.intel.com>


