On Mon, Jul 14, 2025 at 10:59:58AM +0100, Alireza Sanaee wrote:
> On Sun, 13 Jul 2025 17:15:02 -0400
> "Michael S. Tsirkin" <m...@redhat.com> wrote:
> 
> > On Mon, Jul 07, 2025 at 01:19:06PM +0100, Alireza Sanaee wrote:
> > > Add cache topology to the PPTT table. With this patch, both the ACPI
> > > PPTT table and the device tree will represent the same cache topology,
> > > given the user's input.
> > > 
> > > Co-developed-by: Jonathan Cameron <jonathan.came...@huawei.com>
> > > Signed-off-by: Alireza Sanaee <alireza.san...@huawei.com>
> > > ---
> > >  hw/acpi/aml-build.c            | 248 +++++++++++++++++++++++++++++++--
> > >  hw/arm/virt-acpi-build.c       |   8 +-
> > >  hw/loongarch/virt-acpi-build.c |   2 +-
> > >  include/hw/acpi/aml-build.h    |   4 +-
> > >  4 files changed, 249 insertions(+), 13 deletions(-)
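
(A note for context: the cache layout consumed here is expected to come from
the machine-level cache properties rather than from this patch itself; on the
arm virt machine that would presumably be spelled along the lines of
"-machine smp-cache.0.cache=l1d,smp-cache.0.topology=core,...". The front end
is not shown in this patch, so treat that spelling as illustrative only.)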
> > > 
> > > diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
> > > index f8f93a9f66..474f3035cb 100644
> > > --- a/hw/acpi/aml-build.c
> > > +++ b/hw/acpi/aml-build.c
> > > @@ -31,6 +31,8 @@
> > >  #include "hw/pci/pci_bus.h"
> > >  #include "hw/pci/pci_bridge.h"
> > >  #include "qemu/cutils.h"
> > > +#include "hw/acpi/cpu.h"
> > > +#include "hw/core/cpu.h"
> > >  
> > >  static GArray *build_alloc_array(void)
> > >  {
> > > @@ -2141,24 +2143,161 @@ void build_spcr(GArray *table_data, BIOSLinker *linker,
> > >      }
> > >      acpi_table_end(linker, &table);
> > >  }
> > > +
> > > +static void build_cache_nodes(GArray *tbl, CPUCorePPTTCaches *cache,
> > > +                              uint32_t next_offset, unsigned int id)
> > > +{
> > > +    int val;
> > > +
> > > +    /* Type 1 - cache */
> > > +    build_append_byte(tbl, 1);
> > > +    /* Length */
> > > +    build_append_byte(tbl, 28);
> > > +    /* Reserved */
> > > +    build_append_int_noprefix(tbl, 0, 2);
> > > +    /* Flags - everything except possibly the ID */
> > > +    build_append_int_noprefix(tbl, 0xff, 4);
> > > +    /* Offset of next cache up */
> > > +    build_append_int_noprefix(tbl, next_offset, 4);
> > > +    build_append_int_noprefix(tbl, cache->size, 4);
> > > +    build_append_int_noprefix(tbl, cache->sets, 4);
> > > +    build_append_byte(tbl, cache->associativity);
> > > +    val = 0x3;
> > > +    switch (cache->type) {
> > > +    case INSTRUCTION_CACHE:
> > > +        val |= (1 << 2);
> > > +        break;
> > > +    case DATA_CACHE:
> > > +        val |= (0 << 2); /* Data */
> > > +        break;
> > > +    case UNIFIED_CACHE:
> > > +        val |= (3 << 2); /* Unified */
> > > +        break;
> > > +    }
> > > +    build_append_byte(tbl, val);
> > > +    build_append_int_noprefix(tbl, cache->linesize, 2);
> > > +    build_append_int_noprefix(tbl,
> > > +                              (cache->type << 24) | (cache->level << 16) | id,
> > > +                              4);
> > > +}
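
For reference, the 28 bytes appended above follow the layout of the PPTT
Cache Type Structure (type 1), including the Cache ID field from the newer
table revisions:

    offset  size  field
    0       1     Type (1 = cache)
    1       1     Length (28)
    2       2     Reserved
    4       4     Flags (validity bits for the fields below)
    8       4     Next Level of Cache (offset of the next cache up, 0 if none)
    12      4     Size
    16      4     Number of Sets
    20      1     Associativity
    21      1     Attributes (allocation type, cache type, write policy)
    22      2     Line Size
    24      4     Cache ID, packed here as (type << 24) | (level << 16) | id
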
> > > +
> > > +/*
> > > + * builds caches from the top level (`level_high` parameter) to the bottom
> > > + * level (`level_low` parameter).  It searches for caches found in
> > > + * systems' registers, and fills up the table. Then it updates the
> > > + * `data_offset` and `instr_offset` parameters with the offset of the data
> > > + * and instruction caches of the lowest level, respectively.
> > > + */
> > > +static bool build_caches(GArray *table_data, uint32_t pptt_start,
> > > +                         int num_caches, CPUCorePPTTCaches *caches,
> > > +                         int base_id,
> > > +                         uint8_t level_high, /* Inclusive */
> > > +                         uint8_t level_low,  /* Inclusive */
> > > +                         uint32_t *data_offset,
> > > +                         uint32_t *instr_offset)
> > > +{
> > > +    uint32_t next_level_offset_data = 0, next_level_offset_instruction = 0;
> > > +    uint32_t this_offset, next_offset = 0;
> > > +    int c, level;
> > > +    bool found_cache = false;
> > > +
> > > +    /* Walk caches from top to bottom */
> > > +    for (level = level_high; level >= level_low; level--) {
> > > +        for (c = 0; c < num_caches; c++) {
> > > +            if (caches[c].level != level) {
> > > +                continue;
> > > +            }
> > > +
> > > +            /* Assume only unified above l1 for now */
> > > +            this_offset = table_data->len - pptt_start;
> > > +            switch (caches[c].type) {
> > > +            case INSTRUCTION_CACHE:
> > > +                next_offset = next_level_offset_instruction;
> > > +                break;
> > > +            case DATA_CACHE:
> > > +                next_offset = next_level_offset_data;
> > > +                break;
> > > +            case UNIFIED_CACHE:
> > > +                /* Either is fine here */
> > > +                next_offset = next_level_offset_instruction;
> > > +                break;
> > > +            }
> > > +            build_cache_nodes(table_data, &caches[c], next_offset, base_id);
> > > +            switch (caches[c].type) {
> > > +            case INSTRUCTION_CACHE:
> > > +                next_level_offset_instruction = this_offset;
> > > +                break;
> > > +            case DATA_CACHE:
> > > +                next_level_offset_data = this_offset;
> > > +                break;
> > > +            case UNIFIED_CACHE:
> > > +                next_level_offset_instruction = this_offset;
> > > +                next_level_offset_data = this_offset;
> > > +                break;
> > > +            }
> > > +            *data_offset = next_level_offset_data;
> > > +            *instr_offset = next_level_offset_instruction;
> > > +
> > > +            found_cache = true;
> > > +        }
> > > +    }
> > > +
> > > +    return found_cache;
> > > +}
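
To make the walk above concrete: for a core with a private unified L2 plus
split L1I/L1D (level_high = 2, level_low = 1), the top-down loop emits

    L2 (unified)   Next Level of Cache = 0 (nothing above it in this call)
    L1I            Next Level of Cache = offset of the L2 node
    L1D            Next Level of Cache = offset of the L2 node

and returns with *instr_offset pointing at the L1I node and *data_offset at
the L1D node; the caller then lists those as the private resources of the
corresponding processor hierarchy node (num_priv = 2, or 1 when both end up
pointing at the same unified node).
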
> > > +
> > >  /*
> > >   * ACPI spec, Revision 6.3
> > >   * 5.2.29 Processor Properties Topology Table (PPTT)
> > >   */
> > >  void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
> > > -                const char *oem_id, const char *oem_table_id)
> > > +                const char *oem_id, const char *oem_table_id,
> > > +                int num_caches, CPUCorePPTTCaches *caches)
> > >  {
> > >      MachineClass *mc = MACHINE_GET_CLASS(ms);
> > >      CPUArchIdList *cpus = ms->possible_cpus;
> > > -    int64_t socket_id = -1, cluster_id = -1, core_id = -1;
> > > -    uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0;
> > > +    uint32_t core_data_offset = 0;
> > > +    uint32_t core_instr_offset = 0;
> > > +    uint32_t cluster_instr_offset = 0;
> > > +    uint32_t cluster_data_offset = 0;
> > > +    uint32_t node_data_offset = 0;
> > > +    uint32_t node_instr_offset = 0;
> > > +    int top_node = 3;
> > > +    int top_cluster = 3;
> > > +    int top_core = 3;
> > > +    int bottom_node = 3;
> > > +    int bottom_cluster = 3;
> > > +    int bottom_core = 3;
> > > +    int64_t socket_id = -1;
> > > +    int64_t cluster_id = -1;
> > > +    int64_t core_id = -1;
> > > +    uint32_t socket_offset = 0;
> > > +    uint32_t cluster_offset = 0;
> > > +    uint32_t core_offset = 0;
> > >      uint32_t pptt_start = table_data->len;
> > > +    uint32_t root_offset;
> > >      int n;
> > > +    uint32_t priv_rsrc[2];
> > > +    uint32_t num_priv = 0;
> > > +    bool cache_available;
> > > +    bool llevel;
> > > +
> > >      AcpiTable table = { .sig = "PPTT", .rev = 2,
> > >                          .oem_id = oem_id, .oem_table_id = oem_table_id };
> > > 
> > >      acpi_table_begin(&table, table_data);
> > >  
> > > +    /*
> > > +     * Build a root node for all the processor nodes. Otherwise when
> > 
> > This appears to use parts of
> >     Message-Id: <20250604115233.1234-4-alireza.san...@huawei.com>
> > 
> > which is:
> > 
> >     hw/acpi/aml-build: Build a root node in the PPTT table
> > 
> 
> Hi Michael,
> 
> I put a note in the cover letter regarding the dependency.


To clarify, do you still want
20250604115233.1234-4-alireza.san...@huawei.com or should
I drop that too, for now?


> But I made a mistake preparing this patchset anyway. Will fix.
> 
> > 
> > but this time, without attribution, without documenting the change
> > in the commit log, and with worse grammar. What gives?
> > 
> > 
> > > +     * building a multi-socket system each socket tree are separated
> > 
> > is separated
> 
> I think I know why this is still there: I fixed this error in the other
> patchset, since this comment comes from that patchset.
> 
> Will send a new version...
> 
> > 
> > > +     * and will be hard for the OS like Linux to know whether the
> > > +     * system is homogeneous.
> > > +     */
> > > +    root_offset = table_data->len - pptt_start;
> > > +    build_processor_hierarchy_node(table_data,
> > > +        (1 << 0) | /* Physical package */
> > > +        (1 << 4), /* Identical Implementation */
> > > +        0, 0, NULL, 0);
> > > +
> > >      /*
> > >       * This works with the assumption that cpus[n].props.*_id has been
> > >       * sorted from top to down levels in mc->possible_cpu_arch_ids().
> > > @@ -2171,10 +2310,36 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
> > >              socket_id = cpus->cpus[n].props.socket_id;
> > >              cluster_id = -1;
> > >              core_id = -1;
> > > +            bottom_node = top_node;
> > > +            num_priv = 0;
> > > +            cache_available =
> > > +                machine_defines_cache_at_topo_level(ms,
> > > +                                                    CPU_TOPOLOGY_LEVEL_SOCKET);
> > > +            llevel = machine_find_lowest_level_cache_at_topo_level(ms,
> > > +                        &bottom_node,
> > > +                        CPU_TOPOLOGY_LEVEL_SOCKET);
> > > +            if (cache_available && llevel) {
> > > +                build_caches(table_data, pptt_start,
> > > +                             num_caches, caches,
> > > +                             n, top_node, bottom_node,
> > > +                             &node_data_offset, &node_instr_offset);
> > > +
> > > +                priv_rsrc[0] = node_instr_offset;
> > > +                priv_rsrc[1] = node_data_offset;
> > > +
> > > +                if (node_instr_offset || node_data_offset) {
> > > +                    num_priv = node_instr_offset == node_data_offset ? 1 : 2;
> > > +                }
> > > +
> > > +                top_cluster = bottom_node - 1;
> > > +            }
> > > +
> > >              socket_offset = table_data->len - pptt_start;
> > >              build_processor_hierarchy_node(table_data,
> > > -                (1 << 0), /* Physical package */
> > > -                0, socket_id, NULL, 0);
> > > +                (1 << 0) | /* Physical package */
> > > +                (1 << 4), /* Identical Implementation */
> > > +                root_offset, socket_id,
> > > +                priv_rsrc, num_priv);
> > >          }
> > >  
> > >          if (mc->smp_props.clusters_supported && mc->smp_props.has_clusters) {
> > > @@ -2182,28 +2347,91 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
> > >                  assert(cpus->cpus[n].props.cluster_id > cluster_id);
> > >                  cluster_id = cpus->cpus[n].props.cluster_id;
> > >                  core_id = -1;
> > > +                bottom_cluster = top_cluster;
> > > +                num_priv = 0;
> > > +                cache_available =
> > > +                    machine_defines_cache_at_topo_level(ms,
> > > +                        CPU_TOPOLOGY_LEVEL_CLUSTER);
> > > +                llevel = machine_find_lowest_level_cache_at_topo_level(ms,
> > > +                            &bottom_cluster,
> > > +                            CPU_TOPOLOGY_LEVEL_CLUSTER);
> > > +
> > > +                if (cache_available && llevel) {
> > > +
> > > +                    build_caches(table_data, pptt_start,
> > > +                                 num_caches, caches, n, top_cluster,
> > > +                                 bottom_cluster, &cluster_data_offset,
> > > +                                 &cluster_instr_offset);
> > > +
> > > +                    priv_rsrc[0] = cluster_instr_offset;
> > > +                    priv_rsrc[1] = cluster_data_offset;
> > > +
> > > +                    if (cluster_instr_offset || cluster_data_offset) {
> > > +                        num_priv =
> > > +                        cluster_instr_offset == cluster_data_offset ? 1 : 2;
> > > +                    }
> > > +
> > > +                    top_core = bottom_cluster - 1;
> > > +                } else if (top_cluster == bottom_node - 1) {
> > > +                    /* socket cache but no cluster cache */
> > > +                    top_core = bottom_node - 1;
> > > +                }
> > > +
> > >                  cluster_offset = table_data->len - pptt_start;
> > >                  build_processor_hierarchy_node(table_data,
> > > -                    (0 << 0), /* Not a physical package */
> > > -                    socket_offset, cluster_id, NULL, 0);
> > > +                    (0 << 0) | /* Not a physical package */
> > > +                    (1 << 4), /* Identical Implementation */
> > > +                    socket_offset, cluster_id,
> > > +                    priv_rsrc, num_priv);
> > >              }
> > >          } else {
> > > +            if (machine_defines_cache_at_topo_level(ms,
> > > +                    CPU_TOPOLOGY_LEVEL_CLUSTER)) {
> > > +                error_setg(&error_fatal, "Not clusters found for the cache");
> > > +                return;
> > > +            }
> > > +
> > >              cluster_offset = socket_offset;
> > > +            top_core = bottom_node - 1; /* there is no cluster */
> > > +        }
> > > +
> > > +        if (cpus->cpus[n].props.core_id != core_id) {
> > > +            bottom_core = top_core;
> > > +            num_priv = 0;
> > > +            cache_available =
> > > +                machine_defines_cache_at_topo_level(ms, CPU_TOPOLOGY_LEVEL_CORE);
> > > +            llevel = machine_find_lowest_level_cache_at_topo_level(ms,
> > > +                        &bottom_core, CPU_TOPOLOGY_LEVEL_CORE);
> > > +
> > > +            if (cache_available && llevel) {
> > > +                build_caches(table_data, pptt_start,
> > > +                             num_caches, caches,
> > > +                             n, top_core, bottom_core,
> > > +                             &core_data_offset, &core_instr_offset);
> > > +
> > > +                priv_rsrc[0] = core_instr_offset;
> > > +                priv_rsrc[1] = core_data_offset;
> > > +
> > > +                num_priv = core_instr_offset == core_data_offset ? 1 : 2;
> > > +            }
> > >          }
> > >  
> > >          if (ms->smp.threads == 1) {
> > >              build_processor_hierarchy_node(table_data,
> > >                  (1 << 1) | /* ACPI Processor ID valid */
> > >                  (1 << 3),  /* Node is a Leaf */
> > > -                cluster_offset, n, NULL, 0);
> > > +                cluster_offset, n,
> > > +                priv_rsrc, num_priv);
> > >          } else {
> > >              if (cpus->cpus[n].props.core_id != core_id) {
> > >                  assert(cpus->cpus[n].props.core_id > core_id);
> > >                  core_id = cpus->cpus[n].props.core_id;
> > >                  core_offset = table_data->len - pptt_start;
> > >                  build_processor_hierarchy_node(table_data,
> > > -                    (0 << 0), /* Not a physical package */
> > > -                    cluster_offset, core_id, NULL, 0);
> > > +                    (0 << 0) | /* Not a physical package */
> > > +                    (1 << 4), /* Identical Implementation */
> > > +                    cluster_offset, core_id,
> > > +                    priv_rsrc, num_priv);
> > >              }
> > >  
> > >              build_processor_hierarchy_node(table_data,
> > > diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
> > > index cd90c47976..a9b5aeac49 100644
> > > --- a/hw/arm/virt-acpi-build.c
> > > +++ b/hw/arm/virt-acpi-build.c
> > > @@ -962,6 +962,11 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
> > >      GArray *tables_blob = tables->table_data;
> > >      MachineState *ms = MACHINE(vms);
> > >  
> > > +    CPUCorePPTTCaches caches[CPU_MAX_CACHES];
> > > +    unsigned int num_caches;
> > > +
> > > +    num_caches = virt_get_caches(vms, caches);
> > > +
> > >      table_offsets = g_array_new(false, true /* clear */,
> > >                                          sizeof(uint32_t));
> > >  
> > > @@ -983,7 +988,8 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
> > >      if (!vmc->no_cpu_topology) {
> > >          acpi_add_table(table_offsets, tables_blob);
> > >          build_pptt(tables_blob, tables->linker, ms,
> > > -                   vms->oem_id, vms->oem_table_id);
> > > +                   vms->oem_id, vms->oem_table_id,
> > > +                   num_caches, caches);
> > >      }
> > >  
> > >      acpi_add_table(table_offsets, tables_blob);
> > > diff --git a/hw/loongarch/virt-acpi-build.c b/hw/loongarch/virt-acpi-build.c
> > > index 2cd2d9d842..dd34a520c7 100644
> > > --- a/hw/loongarch/virt-acpi-build.c
> > > +++ b/hw/loongarch/virt-acpi-build.c
> > > @@ -552,7 +552,7 @@ static void acpi_build(AcpiBuildTables *tables, MachineState *machine)
> > >  
> > >      acpi_add_table(table_offsets, tables_blob);
> > >      build_pptt(tables_blob, tables->linker, machine,
> > > -               lvms->oem_id, lvms->oem_table_id);
> > > +               lvms->oem_id, lvms->oem_table_id, 0, NULL);
> > >  
> > >      acpi_add_table(table_offsets, tables_blob);
> > >      build_srat(tables_blob, tables->linker, machine);
> > > diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h
> > > index 6fa2e1eedf..3429cdae71 100644
> > > --- a/include/hw/acpi/aml-build.h
> > > +++ b/include/hw/acpi/aml-build.h
> > > @@ -3,6 +3,7 @@
> > >  
> > >  #include "hw/acpi/acpi-defs.h"
> > >  #include "hw/acpi/bios-linker-loader.h"
> > > +#include "hw/cpu/core.h"
> > >  
> > >  #define ACPI_BUILD_APPNAME6 "BOCHS "
> > >  #define ACPI_BUILD_APPNAME8 "BXPC    "
> > > @@ -499,7 +500,8 @@ void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms,
> > >  typedef struct CPUPPTTCaches CPUCorePPTTCaches;
> > > 
> > >  void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
> > > -                const char *oem_id, const char *oem_table_id);
> > > +                const char *oem_id, const char *oem_table_id,
> > > +                int num_caches, CPUCorePPTTCaches *caches);
> > >  
> > >  void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
> > >                  const char *oem_id, const char *oem_table_id);
> > > -- 
> > > 2.43.0  
> > 
> > 

