The QEMU virt machine supports a few cache events in addition to the
cycle/instret counters. It also supports counter overflow for these
events.
Add a DT node so that OpenSBI and the Linux kernel are aware of the
virt machine capabilities. Some dummy event mappings are added for
testing as well.

Signed-off-by: Atish Patra <atish.pa...@wdc.com>
---
 hw/riscv/virt.c    | 36 ++++++++++++++++++++++++++
 target/riscv/pmp.c |  1 +
 target/riscv/pmu.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 100 insertions(+)

diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index ec0cb69b8c73..b246d2e339eb 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -28,6 +28,7 @@
 #include "hw/qdev-properties.h"
 #include "hw/char/serial.h"
 #include "target/riscv/cpu.h"
+#include "target/riscv/pmu.h"
 #include "hw/riscv/riscv_hart.h"
 #include "hw/riscv/virt.h"
 #include "hw/riscv/boot.h"
@@ -406,6 +407,39 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
     g_free(plic_cells);
 }
 
+static void create_fdt_socket_pmu(RISCVVirtState *s,
+                                   int socket, uint32_t *phandle,
+                                   uint32_t *intc_phandles)
+{
+    int cpu;
+    char *pmu_name;
+    RISCVCPU hart;
+    uint32_t *pmu_cells;
+    MachineState *mc = MACHINE(s);
+
+    pmu_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
+
+    for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
+        pmu_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
+        pmu_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_PMU_OVF);
+    }
+
+    pmu_name = g_strdup_printf("/soc/pmu");
+    qemu_fdt_add_subnode(mc->fdt, pmu_name);
+    qemu_fdt_setprop_string(mc->fdt, pmu_name, "compatible", "riscv,pmu");
+    hart = s->soc[0].harts[0];
+    if (hart.cfg.ext_sscof) {
+        qemu_fdt_setprop_cell(mc->fdt, pmu_name, "#interrupt-cells", 1);
+        qemu_fdt_setprop(mc->fdt, pmu_name, "interrupts-extended", pmu_cells,
+                         s->soc[socket].num_harts * sizeof(uint32_t) * 2);
+    }
+    riscv_pmu_generate_fdt_node(mc->fdt, pmu_name);
+
+    g_free(pmu_name);
+    g_free(pmu_cells);
+}
+
+
 static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
                                bool is_32_bit, uint32_t *phandle,
                                uint32_t *irq_mmio_phandle,
@@ -445,6 +479,8 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
         create_fdt_socket_plic(s, memmap, socket, phandle,
                                intc_phandles, xplic_phandles);
 
+        create_fdt_socket_pmu(s, socket, phandle, intc_phandles);
+
         g_free(intc_phandles);
         g_free(clust_name);
     }
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 54abf425835c..2e2145e51903 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -25,6 +25,7 @@
 #include "cpu.h"
 #include "trace.h"
 #include "exec/exec-all.h"
+#include "sysemu/device_tree.h"
 
 static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
         uint8_t val);
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index 25bdbdf48ff7..9e11af85576d 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -19,9 +19,72 @@
 #include "qemu/osdep.h"
 #include "cpu.h"
 #include "pmu.h"
+#include "sysemu/device_tree.h"
 
 #define RISCV_TIMEBASE_FREQ 1000000000 /* 1Ghz */
 
+/**
+ * To keep it simple, any event can be mapped to any programmable counter in
+ * QEMU. The generic cycle & instruction count events can also be monitored
+ * using programmable counters. In that case, mcycle & minstret must continue
+ * to provide the correct value as well.
+ */
+void riscv_pmu_generate_fdt_node(void *fdt, char *pmu_name)
+{
+    uint32_t fdt_event_map[6] = {};
+    uint32_t fdt_event_ctr_map[20] = {};
+    uint32_t fdt_raw_event_ctr_map[6] = {};
+
+    /* Dummy event and mhpmevent values */
+    fdt_event_map[0] = cpu_to_be32(0x00000009);
+    fdt_event_map[1] = cpu_to_be32(0x00000000);
+    fdt_event_map[2] = cpu_to_be32(0x00000200);
+    fdt_event_map[3] = cpu_to_be32(0x00010000);
+    fdt_event_map[4] = cpu_to_be32(0x00000100);
+    fdt_event_map[5] = cpu_to_be32(0x00000002);
+    qemu_fdt_setprop(fdt, pmu_name, "pmu,event-to-mhpmevent",
+                     fdt_event_map, sizeof(fdt_event_map));
+
+    /* SBI_PMU_HW_CPU_CYCLES */
+    fdt_event_ctr_map[0] = cpu_to_be32(0x00000001);
+    fdt_event_ctr_map[1] = cpu_to_be32(0x00000001);
+    fdt_event_ctr_map[2] = cpu_to_be32(0x00000FF9);
+
+    /* SBI_PMU_HW_INSTRUCTIONS */
+    fdt_event_ctr_map[3] = cpu_to_be32(0x00000002);
+    fdt_event_ctr_map[4] = cpu_to_be32(0x00000002);
+    fdt_event_ctr_map[5] = cpu_to_be32(0x00000FFC);
+
+    /* SBI_PMU_HW_CACHE_DTLB : READ : MISS */
+    fdt_event_ctr_map[6] = cpu_to_be32(0x00010019);
+    fdt_event_ctr_map[7] = cpu_to_be32(0x00010019);
+    fdt_event_ctr_map[8] = cpu_to_be32(0x00001F0);
+
+    /* SBI_PMU_HW_CACHE_DTLB : WRITE : MISS */
+    fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B);
+    fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B);
+    fdt_event_ctr_map[11] = cpu_to_be32(0x00001F0);
+
+    /* SBI_PMU_HW_CACHE_ITLB : READ : MISS */
+    fdt_event_ctr_map[12] = cpu_to_be32(0x00010021);
+    fdt_event_ctr_map[13] = cpu_to_be32(0x00010021);
+    fdt_event_ctr_map[14] = cpu_to_be32(0x00001F0);
+
+    qemu_fdt_setprop(fdt, pmu_name, "pmu,event-to-mhpmcounters",
+                     fdt_event_ctr_map, sizeof(fdt_event_ctr_map));
+
+    /* Dummy raw events */
+    fdt_raw_event_ctr_map[0] = cpu_to_be32(0x00000000);
+    fdt_raw_event_ctr_map[1] = cpu_to_be32(0x00020002);
+    fdt_raw_event_ctr_map[2] = cpu_to_be32(0x00000F00);
+    fdt_raw_event_ctr_map[3] = cpu_to_be32(0x00000000);
+    fdt_raw_event_ctr_map[4] = cpu_to_be32(0x00020003);
+    fdt_raw_event_ctr_map[5] = cpu_to_be32(0x000000F0);
+    qemu_fdt_setprop(fdt, pmu_name, "pmu,raw-event-to-mhpmcounters",
+                     fdt_raw_event_ctr_map,
+                     sizeof(fdt_raw_event_ctr_map));
+}
+
 static bool riscv_pmu_counter_valid(RISCVCPU *cpu, uint32_t ctr_idx)
 {
     if (ctr_idx < 3 || ctr_idx >= RV_MAX_MHPMCOUNTERS ||
-- 
2.31.1
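
For reference, the /soc/pmu node this patch generates should decompile to
roughly the sketch below (dump the DTB with e.g. -M virt,dumpdtb=virt.dtb
and run dtc -I dtb -O dts virt.dtb). This is illustrative only: phandles and
the overflow interrupt are shown symbolically (the &cpuN_intc labels are
made up), the interrupt properties only appear when ext_sscof is set, and
the unused cells of the 20-entry fdt_event_ctr_map array, which are written
as zeroes, are omitted:

    pmu {
            compatible = "riscv,pmu";
            pmu,event-to-mhpmevent = <0x09 0x00 0x200 0x10000 0x100 0x02>;
            pmu,event-to-mhpmcounters = <0x01 0x01 0xff9>,
                                        <0x02 0x02 0xffc>,
                                        <0x10019 0x10019 0x1f0>,
                                        <0x1001b 0x1001b 0x1f0>,
                                        <0x10021 0x10021 0x1f0>;
            pmu,raw-event-to-mhpmcounters = <0x00 0x20002 0xf00>,
                                            <0x00 0x20003 0xf0>;
            #interrupt-cells = <0x01>;
            interrupts-extended = <&cpu0_intc IRQ_PMU_OVF>,
                                  <&cpu1_intc IRQ_PMU_OVF>;
    };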