From: Nicolin Chen <[email protected]>

Tegra241 CMDQV defines a set of global control and status registers
used to configure virtual command queue allocation and interrupt
behavior.

Add read/write emulation for the global CMDQV register page (offset
0x00000), backed by a simple register cache. This includes CONFIG,
PARAM, STATUS, the VI error and interrupt maps, the CMDQ allocation
map, and the VINTF0-related registers defined in the global CMDQV
register space.

Signed-off-by: Nicolin Chen <[email protected]>
Signed-off-by: Shameer Kolothum <[email protected]>
---
 hw/arm/tegra241-cmdqv.c | 102 +++++++++++++++++++++++++++++++++++++++-
 hw/arm/tegra241-cmdqv.h |  86 +++++++++++++++++++++++++++++++++
 2 files changed, 187 insertions(+), 1 deletion(-)

diff --git a/hw/arm/tegra241-cmdqv.c b/hw/arm/tegra241-cmdqv.c
index 97c9b9c8dc..49fca9d536 100644
--- a/hw/arm/tegra241-cmdqv.c
+++ b/hw/arm/tegra241-cmdqv.c
@@ -8,6 +8,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/log.h"
 #include "hw/arm/smmuv3.h"
 
 #include "smmuv3-accel.h"
@@ -32,6 +33,25 @@ static bool tegra241_cmdqv_mmap_vintf_page0(Tegra241CMDQV *cmdqv, Error **errp)
     return true;
 }
 
+static uint64_t tegra241_cmdqv_read_vintf(Tegra241CMDQV *cmdqv, hwaddr offset)
+{
+    int i;
+
+    switch (offset) {
+    case A_VINTF0_CONFIG:
+        return cmdqv->vintf_config;
+    case A_VINTF0_STATUS:
+        return cmdqv->vintf_status;
+    case A_VINTF0_LVCMDQ_ERR_MAP_0 ... A_VINTF0_LVCMDQ_ERR_MAP_3:
+        i = (offset - A_VINTF0_LVCMDQ_ERR_MAP_0) / 4;
+        return cmdqv->vintf_cmdq_err_map[i];
+    default:
+        qemu_log_mask(LOG_UNIMP, "%s unhandled read access at 0x%" PRIx64 "\n",
+                      __func__, offset);
+        return 0;
+    }
+}
+
 static uint64_t tegra241_cmdqv_read(void *opaque, hwaddr offset, unsigned size)
 {
     Tegra241CMDQV *cmdqv = (Tegra241CMDQV *)opaque;
@@ -44,7 +64,57 @@ static uint64_t tegra241_cmdqv_read(void *opaque, hwaddr offset, unsigned size)
         }
     }
 
-    return 0;
+    if (offset >= TEGRA241_CMDQV_IO_LEN) {
+        qemu_log_mask(LOG_UNIMP,
+                      "%s offset 0x%" PRIx64 " off limit (0x50000)\n", __func__,
+                      offset);
+        return 0;
+    }
+
+    switch (offset) {
+    case A_CONFIG:
+        return cmdqv->config;
+    case A_PARAM:
+        return cmdqv->param;
+    case A_STATUS:
+        return cmdqv->status;
+    case A_VI_ERR_MAP ... A_VI_ERR_MAP_1:
+        return cmdqv->vi_err_map[(offset - A_VI_ERR_MAP) / 4];
+    case A_VI_INT_MASK ... A_VI_INT_MASK_1:
+        return cmdqv->vi_int_mask[(offset - A_VI_INT_MASK) / 4];
+    case A_CMDQ_ERR_MAP ... A_CMDQ_ERR_MAP_3:
+        return cmdqv->cmdq_err_map[(offset - A_CMDQ_ERR_MAP) / 4];
+    case A_CMDQ_ALLOC_MAP_0 ... A_CMDQ_ALLOC_MAP_127:
+        return cmdqv->cmdq_alloc_map[(offset - A_CMDQ_ALLOC_MAP_0) / 4];
+    case A_VINTF0_CONFIG ... A_VINTF0_LVCMDQ_ERR_MAP_3:
+        return tegra241_cmdqv_read_vintf(cmdqv, offset);
+    default:
+        qemu_log_mask(LOG_UNIMP, "%s unhandled read access at 0x%" PRIx64 "\n",
+                      __func__, offset);
+        return 0;
+    }
+}
+
+static void tegra241_cmdqv_write_vintf(Tegra241CMDQV *cmdqv, hwaddr offset,
+                                       uint64_t value)
+{
+    switch (offset) {
+    case A_VINTF0_CONFIG:
+        /* Strip off HYP_OWN setting from guest kernel */
+        value &= ~R_VINTF0_CONFIG_HYP_OWN_MASK;
+
+        cmdqv->vintf_config = value;
+        if (value & R_VINTF0_CONFIG_ENABLE_MASK) {
+            cmdqv->vintf_status |= R_VINTF0_STATUS_ENABLE_OK_MASK;
+        } else {
+            cmdqv->vintf_status &= ~R_VINTF0_STATUS_ENABLE_OK_MASK;
+        }
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP, "%s unhandled write access at 0x%" PRIx64 "\n",
+                      __func__, offset);
+        return;
+    }
 }
 
 static void tegra241_cmdqv_write(void *opaque, hwaddr offset, uint64_t value,
@@ -59,6 +129,36 @@ static void tegra241_cmdqv_write(void *opaque, hwaddr offset, uint64_t value,
             local_err = NULL;
         }
     }
+
+    if (offset >= TEGRA241_CMDQV_IO_LEN) {
+        qemu_log_mask(LOG_UNIMP,
+                      "%s offset 0x%" PRIx64 " off limit (0x50000)\n", __func__,
+                      offset);
+        return;
+    }
+
+    switch (offset) {
+    case A_CONFIG:
+        cmdqv->config = value;
+        if (value & R_CONFIG_CMDQV_EN_MASK) {
+            cmdqv->status |= R_STATUS_CMDQV_ENABLED_MASK;
+        } else {
+            cmdqv->status &= ~R_STATUS_CMDQV_ENABLED_MASK;
+        }
+        break;
+    case A_VI_INT_MASK ... A_VI_INT_MASK_1:
+        cmdqv->vi_int_mask[(offset - A_VI_INT_MASK) / 4] = value;
+        break;
+    case A_CMDQ_ALLOC_MAP_0 ... A_CMDQ_ALLOC_MAP_127:
+        cmdqv->cmdq_alloc_map[(offset - A_CMDQ_ALLOC_MAP_0) / 4] = value;
+        break;
+    case A_VINTF0_CONFIG ... A_VINTF0_LVCMDQ_ERR_MAP_3:
+        tegra241_cmdqv_write_vintf(cmdqv, offset, value);
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP, "%s unhandled write access at 0x%" PRIx64 "\n",
+                      __func__, offset);
+    }
 }
 
 static void tegra241_cmdqv_free_veventq(SMMUv3State *s)
diff --git a/hw/arm/tegra241-cmdqv.h b/hw/arm/tegra241-cmdqv.h
index 94bef8c978..f157c8fd24 100644
--- a/hw/arm/tegra241-cmdqv.h
+++ b/hw/arm/tegra241-cmdqv.h
@@ -10,6 +10,7 @@
 #ifndef HW_TEGRA241_CMDQV_H
 #define HW_TEGRA241_CMDQV_H
 
+#include "hw/core/registerfields.h"
 #include "smmuv3-accel.h"
 
 #include CONFIG_DEVICES
@@ -30,8 +31,93 @@ typedef struct Tegra241CMDQV {
     MemoryRegion mmio_cmdqv;
     qemu_irq irq;
     void *vintf_page0;
+
+    /* Register Cache */
+    uint32_t config;
+    uint32_t param;
+    uint32_t status;
+    uint32_t vi_err_map[2];
+    uint32_t vi_int_mask[2];
+    uint32_t cmdq_err_map[4];
+    uint32_t cmdq_alloc_map[128];
+    uint32_t vintf_config;
+    uint32_t vintf_status;
+    uint32_t vintf_cmdq_err_map[4];
 } Tegra241CMDQV;
 
+/* Global CMDQV MMIO registers (offset 0x00000) */
+REG32(CONFIG, 0x0)
+FIELD(CONFIG, CMDQV_EN, 0, 1)
+FIELD(CONFIG, CMDQV_PER_CMD_OFFSET, 1, 3)
+FIELD(CONFIG, CMDQ_MAX_CLK_BATCH, 4, 8)
+FIELD(CONFIG, CMDQ_MAX_CMD_BATCH, 12, 8)
+FIELD(CONFIG, CONS_DRAM_EN, 20, 1)
+
+REG32(PARAM, 0x4)
+FIELD(PARAM, CMDQV_VER, 0, 4)
+FIELD(PARAM, CMDQV_NUM_CMDQ_LOG2, 4, 4)
+FIELD(PARAM, CMDQV_NUM_VM_LOG2, 8, 4)
+FIELD(PARAM, CMDQV_NUM_SID_PER_VM_LOG2, 12, 4)
+
+REG32(STATUS, 0x8)
+FIELD(STATUS, CMDQV_ENABLED, 0, 1)
+
+#define A_VI_ERR_MAP 0x14
+#define A_VI_ERR_MAP_1 0x18
+#define V_VI_ERR_MAP_NO_ERROR (0)
+#define V_VI_ERR_MAP_ERROR (1)
+
+#define A_VI_INT_MASK 0x1c
+#define A_VI_INT_MASK_1 0x20
+#define V_VI_INT_MASK_NOT_MASKED (0)
+#define V_VI_INT_MASK_MASKED (1)
+
+#define A_CMDQ_ERR_MAP 0x24
+#define A_CMDQ_ERR_MAP_1 0x28
+#define A_CMDQ_ERR_MAP_2 0x2c
+#define A_CMDQ_ERR_MAP_3 0x30
+
+/* i = [0, 127] */
+#define A_CMDQ_ALLOC_MAP_(i) \
+    REG32(CMDQ_ALLOC_MAP_##i, 0x200 + i * 4) \
+    FIELD(CMDQ_ALLOC_MAP_##i, ALLOC, 0, 1) \
+    FIELD(CMDQ_ALLOC_MAP_##i, LVCMDQ, 1, 7) \
+    FIELD(CMDQ_ALLOC_MAP_##i, VIRT_INTF_INDX, 15, 6)
+
+A_CMDQ_ALLOC_MAP_(0)
+/* Omitting 1~126 as not being directly called */
+A_CMDQ_ALLOC_MAP_(127)
+
+
+/* i = [0, 0] */
+#define A_VINTFi_CONFIG(i) \
+    REG32(VINTF##i##_CONFIG, 0x1000 + i * 0x100) \
+    FIELD(VINTF##i##_CONFIG, ENABLE, 0, 1) \
+    FIELD(VINTF##i##_CONFIG, VMID, 1, 16) \
+    FIELD(VINTF##i##_CONFIG, HYP_OWN, 17, 1)
+
+A_VINTFi_CONFIG(0)
+
+#define A_VINTFi_STATUS(i) \
+    REG32(VINTF##i##_STATUS, 0x1004 + i * 0x100) \
+    FIELD(VINTF##i##_STATUS, ENABLE_OK, 0, 1) \
+    FIELD(VINTF##i##_STATUS, STATUS, 1, 3) \
+    FIELD(VINTF##i##_STATUS, VI_NUM_LVCMDQ, 16, 8)
+
+A_VINTFi_STATUS(0)
+
+#define V_VINTF_STATUS_NO_ERROR (0 << 1)
+#define V_VINTF_STATUS_VCMDQ_ERROR (1 << 1)
+
+/* i = [0, 0], j = [0, 3] */
+#define A_VINTFi_LVCMDQ_ERR_MAP_(i, j) \
+    REG32(VINTF##i##_LVCMDQ_ERR_MAP_##j, 0x10c0 + j * 4 + i * 0x100) \
+    FIELD(VINTF##i##_LVCMDQ_ERR_MAP_##j, LVCMDQ_ERR_MAP, 0, 32)
+
+A_VINTFi_LVCMDQ_ERR_MAP_(0, 0)
+/* Omitting [0][1~2] as not being directly called */
+A_VINTFi_LVCMDQ_ERR_MAP_(0, 3)
+
 #define VINTF_REG_PAGE_SIZE 0x10000
 
 #ifdef CONFIG_TEGRA241_CMDQV
-- 
2.43.0
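
A note for readers new to QEMU's registerfields helpers: the REG32()/FIELD()
declarations added in tegra241-cmdqv.h are what generate the A_* byte offsets
and R_*_MASK constants matched by the MMIO switch statements in
tegra241-cmdqv.c. The sketch below shows roughly what REG32(CONFIG, 0x0) and
FIELD(CONFIG, CMDQV_EN, 0, 1) produce; the real macros emit enums via
MAKE_64BIT_MASK(), so this is only an illustration of the naming pattern, not
the actual preprocessor output.

    /*
     * Illustrative expansion only -- simplified from QEMU's registerfields
     * helpers, which generate enums rather than #defines.
     */
    #define A_CONFIG                 0x0        /* byte offset matched in the switch */
    #define R_CONFIG                 (0x0 / 4)  /* word index, unused by this model  */

    #define R_CONFIG_CMDQV_EN_SHIFT  0
    #define R_CONFIG_CMDQV_EN_LENGTH 1
    #define R_CONFIG_CMDQV_EN_MASK   (0x1u << R_CONFIG_CMDQV_EN_SHIFT)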
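
Similarly, a hypothetical guest-side view of the VINTF0 enable handshake that
tegra241_cmdqv_write_vintf() models: the write path masks off HYP_OWN and
reflects ENABLE back as STATUS.ENABLE_OK. The mmio_read32()/mmio_write32()
helpers, vintf0_enable(), and the cmdqv_base pointer below are placeholders
for illustration, not part of QEMU or of any guest driver.

    #include <stdint.h>

    /* Placeholder MMIO accessors -- illustration only. */
    static inline uint32_t mmio_read32(volatile uint32_t *reg)
    {
        return *reg;
    }

    static inline void mmio_write32(volatile uint32_t *reg, uint32_t val)
    {
        *reg = val;
    }

    /* cmdqv_base points at the guest mapping of the global CMDQV page. */
    static int vintf0_enable(volatile uint8_t *cmdqv_base)
    {
        volatile uint32_t *cfg = (volatile uint32_t *)(cmdqv_base + 0x1000); /* VINTF0_CONFIG */
        volatile uint32_t *sts = (volatile uint32_t *)(cmdqv_base + 0x1004); /* VINTF0_STATUS */

        /* Set ENABLE (bit 0); a HYP_OWN (bit 17) write would be ignored by the model. */
        mmio_write32(cfg, 1u << 0);

        /* The model reflects ENABLE back as ENABLE_OK (bit 0) immediately. */
        return (mmio_read32(sts) & (1u << 0)) ? 0 : -1;
    }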
