From: Nicolin Chen <[email protected]>

Implement read support for the Tegra241 CMDQV register blocks, including
the VINTF and per-VCMDQ register regions. The patch decodes offsets,
extracts queue indices, and returns the corresponding cached register
state.

A subsequent patch will add write support.

Signed-off-by: Nicolin Chen <[email protected]>
Signed-off-by: Shameer Kolothum <[email protected]>
---
 hw/arm/tegra241-cmdqv.c | 144 +++++++++++++++++++-
 hw/arm/tegra241-cmdqv.h | 282 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 425 insertions(+), 1 deletion(-)
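Note for reviewers (not part of the commit, placed in the git-am-ignored
area): a minimal standalone sketch of the offset decoding described above,
assuming only the constants visible in this patch (0x10000 VCMDQ page base,
0x80 per-queue stride). The helper name decode_vcmdq_offset() and the main()
driver are illustrative only, not QEMU code:

    /* cc -o decode decode.c && ./decode */
    #include <stdint.h>
    #include <stdio.h>

    #define VCMDQ_PAGE_BASE 0x10000u   /* first VCMDQ register page */
    #define VCMDQ_STRIDE    0x80u      /* per-queue register block size */

    /*
     * Split an MMIO offset inside the VCMDQ page into a queue index and a
     * queue-relative register offset (aligned back to the VCMDQ0 register
     * addresses), mirroring:
     *     index = (offset - 0x10000) / 0x80;
     *     reg   = offset - 0x80 * index;
     */
    static void decode_vcmdq_offset(uint64_t offset, int *index, uint64_t *reg)
    {
        *index = (int)((offset - VCMDQ_PAGE_BASE) / VCMDQ_STRIDE);
        *reg = offset - (uint64_t)VCMDQ_STRIDE * (uint64_t)*index;
    }

    int main(void)
    {
        /* Offsets taken from the comment in tegra241_cmdqv_read() */
        uint64_t offsets[] = { 0x10000, 0x10080, 0x10100, 0x13f80 };

        for (size_t i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
            int index;
            uint64_t reg;

            decode_vcmdq_offset(offsets[i], &index, &reg);
            /* e.g. 0x13f80 -> index 127, reg 0x10000 (VCMDQ0_CONS_INDX) */
            printf("offset 0x%llx -> index %d, reg 0x%llx\n",
                   (unsigned long long)offsets[i], index,
                   (unsigned long long)reg);
        }
        return 0;
    }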
diff --git a/hw/arm/tegra241-cmdqv.c b/hw/arm/tegra241-cmdqv.c
index d8858322dc..185ef957bc 100644
--- a/hw/arm/tegra241-cmdqv.c
+++ b/hw/arm/tegra241-cmdqv.c
@@ -8,6 +8,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/log.h"
 #include "hw/arm/smmuv3.h"
 
 #include "smmuv3-accel.h"
@@ -52,10 +53,94 @@ static bool tegra241_cmdqv_init_vcmdq_page0(Tegra241CMDQV *cmdqv, Error **errp)
     return true;
 }
 
+/* Note that offset aligns down to 0x1000 */
+static uint64_t tegra241_cmdqv_read_vintf(Tegra241CMDQV *cmdqv, hwaddr offset)
+{
+    int i;
+
+    switch (offset) {
+    case A_VINTF0_CONFIG:
+        return cmdqv->vintf_config;
+    case A_VINTF0_STATUS:
+        return cmdqv->vintf_status;
+    case A_VINTF0_LVCMDQ_ERR_MAP_0 ... A_VINTF0_LVCMDQ_ERR_MAP_3:
+        i = (offset - A_VINTF0_LVCMDQ_ERR_MAP_0) / 4;
+        return cmdqv->vintf_cmdq_err_map[i];
+    default:
+        qemu_log_mask(LOG_UNIMP, "%s unhandled read access at 0x%" PRIx64 "\n",
+                      __func__, offset);
+        return 0;
+    }
+}
+
+/* Note that offset aligns down to 0x10000 */
+static uint64_t tegra241_cmdqv_read_vcmdq(Tegra241CMDQV *cmdqv, hwaddr offset,
+                                          int index)
+{
+    uint32_t *ptr = NULL;
+    uint64_t off;
+
+    /*
+     * Each VCMDQ instance occupies a 128-byte region (0x80).
+     * The hardware layout is:
+     *     vcmdq_page0 + (index * 0x80) + (offset - 0x10000)
+     */
+    if (cmdqv->vcmdq_page0) {
+        off = (0x80 * index) + (offset - 0x10000);
+        ptr = (uint32_t *)(cmdqv->vcmdq_page0 + off);
+    }
+
+    switch (offset) {
+    case A_VCMDQ0_CONS_INDX:
+        if (ptr) {
+            cmdqv->vcmdq_cons_indx[index] = *ptr;
+        }
+        return cmdqv->vcmdq_cons_indx[index];
+    case A_VCMDQ0_PROD_INDX:
+        if (ptr) {
+            cmdqv->vcmdq_prod_indx[index] = *ptr;
+        }
+        return cmdqv->vcmdq_prod_indx[index];
+    case A_VCMDQ0_CONFIG:
+        if (ptr) {
+            cmdqv->vcmdq_config[index] = *ptr;
+        }
+        return cmdqv->vcmdq_config[index];
+    case A_VCMDQ0_STATUS:
+        if (ptr) {
+            cmdqv->vcmdq_status[index] = *ptr;
+        }
+        return cmdqv->vcmdq_status[index];
+    case A_VCMDQ0_GERROR:
+        if (ptr) {
+            cmdqv->vcmdq_gerror[index] = *ptr;
+        }
+        return cmdqv->vcmdq_gerror[index];
+    case A_VCMDQ0_GERRORN:
+        if (ptr) {
+            cmdqv->vcmdq_gerrorn[index] = *ptr;
+        }
+        return cmdqv->vcmdq_gerrorn[index];
+    case A_VCMDQ0_BASE_L:
+        return cmdqv->vcmdq_base[index];
+    case A_VCMDQ0_BASE_H:
+        return cmdqv->vcmdq_base[index] >> 32;
+    case A_VCMDQ0_CONS_INDX_BASE_DRAM_L:
+        return cmdqv->vcmdq_cons_indx_base[index];
+    case A_VCMDQ0_CONS_INDX_BASE_DRAM_H:
+        return cmdqv->vcmdq_cons_indx_base[index] >> 32;
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "%s unhandled read access at 0x%" PRIx64 "\n",
+                      __func__, offset);
+        return 0;
+    }
+}
 static uint64_t tegra241_cmdqv_read(void *opaque, hwaddr offset, unsigned size)
 {
     Tegra241CMDQV *cmdqv = (Tegra241CMDQV *)opaque;
     Error *local_err = NULL;
+    int index;
 
     if (!cmdqv->vcmdq_page0) {
         tegra241_cmdqv_init_vcmdq_page0(cmdqv, &local_err);
@@ -65,7 +150,64 @@ static uint64_t tegra241_cmdqv_read(void *opaque, hwaddr offset, unsigned size)
         }
     }
 
-    return 0;
+    if (offset > TEGRA241_CMDQV_IO_LEN) {
+        qemu_log_mask(LOG_UNIMP,
+                      "%s offset 0x%" PRIx64 " exceeds limit (0x50000)\n",
+                      __func__, offset);
+        return 0;
+    }
+
+    /* Fall back to cached register values */
+    switch (offset) {
+    case A_CONFIG:
+        return cmdqv->config;
+    case A_PARAM:
+        return cmdqv->param;
+    case A_STATUS:
+        return cmdqv->status;
+    case A_VI_ERR_MAP ... A_VI_ERR_MAP_1:
+        return cmdqv->vi_err_map[(offset - A_VI_ERR_MAP) / 4];
+    case A_VI_INT_MASK ... A_VI_INT_MASK_1:
+        return cmdqv->vi_int_mask[(offset - A_VI_INT_MASK) / 4];
+    case A_CMDQ_ERR_MAP ... A_CMDQ_ERR_MAP_3:
+        return cmdqv->cmdq_err_map[(offset - A_CMDQ_ERR_MAP) / 4];
+    case A_CMDQ_ALLOC_MAP_0 ... A_CMDQ_ALLOC_MAP_127:
+        return cmdqv->cmdq_alloc_map[(offset - A_CMDQ_ALLOC_MAP_0) / 4];
+    case A_VINTF0_CONFIG ... A_VINTF0_LVCMDQ_ERR_MAP_3:
+        return tegra241_cmdqv_read_vintf(cmdqv, offset);
+    case A_VI_VCMDQ0_CONS_INDX ... A_VI_VCMDQ127_GERRORN:
+        offset -= 0x20000;
+        QEMU_FALLTHROUGH;
+    case A_VCMDQ0_CONS_INDX ... A_VCMDQ127_GERRORN:
+        /*
+         * Align offset down to 0x10000 while extracting the index:
+         * VCMDQ0_CONS_INDX   (0x10000) => 0x10000, 0
+         * VCMDQ1_CONS_INDX   (0x10080) => 0x10000, 1
+         * VCMDQ2_CONS_INDX   (0x10100) => 0x10000, 2
+         * ...
+         * VCMDQ127_CONS_INDX (0x13f80) => 0x10000, 127
+         */
+        index = (offset - 0x10000) / 0x80;
+        return tegra241_cmdqv_read_vcmdq(cmdqv, offset - 0x80 * index, index);
+    case A_VI_VCMDQ0_BASE_L ... A_VI_VCMDQ127_CONS_INDX_BASE_DRAM_H:
+        offset -= 0x20000;
+        QEMU_FALLTHROUGH;
+    case A_VCMDQ0_BASE_L ... A_VCMDQ127_CONS_INDX_BASE_DRAM_H:
+        /*
+         * Align offset down to 0x20000 while extracting the index:
+         * VCMDQ0_BASE_L   (0x20000) => 0x20000, 0
+         * VCMDQ1_BASE_L   (0x20080) => 0x20000, 1
+         * VCMDQ2_BASE_L   (0x20100) => 0x20000, 2
+         * ...
+         * VCMDQ127_BASE_L (0x23f80) => 0x20000, 127
+         */
+        index = (offset - 0x20000) / 0x80;
+        return tegra241_cmdqv_read_vcmdq(cmdqv, offset - 0x80 * index, index);
+    default:
+        qemu_log_mask(LOG_UNIMP, "%s unhandled read access at 0x%" PRIx64 "\n",
+                      __func__, offset);
+        return 0;
+    }
 }
 
 static void tegra241_cmdqv_write(void *opaque, hwaddr offset, uint64_t value,
diff --git a/hw/arm/tegra241-cmdqv.h b/hw/arm/tegra241-cmdqv.h
index ccdf0651be..4972e367f6 100644
--- a/hw/arm/tegra241-cmdqv.h
+++ b/hw/arm/tegra241-cmdqv.h
@@ -10,6 +10,7 @@
 #ifndef HW_TEGRA241_CMDQV_H
 #define HW_TEGRA241_CMDQV_H
 
+#include "hw/registerfields.h"
 #include CONFIG_DEVICES
 
 #define TEGRA241_CMDQV_IO_LEN 0x50000
@@ -22,10 +23,291 @@ typedef struct Tegra241CMDQV {
     MemoryRegion mmio_vcmdq_page;
     MemoryRegion mmio_vintf_page;
     void *vcmdq_page0;
+    IOMMUFDHWqueue *vcmdq[128];
+
+    /* Register Cache */
+    uint32_t config;
+    uint32_t param;
+    uint32_t status;
+    uint32_t vi_err_map[2];
+    uint32_t vi_int_mask[2];
+    uint32_t cmdq_err_map[4];
+    uint32_t cmdq_alloc_map[128];
+    uint32_t vintf_config;
+    uint32_t vintf_status;
+    uint32_t vintf_cmdq_err_map[4];
+    uint32_t vcmdq_cons_indx[128];
+    uint32_t vcmdq_prod_indx[128];
+    uint32_t vcmdq_config[128];
+    uint32_t vcmdq_status[128];
+    uint32_t vcmdq_gerror[128];
+    uint32_t vcmdq_gerrorn[128];
+    uint64_t vcmdq_base[128];
+    uint64_t vcmdq_cons_indx_base[128];
 } Tegra241CMDQV;
 
+/* MMIO Registers */
+REG32(CONFIG, 0x0)
+FIELD(CONFIG, CMDQV_EN, 0, 1)
+FIELD(CONFIG, CMDQV_PER_CMD_OFFSET, 1, 3)
+FIELD(CONFIG, CMDQ_MAX_CLK_BATCH, 4, 8)
+FIELD(CONFIG, CMDQ_MAX_CMD_BATCH, 12, 8)
+FIELD(CONFIG, CONS_DRAM_EN, 20, 1)
+
+#define V_CONFIG_RESET 0x00020403
+
+REG32(PARAM, 0x4)
+FIELD(PARAM, CMDQV_VER, 0, 4)
+FIELD(PARAM, CMDQV_NUM_CMDQ_LOG2, 4, 4)
+FIELD(PARAM, CMDQV_NUM_VM_LOG2, 8, 4)
+FIELD(PARAM, CMDQV_NUM_SID_PER_VM_LOG2, 12, 4)
+
+#define V_PARAM_RESET 0x00004011
+
+REG32(STATUS, 0x8)
+FIELD(STATUS, CMDQV_ENABLED, 0, 1)
+
+#define A_VI_ERR_MAP 0x14
+#define A_VI_ERR_MAP_1 0x18
+#define V_VI_ERR_MAP_NO_ERROR (0)
+#define V_VI_ERR_MAP_ERROR (1)
+
+#define A_VI_INT_MASK 0x1c
+#define A_VI_INT_MASK_1 0x20
+#define V_VI_INT_MASK_NOT_MASKED (0)
+#define V_VI_INT_MASK_MASKED (1)
+
+#define A_CMDQ_ERR_MAP 0x24
+#define A_CMDQ_ERR_MAP_1 0x28
+#define A_CMDQ_ERR_MAP_2 0x2c
+#define A_CMDQ_ERR_MAP_3 0x30
+
+/* i = [0, 127] */
+#define A_CMDQ_ALLOC_MAP_(i) \
+    REG32(CMDQ_ALLOC_MAP_##i, 0x200 + i * 4) \
+    FIELD(CMDQ_ALLOC_MAP_##i, ALLOC, 0, 1) \
+    FIELD(CMDQ_ALLOC_MAP_##i, LVCMDQ, 1, 7) \
+    FIELD(CMDQ_ALLOC_MAP_##i, VIRT_INTF_INDX, 15, 6)
+
+A_CMDQ_ALLOC_MAP_(0)
+/* Omitting 1~126 as not being directly called */
+A_CMDQ_ALLOC_MAP_(127)
+
+/* i = [0, 0] */
+#define A_VINTFi_CONFIG(i) \
+    REG32(VINTF##i##_CONFIG, 0x1000 + i * 0x100) \
+    FIELD(VINTF##i##_CONFIG, ENABLE, 0, 1) \
+    FIELD(VINTF##i##_CONFIG, VMID, 1, 16) \
+    FIELD(VINTF##i##_CONFIG, HYP_OWN, 17, 1)
+
+A_VINTFi_CONFIG(0)
+
+#define A_VINTFi_STATUS(i) \
+    REG32(VINTF##i##_STATUS, 0x1004 + i * 0x100) \
+    FIELD(VINTF##i##_STATUS, ENABLE_OK, 0, 1) \
+    FIELD(VINTF##i##_STATUS, STATUS, 1, 3) \
+    FIELD(VINTF##i##_STATUS, VI_NUM_LVCMDQ, 16, 8)
+
+A_VINTFi_STATUS(0)
+
+#define V_VINTF_STATUS_NO_ERROR (0 << 1)
+#define V_VINTF_STATUS_VCMDQ_ERROR (1 << 1)
+
+/* i = [0, 0], j = [0, 3] */
+#define A_VINTFi_LVCMDQ_ERR_MAP_(i, j) \
+    REG32(VINTF##i##_LVCMDQ_ERR_MAP_##j, 0x10c0 + j * 4 + i * 0x100) \
+    FIELD(VINTF##i##_LVCMDQ_ERR_MAP_##j, LVCMDQ_ERR_MAP, 0, 32)
+
+A_VINTFi_LVCMDQ_ERR_MAP_(0, 0)
+/* Omitting [0][1~2] as not being directly called */
+A_VINTFi_LVCMDQ_ERR_MAP_(0, 3)
+
+/* VCMDQ registers -- starting from 0x10000 with size 64KB * 2 (0x20000) */
+#define VCMDQ_REG_OFFSET 0x10000
 #define VCMDQ_REG_PAGE_SIZE 0x10000
 
+#define A_VCMDQi_CONS_INDX(i) \
+    REG32(VCMDQ##i##_CONS_INDX, 0x10000 + i * 0x80) \
+    FIELD(VCMDQ##i##_CONS_INDX, RD, 0, 20) \
+    FIELD(VCMDQ##i##_CONS_INDX, ERR, 24, 7)
+
+A_VCMDQi_CONS_INDX(0)
+/* Omitting [1~126] as not being directly called */
+A_VCMDQi_CONS_INDX(127)
+
+#define V_VCMDQ_CONS_INDX_ERR_CERROR_NONE 0
+#define V_VCMDQ_CONS_INDX_ERR_CERROR_ILL_OPCODE 1
+#define V_VCMDQ_CONS_INDX_ERR_CERROR_ABT 2
+#define V_VCMDQ_CONS_INDX_ERR_CERROR_ATC_INV_SYNC 3
+#define V_VCMDQ_CONS_INDX_ERR_CERROR_ILL_ACCESS 4
+
+#define A_VCMDQi_PROD_INDX(i) \
+    REG32(VCMDQ##i##_PROD_INDX, 0x10000 + 0x4 + i * 0x80) \
+    FIELD(VCMDQ##i##_PROD_INDX, WR, 0, 20)
+
+A_VCMDQi_PROD_INDX(0)
+/* Omitting [1~126] as not being directly called */
+A_VCMDQi_PROD_INDX(127)
+
+#define A_VCMDQi_CONFIG(i) \
+    REG32(VCMDQ##i##_CONFIG, 0x10000 + 0x8 + i * 0x80) \
+    FIELD(VCMDQ##i##_CONFIG, CMDQ_EN, 0, 1)
+
+A_VCMDQi_CONFIG(0)
+/* Omitting [1~126] as not being directly called */
+A_VCMDQi_CONFIG(127)
+
+#define A_VCMDQi_STATUS(i) \
+    REG32(VCMDQ##i##_STATUS, 0x10000 + 0xc + i * 0x80) \
+    FIELD(VCMDQ##i##_STATUS, CMDQ_EN_OK, 0, 1)
+
+A_VCMDQi_STATUS(0)
+/* Omitting [1~126] as not being directly called */
+A_VCMDQi_STATUS(127)
+
+#define A_VCMDQi_GERROR(i) \
+    REG32(VCMDQ##i##_GERROR, 0x10000 + 0x10 + i * 0x80) \
+    FIELD(VCMDQ##i##_GERROR, CMDQ_ERR, 0, 1) \
+    FIELD(VCMDQ##i##_GERROR, CONS_DRAM_WR_ABT_ERR, 1, 1) \
+    FIELD(VCMDQ##i##_GERROR, CMDQ_INIT_ERR, 2, 1)
+
+A_VCMDQi_GERROR(0)
+/* Omitting [1~126] as not being directly called */
+A_VCMDQi_GERROR(127)
+
+#define A_VCMDQi_GERRORN(i) \
+    REG32(VCMDQ##i##_GERRORN, 0x10000 + 0x14 + i * 0x80) \
+    FIELD(VCMDQ##i##_GERRORN, CMDQ_ERR, 0, 1) \
+    FIELD(VCMDQ##i##_GERRORN, CONS_DRAM_WR_ABT_ERR, 1, 1) \
+    FIELD(VCMDQ##i##_GERRORN, CMDQ_INIT_ERR, 2, 1)
+
+A_VCMDQi_GERRORN(0)
+/* Omitting [1~126] as not being directly called */
+A_VCMDQi_GERRORN(127)
+
+#define A_VCMDQi_BASE_L(i) \
+    REG32(VCMDQ##i##_BASE_L, 0x20000 + i * 0x80) \
+    FIELD(VCMDQ##i##_BASE_L, LOG2SIZE, 0, 5) \
+    FIELD(VCMDQ##i##_BASE_L, ADDR, 5, 27)
+
+A_VCMDQi_BASE_L(0)
+/* Omitting [1~126] as not being directly called */
+A_VCMDQi_BASE_L(127)
+
+#define A_VCMDQi_BASE_H(i) \
+    REG32(VCMDQ##i##_BASE_H, 0x20000 + 0x4 + i * 0x80) \
+    FIELD(VCMDQ##i##_BASE_H, ADDR, 0, 16)
+
+A_VCMDQi_BASE_H(0)
+/* Omitting [1~126] as not being directly called */
+A_VCMDQi_BASE_H(127)
+
+#define A_VCMDQi_CONS_INDX_BASE_DRAM_L(i) \
+    REG32(VCMDQ##i##_CONS_INDX_BASE_DRAM_L, 0x20000 + 0x8 + i * 0x80) \
+    FIELD(VCMDQ##i##_CONS_INDX_BASE_DRAM_L, ADDR, 0, 32)
+
+A_VCMDQi_CONS_INDX_BASE_DRAM_L(0)
+/* Omitting [1~126] as not being directly called */
+A_VCMDQi_CONS_INDX_BASE_DRAM_L(127)
+
+#define A_VCMDQi_CONS_INDX_BASE_DRAM_H(i) \
+    REG32(VCMDQ##i##_CONS_INDX_BASE_DRAM_H, 0x20000 + 0xc + i * 0x80) \
+    FIELD(VCMDQ##i##_CONS_INDX_BASE_DRAM_H, ADDR, 0, 16)
+
+A_VCMDQi_CONS_INDX_BASE_DRAM_H(0)
+/* Omitting [1~126] as not being directly called */
+A_VCMDQi_CONS_INDX_BASE_DRAM_H(127)
+
+/*
+ * VINTF VI_VCMDQ registers -- starting from 0x30000 with size 64KB * 2
+ * (0x20000)
+ */
+#define A_VI_VCMDQi_CONS_INDX(i) \
+    REG32(VI_VCMDQ##i##_CONS_INDX, 0x30000 + i * 0x80) \
+    FIELD(VI_VCMDQ##i##_CONS_INDX, RD, 0, 20) \
+    FIELD(VI_VCMDQ##i##_CONS_INDX, ERR, 24, 7)
+
+A_VI_VCMDQi_CONS_INDX(0)
+/* Omitting [1~126] as not being directly called */
+A_VI_VCMDQi_CONS_INDX(127)
+
+#define A_VI_VCMDQi_PROD_INDX(i) \
+    REG32(VI_VCMDQ##i##_PROD_INDX, 0x30000 + 0x4 + i * 0x80) \
+    FIELD(VI_VCMDQ##i##_PROD_INDX, WR, 0, 20)
+
+A_VI_VCMDQi_PROD_INDX(0)
+/* Omitting [1~126] as not being directly called */
+A_VI_VCMDQi_PROD_INDX(127)
+
+#define A_VI_VCMDQi_CONFIG(i) \
+    REG32(VI_VCMDQ##i##_CONFIG, 0x30000 + 0x8 + i * 0x80) \
+    FIELD(VI_VCMDQ##i##_CONFIG, CMDQ_EN, 0, 1)
+
+A_VI_VCMDQi_CONFIG(0)
+/* Omitting [1~126] as not being directly called */
+A_VI_VCMDQi_CONFIG(127)
+
+#define A_VI_VCMDQi_STATUS(i) \
+    REG32(VI_VCMDQ##i##_STATUS, 0x30000 + 0xc + i * 0x80) \
+    FIELD(VI_VCMDQ##i##_STATUS, CMDQ_EN_OK, 0, 1)
+
+A_VI_VCMDQi_STATUS(0)
+/* Omitting [1~126] as not being directly called */
+A_VI_VCMDQi_STATUS(127)
+
+#define A_VI_VCMDQi_GERROR(i) \
+    REG32(VI_VCMDQ##i##_GERROR, 0x30000 + 0x10 + i * 0x80) \
+    FIELD(VI_VCMDQ##i##_GERROR, CMDQ_ERR, 0, 1) \
+    FIELD(VI_VCMDQ##i##_GERROR, CONS_DRAM_WR_ABT_ERR, 1, 1) \
+    FIELD(VI_VCMDQ##i##_GERROR, CMDQ_INIT_ERR, 2, 1)
+
+A_VI_VCMDQi_GERROR(0)
+/* Omitting [1~126] as not being directly called */
+A_VI_VCMDQi_GERROR(127)
+
+#define A_VI_VCMDQi_GERRORN(i) \
+    REG32(VI_VCMDQ##i##_GERRORN, 0x30000 + 0x14 + i * 0x80) \
+    FIELD(VI_VCMDQ##i##_GERRORN, CMDQ_ERR, 0, 1) \
+    FIELD(VI_VCMDQ##i##_GERRORN, CONS_DRAM_WR_ABT_ERR, 1, 1) \
+    FIELD(VI_VCMDQ##i##_GERRORN, CMDQ_INIT_ERR, 2, 1)
+
+A_VI_VCMDQi_GERRORN(0)
+/* Omitting [1~126] as not being directly called */
+A_VI_VCMDQi_GERRORN(127)
+
+#define A_VI_VCMDQi_BASE_L(i) \
+    REG32(VI_VCMDQ##i##_BASE_L, 0x40000 + i * 0x80) \
+    FIELD(VI_VCMDQ##i##_BASE_L, LOG2SIZE, 0, 5) \
+    FIELD(VI_VCMDQ##i##_BASE_L, ADDR, 5, 27)
+
+A_VI_VCMDQi_BASE_L(0)
+/* Omitting [1~126] as not being directly called */
+A_VI_VCMDQi_BASE_L(127)
+
+#define A_VI_VCMDQi_BASE_H(i) \
+    REG32(VI_VCMDQ##i##_BASE_H, 0x40000 + 0x4 + i * 0x80) \
+    FIELD(VI_VCMDQ##i##_BASE_H, ADDR, 0, 16)
+
+A_VI_VCMDQi_BASE_H(0)
+/* Omitting [1~126] as not being directly called */
+A_VI_VCMDQi_BASE_H(127)
+
+#define A_VI_VCMDQi_CONS_INDX_BASE_DRAM_L(i) \
+    REG32(VI_VCMDQ##i##_CONS_INDX_BASE_DRAM_L, 0x40000 + 0x8 + i * 0x80) \
+    FIELD(VI_VCMDQ##i##_CONS_INDX_BASE_DRAM_L, ADDR, 0, 32)
+
+A_VI_VCMDQi_CONS_INDX_BASE_DRAM_L(0)
+/* Omitting [1~126] as not being directly called */
+A_VI_VCMDQi_CONS_INDX_BASE_DRAM_L(127)
+
+#define A_VI_VCMDQi_CONS_INDX_BASE_DRAM_H(i) \
+    REG32(VI_VCMDQ##i##_CONS_INDX_BASE_DRAM_H, 0x40000 + 0xc + i * 0x80) \
+    FIELD(VI_VCMDQ##i##_CONS_INDX_BASE_DRAM_H, ADDR, 0, 16)
+
+A_VI_VCMDQi_CONS_INDX_BASE_DRAM_H(0)
+/* Omitting [1~126] as not being directly called */
+A_VI_VCMDQi_CONS_INDX_BASE_DRAM_H(127)
+
 #ifdef CONFIG_TEGRA241_CMDQV
 bool tegra241_cmdqv_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
                                  uint32_t *out_viommu_id, Error **errp);
-- 
2.43.0
