Hi Shameer,
On 1/27/26 11:33 AM, Shameer Kolothum wrote:
> From: Nicolin Chen <[email protected]>
>
> When the guest enables the Event Queue and a vIOMMU is present, allocate a
> vEVENTQ object so that host-side events related to the vIOMMU can be
> received and propagated back to the guest.
>
> For cold-plugged devices using SMMUv3 acceleration, the vIOMMU is created
> before the guest boots. In this case, the vEVENTQ is allocated when the
> guest writes to SMMU_CR0 and sets EVENTQEN = 1.
>
> If no cold-plugged device exists at boot (i.e. no vIOMMU initially), the
> vEVENTQ is allocated when a vIOMMU is created, i.e. during the first
> device hot-plug.
>
> Event read and propagation will be added in a later patch.
>
> Signed-off-by: Nicolin Chen <[email protected]>
> Tested-by: Nicolin Chen <[email protected]>
> Signed-off-by: Shameer Kolothum <[email protected]>
> ---
> hw/arm/smmuv3-accel.c | 65 +++++++++++++++++++++++++++++++++++++++++--
> hw/arm/smmuv3-accel.h | 6 ++++
> hw/arm/smmuv3.c | 4 +++
> 3 files changed, 73 insertions(+), 2 deletions(-)
>
> diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
> index f5cd4df336..e8028d4be5 100644
> --- a/hw/arm/smmuv3-accel.c
> +++ b/hw/arm/smmuv3-accel.c
> @@ -390,6 +390,58 @@ bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
>                                  sizeof(Cmd), &entry_num, cmd, errp);
>  }
>
> +static void smmuv3_accel_free_veventq(SMMUv3AccelState *accel)
> +{
> +    IOMMUFDVeventq *veventq = accel->veventq;
> +
> +    if (!veventq) {
> +        return;
> +    }
> +    iommufd_backend_free_id(accel->viommu->iommufd, veventq->veventq_id);
> +    g_free(veventq);
> +    accel->veventq = NULL;
> +}
> +
> +bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
> +{
> +    SMMUv3AccelState *accel = s->s_accel;
> +    IOMMUFDVeventq *veventq;
> +    uint32_t veventq_id;
> +    uint32_t veventq_fd;
> +
> +    if (!accel->viommu) {
> +        return true;
> +    }
> +
> +    if (accel->veventq) {
> +        return true;
> +    }
> +
> +    /*
> +     * Per Arm SMMUv3 specification (IHI0070 G.b, 6.3.26), the Event Queue
> +     * is enabled only after its base and size registers are programmed.
> +     * EVENTQEN is checked before allocating the vEVENTQ.
> +     */
I would simply remove the above comment.
You gave me the right pointer in v2:
"
6.3.26 SMMU_CMDQ_BASE
The registers must be initialized in this order:
1. Write SMMU_CMDQ_BASE to set the queue base and size.
2. Write initial values to SMMU_CMDQ_CONS and SMMU_CMDQ_PROD.
3. Enable the queue with an Update of the respective SMMU_CR0.CMDQEN to 1.
This also applies to the initialization of Event queue and PRI queue registers.
"
Meaning we can safely assume the base and size registers are already
programmed by the time SMMU_CR0.EVENTQEN is set to 1, which is exactly
what smmuv3_eventq_enabled() checks below.
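
For reference, the existing helper already encodes that check; IIRC it
looks like this in hw/arm/smmuv3-internal.h:

static inline bool smmuv3_eventq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->cr[0], CR0, EVENTQEN);
}

So once it returns true we know the guest has already programmed
SMMU_EVENTQ_BASE, and the 1 << s->eventq.log2size size passed below is
valid.
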
> +    if (!smmuv3_eventq_enabled(s)) {
> +        return true;
> +    }
> +
> +    if (!iommufd_backend_alloc_veventq(accel->viommu->iommufd,
> +                                       accel->viommu->viommu_id,
> +                                       IOMMU_VEVENTQ_TYPE_ARM_SMMUV3,
> +                                       1 << s->eventq.log2size, &veventq_id,
> +                                       &veventq_fd, errp)) {
> +        return false;
> +    }
> +
> +    veventq = g_new(IOMMUFDVeventq, 1);
> +    veventq->veventq_id = veventq_id;
> +    veventq->veventq_fd = veventq_fd;
> +    veventq->viommu = accel->viommu;
> +    accel->veventq = veventq;
> +    return true;
> +}
> +
>  static bool
>  smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
>                            Error **errp)
> @@ -415,6 +467,7 @@ smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
>      viommu->viommu_id = viommu_id;
>      viommu->s2_hwpt_id = s2_hwpt_id;
>      viommu->iommufd = idev->iommufd;
> +    accel->viommu = viommu;
>
>      /*
>       * Pre-allocate HWPTs for S1 bypass and abort cases. These will be attached
> @@ -434,14 +487,20 @@ smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
>          goto free_abort_hwpt;
>      }
>
> +    /* Allocate a vEVENTQ if guest has enabled event queue */
> +    if (!smmuv3_accel_alloc_veventq(s, errp)) {
> +        goto free_bypass_hwpt;
> +    }
> +
>      /* Attach a HWPT based on SMMUv3 GBPA.ABORT value */
>      hwpt_id = smmuv3_accel_gbpa_hwpt(s, accel);
>      if (!host_iommu_device_iommufd_attach_hwpt(idev, hwpt_id, errp)) {
> -        goto free_bypass_hwpt;
> +        goto free_veventq;
>      }
> -    accel->viommu = viommu;
>      return true;
>
> +free_veventq:
> +    smmuv3_accel_free_veventq(accel);
>  free_bypass_hwpt:
>      iommufd_backend_free_id(idev->iommufd, accel->bypass_hwpt_id);
>  free_abort_hwpt:
> @@ -449,6 +508,7 @@ free_abort_hwpt:
>  free_viommu:
>      iommufd_backend_free_id(idev->iommufd, viommu->viommu_id);
>      g_free(viommu);
> +    accel->viommu = NULL;
>      return false;
>  }
>
> @@ -549,6 +609,7 @@ static void smmuv3_accel_unset_iommu_device(PCIBus *bus, void *opaque,
>      trace_smmuv3_accel_unset_iommu_device(devfn, idev->devid);
>
>      if (QLIST_EMPTY(&accel->device_list)) {
> +        smmuv3_accel_free_veventq(accel);
I would recommend we introduce a smmuv3_accel_free_viommu() helper here,
so the teardown lives in one place and we don't forget to free newly
added objects such as the vEVENTQ.
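
Untested sketch, just to illustrate (reusing the names from this patch):

static void smmuv3_accel_free_viommu(SMMUv3AccelState *accel)
{
    IOMMUFDViommu *viommu = accel->viommu;

    if (!viommu) {
        return;
    }
    /* The vEVENTQ is allocated against this vIOMMU, so free it first */
    smmuv3_accel_free_veventq(accel);
    iommufd_backend_free_id(viommu->iommufd, accel->bypass_hwpt_id);
    iommufd_backend_free_id(viommu->iommufd, accel->abort_hwpt_id);
    iommufd_backend_free_id(viommu->iommufd, viommu->viommu_id);
    g_free(viommu);
    accel->viommu = NULL;
}

That way this path and the error path in smmuv3_accel_alloc_viommu()
stay in sync when new per-vIOMMU objects are added.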
With that,
Reviewed-by: Eric Auger <[email protected]>
Thanks
Eric
>          iommufd_backend_free_id(accel->viommu->iommufd,
>                                  accel->bypass_hwpt_id);
>          iommufd_backend_free_id(accel->viommu->iommufd,
>                                  accel->abort_hwpt_id);
>          iommufd_backend_free_id(accel->viommu->iommufd,
> diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
> index a8a64802ec..92048bb674 100644
> --- a/hw/arm/smmuv3-accel.h
> +++ b/hw/arm/smmuv3-accel.h
> @@ -22,6 +22,7 @@
> */
>  typedef struct SMMUv3AccelState {
>      IOMMUFDViommu *viommu;
> +    IOMMUFDVeventq *veventq;
>      uint32_t bypass_hwpt_id;
>      uint32_t abort_hwpt_id;
>      QLIST_HEAD(, SMMUv3AccelDevice) device_list;
> @@ -50,6 +51,7 @@ bool smmuv3_accel_attach_gbpa_hwpt(SMMUv3State *s, Error **errp);
>  bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
>                                  Error **errp);
>  void smmuv3_accel_idr_override(SMMUv3State *s);
> +bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp);
>  void smmuv3_accel_reset(SMMUv3State *s);
> #else
> static inline void smmuv3_accel_init(SMMUv3State *s)
> @@ -80,6 +82,10 @@ smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
>  static inline void smmuv3_accel_idr_override(SMMUv3State *s)
>  {
>  }
> +static inline bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
> +{
> +    return true;
> +}
>  static inline void smmuv3_accel_reset(SMMUv3State *s)
>  {
>  }
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index c08d58c579..210ac038fe 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -1605,6 +1605,10 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
>          s->cr0ack = data & ~SMMU_CR0_RESERVED;
>          /* in case the command queue has been enabled */
>          smmuv3_cmdq_consume(s, &local_err);
> +        /* Allocate vEVENTQ if EventQ is enabled and a vIOMMU is available */
> +        if (local_err == NULL) {
> +            smmuv3_accel_alloc_veventq(s, &local_err);
> +        }
>          break;
>      case A_CR1:
>          s->cr[1] = data;