From: Nicolin Chen <[email protected]>
When the guest enables the Event Queue and a vIOMMU is present, allocate a
vEVENTQ object so that host-side events related to the vIOMMU can be
received and propagated back to the guest.
For cold-plugged devices using SMMUv3 acceleration, the vIOMMU is created
before the guest boots. In this case, the vEVENTQ is allocated when the
guest writes to SMMU_CR0 and sets EVENTQEN = 1.
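As a rough sketch only (modeled on QEMU's existing A_CR0 handling in
smmu_writel(); the hw/arm/smmuv3.c hunk is not shown below, and the error
handling here is illustrative, not the patch's actual code):

    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        /*
         * EVENTQEN may have just been set: try to allocate the vEVENTQ.
         * This is a no-op when no vIOMMU exists or the vEVENTQ is
         * already allocated.
         */
        if (!smmuv3_accel_alloc_veventq(s, &local_err)) {
            error_report_err(local_err);    /* local_err is illustrative */
        }
        return MEMTX_OK;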
If no cold-plugged device exists at boot (hence no vIOMMU initially), the
vEVENTQ is instead allocated when a vIOMMU is created, i.e. on the first
device hot-plug.
Event read and propagation will be added in a later patch.
Signed-off-by: Nicolin Chen <[email protected]>
Tested-by: Nicolin Chen <[email protected]>
Signed-off-by: Shameer Kolothum <[email protected]>
---
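Note: iommufd_backend_alloc_veventq() comes from an earlier patch in this
series and is not shown here; judging from the call site below, its assumed
shape is roughly:

    bool iommufd_backend_alloc_veventq(IOMMUFDBackend *be, uint32_t viommu_id,
                                       uint32_t type, uint32_t veventq_depth,
                                       uint32_t *out_veventq_id,
                                       uint32_t *out_veventq_fd, Error **errp);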
hw/arm/smmuv3-accel.c | 65 +++++++++++++++++++++++++++++++++++++++++--
hw/arm/smmuv3-accel.h | 6 ++++
hw/arm/smmuv3.c | 4 +++
3 files changed, 73 insertions(+), 2 deletions(-)
diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
index f5cd4df336..e8028d4be5 100644
--- a/hw/arm/smmuv3-accel.c
+++ b/hw/arm/smmuv3-accel.c
@@ -390,6 +390,58 @@ bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
sizeof(Cmd), &entry_num, cmd, errp);
}
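+/* Release the vEVENTQ's iommufd object, if one was allocated */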
+static void smmuv3_accel_free_veventq(SMMUv3AccelState *accel)
+{
+ IOMMUFDVeventq *veventq = accel->veventq;
+
+ if (!veventq) {
+ return;
+ }
+ iommufd_backend_free_id(accel->viommu->iommufd, veventq->veventq_id);
+ g_free(veventq);
+ accel->veventq = NULL;
+}
+
+bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
+{
+ SMMUv3AccelState *accel = s->s_accel;
+ IOMMUFDVeventq *veventq;
+ uint32_t veventq_id;
+ uint32_t veventq_fd;
+
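+    /* No vIOMMU yet (no device attached): allocate later, at vIOMMU creation */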
+ if (!accel->viommu) {
+ return true;
+ }
+
+ if (accel->veventq) {
+ return true;
+ }
+
+ /*
+     * Per the Arm SMMUv3 specification (IHI0070 G.b, 6.3.26), the Event
+     * Queue is enabled only after its base and size registers have been
+     * programmed, so check EVENTQEN before allocating the vEVENTQ.
+ */
+ if (!smmuv3_eventq_enabled(s)) {
+ return true;
+ }
+
+ if (!iommufd_backend_alloc_veventq(accel->viommu->iommufd,
+ accel->viommu->viommu_id,
+ IOMMU_VEVENTQ_TYPE_ARM_SMMUV3,
+ 1 << s->eventq.log2size, &veventq_id,
+ &veventq_fd, errp)) {
+ return false;
+ }
+
+ veventq = g_new(IOMMUFDVeventq, 1);
+ veventq->veventq_id = veventq_id;
+ veventq->veventq_fd = veventq_fd;
+ veventq->viommu = accel->viommu;
+ accel->veventq = veventq;
+ return true;
+}
+
static bool
smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
Error **errp)
@@ -415,6 +467,7 @@ smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
viommu->viommu_id = viommu_id;
viommu->s2_hwpt_id = s2_hwpt_id;
viommu->iommufd = idev->iommufd;
+ accel->viommu = viommu;
/*
     * Pre-allocate HWPTs for S1 bypass and abort cases. These will be attached
@@ -434,14 +487,20 @@ smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
goto free_abort_hwpt;
}
+    /* Allocate a vEVENTQ if the guest has already enabled the Event Queue */
+ if (!smmuv3_accel_alloc_veventq(s, errp)) {
+ goto free_bypass_hwpt;
+ }
+
/* Attach a HWPT based on SMMUv3 GBPA.ABORT value */
hwpt_id = smmuv3_accel_gbpa_hwpt(s, accel);
if (!host_iommu_device_iommufd_attach_hwpt(idev, hwpt_id, errp)) {
- goto free_bypass_hwpt;
+ goto free_veventq;
}
- accel->viommu = viommu;
return true;
+free_veventq:
+ smmuv3_accel_free_veventq(accel);
free_bypass_hwpt:
iommufd_backend_free_id(idev->iommufd, accel->bypass_hwpt_id);
free_abort_hwpt:
@@ -449,6 +508,7 @@ free_abort_hwpt:
free_viommu:
iommufd_backend_free_id(idev->iommufd, viommu->viommu_id);
g_free(viommu);
+ accel->viommu = NULL;
return false;
}
@@ -549,6 +609,7 @@ static void smmuv3_accel_unset_iommu_device(PCIBus *bus, void *opaque,
trace_smmuv3_accel_unset_iommu_device(devfn, idev->devid);
if (QLIST_EMPTY(&accel->device_list)) {
+ smmuv3_accel_free_veventq(accel);
        iommufd_backend_free_id(accel->viommu->iommufd, accel->bypass_hwpt_id);
iommufd_backend_free_id(accel->viommu->iommufd, accel->abort_hwpt_id);
iommufd_backend_free_id(accel->viommu->iommufd,
diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
index a8a64802ec..92048bb674 100644
--- a/hw/arm/smmuv3-accel.h
+++ b/hw/arm/smmuv3-accel.h
@@ -22,6 +22,7 @@
*/
typedef struct SMMUv3AccelState {
IOMMUFDViommu *viommu;
+ IOMMUFDVeventq *veventq;
uint32_t bypass_hwpt_id;
uint32_t abort_hwpt_id;
QLIST_HEAD(, SMMUv3AccelDevice) device_list;
@@ -50,6 +51,7 @@ bool smmuv3_accel_attach_gbpa_hwpt(SMMUv3State *s, Error **errp);
bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
Error **errp);
void smmuv3_accel_idr_override(SMMUv3State *s);
+bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp);
void smmuv3_accel_reset(SMMUv3State *s);
#else
static inline void smmuv3_accel_init(SMMUv3State *s)
@@ -80,6 +82,10 @@ smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
static inline void smmuv3_accel_idr_override(SMMUv3State *s)
{
}
+static inline bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
+{
+    return true;
+}