> -----Original Message-----
> From: Andrew Cooper [mailto:[email protected]]
> Sent: 22 February 2019 19:13
> To: Xen-devel <[email protected]>
> Cc: Andrew Cooper <[email protected]>; Jan Beulich
> <[email protected]>; Paul Durrant
> <[email protected]>; Kevin Tian <[email protected]>
> Subject: [PATCH 3/6] x86/vtd: Drop struct qi_ctrl
>
> It is unclear why this abstraction exists, but iommu_qi_ctrl() can return
> NULL, and every caller unconditionally dereferences the result.  In
> practice, I can't spot a path where iommu is NULL, so I think the NULL
> check is effectively dead.
>
> Move the sole member into struct vtd_iommu, and delete iommu_qi_ctrl().
>
> Signed-off-by: Andrew Cooper <[email protected]>
> ---
> CC: Jan Beulich <[email protected]>
> CC: Paul Durrant <[email protected]>
> CC: Kevin Tian <[email protected]>
> ---
> xen/drivers/passthrough/vtd/iommu.h | 13 +++--------
> xen/drivers/passthrough/vtd/qinval.c | 42 +++++++++++++++---------------------
> 2 files changed, 20 insertions(+), 35 deletions(-)
>
> diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
> index 556b3d6..12b531c 100644
> --- a/xen/drivers/passthrough/vtd/iommu.h
> +++ b/xen/drivers/passthrough/vtd/iommu.h
> @@ -506,10 +506,6 @@ extern struct list_head acpi_drhd_units;
> extern struct list_head acpi_rmrr_units;
> extern struct list_head acpi_ioapic_units;
>
> -struct qi_ctrl {
> - u64 qinval_maddr; /* queue invalidation page machine address */
> -};
> -
> struct ir_ctrl {
> u64 iremap_maddr; /* interrupt remap table machine address */
> int iremap_num; /* total num of used interrupt remap entry */
> @@ -527,7 +523,6 @@ struct iommu_flush {
> };
>
> struct intel_iommu {
> - struct qi_ctrl qi_ctrl;
> struct ir_ctrl ir_ctrl;
> struct iommu_flush flush;
> struct acpi_drhd_unit *drhd;
> @@ -545,16 +540,14 @@ struct vtd_iommu {
> u64 root_maddr; /* root entry machine address */
> struct msi_desc msi;
> struct intel_iommu *intel;
> +
> + uint64_t qinval_maddr; /* queue invalidation page machine address */
> +
> struct list_head ats_devices;
> unsigned long *domid_bitmap; /* domain id bitmap */
> u16 *domid_map; /* domain id mapping array */
> };
>
> -static inline struct qi_ctrl *iommu_qi_ctrl(struct vtd_iommu *iommu)
> -{
> - return iommu ? &iommu->intel->qi_ctrl : NULL;
> -}
> -
> static inline struct ir_ctrl *iommu_ir_ctrl(struct vtd_iommu *iommu)
> {
> return iommu ? &iommu->intel->ir_ctrl : NULL;
> diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
> index 3ddb9b6..f6fcee5 100644
> --- a/xen/drivers/passthrough/vtd/qinval.c
> +++ b/xen/drivers/passthrough/vtd/qinval.c
> @@ -84,7 +84,7 @@ static int __must_check queue_invalidate_context_sync(struct vtd_iommu *iommu,
>
> spin_lock_irqsave(&iommu->register_lock, flags);
> index = qinval_next_index(iommu);
> - entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
> + entry_base = iommu->qinval_maddr +
> ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
^ This calculation looks worthy of a macro or an inline helper; it is repeated
five times in this file.
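Something along these lines, perhaps? Untested sketch only; the helper name
map_qinval_entry() and its exact shape are just for illustration, built from
the names already used in this file:

static struct qinval_entry *map_qinval_entry(struct vtd_iommu *iommu,
                                             unsigned int index,
                                             struct qinval_entry **entries)
{
    /* Machine address of the qinval page holding this index ... */
    u64 entry_base = iommu->qinval_maddr +
                     ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);

    /* ... map it, handing the mapping back so the caller can still
     * unmap_vtd_domain_page() it as before ... */
    *entries = map_vtd_domain_page(entry_base);

    /* ... and return the entry of interest within that page. */
    return &(*entries)[index % (1 << QINVAL_ENTRY_ORDER)];
}

with each call site then collapsing to something like:

    qinval_entry = map_qinval_entry(iommu, index, &qinval_entries);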
Paul
> qinval_entries = map_vtd_domain_page(entry_base);
> qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> @@ -118,7 +118,7 @@ static int __must_check queue_invalidate_iotlb_sync(struct vtd_iommu *iommu,
>
> spin_lock_irqsave(&iommu->register_lock, flags);
> index = qinval_next_index(iommu);
> - entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
> + entry_base = iommu->qinval_maddr +
> ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> qinval_entries = map_vtd_domain_page(entry_base);
> qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> @@ -155,7 +155,7 @@ static int __must_check queue_invalidate_wait(struct vtd_iommu *iommu,
>
> spin_lock_irqsave(&iommu->register_lock, flags);
> index = qinval_next_index(iommu);
> - entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
> + entry_base = iommu->qinval_maddr +
> ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> qinval_entries = map_vtd_domain_page(entry_base);
> qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> @@ -201,9 +201,7 @@ static int __must_check queue_invalidate_wait(struct vtd_iommu *iommu,
>
> static int __must_check invalidate_sync(struct vtd_iommu *iommu)
> {
> - struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
> -
> - ASSERT(qi_ctrl->qinval_maddr);
> + ASSERT(iommu->qinval_maddr);
>
> return queue_invalidate_wait(iommu, 0, 1, 1, 0);
> }
> @@ -211,10 +209,9 @@ static int __must_check invalidate_sync(struct vtd_iommu *iommu)
> static int __must_check dev_invalidate_sync(struct vtd_iommu *iommu,
> struct pci_dev *pdev, u16 did)
> {
> - struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
> int rc;
>
> - ASSERT(qi_ctrl->qinval_maddr);
> + ASSERT(iommu->qinval_maddr);
> rc = queue_invalidate_wait(iommu, 0, 1, 1, 1);
> if ( rc == -ETIMEDOUT )
> {
> @@ -248,7 +245,7 @@ int qinval_device_iotlb_sync(struct vtd_iommu *iommu, struct pci_dev *pdev,
> ASSERT(pdev);
> spin_lock_irqsave(&iommu->register_lock, flags);
> index = qinval_next_index(iommu);
> - entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
> + entry_base = iommu->qinval_maddr +
> ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> qinval_entries = map_vtd_domain_page(entry_base);
> qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> @@ -282,7 +279,7 @@ static int __must_check queue_invalidate_iec_sync(struct vtd_iommu *iommu,
>
> spin_lock_irqsave(&iommu->register_lock, flags);
> index = qinval_next_index(iommu);
> - entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
> + entry_base = iommu->qinval_maddr +
> ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> qinval_entries = map_vtd_domain_page(entry_base);
> qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> @@ -325,9 +322,8 @@ static int __must_check flush_context_qi(void *_iommu, u16 did,
> bool_t flush_non_present_entry)
> {
> struct vtd_iommu *iommu = _iommu;
> - struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
>
> - ASSERT(qi_ctrl->qinval_maddr);
> + ASSERT(iommu->qinval_maddr);
>
> /*
> * In the non-present entry flush case, if hardware doesn't cache
> @@ -355,9 +351,8 @@ static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr,
> u8 dr = 0, dw = 0;
> int ret = 0, rc;
> struct vtd_iommu *iommu = _iommu;
> - struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
>
> - ASSERT(qi_ctrl->qinval_maddr);
> + ASSERT(iommu->qinval_maddr);
>
> /*
> * In the non-present entry flush case, if hardware doesn't cache
> @@ -397,7 +392,6 @@ static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr,
> int enable_qinval(struct vtd_iommu *iommu)
> {
> struct acpi_drhd_unit *drhd;
> - struct qi_ctrl *qi_ctrl;
> struct iommu_flush *flush;
> u32 sts;
> unsigned long flags;
> @@ -405,24 +399,22 @@ int enable_qinval(struct vtd_iommu *iommu)
> if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval )
> return -ENOENT;
>
> - qi_ctrl = iommu_qi_ctrl(iommu);
> flush = iommu_get_flush(iommu);
>
> /* Return if already enabled by Xen */
> sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
> - if ( (sts & DMA_GSTS_QIES) && qi_ctrl->qinval_maddr )
> + if ( (sts & DMA_GSTS_QIES) && iommu->qinval_maddr )
> return 0;
>
> - if ( qi_ctrl->qinval_maddr == 0 )
> + if ( iommu->qinval_maddr == 0 )
> {
> drhd = iommu_to_drhd(iommu);
> - qi_ctrl->qinval_maddr = alloc_pgtable_maddr(drhd, QINVAL_ARCH_PAGE_NR);
> - if ( qi_ctrl->qinval_maddr == 0 )
> - {
> - dprintk(XENLOG_WARNING VTDPREFIX,
> - "Cannot allocate memory for qi_ctrl->qinval_maddr\n");
> +
> + iommu->qinval_maddr =
> + alloc_pgtable_maddr(drhd, QINVAL_ARCH_PAGE_NR);
> +
> + if ( iommu->qinval_maddr == 0 )
> return -ENOMEM;
> - }
> }
>
> flush->context = flush_context_qi;
> @@ -438,7 +430,7 @@ int enable_qinval(struct vtd_iommu *iommu)
> * to IQA register.
> */
> dmar_writeq(iommu->reg, DMAR_IQA_REG,
> - qi_ctrl->qinval_maddr | QINVAL_PAGE_ORDER);
> + iommu->qinval_maddr | QINVAL_PAGE_ORDER);
>
> dmar_writeq(iommu->reg, DMAR_IQT_REG, 0);
>
> --
> 2.1.4