Let's use the macro in the one place it's supposed to be used, and drop
the then unnecessary manipulations of the address in
iommu_flush_iotlb_psi(): all leaf functions already deal correctly with
the supplied address.
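
For reference, the macro's effect is simply to strip the offset-into-page
bits before the address gets merged into the IOTLB invalidate register
value. A sketch of its shape (see xen/drivers/passthrough/vtd/iommu.h for
the authoritative definition):

    /* Sketch only - keep just the 4k page-frame bits of the address. */
    #define DMA_TLB_IVA_ADDR(x) ((((u64)(x)) >> 12) << 12)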

There also has never been a need to require (i.e. assert on) 4k-aligned
addresses being passed in - it is always the order-sized, naturally
aligned range containing the address which gets flushed.
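
To illustrate (a minimal sketch with made-up values, relying only on the
flush semantics described above): hardware ignores the low
PAGE_SHIFT_4K + order bits of the supplied address, so an unaligned
address still names the intended range:

    /* Illustration only: range covered by a PSI flush of given order. */
    uint64_t mask = ~(((uint64_t)1 << (PAGE_SHIFT_4K + order)) - 1);
    uint64_t base = addr & mask;    /* 0x12345678, order 2 -> 0x12344000 */
    /* [base, base + (1ULL << (PAGE_SHIFT_4K + order))) gets flushed. */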

Signed-off-by: Jan Beulich <[email protected]>

--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -546,7 +546,8 @@ int vtd_flush_iotlb_reg(struct vtd_iommu
     if ( type == DMA_TLB_PSI_FLUSH )
     {
         /* Note: always flush non-leaf currently. */
-        dmar_writeq(iommu->reg, tlb_offset, size_order | addr);
+        dmar_writeq(iommu->reg, tlb_offset,
+                    size_order | DMA_TLB_IVA_ADDR(addr));
     }
     dmar_writeq(iommu->reg, tlb_offset + 8, val);
 
@@ -606,8 +607,6 @@ static int __must_check iommu_flush_iotl
 {
     int status;
 
-    ASSERT(!(addr & (~PAGE_MASK_4K)));
-
     /* Fallback to domain selective flush if no PSI support */
     if ( !cap_pgsel_inv(iommu->cap) )
         return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
@@ -618,9 +617,6 @@ static int __must_check iommu_flush_iotl
         return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
                                      flush_dev_iotlb);
 
-    addr >>= PAGE_SHIFT_4K + order;
-    addr <<= PAGE_SHIFT_4K + order;
-
     /* apply platform specific errata workarounds */
     vtd_ops_preamble_quirk(iommu);
 

