Hi Zhenzhong,

On 6/20/25 9:18 AM, Zhenzhong Duan wrote:
> From: Yi Liu <yi.l....@intel.com>
>
> This traps the guest PASID-based iotlb invalidation request and propagate it
> to host.
>
> Intel VT-d 3.0 supports nested translation in PASID granular. Guest SVA support
s/granular/granularity
> could be implemented by configuring nested translation on specific PASID. This
> is also known as dual stage DMA translation.
>
> Under such configuration, guest owns the GVA->GPA translation which is
> configured as stage-1 page table in host side for a specific pasid, and host
> owns GPA->HPA translation. As guest owns stage-1 translation table, piotlb
> invalidation should be propagated to host since host IOMMU will cache first
> level page table related mappings during DMA address translation.
>
> Signed-off-by: Yi Liu <yi.l....@intel.com>
> Signed-off-by: Yi Sun <yi.y....@linux.intel.com>
> Signed-off-by: Zhenzhong Duan <zhenzhong.d...@intel.com>
> ---
>  hw/i386/intel_iommu_internal.h |   6 ++
>  hw/i386/intel_iommu.c          | 113 ++++++++++++++++++++++++++++++++-
>  2 files changed, 117 insertions(+), 2 deletions(-)
>
> diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
> index b3e4aa23f1..07bfb97499 100644
> --- a/hw/i386/intel_iommu_internal.h
> +++ b/hw/i386/intel_iommu_internal.h
> @@ -589,6 +589,12 @@ typedef struct VTDPASIDCacheInfo {
>      bool error_happened;
>  } VTDPASIDCacheInfo;
>  
> +typedef struct VTDPIOTLBInvInfo {
> +    uint16_t domain_id;
> +    uint32_t pasid;
> +    struct iommu_hwpt_vtd_s1_invalidate *inv_data;
> +} VTDPIOTLBInvInfo;
> +
>  /* PASID Table Related Definitions */
>  #define VTD_PASID_DIR_BASE_ADDR_MASK  (~0xfffULL)
>  #define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL)
> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
> index 621b07aa02..d1fa395274 100644
> --- a/hw/i386/intel_iommu.c
> +++ b/hw/i386/intel_iommu.c
> @@ -2639,12 +2639,105 @@ static int vtd_bind_guest_pasid(VTDAddressSpace *vtd_as,
>  
>      return ret;
>  }
> +
> +/*
> + * Caller of this function should hold iommu_lock.
> + */
> +static void vtd_invalidate_piotlb(VTDAddressSpace *vtd_as,
> +                                  struct iommu_hwpt_vtd_s1_invalidate *cache)
> +{
> +    VTDHostIOMMUDevice *vtd_hiod;
> +    HostIOMMUDeviceIOMMUFD *idev;
> +    int devfn = vtd_as->devfn;
> +    struct vtd_as_key key = {
> +        .bus = vtd_as->bus,
> +        .devfn = devfn,
> +    };
> +    IntelIOMMUState *s = vtd_as->iommu_state;
> +    uint32_t entry_num = 1; /* Only implement one request for simplicity */
> +    Error *err;
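By the way, shouldn't this be "Error *err = NULL;"? error_setg() asserts
that *errp is NULL, so handing an uninitialized pointer down to
iommufd_backend_invalidate_cache() looks like undefined behavior.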
> +
> +    vtd_hiod = g_hash_table_lookup(s->vtd_host_iommu_dev, &key);
> +    if (!vtd_hiod || !vtd_hiod->hiod) {
> +        return;
> +    }
> +    idev = HOST_IOMMU_DEVICE_IOMMUFD(vtd_hiod->hiod);
> +
> +    if (!iommufd_backend_invalidate_cache(idev->iommufd, vtd_hiod->s1_hwpt,
> +                                          IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
> +                                          sizeof(*cache), &entry_num, cache,
> +                                          &err)) {
> +        error_report_err(err);
> +    }
> +}
> +
> +/*
> + * This function is a loop function for the s->vtd_address_spaces
> + * list with VTDPIOTLBInvInfo as execution filter. It propagates
> + * the piotlb invalidation to host. Caller of this function
> + * should hold iommu_lock.
Instead of repeating this mention everywhere, it may be more efficient to
suffix each such function with _locked; I don't know whether a checker
exists to enforce that. A sketch of what I mean follows the function below.
> + */
> +static void vtd_flush_pasid_iotlb(gpointer key, gpointer value,
> +                                  gpointer user_data)
> +{
> +    VTDPIOTLBInvInfo *piotlb_info = user_data;
> +    VTDAddressSpace *vtd_as = value;
> +    VTDPASIDCacheEntry *pc_entry = &vtd_as->pasid_cache_entry;
> +    uint32_t pasid;
> +    uint16_t did;
> +
> +    /* Replay only fill pasid entry cache for passthrough device */
fills
> +    if (!pc_entry->cache_filled ||
> +        !vtd_pe_pgtt_is_flt(&pc_entry->pasid_entry)) {
> +        return;
> +    }
> +
> +    if (vtd_as_to_iommu_pasid_locked(vtd_as, &pasid)) {
> +        return;
> +    }
> +
> +    did = vtd_pe_get_did(&pc_entry->pasid_entry);
> +
> +    if (piotlb_info->domain_id == did && piotlb_info->pasid == pasid) {
> +        vtd_invalidate_piotlb(vtd_as, piotlb_info->inv_data);
> +    }
> +}
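To illustrate the _locked naming suggestion above: a minimal standalone
sketch with a best-effort runtime check, using plain pthreads instead of
QEMU's QemuMutex; all names here are invented.

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t iommu_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * The _locked suffix documents the precondition in the name itself.
 * The trylock-based assert is a best-effort debug check: trylock on a
 * non-recursive mutex that is already held returns EBUSY instead of
 * blocking, so the assert fires if the caller forgot to take the lock.
 */
static void flush_pasid_iotlb_locked(void)
{
    assert(pthread_mutex_trylock(&iommu_lock) == EBUSY);
    printf("flushing while iommu_lock is held\n");
}

int main(void)
{
    pthread_mutex_lock(&iommu_lock);
    flush_pasid_iotlb_locked();
    pthread_mutex_unlock(&iommu_lock);
    return 0;
}

(Best-effort only: EBUSY merely proves *someone* holds the lock, and with
NDEBUG the check compiles away entirely.)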
> +
> +static void vtd_flush_pasid_iotlb_all(IntelIOMMUState *s,
> +                                      uint16_t domain_id, uint32_t pasid,
> +                                      hwaddr addr, uint64_t npages, bool ih)
> +{
> +    struct iommu_hwpt_vtd_s1_invalidate cache_info = { 0 };
> +    VTDPIOTLBInvInfo piotlb_info;
> +
> +    cache_info.addr = addr;
> +    cache_info.npages = npages;
> +    cache_info.flags = ih ? IOMMU_VTD_INV_FLAGS_LEAF : 0;
> +
> +    piotlb_info.domain_id = domain_id;
> +    piotlb_info.pasid = pasid;
> +    piotlb_info.inv_data = &cache_info;
> +
> +    /*
> +     * Here loops all the vtd_as instances in s->vtd_address_spaces
I am not a native English speaker, but I am not sure "loop something" is
correct; "go through each"?
Besides, the comment is not that useful as it paraphrases the code.
> +     * to find out the affected devices since piotlb invalidation
> +     * should check pasid cache per architecture point of view.
> +     */
> +    g_hash_table_foreach(s->vtd_address_spaces,
> +                         vtd_flush_pasid_iotlb, &piotlb_info);
> +}
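Side note for readers less familiar with GLib: g_hash_table_foreach() has
no filtered variant, so the visit-everything-and-filter-in-the-callback
pattern used here is the usual idiom. A self-contained toy example (all
names invented):

#include <glib.h>
#include <stdio.h>

typedef struct {
    guint domain_id;            /* only entries in this domain match */
} Filter;

/* Called once per entry; the callback itself decides what is affected. */
static void visit(gpointer key, gpointer value, gpointer user_data)
{
    Filter *f = user_data;

    if (GPOINTER_TO_UINT(value) == f->domain_id) {
        printf("affected: %s\n", (const char *)key);
    }
}

int main(void)
{
    GHashTable *table = g_hash_table_new(g_str_hash, g_str_equal);
    Filter f = { .domain_id = 1 };

    g_hash_table_insert(table, "dev-a", GUINT_TO_POINTER(1));
    g_hash_table_insert(table, "dev-b", GUINT_TO_POINTER(2));

    g_hash_table_foreach(table, visit, &f);     /* prints only dev-a */
    g_hash_table_destroy(table);
    return 0;
}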
>  #else
>  static int vtd_bind_guest_pasid(VTDAddressSpace *vtd_as,
>                                  VTDPASIDEntry *pe, VTDPASIDOp op)
>  {
>      return 0;
>  }
> +
> +static void vtd_flush_pasid_iotlb_all(IntelIOMMUState *s,
> +                                      uint16_t domain_id, uint32_t pasid,
> +                                      hwaddr addr, uint64_t npages, bool ih)
> +{
> +}
>  #endif
>  
>  /* Do a context-cache device-selective invalidation.
> @@ -3300,6 +3393,13 @@ static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
>      info.pasid = pasid;
>  
>      vtd_iommu_lock(s);
> +    /*
> +     * Here loops all the vtd_as instances in s->vtd_as
I would drop the line above ...
> +     * to find out the affected devices since piotlb invalidation
... and start the comment directly with "Find out ...".
> +     * should check pasid cache per architecture point of view.
> +     */
> +    vtd_flush_pasid_iotlb_all(s, domain_id, pasid, 0, (uint64_t)-1, 0);
> +
>      g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_pasid,
>                                  &info);
>      vtd_iommu_unlock(s);
> @@ -3323,7 +3423,8 @@ static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
>  }
>  
>  static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
> -                                       uint32_t pasid, hwaddr addr, uint8_t am)
> +                                       uint32_t pasid, hwaddr addr, uint8_t am,
> +                                       bool ih)
>  {
>      VTDIOTLBPageInvInfo info;
>  
> @@ -3333,6 +3434,13 @@ static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
>      info.mask = ~((1 << am) - 1);
>  
>      vtd_iommu_lock(s);
> +    /*
> +     * Here loops all the vtd_as instances in s->vtd_as
> +     * to find out the affected devices since piotlb invalidation
> +     * should check pasid cache per architecture point of view.
> +     */
> +    vtd_flush_pasid_iotlb_all(s, domain_id, pasid, addr, 1 << am, ih);
> +
>      g_hash_table_foreach_remove(s->iotlb,
>                                  vtd_hash_remove_by_page_piotlb, &info);
>      vtd_iommu_unlock(s);
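Not an issue, just a note for readers: the descriptor's address-mask field
am encodes a power of two, so the call above invalidates 1 << am pages and
the hash-table removal masks the low am bits of the gfn. A quick standalone
sanity check with invented values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    unsigned am = 2;                        /* address mask from the descriptor */
    uint64_t npages = 1ULL << am;           /* 2^am = 4 pages invalidated */
    uint64_t mask = ~((1ULL << am) - 1);    /* clears the low am bits of a gfn */

    printf("am=%u -> npages=%" PRIu64 ", gfn mask=0x%" PRIx64 "\n",
           am, npages, mask);

    /* any gfn in the 4-page aligned range maps to the same base: */
    printf("0x1237 & mask = 0x%" PRIx64 "\n", 0x1237ULL & mask);  /* 0x1234 */
    return 0;
}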
> @@ -3366,7 +3474,8 @@ static bool vtd_process_piotlb_desc(IntelIOMMUState *s,
>      case VTD_INV_DESC_PIOTLB_PSI_IN_PASID:
>          am = VTD_INV_DESC_PIOTLB_AM(inv_desc->val[1]);
>          addr = (hwaddr) VTD_INV_DESC_PIOTLB_ADDR(inv_desc->val[1]);
> -        vtd_piotlb_page_invalidate(s, domain_id, pasid, addr, am);
> +        vtd_piotlb_page_invalidate(s, domain_id, pasid, addr, am,
> +                                   VTD_INV_DESC_PIOTLB_IH(inv_desc->val[1]));
>          break;
>  
>      default:
Eric

