On 10/11/2025 9:38 PM, Michał Winiarski wrote:
> In an upcoming change, the VF GGTT migration data will be handled as
> part of VF control state machine. Add the necessary helpers to allow the
> migration data transfer to/from the HW GGTT resource.
> 
> Signed-off-by: Michał Winiarski <[email protected]>
> ---
>  drivers/gpu/drm/xe/xe_ggtt.c               | 92 ++++++++++++++++++++++
>  drivers/gpu/drm/xe/xe_ggtt.h               |  2 +
>  drivers/gpu/drm/xe/xe_ggtt_types.h         |  2 +
>  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 64 +++++++++++++++
>  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h |  5 ++
>  5 files changed, 165 insertions(+)
> 
> diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
> index aca7ae5489b91..89c0ad56c6a8a 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt.c
> +++ b/drivers/gpu/drm/xe/xe_ggtt.c
> @@ -138,6 +138,14 @@ static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
>       ggtt_update_access_counter(ggtt);
>  }
>  
> +static u64 xe_ggtt_get_pte(struct xe_ggtt *ggtt, u64 addr)
> +{
> +     xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
> +     xe_tile_assert(ggtt->tile, addr < ggtt->size);
> +
> +     return readq(&ggtt->gsm[addr >> XE_PTE_SHIFT]);
> +}
> +
>  static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
>  {
>       u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
> @@ -220,16 +228,19 @@ void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
>  static const struct xe_ggtt_pt_ops xelp_pt_ops = {
>       .pte_encode_flags = xelp_ggtt_pte_flags,
>       .ggtt_set_pte = xe_ggtt_set_pte,
> +     .ggtt_get_pte = xe_ggtt_get_pte,
>  };
>  
>  static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
>       .pte_encode_flags = xelpg_ggtt_pte_flags,
>       .ggtt_set_pte = xe_ggtt_set_pte,
> +     .ggtt_get_pte = xe_ggtt_get_pte,
>  };
>  
>  static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
>       .pte_encode_flags = xelpg_ggtt_pte_flags,
>       .ggtt_set_pte = xe_ggtt_set_pte_and_flush,
> +     .ggtt_get_pte = xe_ggtt_get_pte,
>  };
>  
>  static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
> @@ -914,6 +925,87 @@ void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
>       xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
>       mutex_unlock(&node->ggtt->lock);
>  }
> +
> +/**
> + * xe_ggtt_node_save - Save a &struct xe_ggtt_node to a buffer
> + * @node: the &struct xe_ggtt_node to be saved
> + * @dst: destination buffer

correct me if I'm wrong: this is the buffer for the saved PTEs, so maybe say that explicitly

> + * @size: destination buffer size in bytes

and this is the size of that PTE buffer
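
maybe something like:

        * @dst: destination buffer for the saved PTEs
        * @size: size of the @dst buffer in bytes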

> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size)
> +{
> +     struct xe_ggtt *ggtt;
> +     u64 start, end;
> +     u64 *buf = dst;
> +
> +     if (!node || !node->ggtt)
> +             return -ENOENT;

hmm, a non-NULL node must have been initialized by xe_ggtt_node_init(), which sets 
.ggtt, so this second check is redundant

> +
> +     mutex_lock(&node->ggtt->lock);

        guard(mutex)(&node->ggtt->lock);
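
then none of the explicit mutex_unlock() calls are needed; the whole thing could
look roughly like this (untested sketch, also dropping the redundant ->ggtt check
mentioned above):

        int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size)
        {
                struct xe_ggtt *ggtt;
                u64 *buf = dst;
                u64 start, end;

                if (!node)
                        return -ENOENT;

                guard(mutex)(&node->ggtt->lock);

                ggtt = node->ggtt;
                start = node->base.start;
                end = start + node->base.size - 1;

                if (node->base.size < size)     /* but see the size comment below */
                        return -EINVAL;

                while (start < end) {
                        *buf++ = ggtt->pt_ops->ggtt_get_pte(ggtt, start) & ~GGTT_PTE_VFID;
                        start += XE_PAGE_SIZE;
                }

                return 0;
        }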

> +
> +     ggtt = node->ggtt;
> +     start = node->base.start;
> +     end = start + node->base.size - 1;
> +
> +     if (node->base.size < size) {

this looks wrong - we are about to save the 64-bit PTEs of that node, so we should 
compare against the total size of those PTEs, not against the size of the GGTT 
address space allocated by this node
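
e.g. something like (just a sketch - or require an exact size match, like the
load side below does):

        if (size < node->base.size / XE_PAGE_SIZE * sizeof(u64))
                return -EINVAL;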

> +             mutex_unlock(&node->ggtt->lock);
> +             return -EINVAL;
> +     }
> +
> +     while (start < end) {
> +             *buf++ = ggtt->pt_ops->ggtt_get_pte(ggtt, start) & ~GGTT_PTE_VFID;
> +             start += XE_PAGE_SIZE;
> +     }
> +
> +     mutex_unlock(&node->ggtt->lock);
> +
> +     return 0;
> +}
> +
> +/**
> + * xe_ggtt_node_load - Load a &struct xe_ggtt_node from a buffer
> + * @node: the &struct xe_ggtt_node to be loaded
> + * @src: source buffer
> + * @size: source buffer size in bytes
> + * @vfid: VF identifier
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid)
> +{
> +     struct xe_ggtt *ggtt;
> +     u64 start, end;
> +     const u64 *buf = src;
> +     u64 vfid_pte = xe_encode_vfid_pte(vfid);

try to define local vars in reverse xmas tree order (longest declaration first)
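
e.g.:

        u64 vfid_pte = xe_encode_vfid_pte(vfid);
        const u64 *buf = src;
        struct xe_ggtt *ggtt;
        u64 start, end;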

> +
> +     if (!node || !node->ggtt)
> +             return -ENOENT;
> +
> +     mutex_lock(&node->ggtt->lock);

use guard(mutex)

> +
> +     ggtt = node->ggtt;
> +     start = node->base.start;
> +     end = start + size - 1;
> +
> +     if (node->base.size != size) {
> +             mutex_unlock(&node->ggtt->lock);
> +             return -EINVAL;
> +     }
> +
> +     while (start < end) {
> +             ggtt->pt_ops->ggtt_set_pte(ggtt, start, (*buf & ~GGTT_PTE_VFID) | vfid_pte);
> +             start += XE_PAGE_SIZE;
> +             buf++;
> +     }
> +     xe_ggtt_invalidate(ggtt);
> +
> +     mutex_unlock(&node->ggtt->lock);
> +
> +     return 0;
> +}
> +
>  #endif
>  
>  /**
> diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
> index 75fc7a1efea76..469b3a6ca14b4 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt.h
> +++ b/drivers/gpu/drm/xe/xe_ggtt.h
> @@ -43,6 +43,8 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer
>  
>  #ifdef CONFIG_PCI_IOV
>  void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
> +int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size);
> +int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid);
>  #endif
>  
>  #ifndef CONFIG_LOCKDEP
> diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
> index c5e999d58ff2a..dacd796f81844 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt_types.h
> +++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
> @@ -78,6 +78,8 @@ struct xe_ggtt_pt_ops {
>       u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index);
>       /** @ggtt_set_pte: Directly write into GGTT's PTE */
>       void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
> +     /** @ggtt_get_pte: Directly read from GGTT's PTE */
> +     u64 (*ggtt_get_pte)(struct xe_ggtt *ggtt, u64 addr);
>  };
>  
>  #endif
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> index b2e5c52978e6a..51027921b2988 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> @@ -726,6 +726,70 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
>       return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
>  }
>  
> +/**
> + * xe_gt_sriov_pf_config_ggtt_save - Save a VF provisioned GGTT data into a buffer.
> + * @gt: the &struct xe_gt
> + * @vfid: VF identifier
> + * @buf: the GGTT data destination buffer
> + * @size: the size of the buffer
> + *
> + * This function can only be called on PF.
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
> +                                 void *buf, size_t size)
> +{
> +     struct xe_gt_sriov_config *config;
> +     ssize_t ret;

int - both this function and xe_ggtt_node_save() return a plain int

> +
> +     xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> +     xe_gt_assert(gt, vfid);
> +     xe_gt_assert(gt, !(!buf ^ !size));

there seems to be no "query" option for this call, so both buf and size must always 
be valid - better to assert each of them separately
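
e.g.:

        xe_gt_assert(gt, buf);
        xe_gt_assert(gt, size);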

> +
> +     mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
> +     config = pf_pick_vf_config(gt, vfid);
> +     size = size / sizeof(u64) * XE_PAGE_SIZE;

?? something looks wrong here - why do we have to rescale the size of the buf? 
presumably this converts the buffer size into the size of the GGTT address space 
that xe_ggtt_node_save() checks against, but then it would be cleaner to keep all 
of that math in one place
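
if the buffer size vs PTE count math is kept inside xe_ggtt_node_save() (as
suggested earlier), this could simply be (sketch):

        mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
        config = pf_pick_vf_config(gt, vfid);
        ret = xe_ggtt_node_save(config->ggtt_region, buf, size);
        mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));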

> +
> +     ret = xe_ggtt_node_save(config->ggtt_region, buf, size);
> +
> +     mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
> +
> +     return ret;
> +}
> +
> +/**
> + * xe_gt_sriov_pf_config_ggtt_restore - Restore a VF provisioned GGTT data from a buffer.
> + * @gt: the &struct xe_gt
> + * @vfid: VF identifier
> + * @buf: the GGTT data source buffer
> + * @size: the size of the buffer
> + *
> + * This function can only be called on PF.
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
> +                                    const void *buf, size_t size)
> +{
> +     struct xe_gt_sriov_config *config;
> +     ssize_t ret;
> +
> +     xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> +     xe_gt_assert(gt, vfid);
> +     xe_gt_assert(gt, !(!buf ^ !size));
> +
> +     mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
> +     config = pf_pick_vf_config(gt, vfid);
> +     size = size / sizeof(u64) * XE_PAGE_SIZE;
> +
> +     ret = xe_ggtt_node_load(config->ggtt_region, buf, size, vfid);
> +
> +     mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
> +
> +     return ret;
> +}

ditto

> +
>  static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
>  {
>       /* XXX: preliminary */
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
> index 513e6512a575b..6916b8f58ebf2 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
> @@ -61,6 +61,11 @@ ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *bu
>  int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
>                                 const void *buf, size_t size);
>  
> +int xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
> +                                 void *buf, size_t size);
> +int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
> +                                    const void *buf, size_t size);
> +
>  bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid);
>  
>  int xe_gt_sriov_pf_config_init(struct xe_gt *gt);
