On Sat, Oct 25, 2025 at 02:04:06PM +0200, Thomas Hellström wrote:
> As a consequence, struct xe_vma_mem_attr can't simply be assigned
> or freed without taking the reference counts of individual members
> into account. Also add helpers to do that.
> 
> Signed-off-by: Thomas Hellström <[email protected]>

Reviewed-by: Matthew Brost <[email protected]>
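
A quick illustration of the resulting usage pattern, for readers skimming
the diff (names as in the patch; the zero-initializer matters because
xe_vma_mem_attr_copy() finalizes its destination first):

	struct xe_vma_mem_attr tmp = {};	/* dpagemap starts out NULL */

	xe_vma_mem_attr_copy(&tmp, &vma->attr);	/* takes its own dpagemap ref */
	/* ... use tmp ... */
	xe_vma_mem_attr_fini(&tmp);		/* drops the ref again */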

> ---
>  drivers/gpu/drm/xe/xe_svm.c        |  2 +-
>  drivers/gpu/drm/xe/xe_vm.c         | 36 +++++++++++++++++++++++++-----
>  drivers/gpu/drm/xe/xe_vm.h         |  1 +
>  drivers/gpu/drm/xe/xe_vm_madvise.c |  1 +
>  drivers/gpu/drm/xe/xe_vm_types.h   |  9 ++++++++
>  5 files changed, 43 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index d27e366f8e14..d27cedeaf70c 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -330,7 +330,7 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
>       if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
>               default_attr.pat_index = vma->attr.default_pat_index;
>               default_attr.default_pat_index  = vma->attr.default_pat_index;
> -             vma->attr = default_attr;
> +             xe_vma_mem_attr_copy(&vma->attr, &default_attr);
>       } else {
>               vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
>                      range_start, range_end);
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 1dffcd9ab61b..3c3dc1b1ace9 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -964,6 +964,27 @@ static void xe_vma_free(struct xe_vma *vma)
>               kfree(vma);
>  }
>  
> +static void xe_vma_mem_attr_fini(struct xe_vma_mem_attr *attr)
> +{
> +     drm_pagemap_put(attr->preferred_loc.dpagemap);
> +}
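
The unconditional put here implies drm_pagemap_put() tolerates a NULL
pagemap (tmp_attr further down is finalized even when it was never
populated). A sketch of the assumed behavior; 'ref' and
drm_pagemap_release() are stand-ins for whatever drm_pagemap.c actually
uses:

	void drm_pagemap_put(struct drm_pagemap *dpagemap)
	{
		if (dpagemap)	/* NULL-safe by assumption */
			kref_put(&dpagemap->ref, drm_pagemap_release);
	}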
> +
> +/**
> + * xe_vma_mem_attr_copy() - copy an xe_vma_mem_attr structure.
> + * @to: Destination.
> + * @from: Source.
> + *
> + * Copies an xe_vma_mem_attr structure, taking care to get the reference
> + * counting of individual members right.
> + */
> +void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from)
> +{
> +     xe_vma_mem_attr_fini(to);
> +     *to = *from;
> +     if (to->preferred_loc.dpagemap)
> +             drm_pagemap_get(to->preferred_loc.dpagemap);
> +}
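
Note that because @to is finalized first, the helper both works on
zero-initialized destinations and correctly replaces an attr that already
holds a dpagemap reference:

	/* hypothetical call site: old ref dropped, new ref taken */
	xe_vma_mem_attr_copy(&vma->attr, &new_attr);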
> +
>  static struct xe_vma *xe_vma_create(struct xe_vm *vm,
>                                   struct xe_bo *bo,
>                                   u64 bo_offset_or_userptr,
> @@ -1014,8 +1035,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
>       if (vm->xe->info.has_atomic_enable_pte_bit)
>               vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
>  
> -     vma->attr = *attr;
> -
> +     xe_vma_mem_attr_copy(&vma->attr, attr);
>       if (bo) {
>               struct drm_gpuvm_bo *vm_bo;
>  
> @@ -1023,6 +1043,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
>  
>               vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
>               if (IS_ERR(vm_bo)) {
> +                     xe_vma_mem_attr_fini(&vma->attr);
>                       xe_vma_free(vma);
>                       return ERR_CAST(vm_bo);
>               }
> @@ -1042,6 +1063,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
>  
>                       err = xe_userptr_setup(uvma, xe_vma_userptr(vma), size);
>                       if (err) {
> +                             xe_vma_mem_attr_fini(&vma->attr);
>                               xe_vma_free(vma);
>                               return ERR_PTR(err);
>                       }
> @@ -1057,6 +1079,8 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
>  {
>       struct xe_vm *vm = xe_vma_vm(vma);
>  
> +     xe_vma_mem_attr_fini(&vma->attr);
> +
>       if (vma->ufence) {
>               xe_sync_ufence_put(vma->ufence);
>               vma->ufence = NULL;
> @@ -4221,7 +4245,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
>       struct drm_gpuva_op *__op;
>       unsigned int vma_flags = 0;
>       bool remap_op = false;
> -     struct xe_vma_mem_attr tmp_attr;
> +     struct xe_vma_mem_attr tmp_attr = {};
>       u16 default_pat;
>       int err;
>  
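
The empty initializer above is load-bearing: xe_vma_mem_attr_copy()
finalizes its destination, so an uninitialized tmp_attr would hand a
garbage pointer to drm_pagemap_put(). Roughly:

	struct xe_vma_mem_attr tmp_attr;		/* WRONG: dpagemap is garbage */

	xe_vma_mem_attr_copy(&tmp_attr, &vma->attr);	/* fini puts the garbage pointer */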
> @@ -4314,7 +4338,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
>                        * VMA, so they can be assigned to the newly created MAP vma.
>                        */
>                       if (is_madvise)
> -                             tmp_attr = vma->attr;
> +                             xe_vma_mem_attr_copy(&tmp_attr, &vma->attr);
>  
>                       xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
>               } else if (__op->op == DRM_GPUVA_OP_MAP) {
> @@ -4324,12 +4348,13 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
>                        * copy them to new vma.
>                        */
>                       if (is_madvise)
> -                             vma->attr = tmp_attr;
> +                             xe_vma_mem_attr_copy(&vma->attr, &tmp_attr);
>               }
>       }
>  
>       xe_vm_unlock(vm);
>       drm_gpuva_ops_free(&vm->gpuvm, ops);
> +     xe_vma_mem_attr_fini(&tmp_attr);
>       return 0;
>  
>  unwind_ops:
> @@ -4387,3 +4412,4 @@ int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t r
>  
>       return xe_vm_alloc_vma(vm, &map_req, false);
>  }
> +
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index ef8a5019574e..d328d31afe8e 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -411,4 +411,5 @@ static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
>  #define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)	\
>  	((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
>  
> +void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from);
>  #endif
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> index cad3cf627c3f..9553008409d1 100644
> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -95,6 +95,7 @@ static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
>                        */
>                       vmas[i]->attr.preferred_loc.migration_policy =
>                                               op->preferred_mem_loc.migration_policy;
> +                     vmas[i]->attr.preferred_loc.dpagemap = NULL;
>               }
>       }
>  }
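
Clearing dpagemap makes the freshly written devmem_fd authoritative again.
This assumes no dpagemap reference can still be held on this path;
otherwise the pointer would need to be put before being overwritten,
along the lines of:

	drm_pagemap_put(vmas[i]->attr.preferred_loc.dpagemap);
	vmas[i]->attr.preferred_loc.dpagemap = NULL;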
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index 4f9a6cdb5d02..70856d536047 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -20,6 +20,8 @@
>  #include "xe_range_fence.h"
>  #include "xe_userptr.h"
>  
> +struct drm_pagemap;
> +
>  struct xe_bo;
>  struct xe_svm_range;
>  struct xe_sync_entry;
> @@ -65,6 +67,13 @@ struct xe_vma_mem_attr {
>                * closest device memory respectively.
>                */
>               u32 devmem_fd;
> +             /**
> +              * @preferred_loc.dpagemap: Reference-counted pointer to the drm_pagemap preferred
> +              * for migration on an SVM page-fault. The pointer is protected by the
> +              * vm lock, and is %NULL if @devmem_fd should be consulted for special
> +              * values.
> +              */
> +             struct drm_pagemap *dpagemap;
>       } preferred_loc;
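
Given these semantics, a consumer would presumably resolve the migration
target roughly as follows (pagemap_from_devmem_fd() is a hypothetical
helper, not an existing xe function):

	struct drm_pagemap *dpagemap = attr->preferred_loc.dpagemap;

	if (!dpagemap)	/* fall back to the devmem_fd special values */
		dpagemap = pagemap_from_devmem_fd(attr->preferred_loc.devmem_fd);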
>  
>       /**
> -- 
> 2.51.0
> 
