Add a reference-counted pointer to the struct drm_pagemap preferred for
migration on an SVM page-fault to struct xe_vma_mem_attr. As a
consequence, a struct xe_vma_mem_attr can no longer simply be assigned
or freed without taking the reference counts of its individual members
into account. Add helpers to do that.
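
A minimal usage sketch of the new helpers (illustrative only; "old_vma"
and "new_vma" are placeholder names, and xe_vma_mem_attr_fini() is local
to xe_vm.c), loosely mirroring the xe_vm_alloc_vma() changes below:

	struct xe_vma_mem_attr tmp_attr = {}; /* zero-init so the first copy is safe */

	/*
	 * Stash the attributes of the VMA about to be destroyed; takes a
	 * drm_pagemap reference if one is set.
	 */
	xe_vma_mem_attr_copy(&tmp_attr, &old_vma->attr);

	/* ... destroy old_vma, create new_vma ... */

	/*
	 * Hand the attributes over; drops any reference previously held
	 * by new_vma->attr and takes a new one for the copy.
	 */
	xe_vma_mem_attr_copy(&new_vma->attr, &tmp_attr);

	/* Drop the temporary reference. */
	xe_vma_mem_attr_fini(&tmp_attr);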

v2:
- Move some calls to xe_vma_mem_attr_fini() to xe_vma_free(). (Matt Brost)

Signed-off-by: Thomas Hellström <[email protected]>
---
 drivers/gpu/drm/xe/xe_svm.c        |  2 +-
 drivers/gpu/drm/xe/xe_vm.c         | 34 +++++++++++++++++++++++++-----
 drivers/gpu/drm/xe/xe_vm.h         |  1 +
 drivers/gpu/drm/xe/xe_vm_madvise.c |  1 +
 drivers/gpu/drm/xe/xe_vm_types.h   |  9 ++++++++
 5 files changed, 41 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 7db9eafec66b..4a3853a5cd64 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -329,7 +329,7 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
        if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
                default_attr.pat_index = vma->attr.default_pat_index;
                default_attr.default_pat_index  = vma->attr.default_pat_index;
-               vma->attr = default_attr;
+               xe_vma_mem_attr_copy(&vma->attr, &default_attr);
        } else {
                vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
                       range_start, range_end);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 2321e7c8ae76..27669f80b7ff 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -957,14 +957,37 @@ struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
        return fence;
 }
 
+static void xe_vma_mem_attr_fini(struct xe_vma_mem_attr *attr)
+{
+       drm_pagemap_put(attr->preferred_loc.dpagemap);
+}
+
 static void xe_vma_free(struct xe_vma *vma)
 {
+       xe_vma_mem_attr_fini(&vma->attr);
+
        if (xe_vma_is_userptr(vma))
                kfree(to_userptr_vma(vma));
        else
                kfree(vma);
 }
 
+/**
+ * xe_vma_mem_attr_copy() - copy an xe_vma_mem_attr structure.
+ * @to: Destination.
+ * @from: Source.
+ *
+ * Copies an xe_vma_mem_attr structure taking care to get reference
+ * counting of individual members right.
+ */
+void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from)
+{
+       xe_vma_mem_attr_fini(to);
+       *to = *from;
+       if (to->preferred_loc.dpagemap)
+               drm_pagemap_get(to->preferred_loc.dpagemap);
+}
+
 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
                                    struct xe_bo *bo,
                                    u64 bo_offset_or_userptr,
@@ -1015,8 +1038,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        if (vm->xe->info.has_atomic_enable_pte_bit)
                vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
 
-       vma->attr = *attr;
-
+       xe_vma_mem_attr_copy(&vma->attr, attr);
        if (bo) {
                struct drm_gpuvm_bo *vm_bo;
 
@@ -4240,7 +4262,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
        struct drm_gpuva_op *__op;
        unsigned int vma_flags = 0;
        bool remap_op = false;
-       struct xe_vma_mem_attr tmp_attr;
+       struct xe_vma_mem_attr tmp_attr = {};
        u16 default_pat;
        int err;
 
@@ -4333,7 +4355,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
                         * VMA, so they can be assigned to newly MAP created vma.
                         */
                        if (is_madvise)
-                               tmp_attr = vma->attr;
+                               xe_vma_mem_attr_copy(&tmp_attr, &vma->attr);
 
                        xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
                } else if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4343,12 +4365,13 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
                         * copy them to new vma.
                         */
                        if (is_madvise)
-                               vma->attr = tmp_attr;
+                               xe_vma_mem_attr_copy(&vma->attr, &tmp_attr);
                }
        }
 
        xe_vm_unlock(vm);
        drm_gpuva_ops_free(&vm->gpuvm, ops);
+       xe_vma_mem_attr_fini(&tmp_attr);
        return 0;
 
 unwind_ops:
@@ -4406,3 +4429,4 @@ int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t r
 
        return xe_vm_alloc_vma(vm, &map_req, false);
 }
+
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index ef8a5019574e..d328d31afe8e 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -411,4 +411,5 @@ static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
 #define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)      \
        ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
 
+void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from);
 #endif
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
index cad3cf627c3f..9553008409d1 100644
--- a/drivers/gpu/drm/xe/xe_vm_madvise.c
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -95,6 +95,7 @@ static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
                         */
                        vmas[i]->attr.preferred_loc.migration_policy =
                                                op->preferred_mem_loc.migration_policy;
+                       vmas[i]->attr.preferred_loc.dpagemap = NULL;
                }
        }
 }
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 0d09a322199d..ca489aa7c652 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -20,6 +20,8 @@
 #include "xe_range_fence.h"
 #include "xe_userptr.h"
 
+struct drm_pagemap;
+
 struct xe_bo;
 struct xe_svm_range;
 struct xe_sync_entry;
@@ -65,6 +67,13 @@ struct xe_vma_mem_attr {
                 * closest device memory respectively.
                 */
                u32 devmem_fd;
+               /**
+                * @preferred_loc.dpagemap: Reference-counted pointer to the drm_pagemap preferred
+                * for migration on an SVM page-fault. The pointer is protected by the
+                * vm lock, and is %NULL if @devmem_fd should be consulted for special
+                * values.
+                */
+               struct drm_pagemap *dpagemap;
        } preferred_loc;
 
        /**
-- 
2.51.1
