To keep track of drm_pagemap usage, add a refcounted backpointer to
struct drm_pagemap_zdd. This keeps the drm_pagemap reference count from
dropping to zero as long as drm_pagemap pages are present in a CPU
address space.
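
For illustration, the intended get/put pairing looks roughly like the
sketch below (a hypothetical caller, not part of this patch; it assumes
drm_pagemap_get()/drm_pagemap_put() are plain reference-count helpers
and that drm_pagemap_zdd_put() drops the zdd's kref):

        struct drm_pagemap_zdd *zdd;

        /* Allocation now takes a drm_pagemap reference on behalf of the zdd. */
        zdd = drm_pagemap_zdd_alloc(devmem_allocation->dpagemap, pgmap_owner);
        if (!zdd)
                return -ENOMEM;

        /* ... zdd is installed on the migrated device-private pages ... */

        /*
         * The final put ends up in drm_pagemap_zdd_destroy(), which now also
         * calls drm_pagemap_put(), so the pagemap cannot be freed while any
         * of its device-private pages remain mapped in a CPU address space.
         */
        drm_pagemap_zdd_put(zdd);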

Signed-off-by: Thomas Hellström <[email protected]>
Reviewed-by: Matthew Brost <[email protected]>
---
 drivers/gpu/drm/drm_pagemap.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 4b8692f0b2a2..173b3ecb07d5 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -62,6 +62,7 @@
  *
  * @refcount: Reference count for the zdd
  * @devmem_allocation: device memory allocation
+ * @dpagemap: Refcounted pointer to the underlying struct drm_pagemap.
  * @device_private_page_owner: Device private pages owner
  *
  * This structure serves as a generic wrapper installed in
@@ -74,11 +75,13 @@
 struct drm_pagemap_zdd {
        struct kref refcount;
        struct drm_pagemap_devmem *devmem_allocation;
+       struct drm_pagemap *dpagemap;
        void *device_private_page_owner;
 };
 
 /**
  * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
+ * @dpagemap: Pointer to the underlying struct drm_pagemap.
  * @device_private_page_owner: Device private pages owner
  *
  * This function allocates and initializes a new zdd structure. It sets up the
@@ -87,7 +90,7 @@ struct drm_pagemap_zdd {
  * Return: Pointer to the allocated zdd on success, ERR_PTR() on failure.
  */
 static struct drm_pagemap_zdd *
-drm_pagemap_zdd_alloc(void *device_private_page_owner)
+drm_pagemap_zdd_alloc(struct drm_pagemap *dpagemap, void *device_private_page_owner)
 {
        struct drm_pagemap_zdd *zdd;
 
@@ -98,6 +101,7 @@ drm_pagemap_zdd_alloc(void *device_private_page_owner)
        kref_init(&zdd->refcount);
        zdd->devmem_allocation = NULL;
        zdd->device_private_page_owner = device_private_page_owner;
+       zdd->dpagemap = drm_pagemap_get(dpagemap);
 
        return zdd;
 }
@@ -127,6 +131,7 @@ static void drm_pagemap_zdd_destroy(struct kref *ref)
        struct drm_pagemap_zdd *zdd =
                container_of(ref, struct drm_pagemap_zdd, refcount);
        struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;
+       struct drm_pagemap *dpagemap = zdd->dpagemap;
 
        if (devmem) {
                complete_all(&devmem->detached);
@@ -134,6 +139,7 @@ static void drm_pagemap_zdd_destroy(struct kref *ref)
                        devmem->ops->devmem_release(devmem);
        }
        kfree(zdd);
+       drm_pagemap_put(dpagemap);
 }
 
 /**
@@ -366,7 +372,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
        pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
        pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
 
-       zdd = drm_pagemap_zdd_alloc(pgmap_owner);
+       zdd = drm_pagemap_zdd_alloc(devmem_allocation->dpagemap, pgmap_owner);
        if (!zdd) {
                err = -ENOMEM;
                goto err_free;
-- 
2.51.1
