On Mon, 26 Jun 2023 14:02:45 +0200
Boris Brezillon <[email protected]> wrote:

> Move the drm_gem_shmem_{get,put}_pages() code to
> drm_gem_shmem_{pin,unpin}_locked().

After having a closer look at 'Add generic memory shrinker to VirtIO-GPU
and Panfrost DRM drivers', I realize that's not what we want. We must
differentiate hard-pinning (as in, the pages can't be evicted until all
users have given up their ref) from soft-pinning (uses that can survive
a swapout, basically the userspace mappings created through
drm_gem_shmem_mmap()).
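
To make that concrete, here's roughly the split I have in mind. This is
just a sketch, not what the shrinker series has to end up with: the
pages_pin_count field and the refcounting scheme below are made up for
illustration, and assume a new refcount_t pages_pin_count in
drm_gem_shmem_object next to the existing pages_use_count.

/* Hypothetical split: pages_use_count stays the soft count (mmap/vmap
 * users whose pages can be swapped out and faulted back in), while
 * pages_pin_count is a new hard count that keeps the pages resident
 * until the last pin is dropped.
 */
static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
        int ret;

        dma_resv_assert_held(shmem->base.resv);

        /* Fast path: someone already holds a hard pin. */
        if (refcount_inc_not_zero(&shmem->pages_pin_count))
                return 0;

        /* First hard pin: take a pages_use_count ref through
         * drm_gem_shmem_get_pages() and remember we're pinned.
         */
        ret = drm_gem_shmem_get_pages(shmem);
        if (!ret)
                refcount_set(&shmem->pages_pin_count, 1);

        return ret;
}

static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
        dma_resv_assert_held(shmem->base.resv);

        /* Last hard pin gone: the pages become evictable again, but
         * soft users (mmap/vmap) keep their pages_use_count ref.
         */
        if (refcount_dec_and_test(&shmem->pages_pin_count))
                drm_gem_shmem_put_pages(shmem);
}

With something like that, drm_gem_shmem_mmap()/vm_close() and the vmap
path could keep calling drm_gem_shmem_{get,put}_pages() directly (soft
pin, swappable), while the pin/unpin helpers used at BO creation/job
submission time would go through the hard count above.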

> 
> Signed-off-by: Boris Brezillon <[email protected]>
> Cc: Daniel Vetter <[email protected]>
> Cc: Thomas Zimmermann <[email protected]>
> Cc: Emil Velikov <[email protected]>
> Cc: Dmitry Osipenko <[email protected]>
> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 108 ++++++++++---------------
>  1 file changed, 41 insertions(+), 67 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index d6fc034164c0..f406556e42e0 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -128,46 +128,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
>  
> -static void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
> -
> -/**
> - * drm_gem_shmem_free - Free resources associated with a shmem GEM object
> - * @shmem: shmem GEM object to free
> - *
> - * This function cleans up the GEM object state and frees the memory used to
> - * store the object itself.
> - */
> -void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
> -{
> -     struct drm_gem_object *obj = &shmem->base;
> -
> -     if (obj->import_attach) {
> -             drm_prime_gem_destroy(obj, shmem->sgt);
> -     } else {
> -             dma_resv_lock(shmem->base.resv, NULL);
> -
> -             drm_WARN_ON(obj->dev, shmem->vmap_use_count);
> -
> -             if (shmem->sgt) {
> -                     dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
> -                                       DMA_BIDIRECTIONAL, 0);
> -                     sg_free_table(shmem->sgt);
> -                     kfree(shmem->sgt);
> -             }
> -             if (shmem->pages)
> -                     drm_gem_shmem_put_pages(shmem);
> -
> -             drm_WARN_ON(obj->dev, shmem->pages_use_count);
> -
> -             dma_resv_unlock(shmem->base.resv);
> -     }
> -
> -     drm_gem_object_release(obj);
> -     kfree(shmem);
> -}
> -EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
> -
> -static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
> +static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
>  {
>       struct drm_gem_object *obj = &shmem->base;
>       struct page **pages;
> @@ -200,13 +161,7 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
>       return 0;
>  }
>  
> -/*
> - * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
> - * @shmem: shmem GEM object
> - *
> - * This function decreases the use count and puts the backing pages when use drops to zero.
> - */
> -static void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
> +static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
>  {
>       struct drm_gem_object *obj = &shmem->base;
>  
> @@ -229,23 +184,42 @@ static void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
>       shmem->pages = NULL;
>  }
>  
> -static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
> +/**
> + * drm_gem_shmem_free - Free resources associated with a shmem GEM object
> + * @shmem: shmem GEM object to free
> + *
> + * This function cleans up the GEM object state and frees the memory used to
> + * store the object itself.
> + */
> +void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
>  {
> -     int ret;
> +     struct drm_gem_object *obj = &shmem->base;
>  
> -     dma_resv_assert_held(shmem->base.resv);
> +     if (obj->import_attach) {
> +             drm_prime_gem_destroy(obj, shmem->sgt);
> +     } else {
> +             dma_resv_lock(shmem->base.resv, NULL);
>  
> -     ret = drm_gem_shmem_get_pages(shmem);
> +             drm_WARN_ON(obj->dev, shmem->vmap_use_count);
>  
> -     return ret;
> -}
> -
> -static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
> -{
> -     dma_resv_assert_held(shmem->base.resv);
> -
> -     drm_gem_shmem_put_pages(shmem);
> +             if (shmem->sgt) {
> +                     dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
> +                                       DMA_BIDIRECTIONAL, 0);
> +                     sg_free_table(shmem->sgt);
> +                     kfree(shmem->sgt);
> +             }
> +             if (shmem->pages)
> +                     drm_gem_shmem_unpin_locked(shmem);
> +
> +             drm_WARN_ON(obj->dev, shmem->pages_use_count);
> +
> +             dma_resv_unlock(shmem->base.resv);
> +     }
> +
> +     drm_gem_object_release(obj);
> +     kfree(shmem);
>  }
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
>  
>  /**
>   * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
> @@ -332,7 +306,7 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
>                       return 0;
>               }
>  
> -             ret = drm_gem_shmem_get_pages(shmem);
> +             ret = drm_gem_shmem_pin_locked(shmem);
>               if (ret)
>                       goto err_zero_use;
>  
> @@ -355,7 +329,7 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
>  
>  err_put_pages:
>       if (!obj->import_attach)
> -             drm_gem_shmem_put_pages(shmem);
> +             drm_gem_shmem_unpin_locked(shmem);
>  err_zero_use:
>       shmem->vmap_use_count = 0;
>  
> @@ -392,7 +366,7 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
>                       return;
>  
>               vunmap(shmem->vaddr);
> -             drm_gem_shmem_put_pages(shmem);
> +             drm_gem_shmem_unpin_locked(shmem);
>       }
>  
>       shmem->vaddr = NULL;
> @@ -452,7 +426,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
>       kfree(shmem->sgt);
>       shmem->sgt = NULL;
>  
> -     drm_gem_shmem_put_pages(shmem);
> +     drm_gem_shmem_unpin_locked(shmem);
>  
>       shmem->madv = -1;
>  
> @@ -565,7 +539,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>  
>       dma_resv_lock(shmem->base.resv, NULL);
> -     drm_gem_shmem_put_pages(shmem);
> +     drm_gem_shmem_unpin_locked(shmem);
>       dma_resv_unlock(shmem->base.resv);
>  
>       drm_gem_vm_close(vma);
> @@ -606,7 +580,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
>       }
>  
>       dma_resv_lock(shmem->base.resv, NULL);
> -     ret = drm_gem_shmem_get_pages(shmem);
> +     ret = drm_gem_shmem_pin_locked(shmem);
>       dma_resv_unlock(shmem->base.resv);
>  
>       if (ret)
> @@ -674,7 +648,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
>  
>       drm_WARN_ON(obj->dev, obj->import_attach);
>  
> -     ret = drm_gem_shmem_get_pages(shmem);
> +     ret = drm_gem_shmem_pin_locked(shmem);
>       if (ret)
>               return ERR_PTR(ret);
>  
> @@ -696,7 +670,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
>       sg_free_table(sgt);
>       kfree(sgt);
>  err_put_pages:
> -     drm_gem_shmem_put_pages(shmem);
> +     drm_gem_shmem_unpin_locked(shmem);
>       return ERR_PTR(ret);
>  }
>  