On 15/10/2025 14:00, Boris Brezillon wrote:
> Hook up drm_gem_dmabuf_{begin,end}_cpu_access() to drm_gem_sync() so
> that drivers relying on the default prime_dmabuf_ops still have a way
> to prepare for CPU accesses from outside the UMD.
> 
> v2:
> - New commit
> 
> v3:
> - Don't return an error on NOP syncs, and document that case in a
>   comment
> 
> Signed-off-by: Boris Brezillon <[email protected]>

Reviewed-by: Steven Price <[email protected]>

> ---
>  drivers/gpu/drm/drm_prime.c | 42 +++++++++++++++++++++++++++++++++++++
>  include/drm/drm_prime.h     |  5 +++++
>  2 files changed, 47 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
> index 43a10b4af43a..30d495c70afb 100644
> --- a/drivers/gpu/drm/drm_prime.c
> +++ b/drivers/gpu/drm/drm_prime.c
> @@ -823,6 +823,46 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
>  }
>  EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
>  
> +int drm_gem_dmabuf_begin_cpu_access(struct dma_buf *dma_buf,
> +                                 enum dma_data_direction direction)
> +{
> +     struct drm_gem_object *obj = dma_buf->priv;
> +     enum drm_gem_object_access_flags access = DRM_GEM_OBJECT_CPU_ACCESS;
> +
> +     /* begin_cpu_access(DMA_TO_DEVICE) is a NOP; the sync will happen
> +      * in the end_cpu_access() path.
> +      */
> +     if (direction == DMA_FROM_DEVICE)
> +             access |= DRM_GEM_OBJECT_READ_ACCESS;
> +     else if (direction == DMA_BIDIRECTIONAL)
> +             access |= DRM_GEM_OBJECT_RW_ACCESS;
> +     else
> +             return 0;
> +
> +     return drm_gem_sync(obj, 0, obj->size, access);
> +}
> +EXPORT_SYMBOL(drm_gem_dmabuf_begin_cpu_access);
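
Just to make the NOP case above concrete: a minimal importer-side
sketch of a CPU read bracketed by the new hooks (illustrative only,
not part of the patch; "dmabuf" here is assumed to be a previously
imported buffer). The sync happens in begin_cpu_access(), and the
matching end_cpu_access(DMA_FROM_DEVICE) is the NOP:

	int ret;

	/* Make the buffer coherent for the CPU before reading it. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... CPU reads of the buffer contents ... */

	/* NOP in this direction, kept for API symmetry. */
	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
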
> +
> +int drm_gem_dmabuf_end_cpu_access(struct dma_buf *dma_buf,
> +                               enum dma_data_direction direction)
> +{
> +     struct drm_gem_object *obj = dma_buf->priv;
> +     enum drm_gem_object_access_flags access = DRM_GEM_OBJECT_DEV_ACCESS;
> +
> +     /* end_cpu_access(DMA_FROM_DEVICE) is a NOP; the sync should have
> +      * happened in the begin_cpu_access() path already.
> +      */
> +     if (direction == DMA_TO_DEVICE)
> +             access |= DRM_GEM_OBJECT_READ_ACCESS;
> +     else if (direction == DMA_BIDIRECTIONAL)
> +             access |= DRM_GEM_OBJECT_RW_ACCESS;
> +     else
> +             return 0;
> +
> +     return drm_gem_sync(obj, 0, obj->size, access);
> +}
> +EXPORT_SYMBOL(drm_gem_dmabuf_end_cpu_access);
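
And the mirror image for a CPU write, again just an illustrative
sketch: here begin_cpu_access(DMA_TO_DEVICE) is the NOP, and the
sync for the device's subsequent read of the written data happens
in end_cpu_access():

	/* NOP in this direction. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* ... CPU writes to the buffer contents ... */

	/* This is where the sync back to the device happens. */
	ret = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
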
> +
>  static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
>       .attach = drm_gem_map_attach,
>       .detach = drm_gem_map_detach,
> @@ -832,6 +872,8 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
>       .mmap = drm_gem_dmabuf_mmap,
>       .vmap = drm_gem_dmabuf_vmap,
>       .vunmap = drm_gem_dmabuf_vunmap,
> +     .begin_cpu_access = drm_gem_dmabuf_begin_cpu_access,
> +     .end_cpu_access = drm_gem_dmabuf_end_cpu_access,
>  };
>  
>  /**
> diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
> index f50f862f0d8b..052fba039bb6 100644
> --- a/include/drm/drm_prime.h
> +++ b/include/drm/drm_prime.h
> @@ -92,6 +92,11 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map);
>  int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
>  int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
>  
> +int drm_gem_dmabuf_begin_cpu_access(struct dma_buf *dma_buf,
> +                                 enum dma_data_direction direction);
> +int drm_gem_dmabuf_end_cpu_access(struct dma_buf *dma_buf,
> +                               enum dma_data_direction direction);
> +
>  struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
>                                      struct page **pages, unsigned int nr_pages);
>  struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
