Use the DMA_ATTR_SKIP_CPU_SYNC flag for exported buffers during DMA mapping. CPU cache maintenance for such buffers is managed explicitly through the dma-buf begin/end_cpu_access interface, so syncing at map time is redundant. The same flag is already used by drm_gem_map_dma_buf() for imported buffers.
Signed-off-by: Jacek Lawrynowicz <[email protected]>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c |  8 ++++++--
 include/drm/drm_gem.h                  | 12 ++++++++++++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index d99dee67353a1..8938d8e3de52f 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
 static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
-	int ret;
+	int ret, flags = 0;
 	struct sg_table *sgt;
 
 	if (shmem->sgt)
@@ -716,8 +716,12 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
 		ret = PTR_ERR(sgt);
 		goto err_put_pages;
 	}
+
+	if (drm_gem_is_exported(obj))
+		flags |= DMA_ATTR_SKIP_CPU_SYNC;
+
 	/* Map the pages for use by the h/w. */
-	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, flags);
 	if (ret)
 		goto err_free_sgt;
 
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 2bf893eabb4b2..7c355a44d0a49 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -589,6 +589,18 @@ static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
 	return obj->dma_buf && (obj->dma_buf->priv != obj);
 }
 
+/**
+ * drm_gem_is_exported() - Tests if GEM object's buffer has been exported
+ * @obj: the GEM object
+ *
+ * Returns:
+ * True if the GEM object's buffer has been exported, false otherwise
+ */
+static inline bool drm_gem_is_exported(const struct drm_gem_object *obj)
+{
+	return obj->dma_buf && (obj->dma_buf->priv == obj);
+}
+
 #ifdef CONFIG_LOCKDEP
 /**
  * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
-- 
2.45.1
