Before we introduce cached CPU mappings, we want a dma_buf implementation that honors synchronization requests around CPU accesses to dma_bufs exported by our driver. Let's provide our own implementation: the attach/detach, map, mmap and vmap paths reuse the generic drm_gem helpers, while hand-rolled begin_cpu_access()/end_cpu_access() hooks take care of the cache maintenance on both the exporter's and the importers' mappings.
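For context, the begin_cpu_access()/end_cpu_access() hooks added here are what back the DMA_BUF_IOCTL_SYNC ioctl on the exported dma-buf fd. A minimal sketch of the userspace side, assuming dmabuf_fd is a PRIME-exported fd (placeholder name, error handling omitted):

  #include <linux/dma-buf.h>
  #include <sys/ioctl.h>

  struct dma_buf_sync sync = {
          .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
  };

  /* START: make the buffer coherent for the upcoming CPU accesses. */
  ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

  /* ... CPU reads/writes through an mmap() of the dma-buf ... */

  /* END: flush CPU writes back so devices observe them. */
  sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
  ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);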
v5:
- New patch

v6:
- Collect R-b

v7:
- Hand-roll the dma_buf sync/import logic (was previously done by
  generic prime/shmem helpers)

Signed-off-by: Boris Brezillon <[email protected]>
Reviewed-by: Steven Price <[email protected]>
---
 drivers/gpu/drm/panthor/panthor_drv.c |   1 +
 drivers/gpu/drm/panthor/panthor_gem.c | 118 +++++++++++++++++++++++++-
 drivers/gpu/drm/panthor/panthor_gem.h |   4 +
 3 files changed, 122 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index d1d4c50da5bf..d12ac4cb0ac4 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -1621,6 +1621,7 @@ static const struct drm_driver panthor_drm_driver = {
 
 	.gem_create_object = panthor_gem_create_object,
 	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+	.gem_prime_import = panthor_gem_prime_import,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init = panthor_debugfs_init,
 #endif
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 10d255cccc09..173d42d65000 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -184,14 +184,130 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
 	return ERR_PTR(ret);
 }
 
+static struct sg_table *
+panthor_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+			      enum dma_data_direction dir)
+{
+	struct sg_table *sgt = drm_gem_map_dma_buf(attach, dir);
+
+	if (!IS_ERR(sgt))
+		attach->priv = sgt;
+
+	return sgt;
+}
+
+static void
+panthor_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+				struct sg_table *sgt,
+				enum dma_data_direction dir)
+{
+	attach->priv = NULL;
+	drm_gem_unmap_dma_buf(attach, sgt, dir);
+}
+
+static int
+panthor_gem_prime_begin_cpu_access(struct dma_buf *dma_buf,
+				   enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	struct dma_buf_attachment *attach;
+
+	dma_resv_lock(obj->resv, NULL);
+	if (shmem->sgt)
+		dma_sync_sgtable_for_cpu(dev->dev, shmem->sgt, dir);
+
+	if (shmem->vaddr)
+		invalidate_kernel_vmap_range(shmem->vaddr, shmem->base.size);
+
+	list_for_each_entry(attach, &dma_buf->attachments, node) {
+		struct sg_table *sgt = attach->priv;
+
+		if (sgt)
+			dma_sync_sgtable_for_cpu(attach->dev, sgt, dir);
+	}
+	dma_resv_unlock(obj->resv);
+
+	return 0;
+}
+
+static int
+panthor_gem_prime_end_cpu_access(struct dma_buf *dma_buf,
+				 enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	struct dma_buf_attachment *attach;
+
+	dma_resv_lock(obj->resv, NULL);
+	list_for_each_entry(attach, &dma_buf->attachments, node) {
+		struct sg_table *sgt = attach->priv;
+
+		if (sgt)
+			dma_sync_sgtable_for_device(attach->dev, sgt, dir);
+	}
+
+	if (shmem->vaddr)
+		flush_kernel_vmap_range(shmem->vaddr, shmem->base.size);
+
+	if (shmem->sgt)
+		dma_sync_sgtable_for_device(dev->dev, shmem->sgt, dir);
+
+	dma_resv_unlock(obj->resv);
+	return 0;
+}
+
+static const struct dma_buf_ops panthor_dma_buf_ops = {
+	.attach = drm_gem_map_attach,
+	.detach = drm_gem_map_detach,
+	.map_dma_buf = panthor_gem_prime_map_dma_buf,
+	.unmap_dma_buf = panthor_gem_prime_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.mmap = drm_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+	.begin_cpu_access = panthor_gem_prime_begin_cpu_access,
+	.end_cpu_access = panthor_gem_prime_end_cpu_access,
+};
+
 static struct dma_buf *
 panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
 {
+	struct drm_device *dev = obj->dev;
+	struct dma_buf_export_info exp_info = {
+		.exp_name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+		.ops = &panthor_dma_buf_ops,
+		.size = obj->size,
+		.flags = flags,
+		.priv = obj,
+		.resv = obj->resv,
+	};
+
 	/* We can't export GEMs that have an exclusive VM. */
 	if (to_panthor_bo(obj)->exclusive_vm_root_gem)
 		return ERR_PTR(-EINVAL);
 
-	return drm_gem_prime_export(obj, flags);
+	return drm_gem_dmabuf_export(dev, &exp_info);
+}
+
+struct drm_gem_object *
+panthor_gem_prime_import(struct drm_device *dev,
+			 struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	if (dma_buf->ops == &panthor_dma_buf_ops && obj->dev == dev) {
+		/* Importing dmabuf exported from our own gem increases
+		 * refcount on gem itself instead of f_count of dmabuf.
+		 */
+		drm_gem_object_get(obj);
+		return obj;
+	}
+
+	return drm_gem_prime_import(dev, dma_buf);
 }
 
 static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 80c6e24112d0..91d1880f8a5d 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -147,6 +147,10 @@ panthor_gem_create_with_handle(struct drm_file *file,
 void panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label);
 void panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label);
 
+struct drm_gem_object *
+panthor_gem_prime_import(struct drm_device *dev,
+			 struct dma_buf *dma_buf);
+
 static inline u64
 panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
 {
-- 
2.51.1
