Before we introduce cached CPU mappings, we want a dma_buf implementation that honors synchronization requests around CPU accesses to buffers exported by our driver. Let's provide our own implementation, relying on the default GEM/shmem PRIME helpers designed for that purpose.
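For context: on the exporter side, the begin_cpu_access()/end_cpu_access() hooks added below are what DMA_BUF_IOCTL_SYNC ends up calling when userspace brackets its CPU accesses to the buffer. A minimal, hypothetical userspace sketch (not part of this patch) of that bracketing:

  /* Hypothetical sketch: bracket a CPU write to an exported dma-buf
   * with DMA_BUF_IOCTL_SYNC so the exporter can flush/invalidate
   * caches as needed. Error handling trimmed for brevity.
   */
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <linux/dma-buf.h>

  static int cpu_write_pattern(int dmabuf_fd, size_t size)
  {
  	struct dma_buf_sync sync = { 0 };
  	void *map;

  	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
  		   dmabuf_fd, 0);
  	if (map == MAP_FAILED)
  		return -1;

  	/* Tell the exporter a CPU write is starting (begin_cpu_access). */
  	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
  	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

  	memset(map, 0xa5, size);

  	/* Hand the buffer back to the device side (end_cpu_access). */
  	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
  	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

  	munmap(map, size);
  	return 0;
  }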
v5:
- New patch

v6:
- Collect R-b

v7:
- Hand-roll our own dma_buf boilerplate

Signed-off-by: Boris Brezillon <[email protected]>
Reviewed-by: Steven Price <[email protected]>
---
 drivers/gpu/drm/panfrost/panfrost_drv.c |   1 +
 drivers/gpu/drm/panfrost/panfrost_gem.c | 123 ++++++++++++++++++++++++
 drivers/gpu/drm/panfrost/panfrost_gem.h |   3 +
 3 files changed, 127 insertions(+)

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 7d8c7c337606..cd6001a7faa2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -852,6 +852,7 @@ static const struct drm_driver panfrost_drm_driver = {
 	.minor = 5,
 
 	.gem_create_object = panfrost_gem_create_object,
+	.gem_prime_import = panfrost_gem_prime_import,
 	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init = panfrost_debugfs_init,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 8041b65c6609..4afd1a7f77d5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -259,6 +259,128 @@ static size_t panfrost_gem_rss(struct drm_gem_object *obj)
 	return 0;
 }
 
+static struct sg_table *
+panfrost_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+			       enum dma_data_direction dir)
+{
+	struct sg_table *sgt = drm_gem_map_dma_buf(attach, dir);
+
+	if (!IS_ERR(sgt))
+		attach->priv = sgt;
+
+	return sgt;
+}
+
+static void
+panfrost_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+				 struct sg_table *sgt,
+				 enum dma_data_direction dir)
+{
+	attach->priv = NULL;
+	drm_gem_unmap_dma_buf(attach, sgt, dir);
+}
+
+static int
+panfrost_gem_prime_begin_cpu_access(struct dma_buf *dma_buf,
+				    enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	struct dma_buf_attachment *attach;
+
+	dma_resv_lock(obj->resv, NULL);
+	if (shmem->sgt)
+		dma_sync_sgtable_for_cpu(dev->dev, shmem->sgt, dir);
+
+	if (shmem->vaddr)
+		invalidate_kernel_vmap_range(shmem->vaddr, shmem->base.size);
+
+	list_for_each_entry(attach, &dma_buf->attachments, node) {
+		struct sg_table *sgt = attach->priv;
+
+		if (sgt)
+			dma_sync_sgtable_for_cpu(attach->dev, sgt, dir);
+	}
+	dma_resv_unlock(obj->resv);
+
+	return 0;
+}
+
+static int
+panfrost_gem_prime_end_cpu_access(struct dma_buf *dma_buf,
+				  enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	struct dma_buf_attachment *attach;
+
+	dma_resv_lock(obj->resv, NULL);
+	list_for_each_entry(attach, &dma_buf->attachments, node) {
+		struct sg_table *sgt = attach->priv;
+
+		if (sgt)
+			dma_sync_sgtable_for_device(attach->dev, sgt, dir);
+	}
+
+	if (shmem->vaddr)
+		flush_kernel_vmap_range(shmem->vaddr, shmem->base.size);
+
+	if (shmem->sgt)
+		dma_sync_sgtable_for_device(dev->dev, shmem->sgt, dir);
+
+	dma_resv_unlock(obj->resv);
+	return 0;
+}
+
+static const struct dma_buf_ops panfrost_dma_buf_ops = {
+	.attach = drm_gem_map_attach,
+	.detach = drm_gem_map_detach,
+	.map_dma_buf = panfrost_gem_prime_map_dma_buf,
+	.unmap_dma_buf = panfrost_gem_prime_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.mmap = drm_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+	.begin_cpu_access = panfrost_gem_prime_begin_cpu_access,
+	.end_cpu_access = panfrost_gem_prime_end_cpu_access,
+};
+
+static struct dma_buf *
+panfrost_gem_prime_export(struct drm_gem_object *obj, int flags)
+{
+	struct drm_device *dev = obj->dev;
+	struct dma_buf_export_info exp_info = {
+		.exp_name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+		.ops = &panfrost_dma_buf_ops,
+		.size = obj->size,
+		.flags = flags,
+		.priv = obj,
+		.resv = obj->resv,
+	};
+
+	return drm_gem_dmabuf_export(dev, &exp_info);
+}
+
+struct drm_gem_object *
+panfrost_gem_prime_import(struct drm_device *dev,
+			  struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	if (dma_buf->ops == &panfrost_dma_buf_ops && obj->dev == dev) {
+		/* Importing dmabuf exported from our own gem increases
+		 * refcount on gem itself instead of f_count of dmabuf.
+		 */
+		drm_gem_object_get(obj);
+		return obj;
+	}
+
+	return drm_gem_prime_import(dev, dma_buf);
+}
+
 static const struct drm_gem_object_funcs panfrost_gem_funcs = {
 	.free = panfrost_gem_free_object,
 	.open = panfrost_gem_open,
@@ -267,6 +389,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
 	.pin = panfrost_gem_pin,
 	.unpin = drm_gem_shmem_object_unpin,
 	.get_sg_table = drm_gem_shmem_object_get_sg_table,
+	.export = panfrost_gem_prime_export,
 	.vmap = drm_gem_shmem_object_vmap,
 	.vunmap = drm_gem_shmem_object_vunmap,
 	.mmap = drm_gem_shmem_object_mmap,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 8de3e76f2717..7fec20339354 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -130,6 +130,9 @@ struct drm_gem_object *
 panfrost_gem_prime_import_sg_table(struct drm_device *dev,
 				   struct dma_buf_attachment *attach,
 				   struct sg_table *sgt);
+struct drm_gem_object *
+panfrost_gem_prime_import(struct drm_device *dev,
+			  struct dma_buf *dma_buf);
 
 struct panfrost_gem_object *
 panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags);
-- 
2.51.1
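
P.S.: a purely illustrative (hypothetical, not part of the patch) libdrm snippet showing the round-trip that the new panfrost_gem_prime_import() short-circuits. Re-importing a panfrost-exported dma-buf into the same DRM device now takes a reference on the original GEM object instead of creating a self-attachment:

  /* Hypothetical libdrm snippet: export a BO as a dma-buf fd, then
   * re-import it into the same device. With this patch, out_handle
   * refers to the same underlying GEM object as bo_handle.
   */
  #include <stdint.h>
  #include <unistd.h>
  #include <xf86drm.h>

  static int self_import(int drm_fd, uint32_t bo_handle, uint32_t *out_handle)
  {
  	int prime_fd, ret;

  	ret = drmPrimeHandleToFD(drm_fd, bo_handle, DRM_CLOEXEC, &prime_fd);
  	if (ret)
  		return ret;

  	ret = drmPrimeFDToHandle(drm_fd, prime_fd, out_handle);
  	close(prime_fd);
  	return ret;
  }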
