Hi,

on patches 2 to 4: sync is really begin/end access wrapped into one interface, which I find questionable. I also don't like that these patches add generic infrastructure for a single driver.

My proposal is to make your own dma_buf exporter in panthor and set the begin/end_cpu_access functions as you need. Panthor already contains its own GEM export helper at [1]. If you inline drm_gem_prime_export() [2] you can set the cpu_access callbacks to panthor-specific code. The other dma-buf helpers' symbols should be exported and can be re-used. That is a lot less intrusive and should provide what you need.

I found that a handful of other DRM drivers implement dma-buf's begin/end access callbacks. If you want a generic begin/end interface for GEM, you certainly want to get them on board. If there's something common to share, this should be done in a separate series.

[1] https://elixir.bootlin.com/linux/v6.17.1/source/drivers/gpu/drm/panthor/panthor_gem.c#L196 [2] https://elixir.bootlin.com/linux/v6.17.1/source/drivers/gpu/drm/drm_prime.c#L919

Best regards
Thomas

Am 15.10.25 um 18:03 schrieb Boris Brezillon:
Prepare things for standardizing synchronization around CPU accesses
of GEM buffers. This will be used to provide default
drm_gem_dmabuf_{begin,end}_cpu_access() implementations, and provide
a way for drivers to add their own ioctls to synchronize CPU
writes/reads when they can't do it directly from userland.

v2:
- New commit

v3:
- No changes

v4:
- Add Steve's R-b

Signed-off-by: Boris Brezillon <[email protected]>
Reviewed-by: Steven Price <[email protected]>
---
  drivers/gpu/drm/drm_gem.c | 10 +++++++++
  include/drm/drm_gem.h     | 45 +++++++++++++++++++++++++++++++++++++++
  2 files changed, 55 insertions(+)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a1a9c828938b..a1431e4f2404 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1333,6 +1333,16 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
  }
  EXPORT_SYMBOL(drm_gem_vunmap);
+int drm_gem_sync(struct drm_gem_object *obj, size_t offset, size_t size,
+                enum drm_gem_object_access_flags access)
+{
+       if (obj->funcs->sync)
+               return obj->funcs->sync(obj, offset, size, access);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_gem_sync);
+
  /**
   * drm_gem_lock_reservations - Sets up the ww context and acquires
   * the lock on an array of GEM objects.
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 8d48d2af2649..1c33e59ab305 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -66,6 +66,33 @@ enum drm_gem_object_status {
        DRM_GEM_OBJECT_ACTIVE    = BIT(2),
  };
+/**
+ * enum drm_gem_object_access_flags - bitmask describing GEM access types to prepare for
+ */
+enum drm_gem_object_access_flags {
+       /** @DRM_GEM_OBJECT_CPU_ACCESS: Prepare for a CPU access. */
+       DRM_GEM_OBJECT_CPU_ACCESS = 0,
+
+       /** @DRM_GEM_OBJECT_DEV_ACCESS: Prepare for a device access. */
+       DRM_GEM_OBJECT_DEV_ACCESS = BIT(0),
+
+       /** @DRM_GEM_OBJECT_ACCESSOR_MASK: Mask used to check the entity doing the access. */
+       DRM_GEM_OBJECT_ACCESSOR_MASK = BIT(0),
+
+       /** @DRM_GEM_OBJECT_READ_ACCESS: Prepare for read-only accesses. */
+       DRM_GEM_OBJECT_READ_ACCESS = BIT(1),
+
+       /** @DRM_GEM_OBJECT_WRITE_ACCESS: Prepare for write-only accesses. */
+       DRM_GEM_OBJECT_WRITE_ACCESS = BIT(2),
+
+       /** @DRM_GEM_OBJECT_RW_ACCESS: Prepare for read/write accesses. */
+       DRM_GEM_OBJECT_RW_ACCESS = DRM_GEM_OBJECT_READ_ACCESS |
+                                  DRM_GEM_OBJECT_WRITE_ACCESS,
+
+       /** @DRM_GEM_OBJECT_ACCESS_TYPE_MASK: Mask used to check the access type. */
+       DRM_GEM_OBJECT_ACCESS_TYPE_MASK = DRM_GEM_OBJECT_RW_ACCESS,
+};
+
  /**
   * struct drm_gem_object_funcs - GEM object functions
   */
@@ -191,6 +218,21 @@ struct drm_gem_object_funcs {
         */
        int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
+ /**
+        * @sync:
+        *
+        * Prepare for CPU/device access. This can involve migration of
+        * a buffer to the system-RAM/VRAM, or for UMA, flushing/invalidating
+        * the CPU caches. The range can be used to optimize the synchronization
+        * when possible.
+        *
+        * Returns 0 on success, -errno otherwise.
+        *
+        * This callback is optional.
+        */
+       int (*sync)(struct drm_gem_object *obj, size_t offset, size_t size,
+                   enum drm_gem_object_access_flags access);
+
        /**
         * @evict:
         *
@@ -559,6 +601,9 @@ void drm_gem_unlock(struct drm_gem_object *obj);
  int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
  void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_sync(struct drm_gem_object *obj, size_t offset, size_t size,
+                enum drm_gem_object_access_flags access);
+
  int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);

--
--
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Frankenstrasse 146, 90461 Nuernberg, Germany
GF: Ivo Totev, Andrew Myers, Andrew McDonald, Boudien Moerman
HRB 36809 (AG Nuernberg)


Reply via email to