Introduce a per-drm_file eventfd manager to support render-node event
subscriptions.

The manager is implemented in amdgpu_eventfd.[ch] and is owned by the
drm_file (amdgpu_fpriv). It maps event_id -> eventfd_id object, where
each eventfd_id can have multiple eventfds bound (fan-out).

The design is IRQ-safe for signaling: IRQ path takes the xarray lock
(irqsave) and signals eventfds while still holding the lock.

This patch only adds the core manager; wiring it into the IOCTL and IRQ
paths is left to follow-up patches.

v4:
- Use eventfd_ctx pointer as binding identity instead of fd number
- Make duplicate (event_id, ctx) binds idempotent
- Replace mgr lock with atomic bind limit
- Add helper for xa get-or-create event_id

v5:
- Rework event_id get/create helper so it is callable without holding xa
  lock
- Move event_id allocation into the helper and use xa_insert() for
  insertion
- Drop GFP_NOWAIT usage in xa insertion path
- Allocate eventfd entry only after ctx/id prerequisites are satisfied
- Simplify fini path by removing per-entry __xa_erase() and relying on
  xa_destroy()
- Keep duplicate (event_id, ctx) binds idempotent

Cc: Alex Deucher <[email protected]>
Suggested-by: Christian König <[email protected]>
Signed-off-by: Srinivasan Shanmugam <[email protected]>
Change-Id: I87ba09b5daaad7b84d96ed570923e6afe37fb3bd
---
 drivers/gpu/drm/amd/amdgpu/Makefile         |   3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_eventfd.c | 386 ++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_eventfd.h |  59 +++
 3 files changed, 447 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_eventfd.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_eventfd.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 006d49d6b4af..30b1cf3c6cdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -67,7 +67,8 @@ amdgpu-y += amdgpu_device.o amdgpu_reg_access.o amdgpu_doorbell_mgr.o amdgpu_kms
        amdgpu_fw_attestation.o amdgpu_securedisplay.o \
        amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
        amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
-       amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
+       amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o \
+       amdgpu_eventfd.o
 
 amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eventfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eventfd.c
new file mode 100644
index 000000000000..0b0c9268aedc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eventfd.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Render-node eventfd subscription infrastructure.
+ *
+ * This module provides a simple event notification mechanism for render-node
+ * clients using Linux eventfd objects.
+ *
+ * Userspace can bind an eventfd to a userspace-defined event_id. When the
+ * driver signals that event_id, all eventfds bound to it are notified.
+ *
+ * This mechanism is intended to support lightweight GPU event notifications
+ * (for example, user queue EOP events) without polling from userspace.
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "amdgpu_eventfd.h"
+
+#define AMDGPU_EVENTFD_MAX_BINDS 4096
+
+/**
+ * amdgpu_eventfd_id_alloc - allocate an event id container
+ * @event_id: userspace-defined event identifier
+ *
+ * Each event_id represents a notification category. Multiple eventfds can
+ * be bound to the same event_id; this container anchors the hlist of
+ * eventfd entries associated with that event_id.
+ *
+ * Return:
+ * Pointer to the newly allocated structure or NULL on failure.
+ */
+static struct amdgpu_eventfd_id *amdgpu_eventfd_id_alloc(u32 event_id)
+{
+       struct amdgpu_eventfd_id *id;
+
+       id = kzalloc(sizeof(*id), GFP_KERNEL);
+       if (!id)
+               return NULL;
+
+       id->event_id = event_id;
+       /* kzalloc() already zeroed n_entries; only the list head needs init. */
+       INIT_HLIST_HEAD(&id->entries);
+       return id;
+}
+
+/**
+ * amdgpu_eventfd_id_get_or_create - find or create an event_id entry
+ * @mgr: eventfd manager
+ * @event_id: event identifier
+ *
+ * Returns the container associated with @event_id, creating one if it does
+ * not exist yet. The container is allocated outside the xarray lock so
+ * GFP_KERNEL can be used; insertion races with concurrent creators are
+ * resolved via the -EBUSY return of the insert.
+ *
+ * Note for callers: the returned pointer is only stable while the xarray
+ * lock is held (a concurrent unbind may free an emptied container), so it
+ * must be re-validated under the lock before dereferencing.
+ *
+ * Return:
+ * Pointer to the event_id structure or NULL on failure.
+ */
+static struct amdgpu_eventfd_id *
+amdgpu_eventfd_id_get_or_create(struct amdgpu_eventfd_mgr *mgr, u32 event_id)
+{
+       struct amdgpu_eventfd_id *id;
+       struct amdgpu_eventfd_id *new_id;
+       unsigned long flags;
+       int r;
+
+       /* Fast path: xa_load() is RCU-safe and needs no external locking. */
+       id = xa_load(&mgr->ids, event_id);
+       if (id)
+               return id;
+
+       new_id = amdgpu_eventfd_id_alloc(event_id);
+       if (!new_id)
+               return NULL;
+
+       xa_lock_irqsave(&mgr->ids, flags);
+
+       /*
+        * Must use the pre-locked __xa_insert() variant here: plain
+        * xa_insert() takes the xarray lock itself and would deadlock on the
+        * lock we already hold. GFP_ATOMIC because we cannot sleep with the
+        * spinlock held and IRQs disabled.
+        *
+        * __xa_insert() returns -EBUSY if an entry already exists, which
+        * also covers the lost-race-with-another-creator case.
+        */
+       r = __xa_insert(&mgr->ids, event_id, new_id, GFP_ATOMIC);
+       if (r == -EBUSY)
+               id = xa_load(&mgr->ids, event_id);
+
+       xa_unlock_irqrestore(&mgr->ids, flags);
+
+       if (r == -EBUSY) {
+               /* Somebody beat us to it; use the existing container. */
+               kfree(new_id);
+               return id;
+       }
+
+       if (r) {
+               kfree(new_id);
+               return NULL;
+       }
+
+       return new_id;
+}
+
+/**
+ * amdgpu_eventfd_mgr_init - initialize eventfd manager
+ * @mgr: manager instance
+ *
+ * Each DRM file (amdgpu_fpriv) owns one eventfd manager.
+ *
+ * The manager stores mappings:
+ *
+ *   event_id -> list of eventfds
+ *
+ * The xarray provides efficient lookup of event_id containers.
+ * XA_FLAGS_LOCK_IRQ is required because the signaling path takes the
+ * xarray lock from IRQ context.
+ */
+void amdgpu_eventfd_mgr_init(struct amdgpu_eventfd_mgr *mgr)
+{
+       xa_init_flags(&mgr->ids, XA_FLAGS_LOCK_IRQ);
+       atomic_set(&mgr->bind_count, 0);
+}
+
+/**
+ * amdgpu_eventfd_mgr_fini - destroy eventfd manager
+ * @mgr: manager instance
+ *
+ * Frees all registered eventfd bindings and drops the eventfd_ctx
+ * references they hold.
+ *
+ * The caller is expected to ensure that no IRQ-side signaling can occur
+ * after this function begins. This typically happens during drm_file
+ * teardown.
+ */
+void amdgpu_eventfd_mgr_fini(struct amdgpu_eventfd_mgr *mgr)
+{
+       unsigned long index;
+       struct amdgpu_eventfd_id *id;
+
+       /*
+        * Expected teardown ordering: caller ensures no further IRQ-side
+        * signaling can race with this before fini() is called.
+        */
+       xa_lock(&mgr->ids);
+       xa_for_each(&mgr->ids, index, id) {
+               struct amdgpu_eventfd_entry *e;
+               struct hlist_node *tmp;
+
+               hlist_for_each_entry_safe(e, tmp, &id->entries, hnode) {
+                       hlist_del(&e->hnode);
+                       eventfd_ctx_put(e->ctx);
+                       kfree(e);
+               }
+
+               /*
+                * The container is freed here but left in the xarray;
+                * xa_destroy() below removes the stale slots in one pass.
+                */
+               kfree(id);
+       }
+       xa_unlock(&mgr->ids);
+
+       /* NOTE(review): bind_count is not reset here; fine if the manager is
+        * never reused after fini(), but confirm no init/fini/init cycle
+        * exists on any path.
+        */
+       xa_destroy(&mgr->ids);
+}
+
+/**
+ * amdgpu_eventfd_bind - bind eventfd to an event_id
+ * @mgr: eventfd manager
+ * @event_id: userspace event identifier
+ * @eventfd: eventfd file descriptor
+ *
+ * This function allows userspace to subscribe to notifications for a
+ * specific event_id.
+ *
+ * Multiple eventfds can be bound to the same event_id.
+ *
+ * Duplicate bindings of the same eventfd (identified by its eventfd_ctx,
+ * not the fd number) are treated as success and do not create additional
+ * entries.
+ *
+ * Return:
+ * 0 on success, negative error code on failure.
+ */
+int amdgpu_eventfd_bind(struct amdgpu_eventfd_mgr *mgr, u32 event_id, int eventfd)
+{
+       struct amdgpu_eventfd_id *id;
+       struct amdgpu_eventfd_entry *e, *it;
+       struct eventfd_ctx *ctx;
+       unsigned long flags;
+       bool dup = false;
+       int r = 0;
+
+       if (!mgr || !event_id || eventfd < 0)
+               return -EINVAL;
+
+       /*
+        * Enforce total bind limit without a separate manager lock.
+        * For duplicate binds, we decrement back before returning success.
+        */
+       if (atomic_inc_return(&mgr->bind_count) > AMDGPU_EVENTFD_MAX_BINDS) {
+               atomic_dec(&mgr->bind_count);
+               return -ENOSPC;
+       }
+
+       ctx = eventfd_ctx_fdget(eventfd);
+       if (IS_ERR(ctx)) {
+               atomic_dec(&mgr->bind_count);
+               return PTR_ERR(ctx);
+       }
+
+       /*
+        * Allocate the entry up front so the locked section below cannot
+        * fail and never has to drop the lock between the duplicate check
+        * and the insertion.
+        */
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+       if (!e) {
+               r = -ENOMEM;
+               goto err_put;
+       }
+       e->ctx = ctx;
+
+retry:
+       id = amdgpu_eventfd_id_get_or_create(mgr, event_id);
+       if (!id) {
+               r = -ENOMEM;
+               goto err_free;
+       }
+
+       xa_lock_irqsave(&mgr->ids, flags);
+
+       /*
+        * The container may have been freed by a concurrent unbind (which
+        * drops an emptied container) after the helper released the lock.
+        * Re-validate the pointer against the xarray before touching it;
+        * on mismatch, start over.
+        */
+       if (xa_load(&mgr->ids, event_id) != id) {
+               xa_unlock_irqrestore(&mgr->ids, flags);
+               goto retry;
+       }
+
+       hlist_for_each_entry(it, &id->entries, hnode) {
+               if (it->ctx == ctx) {
+                       dup = true;
+                       break;
+               }
+       }
+
+       if (!dup) {
+               hlist_add_head(&e->hnode, &id->entries);
+               id->n_entries++;
+               e = NULL; /* ownership transferred to the list */
+       }
+       xa_unlock_irqrestore(&mgr->ids, flags);
+
+       if (!dup)
+               return 0;
+
+       /* Duplicate (event_id, ctx) bind: idempotent success, undo refs. */
+err_free:
+       kfree(e);
+err_put:
+       eventfd_ctx_put(ctx);
+       atomic_dec(&mgr->bind_count);
+       return r;
+}
+
+/**
+ * amdgpu_eventfd_unbind - remove eventfd binding
+ * @mgr: eventfd manager
+ * @event_id: event identifier
+ * @eventfd: eventfd file descriptor
+ *
+ * Removes an existing binding between an event_id and an eventfd. The
+ * binding is identified by the eventfd_ctx behind @eventfd, so @eventfd
+ * must still be open in the calling process. When the last binding for an
+ * event_id is removed, its container is erased and freed as well.
+ *
+ * Return:
+ * 0 if removed, -ENOENT if binding does not exist.
+ */
+int amdgpu_eventfd_unbind(struct amdgpu_eventfd_mgr *mgr, u32 event_id, int eventfd)
+{
+       struct amdgpu_eventfd_entry *entry, *found = NULL;
+       struct amdgpu_eventfd_id *id;
+       struct eventfd_ctx *ctx;
+       struct hlist_node *tmp;
+       unsigned long flags;
+
+       if (!mgr || !event_id || eventfd < 0)
+               return -EINVAL;
+
+       /* Resolve the fd to its ctx; the ctx is the binding's identity. */
+       ctx = eventfd_ctx_fdget(eventfd);
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+
+       xa_lock_irqsave(&mgr->ids, flags);
+       id = xa_load(&mgr->ids, event_id);
+       if (id) {
+               hlist_for_each_entry_safe(entry, tmp, &id->entries, hnode) {
+                       if (entry->ctx != ctx)
+                               continue;
+                       hlist_del(&entry->hnode);
+                       id->n_entries--;
+                       found = entry;
+                       break;
+               }
+
+               /* Drop the container once its last binding is gone. */
+               if (found && !id->n_entries) {
+                       __xa_erase(&mgr->ids, event_id);
+                       kfree(id);
+               }
+       }
+       xa_unlock_irqrestore(&mgr->ids, flags);
+
+       /* Release resources outside the spinlock. */
+       if (found) {
+               eventfd_ctx_put(found->ctx);
+               kfree(found);
+               atomic_dec(&mgr->bind_count);
+       }
+
+       eventfd_ctx_put(ctx);
+       return found ? 0 : -ENOENT;
+}
+
+/**
+ * amdgpu_eventfd_signal - notify all eventfds bound to event_id
+ * @mgr: eventfd manager
+ * @event_id: event identifier
+ *
+ * This function is typically called from interrupt context.
+ *
+ * All eventfds registered for the given event_id will be signaled.
+ * Userspace processes waiting on those eventfds will wake up.
+ *
+ * If no container exists for @event_id, this is a silent no-op.
+ */
+void amdgpu_eventfd_signal(struct amdgpu_eventfd_mgr *mgr, u32 event_id)
+{
+       struct amdgpu_eventfd_id *id;
+       struct amdgpu_eventfd_entry *e;
+       unsigned long flags;
+
+       if (!mgr || !event_id)
+               return;
+
+       /*
+        * IRQ-safe signaling path: keep xarray lock held while iterating and
+        * signaling, so entries cannot be unbound underneath us.
+        * eventfd_signal() is IRQ-safe.
+        */
+       xa_lock_irqsave(&mgr->ids, flags);
+
+       id = xa_load(&mgr->ids, event_id);
+       if (id) {
+               hlist_for_each_entry(e, &id->entries, hnode)
+                       eventfd_signal(e->ctx);
+       }
+
+       xa_unlock_irqrestore(&mgr->ids, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eventfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eventfd.h
new file mode 100644
index 000000000000..248afb1f2f14
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eventfd.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Render-node eventfd subscription infrastructure.
+ */
+
+#ifndef __AMDGPU_EVENTFD_H__
+#define __AMDGPU_EVENTFD_H__
+
+#include <linux/eventfd.h>
+#include <linux/xarray.h>
+#include <linux/atomic.h>
+
+/* One eventfd bound to an event_id; the ctx pointer is the binding identity. */
+struct amdgpu_eventfd_entry {
+       struct eventfd_ctx *ctx;    /* held reference, put on unbind/fini */
+       struct hlist_node hnode;    /* link in amdgpu_eventfd_id.entries */
+};
+
+/* Fan-out container: all eventfds subscribed to one event_id. */
+struct amdgpu_eventfd_id {
+       u32 event_id;               /* userspace-defined event identifier */
+       struct hlist_head entries;  /* bound amdgpu_eventfd_entry objects */
+       u32 n_entries;              /* length of @entries */
+};
+
+/* Per-drm_file manager; lookups and updates are serialized by the xa lock. */
+struct amdgpu_eventfd_mgr {
+       struct xarray ids;          /* event_id -> struct amdgpu_eventfd_id* */
+       atomic_t bind_count;        /* total binds across all event_ids */
+};
+
+void amdgpu_eventfd_mgr_init(struct amdgpu_eventfd_mgr *mgr);
+void amdgpu_eventfd_mgr_fini(struct amdgpu_eventfd_mgr *mgr);
+
+int amdgpu_eventfd_bind(struct amdgpu_eventfd_mgr *mgr, u32 event_id, int eventfd);
+int amdgpu_eventfd_unbind(struct amdgpu_eventfd_mgr *mgr, u32 event_id, int eventfd);
+
+void amdgpu_eventfd_signal(struct amdgpu_eventfd_mgr *mgr, u32 event_id);
+
+#endif /* __AMDGPU_EVENTFD_H__ */
-- 
2.34.1

Reply via email to