From: Tvrtko Ursulin <[email protected]>

If we make GEM contexts keep a reference to i915_drm_client for the whole
of their lifetime, we can consolidate the current task pid and name usage
by getting it from the client.

v2:
 * Don't bother supporting selftests contexts from debugfs. (Chris)

v3:
 * Trivial rebase.

v4:
 * Fully initialize context before adding to the list. (Lucas)

Signed-off-by: Tvrtko Ursulin <[email protected]>
Reviewed-by: Chris Wilson <[email protected]>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   | 20 +++++++++----
 .../gpu/drm/i915/gem/i915_gem_context_types.h | 13 ++-------
 drivers/gpu/drm/i915/i915_debugfs.c           | 29 +++++++------------
 drivers/gpu/drm/i915/i915_gpu_error.c         | 22 ++++++++------
 4 files changed, 40 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 694641aa656b..b0bcfce2a1e4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -341,13 +341,14 @@ void i915_gem_context_release(struct kref *ref)
        trace_i915_context_free(ctx);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
-       mutex_destroy(&ctx->engines_mutex);
-       mutex_destroy(&ctx->lut_mutex);
+       if (ctx->client)
+               i915_drm_client_put(ctx->client);
 
        if (ctx->timeline)
                intel_timeline_put(ctx->timeline);
 
-       put_pid(ctx->pid);
+       mutex_destroy(&ctx->engines_mutex);
+       mutex_destroy(&ctx->lut_mutex);
        mutex_destroy(&ctx->mutex);
 
        kfree_rcu(ctx, rcu);
@@ -878,6 +879,7 @@ static int gem_context_register(struct i915_gem_context *ctx,
                                u32 *id)
 {
        struct drm_i915_private *i915 = ctx->i915;
+       struct i915_drm_client *client;
        struct i915_address_space *vm;
        int ret;
 
@@ -889,15 +891,21 @@ static int gem_context_register(struct i915_gem_context *ctx,
                WRITE_ONCE(vm->file, fpriv); /* XXX */
        mutex_unlock(&ctx->mutex);
 
-       ctx->pid = get_task_pid(current, PIDTYPE_PID);
+       client = i915_drm_client_get(fpriv->client);
+
+       rcu_read_lock();
        snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
-                current->comm, pid_nr(ctx->pid));
+                rcu_dereference(client->name)->name,
+                pid_nr(rcu_dereference(client->name)->pid));
+       rcu_read_unlock();
 
        /* And finally expose ourselves to userspace via the idr */
        ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
        if (ret)
                goto err_pid;
 
+       ctx->client = client;
+
        spin_lock(&i915->gem.contexts.lock);
        list_add_tail(&ctx->link, &i915->gem.contexts.list);
        spin_unlock(&i915->gem.contexts.lock);
@@ -905,7 +913,7 @@ static int gem_context_register(struct i915_gem_context *ctx,
        return 0;
 
 err_pid:
-       put_pid(fetch_and_zero(&ctx->pid));
+       i915_drm_client_put(client);
        return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 1449f54924e0..c47bb45d2110 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -96,19 +96,12 @@ struct i915_gem_context {
         */
        struct i915_address_space __rcu *vm;
 
-       /**
-        * @pid: process id of creator
-        *
-        * Note that who created the context may not be the principle user,
-        * as the context may be shared across a local socket. However,
-        * that should only affect the default context, all contexts created
-        * explicitly by the client are expected to be isolated.
-        */
-       struct pid *pid;
-
        /** link: place with &drm_i915_private.context_list */
        struct list_head link;
 
+       /** client: struct i915_drm_client */
+       struct i915_drm_client *client;
+
        /**
         * @ref: reference count
         *
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f29487ea4528..289ee65b669a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -333,17 +333,15 @@ static void print_context_stats(struct seq_file *m,
                                .vm = rcu_access_pointer(ctx->vm),
                        };
                        struct drm_file *file = ctx->file_priv->file;
-                       struct task_struct *task;
                        char name[80];
 
                        rcu_read_lock();
+
                        idr_for_each(&file->object_idr, per_file_stats, &stats);
-                       rcu_read_unlock();
 
-                       rcu_read_lock();
-                       task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
                        snprintf(name, sizeof(name), "%s",
-                                task ? task->comm : "<unknown>");
+                                rcu_dereference(ctx->client->name)->name);
+
                        rcu_read_unlock();
 
                        print_file_stats(m, name, stats);
@@ -1075,20 +1073,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
                spin_unlock(&i915->gem.contexts.lock);
 
                seq_puts(m, "HW context ");
-               if (ctx->pid) {
-                       struct task_struct *task;
-
-                       task = get_pid_task(ctx->pid, PIDTYPE_PID);
-                       if (task) {
-                               seq_printf(m, "(%s [%d]) ",
-                                          task->comm, task->pid);
-                               put_task_struct(task);
-                       }
-               } else if (IS_ERR(ctx->file_priv)) {
-                       seq_puts(m, "(deleted) ");
-               } else {
-                       seq_puts(m, "(kernel) ");
-               }
+
+               rcu_read_lock();
+               seq_printf(m, "(%s [%d]) %s",
+                          rcu_dereference(ctx->client->name)->name,
+                          pid_nr(rcu_dereference(ctx->client->name)->pid),
+                          ctx->client->closed ? "(closed) " : "");
+               rcu_read_unlock();
 
                seq_putc(m, ctx->remap_slice ? 'R' : 'r');
                seq_putc(m, '\n');
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d8cac4c5881f..e38dab8d2fed 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1238,7 +1238,9 @@ static void record_request(const struct i915_request *request,
 
                ctx = rcu_dereference(request->context->gem_context);
                if (ctx)
-                       erq->pid = pid_nr(ctx->pid);
+                       erq->pid = I915_SELFTEST_ONLY(!ctx->client) ?
+                                  0 :
+                                  pid_nr(rcu_dereference(ctx->client->name)->pid);
        }
        rcu_read_unlock();
 }
@@ -1259,23 +1261,25 @@ static bool record_context(struct i915_gem_context_coredump *e,
                           const struct i915_request *rq)
 {
        struct i915_gem_context *ctx;
-       struct task_struct *task;
        bool simulated;
 
        rcu_read_lock();
+
        ctx = rcu_dereference(rq->context->gem_context);
        if (ctx && !kref_get_unless_zero(&ctx->ref))
                ctx = NULL;
-       rcu_read_unlock();
-       if (!ctx)
+       if (!ctx) {
+               rcu_read_unlock();
                return true;
+       }
 
-       rcu_read_lock();
-       task = pid_task(ctx->pid, PIDTYPE_PID);
-       if (task) {
-               strcpy(e->comm, task->comm);
-               e->pid = task->pid;
+       if (I915_SELFTEST_ONLY(!ctx->client)) {
+               strcpy(e->comm, "[kernel]");
+       } else {
+               strcpy(e->comm, rcu_dereference(ctx->client->name)->name);
+               e->pid = pid_nr(rcu_dereference(ctx->client->name)->pid);
        }
+
        rcu_read_unlock();
 
        e->sched_attr = ctx->sched;
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to