If the idle_pulse fails to flush the i915_active, dump the tree to see
if that has any clues.

Signed-off-by: Chris Wilson <[email protected]>
---
 .../drm/i915/gt/selftest_engine_heartbeat.c   |  4 ++
 drivers/gpu/drm/i915/i915_active.h            |  2 +
 drivers/gpu/drm/i915/selftests/i915_active.c  | 45 +++++++++++++++++++
 3 files changed, 51 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 155c508024df..131c49ddf33f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -100,8 +100,12 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
        pulse_unlock_wait(p); /* synchronize with the retirement callback */
 
        if (!i915_active_is_idle(&p->active)) {
+               struct drm_printer m = drm_err_printer("pulse");
+
                pr_err("%s: heartbeat pulse did not flush idle tasks\n",
                       engine->name);
+               i915_active_print(&p->active, &m);
+
                err = -EINVAL;
                goto out;
        }
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 4f52fe6146d2..44859356ce97 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -214,4 +214,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 void i915_active_acquire_barrier(struct i915_active *ref);
 void i915_request_add_active_barriers(struct i915_request *rq);
 
+void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+
 #endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 96513a7d4739..260b0ee5d1e3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -205,3 +205,48 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
 
        return i915_subtests(tests, i915);
 }
+
+static struct intel_engine_cs *node_to_barrier(struct active_node *it)
+{
+       struct intel_engine_cs *engine;
+
+       if (!is_barrier(&it->base))
+               return NULL;
+
+       engine = __barrier_to_engine(it);
+       smp_rmb(); /* serialise with add_active_barriers */
+       if (!is_barrier(&it->base))
+               return NULL;
+
+       return engine;
+}
+
+void i915_active_print(struct i915_active *ref, struct drm_printer *m)
+{
+       drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
+       drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
+       drm_printf(m, "\tpreallocated barriers? %s\n",
+                  yesno(!llist_empty(&ref->preallocated_barriers)));
+
+       if (i915_active_acquire_if_busy(ref)) {
+               struct active_node *it, *n;
+
+               rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+                       struct intel_engine_cs *engine;
+
+                       engine = node_to_barrier(it);
+                       if (engine) {
+                               drm_printf(m, "\tbarrier: %s\n", engine->name);
+                               continue;
+                       }
+
+                       if (i915_active_fence_isset(&it->base)) {
+                               drm_printf(m,
+                                          "\ttimeline: %llx\n", it->timeline);
+                               continue;
+                       }
+               }
+
+               i915_active_release(ref);
+       }
+}
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to