In order to maximise concurrency between engines, if we queue a request
to a currently idle ring, reorder its dependencies so that the request
executes as early as possible, ideally improving occupancy of multiple
engines simultaneously.
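
As an aside for review, below is a minimal, standalone sketch (userspace
C, not part of the patch) of how the effective priority is composed with
the new layout: the user priority occupies the bits at and above
I915_USER_PRIORITY_SHIFT, while the two low bits carry the
scheduler-internal NEWCLIENT and STALL bonuses. The effective_priority()
helper is hypothetical and exists only to illustrate the bit packing.

    /* Illustrative only: models the priority layout introduced below. */
    #include <stdbool.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    #define I915_USER_PRIORITY_SHIFT 2
    #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)

    #define I915_PRIORITY_NEWCLIENT BIT(1) /* client has no request in flight */
    #define I915_PRIORITY_STALL     BIT(0) /* target engine queue was empty */

    static int effective_priority(int user_prio, bool new_client, bool engine_idle)
    {
            int prio = I915_USER_PRIORITY(user_prio);

            if (new_client)
                    prio |= I915_PRIORITY_NEWCLIENT;
            if (engine_idle)
                    prio |= I915_PRIORITY_STALL;

            return prio;
    }

    int main(void)
    {
            /* Same user priority; the request queued to the idle engine
             * gets the small STALL boost and so sorts ahead of its twin. */
            printf("busy engine: %d\n", effective_priority(0, false, false));
            printf("idle engine: %d\n", effective_priority(0, false, true));
            return 0;
    }

Within the same user priority level, a request aimed at an empty engine
queue therefore sorts ahead of an otherwise identical one, while still
ranking below the NEWCLIENT bonus.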

Signed-off-by: Chris Wilson <[email protected]>
---
 drivers/gpu/drm/i915/i915_request.c     | 6 +++++-
 drivers/gpu/drm/i915/i915_scheduler.h   | 5 +++--
 drivers/gpu/drm/i915/intel_ringbuffer.h | 6 ++++++
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 2d7a785dd88c..d618e7127e88 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1053,7 +1053,8 @@ void i915_request_add(struct i915_request *request)
        local_bh_disable();
        rcu_read_lock(); /* RCU serialisation for set-wedged protection */
        if (engine->schedule) {
-               struct i915_sched_attr attr = request->gem_context->sched;
+               struct i915_gem_context *ctx = request->gem_context;
+               struct i915_sched_attr attr = ctx->sched;
 
                /*
                 * Boost priorities to new clients (new request flows).
@@ -1064,6 +1065,9 @@ void i915_request_add(struct i915_request *request)
                if (!prev || i915_request_completed(prev))
                        attr.priority |= I915_PRIORITY_NEWCLIENT;
 
+               if (intel_engine_queue_is_empty(engine))
+                       attr.priority |= I915_PRIORITY_STALL;
+
                engine->schedule(request, &attr);
        }
        rcu_read_unlock();
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index e9fb6c1d5e42..be132ceb83d9 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -19,13 +19,14 @@ enum {
        I915_PRIORITY_INVALID = INT_MIN
 };
 
-#define I915_USER_PRIORITY_SHIFT 1
+#define I915_USER_PRIORITY_SHIFT 2
 #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
 
 #define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
 #define I915_PRIORITY_MASK (-I915_PRIORITY_COUNT)
 
-#define I915_PRIORITY_NEWCLIENT BIT(0)
+#define I915_PRIORITY_NEWCLIENT BIT(1)
+#define I915_PRIORITY_STALL BIT(0)
 
 struct i915_sched_attr {
        /**
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3497b79b4e8b..9d3d9491f527 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1069,6 +1069,12 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
        return cs;
 }
 
+static inline bool
+intel_engine_queue_is_empty(const struct intel_engine_cs *engine)
+{
+       return RB_EMPTY_ROOT(&engine->execlists.queue.rb_root);
+}
+
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
-- 
2.17.1
