Round-robin is the non-default scheduling policy and it is unclear how
widely it is used. We can observe that it can be implemented on top of the
FIFO data structures if we simply invent a fake submit timestamp which
increases monotonically within each drm_sched_rq instance.

So instead of remembering which entity the scheduler worker picked last,
we can simply bump the picked one to the bottom of the tree, which ensures
round-robin behaviour among all actively queued jobs.

If the picked job was the last one queued by a given entity, we remember
the assigned fake timestamp and use it to re-insert the entity once it
re-joins the queue. This ensures the entity neither overtakes all already
queued jobs, nor goes last. Instead, it keeps its position after the jobs
which were queued, and before the ones which had not yet been queued, at
the point the entity left the queue.
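
Continuing the sketch above, a re-join reuses the remembered timestamp
instead of bumping the fake clock again, which is what preserves that
relative position:

  /*
   * On re-join: the reused timestamp sorts the entity after entities
   * queued before it left, but before anything queued after that.
   */
  static void toy_rejoin(struct toy_entity *e)
  {
          e->key = e->rr_ts;      /* no bump of rq->rr_ts here */
  }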

The advantage is that we can consolidate to a single code path and remove
a bunch of code. The downside is that round-robin mode now needs to take a
lock on the job pop path, but that should not have a measurable
performance impact.

Signed-off-by: Tvrtko Ursulin <[email protected]>
Cc: Christian König <[email protected]>
Cc: Danilo Krummrich <[email protected]>
Cc: Matthew Brost <[email protected]>
Cc: Philipp Stanner <[email protected]>
---
 drivers/gpu/drm/scheduler/sched_internal.h |   7 +-
 drivers/gpu/drm/scheduler/sched_main.c     |   6 +-
 drivers/gpu/drm/scheduler/sched_rq.c       | 123 +++++++--------------
 include/drm/gpu_scheduler.h                |  16 ++-
 4 files changed, 56 insertions(+), 96 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
index aab63a8721d9..b683cf813469 100644
--- a/drivers/gpu/drm/scheduler/sched_internal.h
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -24,11 +24,8 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 void drm_sched_rq_pop_entity(struct drm_sched_entity *entity);
 
 struct drm_sched_entity *
-drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
-                             struct drm_sched_rq *rq);
-struct drm_sched_entity *
-drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
-                               struct drm_sched_rq *rq);
+drm_sched_rq_select_entity(struct drm_gpu_scheduler *sched,
+                          struct drm_sched_rq *rq);
 
 void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 0f45f08fcbd1..94cba321e423 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -889,15 +889,13 @@ void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 static struct drm_sched_entity *
 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 {
-       struct drm_sched_entity *entity;
+       struct drm_sched_entity *entity = NULL;
        int i;
 
        /* Start with the highest priority.
         */
        for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-               entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
-                       drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
-                       drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
+               entity = drm_sched_rq_select_entity(sched, sched->sched_rq[i]);
                if (entity)
                        break;
        }
diff --git a/drivers/gpu/drm/scheduler/sched_rq.c b/drivers/gpu/drm/scheduler/sched_rq.c
index cfc18fc7a411..cf03b38c4eb5 100644
--- a/drivers/gpu/drm/scheduler/sched_rq.c
+++ b/drivers/gpu/drm/scheduler/sched_rq.c
@@ -95,8 +95,9 @@ drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts)
                list_add_tail(&entity->list, &rq->entities);
        }
 
-       if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
-               drm_sched_rq_update_fifo_locked(entity, rq, ts);
+       if (drm_sched_policy == DRM_SCHED_POLICY_RR)
+               ts = entity->rr_ts;
+       drm_sched_rq_update_fifo_locked(entity, rq, ts);
 
        spin_unlock(&rq->lock);
        spin_unlock(&entity->lock);
@@ -124,15 +125,27 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
        atomic_dec(rq->sched->score);
        list_del_init(&entity->list);
 
-       if (rq->current_entity == entity)
-               rq->current_entity = NULL;
-
-       if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
-               drm_sched_rq_remove_fifo_locked(entity, rq);
+       drm_sched_rq_remove_fifo_locked(entity, rq);
 
        spin_unlock(&rq->lock);
 }
 
+static ktime_t
+drm_sched_rq_next_rr_ts(struct drm_sched_rq *rq,
+                       struct drm_sched_entity *entity)
+{
+       ktime_t ts;
+
+       lockdep_assert_held(&entity->lock);
+       lockdep_assert_held(&rq->lock);
+
+       ts = ktime_add_ns(rq->rr_ts, 1);
+       entity->rr_ts = ts;
+       rq->rr_ts = ts;
+
+       return ts;
+}
+
 /**
  * drm_sched_rq_pop_entity - pops an entity
  * @entity: scheduler entity
@@ -141,88 +154,32 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
  */
 void drm_sched_rq_pop_entity(struct drm_sched_entity *entity)
 {
+       struct drm_sched_job *next_job;
+
        /*
         * Update the entity's location in the min heap according to
         * the timestamp of the next job, if any.
         */
-       if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
-               struct drm_sched_job *next;
-
-               next = drm_sched_entity_queue_peek(entity);
-               if (next) {
-                       struct drm_sched_rq *rq;
-
-                       spin_lock(&entity->lock);
-                       rq = entity->rq;
-                       spin_lock(&rq->lock);
-                       drm_sched_rq_update_fifo_locked(entity, rq,
-                                                       next->submit_ts);
-                       spin_unlock(&rq->lock);
-                       spin_unlock(&entity->lock);
-               }
+       next_job = drm_sched_entity_queue_peek(entity);
+       if (next_job) {
+               struct drm_sched_rq *rq;
+               ktime_t ts;
+
+               spin_lock(&entity->lock);
+               rq = entity->rq;
+               spin_lock(&rq->lock);
+               if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+                       ts = next_job->submit_ts;
+               else
+                       ts = drm_sched_rq_next_rr_ts(rq, entity);
+               drm_sched_rq_update_fifo_locked(entity, rq, ts);
+               spin_unlock(&rq->lock);
+               spin_unlock(&entity->lock);
        }
 }
 
 /**
- * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
- *
- * @sched: the gpu scheduler
- * @rq: scheduler run queue to check.
- *
- * Try to find the next ready entity.
- *
- * Return an entity if one is found; return an error-pointer (!NULL) if an
- * entity was ready, but the scheduler had insufficient credits to accommodate
- * its job; return NULL, if no ready entity was found.
- */
-struct drm_sched_entity *
-drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
-                             struct drm_sched_rq *rq)
-{
-       struct drm_sched_entity *entity;
-
-       spin_lock(&rq->lock);
-
-       entity = rq->current_entity;
-       if (entity) {
-               list_for_each_entry_continue(entity, &rq->entities, list) {
-                       if (drm_sched_entity_is_ready(entity))
-                               goto found;
-               }
-       }
-
-       list_for_each_entry(entity, &rq->entities, list) {
-               if (drm_sched_entity_is_ready(entity))
-                       goto found;
-
-               if (entity == rq->current_entity)
-                       break;
-       }
-
-       spin_unlock(&rq->lock);
-
-       return NULL;
-
-found:
-       if (!drm_sched_can_queue(sched, entity)) {
-               /*
-                * If scheduler cannot take more jobs signal the caller to not
-                * consider lower priority queues.
-                */
-               entity = ERR_PTR(-ENOSPC);
-       } else {
-               rq->current_entity = entity;
-               reinit_completion(&entity->entity_idle);
-       }
-
-       spin_unlock(&rq->lock);
-
-       return entity;
-}
-
-/**
- * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
- *
+ * drm_sched_rq_select_entity - Select an entity which provides a job to run
  * @sched: the gpu scheduler
  * @rq: scheduler run queue to check.
  *
@@ -233,8 +190,8 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
  * its job; return NULL, if no ready entity was found.
  */
 struct drm_sched_entity *
-drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
-                               struct drm_sched_rq *rq)
+drm_sched_rq_select_entity(struct drm_gpu_scheduler *sched,
+                          struct drm_sched_rq *rq)
 {
        struct rb_node *rb;
 
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index fb88301b3c45..803dde52dc31 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -94,7 +94,8 @@ struct drm_sched_entity {
         * @lock:
         *
         * Lock protecting the run-queue (@rq) to which this entity belongs,
-        * @priority and the list of schedulers (@sched_list, @num_sched_list).
+        * @priority, the list of schedulers (@sched_list, @num_sched_list) and
+        * the @rr_ts field.
         */
        spinlock_t                      lock;
 
@@ -142,6 +143,13 @@ struct drm_sched_entity {
         */
        enum drm_sched_priority         priority;
 
+       /**
+        * @rr_ts:
+        *
+        * Fake timestamp of the last job popped from this entity.
+        */
+       ktime_t                         rr_ts;
+
        /**
         * @job_queue: the list of jobs of this entity.
         */
@@ -239,8 +247,8 @@ struct drm_sched_entity {
  * struct drm_sched_rq - queue of entities to be scheduled.
  *
  * @sched: the scheduler to which this rq belongs to.
- * @lock: protects @entities, @rb_tree_root and @current_entity.
- * @current_entity: the entity which is to be scheduled.
+ * @lock: protects @entities, @rb_tree_root and @rr_ts.
+ * @rr_ts: monotonically increasing fake timestamp for RR mode.
  * @entities: list of the entities to be scheduled.
 * @rb_tree_root: root of time based priority queue of entities for FIFO scheduling
  *
@@ -253,7 +261,7 @@ struct drm_sched_rq {
 
        spinlock_t                      lock;
        /* Following members are protected by the @lock: */
-       struct drm_sched_entity         *current_entity;
+       ktime_t                         rr_ts;
        struct list_head                entities;
        struct rb_root_cached           rb_tree_root;
 };
-- 
2.48.0
