/**
* DOC: sched_policy (int)
* Used to override default entities scheduling policy in a run queue.
*/
-MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
+MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO, " __stringify(DRM_SCHED_POLICY_FAIR) " = Fair (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);
static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
@@ -1132,11 +1132,15 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
sched->own_submit_wq = true;
}
- sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
+ sched->num_user_rqs = args->num_rqs;
+ sched->num_rqs = drm_sched_policy != DRM_SCHED_POLICY_FAIR ?
+ args->num_rqs : 1;
+ sched->sched_rq = kmalloc_array(sched->num_rqs,
+ sizeof(*sched->sched_rq),
GFP_KERNEL | __GFP_ZERO);
if (!sched->sched_rq)
goto Out_check_own;
- sched->num_rqs = args->num_rqs;
+
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]),
GFP_KERNEL);
if (!sched->sched_rq[i])
@@ -1278,7 +1282,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
- for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
diff --git a/drivers/gpu/drm/scheduler/sched_rq.c b/drivers/gpu/drm/scheduler/sched_rq.c
index 67804815ca0d..b0cf7d2143c8 100644
--- a/drivers/gpu/drm/scheduler/sched_rq.c
+++ b/drivers/gpu/drm/scheduler/sched_rq.c
@@ -16,6 +16,24 @@ drm_sched_entity_compare_before(struct rb_node *a, const struct rb_node *b)
return ktime_before(ea->oldest_job_waiting, eb->oldest_job_waiting);
}
+static void drm_sched_rq_update_prio(struct drm_sched_rq *rq)
+{
+ enum drm_sched_priority prio = -1;
+ struct rb_node *rb;
+
+ lockdep_assert_held(&rq->lock);
+
+ rb = rb_first_cached(&rq->rb_tree_root);
+ if (rb) {
+ struct drm_sched_entity *entity =
+ rb_entry(rb, typeof(*entity), rb_tree_node);
+
+ prio = entity->priority; /* Unlocked read */
+ }
+
+ rq->head_prio = prio;
+}
+
static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
struct drm_sched_rq *rq)
{
@@ -25,6 +43,7 @@ static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
RB_CLEAR_NODE(&entity->rb_tree_node);
+ drm_sched_rq_update_prio(rq);
}
}
@@ -46,6 +65,7 @@ static void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
drm_sched_entity_compare_before);
+ drm_sched_rq_update_prio(rq);
}
/**
@@ -63,6 +83,114 @@ void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
INIT_LIST_HEAD(&rq->entities);
rq->rb_tree_root = RB_ROOT_CACHED;
rq->sched = sched;
+ rq->head_prio = -1;
+}
+
+static ktime_t
+drm_sched_rq_get_min_vruntime(struct drm_sched_rq *rq)
+{
+ struct drm_sched_entity *entity;
+ struct rb_node *rb;
+
+ lockdep_assert_held(&rq->lock);
+
+ for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
+ entity = rb_entry(rb, typeof(*entity), rb_tree_node);
+
+ return entity->stats->vruntime; /* Unlocked read */
+ }
+
+ return 0;
+}
+
+static void
+drm_sched_entity_save_vruntime(struct drm_sched_entity *entity,
+ ktime_t min_vruntime)
+{
+ struct drm_sched_entity_stats *stats = entity->stats;
+ ktime_t vruntime;
+
+ spin_lock(&stats->lock);
+ vruntime = stats->vruntime;
+ if (min_vruntime && vruntime > min_vruntime)
+ vruntime = ktime_sub(vruntime, min_vruntime);
+ else
+ vruntime = 0;
+ stats->vruntime = vruntime;
+ spin_unlock(&stats->lock);
+}
+
+static ktime_t
+drm_sched_entity_restore_vruntime(struct drm_sched_entity *entity,
+ ktime_t min_vruntime,
+ enum drm_sched_priority rq_prio)
+{
+ struct drm_sched_entity_stats *stats = entity->stats;
+ enum drm_sched_priority prio = entity->priority;
+ ktime_t vruntime;
+
+ BUILD_BUG_ON(DRM_SCHED_PRIORITY_NORMAL < DRM_SCHED_PRIORITY_HIGH);
+
+ spin_lock(&stats->lock);
+ vruntime = stats->vruntime;
+
+ /*
+ * Special handling for entities which were picked from the top of the
+ * queue and are now re-joining the top with another one already there.
+ */
+ if (!vruntime && min_vruntime) {
+ if (prio > rq_prio) {
+ /*
+ * Lower priority should not overtake higher when re-
+ * joining at the top of the queue.
+ */
+ vruntime = us_to_ktime(prio - rq_prio);
+ } else if (prio < rq_prio) {
+ /*
+ * Higher priority can go first.
+ */
+ vruntime = -us_to_ktime(rq_prio - prio);
+ }
+ }
+
+ /*
+ * Restore saved relative position in the queue.
+ */
+ vruntime = ktime_add(min_vruntime, vruntime);
+
+ stats->vruntime = vruntime;
+ spin_unlock(&stats->lock);
+
+ return vruntime;
+}
+
+static ktime_t drm_sched_entity_update_vruntime(struct drm_sched_entity *entity)
+{
+ static const unsigned int shift[] = {
+ [DRM_SCHED_PRIORITY_KERNEL] = 1,
+ [DRM_SCHED_PRIORITY_HIGH] = 2,
+ [DRM_SCHED_PRIORITY_NORMAL] = 4,
+ [DRM_SCHED_PRIORITY_LOW] = 7,
+ };
+ struct drm_sched_entity_stats *stats = entity->stats;
+ ktime_t runtime, prev;
+
+ spin_lock(&stats->lock);
+ prev = stats->prev_runtime;
+ runtime = stats->runtime;
+ stats->prev_runtime = runtime;
+ runtime = ktime_add_ns(stats->vruntime,
+ ktime_to_ns(ktime_sub(runtime, prev)) <<
+ shift[entity->priority]);
+ stats->vruntime = runtime;
+ spin_unlock(&stats->lock);
+
+ return runtime;
+}
+
+static ktime_t drm_sched_entity_get_job_ts(struct drm_sched_entity *entity)
+{
+ return drm_sched_entity_update_vruntime(entity);
}
/**
@@ -99,8 +227,14 @@ drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts)
list_add_tail(&entity->list, &rq->entities);
}
- if (drm_sched_policy == DRM_SCHED_POLICY_RR)
+ if (drm_sched_policy == DRM_SCHED_POLICY_FAIR) {
+ ts = drm_sched_rq_get_min_vruntime(rq);
+ ts = drm_sched_entity_restore_vruntime(entity, ts,
+ rq->head_prio);
+ } else if (drm_sched_policy == DRM_SCHED_POLICY_RR) {
ts = entity->rr_ts;
+ }
+
drm_sched_rq_update_fifo_locked(entity, rq, ts);
spin_unlock(&rq->lock);
@@ -173,7 +307,9 @@ void drm_sched_rq_pop_entity(struct drm_sched_entity *entity)
if (next_job) {
ktime_t ts;
- if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+ if (drm_sched_policy == DRM_SCHED_POLICY_FAIR)
+ ts = drm_sched_entity_get_job_ts(entity);
+ else if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
ts = next_job->submit_ts;
else
ts = drm_sched_rq_get_rr_ts(rq, entity);
@@ -181,6 +317,13 @@ void drm_sched_rq_pop_entity(struct drm_sched_entity *entity)
drm_sched_rq_update_fifo_locked(entity, rq, ts);
} else {
drm_sched_rq_remove_fifo_locked(entity, rq);
+
+ if (drm_sched_policy == DRM_SCHED_POLICY_FAIR) {
+ ktime_t min_vruntime;
+
+ min_vruntime = drm_sched_rq_get_min_vruntime(rq);
+ drm_sched_entity_save_vruntime(entity, min_vruntime);
+ }
}
spin_unlock(&rq->lock);
spin_unlock(&entity->lock);
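
For reference, a minimal userspace sketch (not part of the patch; all names below are made up) of the arithmetic drm_sched_entity_update_vruntime() performs: the runtime consumed since the last update is scaled by a per-priority shift before being added to the entity's virtual runtime, so lower priority entities age faster in the rb-tree and get picked less often.

#include <stdio.h>
#include <stdint.h>

enum prio { PRIO_KERNEL, PRIO_HIGH, PRIO_NORMAL, PRIO_LOW };

static const unsigned int shift[] = {
	[PRIO_KERNEL] = 1,
	[PRIO_HIGH]   = 2,
	[PRIO_NORMAL] = 4,
	[PRIO_LOW]    = 7,
};

struct entity {
	enum prio prio;
	int64_t runtime;	/* accumulated GPU time, ns */
	int64_t prev_runtime;	/* runtime at the previous update, ns */
	int64_t vruntime;	/* weighted virtual runtime, ns */
};

/* Charge the runtime delta since the last update, weighted by priority. */
static int64_t update_vruntime(struct entity *e)
{
	int64_t delta = e->runtime - e->prev_runtime;

	e->prev_runtime = e->runtime;
	e->vruntime += delta << shift[e->prio];

	return e->vruntime;
}

int main(void)
{
	struct entity normal = { .prio = PRIO_NORMAL };
	struct entity kernel = { .prio = PRIO_KERNEL };

	/* Both entities consume 1 ms of GPU time. */
	normal.runtime += 1000000;
	kernel.runtime += 1000000;

	/* NORMAL advances 16 ms of vruntime, KERNEL only 2 ms, so a queue
	 * ordered by vruntime serves the KERNEL entity roughly 8x as often
	 * under equal demand. */
	printf("normal: %lld ns\n", (long long)update_vruntime(&normal));
	printf("kernel: %lld ns\n", (long long)update_vruntime(&kernel));

	return 0;
}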
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index f33c78473867..327b75a052c7 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -150,6 +150,11 @@ struct drm_sched_entity {
*/
enum drm_sched_priority priority;
+ /**
+ * @rq_priority: Run-queue priority
+ */
+ enum drm_sched_priority rq_priority;
+
/**
* @rr_ts:
*
@@ -254,10 +259,11 @@ struct drm_sched_entity {
* struct drm_sched_rq - queue of entities to be scheduled.
*
* @sched: the scheduler to which this rq belongs to.
- * @lock: protects @entities, @rb_tree_root and @rr_ts.
+ * @lock: protects @entities, @rb_tree_root, @rr_ts and @head_prio.
* @rr_ts: monotonically incrementing fake timestamp for RR mode
* @entities: list of the entities to be scheduled.
 * @rb_tree_root: root of time based priority queue of entities for FIFO scheduling
+ * @head_prio: priority of the top tree element
*
* Run queue is a set of entities scheduling command submissions for
* one specific ring. It implements the scheduling policy that selects
@@ -271,6 +277,7 @@ struct drm_sched_rq {
ktime_t rr_ts;
struct list_head entities;
struct rb_root_cached rb_tree_root;
+ enum drm_sched_priority head_prio;
};
/**
@@ -597,6 +604,7 @@ struct drm_gpu_scheduler {
long timeout;
const char *name;
u32 num_rqs;
+ u32 num_user_rqs;
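
As an aside on @head_prio, here is a standalone sketch (made-up names, not part of the patch) of how the save/restore pair in sched_rq.c uses it. An entity's vruntime is stored as an offset from the queue minimum when it goes idle; on re-join the offset is added back onto the current minimum, and if the saved offset is zero the priority of the current queue head decides whether the newcomer is nudged just ahead of or just behind it, by a microsecond per priority level.

#include <stdio.h>
#include <stdint.h>

enum prio { PRIO_KERNEL, PRIO_HIGH, PRIO_NORMAL, PRIO_LOW };

/* Offset (ns) stored when the entity leaves the queue. */
static int64_t save_offset(int64_t vruntime, int64_t min_vruntime)
{
	if (min_vruntime && vruntime > min_vruntime)
		return vruntime - min_vruntime;

	return 0;
}

/* Absolute vruntime to use when the entity re-joins the queue. */
static int64_t restore_vruntime(int64_t saved, int64_t min_vruntime,
				enum prio prio, enum prio head_prio)
{
	if (!saved && min_vruntime) {
		if (prio > head_prio)		/* lower priority queues behind the head */
			saved = (int64_t)(prio - head_prio) * 1000;
		else if (prio < head_prio)	/* higher priority goes first */
			saved = -(int64_t)(head_prio - prio) * 1000;
	}

	return min_vruntime + saved;
}

int main(void)
{
	/* An entity picked from the head (offset 0) re-joins a queue whose
	 * head is now a NORMAL entity at 7 ms; being HIGH priority it is
	 * placed 1 us in front of that head. */
	int64_t saved = save_offset(5000000, 5000000);
	int64_t v = restore_vruntime(saved, 7000000, PRIO_HIGH, PRIO_NORMAL);

	printf("restored vruntime: %lld ns\n", (long long)v);

	return 0;
}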