On Wed, Nov 12, 2025 at 4:21 AM Boris Brezillon
<[email protected]> wrote:
>
> When we have multiple active groups with the same priority, we need to
> keep ticking for the priority rotation to take place. If we don't do
> that, we might starve slots with lower priorities.
>
> It's annoying to deal with that in tick_ctx_update_resched_target(),
> so let's add a ::stop_tick field to the tick context that is
> initialized to true and flipped to false as soon as we detect
> something that requires the tick to keep happening. This way we can
> complement the current logic with extra conditions if needed.
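
For readers skimming the diff: the ::stop_tick lifecycle, spread across
the hunks below, condenses to roughly this (simplified sketch, not the
literal panthor code):

	/* tick_ctx_init(): start from "no further tick needed". */
	ctx->stop_tick = true;

	/* tick_ctx_pick_groups_from_list(): a non-idle group sharing its
	 * priority level with an already-picked group means CSG priorities
	 * must keep rotating, so keep ticking.
	 */
	if (!group_is_idle(group) && !list_empty(&ctx->groups[group->priority]))
		ctx->stop_tick = false;

	/* tick_ctx_update_resched_target(): if nothing cleared the flag,
	 * don't reschedule until some external event (new job submitted,
	 * group unblocked, ...) wakes the scheduler up.
	 */
	if (ctx->stop_tick)
		goto no_tick;
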
>
> v2:
> - Add R-b
>
> Fixes: de8548813824 ("drm/panthor: Add the scheduler logical block")
> Signed-off-by: Boris Brezillon <[email protected]>
> Reviewed-by: Steven Price <[email protected]>
> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 36 ++++++++++++-------------
>  1 file changed, 17 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index 4c137581fe40..0394f377c284 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -1882,6 +1882,7 @@ struct panthor_sched_tick_ctx {
>         struct panthor_vm *vms[MAX_CS_PER_CSG];
>         u32 as_count;
>         bool immediate_tick;
> +       bool stop_tick;
>         u32 csg_upd_failed_mask;
>  };
>
> @@ -1941,10 +1942,17 @@ tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
>                 if (!owned_by_tick_ctx)
>                         group_get(group);
>
> -               list_move_tail(&group->run_node, &ctx->groups[group->priority]);
>                 ctx->group_count++;
> +
> +               /* If we have more than one active group with the same priority,
> +                * we need to keep ticking to rotate the CSG priority.
> +                */
>                 if (group_is_idle(group))
>                         ctx->idle_group_count++;
> +               else if (!list_empty(&ctx->groups[group->priority]))
> +                       ctx->stop_tick = false;
> +
> +               list_move_tail(&group->run_node, &ctx->groups[group->priority]);
>
>                 if (i == ctx->as_count)
>                         ctx->vms[ctx->as_count++] = group->vm;
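
One thing worth calling out in this hunk: moving the list_move_tail()
below the new check is what makes it meaningful. If the group were
inserted into ctx->groups[group->priority] before the test, the list
could never appear empty for the group currently being picked, and
::stop_tick would get cleared for every non-idle group. Schematically:

	if (group_is_idle(group))
		ctx->idle_group_count++;
	else if (!list_empty(&ctx->groups[group->priority]))
		ctx->stop_tick = false;	/* a peer exists: keep ticking */

	/* Only insert the group once the check above has run. */
	list_move_tail(&group->run_node, &ctx->groups[group->priority]);
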
> @@ -1996,6 +2004,7 @@ tick_ctx_init(struct panthor_scheduler *sched,
>         memset(ctx, 0, sizeof(*ctx));
>         csgs_upd_ctx_init(&upd_ctx);
>
> +       ctx->stop_tick = true;
>         ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
>         for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
>                 INIT_LIST_HEAD(&ctx->groups[i]);
> @@ -2308,32 +2317,21 @@ static u64
>  tick_ctx_update_resched_target(struct panthor_scheduler *sched,
>                                const struct panthor_sched_tick_ctx *ctx)
>  {
> -       /* We had space left, no need to reschedule until some external event happens. */
> -       if (!tick_ctx_is_full(sched, ctx))
> -               goto no_tick;
> +       u64 resched_target;
>
> -       /* If idle groups were scheduled, no need to wake up until some external
> -        * event happens (group unblocked, new job submitted, ...).
> -        */
> -       if (ctx->idle_group_count)
> +       if (ctx->stop_tick)
>                 goto no_tick;
>
>         if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
>                 goto no_tick;
>
> -       /* If there are groups of the same priority waiting, we need to
> -        * keep the scheduler ticking, otherwise, we'll just wait for
> -        * new groups with higher priority to be queued.
> -        */
> -       if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
It looks like we can drop ctx->min_priority completely. It was mainly
a safeguard to avoid OOB access here.
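
Something along these lines, i.e. with the field and the drm_WARN_ON()
that guarded it gone (untested sketch; assumes nothing else reads
ctx->min_priority once this patch lands, and that the existing no_tick
tail returns U64_MAX):

	static u64
	tick_ctx_update_resched_target(struct panthor_scheduler *sched,
				       const struct panthor_sched_tick_ctx *ctx)
	{
		u64 resched_target;

		/* Nothing cleared ::stop_tick, so there's no reason to tick
		 * again until some external event wakes the scheduler up.
		 */
		if (ctx->stop_tick)
			goto no_tick;

		resched_target = sched->last_tick + sched->tick_period;

		/* Only move the target if it's stale (before last_tick) or
		 * later than the next period boundary.
		 */
		if (time_before64(sched->resched_target, sched->last_tick) ||
		    time_before64(resched_target, sched->resched_target))
			sched->resched_target = resched_target;

		return sched->resched_target - sched->last_tick;

	no_tick:
		sched->resched_target = U64_MAX;
		return U64_MAX; /* assuming the existing no_tick tail stays as-is */
	}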

> -               u64 resched_target = sched->last_tick + sched->tick_period;
> +       resched_target = sched->last_tick + sched->tick_period;
>
> -               if (time_before64(sched->resched_target, sched->last_tick) ||
> -                   time_before64(resched_target, sched->resched_target))
> -                       sched->resched_target = resched_target;
> +       if (time_before64(sched->resched_target, sched->last_tick) ||
> +           time_before64(resched_target, sched->resched_target))
> +               sched->resched_target = resched_target;
>
> -               return sched->resched_target - sched->last_tick;
> -       }
> +       return sched->resched_target - sched->last_tick;
>
>  no_tick:
>         sched->resched_target = U64_MAX;
> --
> 2.51.1
>
