On Thu, 27 Nov 2025 08:12:39 +0000
Akash Goel <[email protected]> wrote:

> This commit prevents a potential use-after-free in the GROUP_CREATE
> ioctl function, which arises because the pointer to the group is
> accessed in that ioctl function after it has been stored in the XArray.
> A malicious userspace can guess the handle of a group and call the
> GROUP_DESTROY ioctl from another thread at around the same time as the
> GROUP_CREATE ioctl.
> 
> To prevent the use-after-free exploit, this commit sets a mark on the
> group's entry in the group pool XArray just before returning from the
> GROUP_CREATE ioctl function. The mark is checked by all ioctls that
> take a group handle, so userspace won't be able to delete a group
> that isn't marked yet.
> 
> Co-developed-by: Boris Brezillon <[email protected]>
> Signed-off-by: Akash Goel <[email protected]>

Already approved privately, but here's the official tag:

Reviewed-by: Boris Brezillon <[email protected]>
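
For anyone skimming the thread, here's a minimal sketch of the publish-on-mark
pattern the changelog describes, using made-up my_pool/my_obj/OBJ_READY names
rather than the panthor ones: the object is stored in the XArray first, but
handle-based paths only act on it once the mark is set, after initialization
has finished.

/*
 * Minimal sketch (hypothetical names, not the panthor ones): an object is
 * stored in the XArray first, but only becomes actionable through its
 * handle once OBJ_READY is set, after init has finished.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#define OBJ_READY	XA_MARK_1
#define MAX_OBJS	128

struct my_obj {
	int data;
};

struct my_pool {
	struct xarray xa;
};

static void my_pool_init(struct my_pool *pool)
{
	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC1);
}

static int my_obj_create(struct my_pool *pool)
{
	struct my_obj *obj;
	u32 id;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	ret = xa_alloc(&pool->xa, &id, obj, XA_LIMIT(1, MAX_OBJS), GFP_KERNEL);
	if (ret) {
		kfree(obj);
		return ret;
	}

	/* ... long, possibly sleeping initialization of obj ... */

	/* Publish: only now may other threads act on this handle. */
	xa_set_mark(&pool->xa, id, OBJ_READY);
	return id;
}

static int my_obj_destroy(struct my_pool *pool, u32 id)
{
	struct my_obj *obj;

	/* Reject handles that were never fully published. */
	if (!xa_get_mark(&pool->xa, id, OBJ_READY))
		return -EINVAL;

	obj = xa_erase(&pool->xa, id);
	if (!obj)
		return -EINVAL;

	kfree(obj);
	return 0;
}

The hunks below apply the same idea with GROUP_REGISTERED:
panthor_group_destroy() checks the mark before erasing, and
group_from_handle() filters on it via xa_find().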

> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 19 +++++++++++++++----
>  1 file changed, 15 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index b834123a6560..a6b8024e1a3c 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -779,6 +779,12 @@ struct panthor_job_profiling_data {
>   */
>  #define MAX_GROUPS_PER_POOL 128
>  
> +/*
> + * Mark set on a group pool XArray entry to indicate that the group has
> + * been fully initialized and can be accessed elsewhere in the driver code.
> + */
> +#define GROUP_REGISTERED XA_MARK_1
> +
>  /**
>   * struct panthor_group_pool - Group pool
>   *
> @@ -3007,7 +3013,7 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
>               return;
>  
>       xa_lock(&gpool->xa);
> -     xa_for_each(&gpool->xa, i, group) {
> +     xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) {
>               guard(spinlock)(&group->fdinfo.lock);
>               pfile->stats.cycles += group->fdinfo.data.cycles;
>               pfile->stats.time += group->fdinfo.data.time;
> @@ -3727,6 +3733,8 @@ int panthor_group_create(struct panthor_file *pfile,
>  
>       group_init_task_info(group);
>  
> +     xa_set_mark(&gpool->xa, gid, GROUP_REGISTERED);
> +
>       return gid;
>  
>  err_erase_gid:
> @@ -3744,6 +3752,9 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
>       struct panthor_scheduler *sched = ptdev->scheduler;
>       struct panthor_group *group;
>  
> +     if (!xa_get_mark(&gpool->xa, group_handle, GROUP_REGISTERED))
> +             return -EINVAL;
> +
>       group = xa_erase(&gpool->xa, group_handle);
>       if (!group)
>               return -EINVAL;
> @@ -3769,12 +3780,12 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
>  }
>  
>  static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
> -                                            u32 group_handle)
> +                                            unsigned long group_handle)
>  {
>       struct panthor_group *group;
>  
>       xa_lock(&pool->xa);
> -     group = group_get(xa_load(&pool->xa, group_handle));
> +     group = group_get(xa_find(&pool->xa, &group_handle, group_handle, GROUP_REGISTERED));
>       xa_unlock(&pool->xa);
>  
>       return group;
> @@ -3861,7 +3872,7 @@ panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
>               return;
>  
>       xa_lock(&gpool->xa);
> -     xa_for_each(&gpool->xa, i, group) {
> +     xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) {
>               stats->resident += group->fdinfo.kbo_sizes;
>               if (group->csg_id >= 0)
>                       stats->active += group->fdinfo.kbo_sizes;
