On 07/08/2025 17:26, Karunika Choo wrote:
> In certain scenarios, it is possible for multiple cache flushes to be
> requested before the previous one completes. This patch introduces the
> cache_flush_lock mutex to serialize these operations and ensure that
> any requested cache flushes are completed instead of dropped.
> 
> Reviewed-by: Liviu Dudau <[email protected]>
> Co-developed-by: Dennis Tsiang <[email protected]>
> Signed-off-by: Dennis Tsiang <[email protected]>
> Signed-off-by: Karunika Choo <[email protected]>

Reviewed-by: Steven Price <[email protected]>

> ---
>  drivers/gpu/drm/panthor/panthor_gpu.c | 7 +++++++
>  1 file changed, 7 insertions(+)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
> index 5e2c3173ae27..db69449a5be0 100644
> --- a/drivers/gpu/drm/panthor/panthor_gpu.c
> +++ b/drivers/gpu/drm/panthor/panthor_gpu.c
> @@ -35,6 +35,9 @@ struct panthor_gpu {
>  
>       /** @reqs_acked: GPU request wait queue. */
>       wait_queue_head_t reqs_acked;
> +
> +     /** @cache_flush_lock: Lock to serialize cache flushes */
> +     struct mutex cache_flush_lock;
>  };
>  
>  #define GPU_INTERRUPTS_MASK  \
> @@ -110,6 +113,7 @@ int panthor_gpu_init(struct panthor_device *ptdev)
>  
>       spin_lock_init(&gpu->reqs_lock);
>       init_waitqueue_head(&gpu->reqs_acked);
> +     mutex_init(&gpu->cache_flush_lock);
>       ptdev->gpu = gpu;
>  
>       dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
> @@ -258,6 +262,9 @@ int panthor_gpu_flush_caches(struct panthor_device *ptdev,
>       bool timedout = false;
>       unsigned long flags;
>  
> +     /* Serialize cache flush operations. */
> +     guard(mutex)(&ptdev->gpu->cache_flush_lock);
> +
>       spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
>       if (!drm_WARN_ON(&ptdev->base,
>                        ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {

Reply via email to