On 12/15/25 10:20, Tvrtko Ursulin wrote:
> 
> On 12/12/2025 15:50, Christian König wrote:
>> On 12/11/25 16:13, Tvrtko Ursulin wrote:
>>>
>>> On 11/12/2025 13:16, Christian König wrote:
>>>> Using the inline lock is now the recommended way for dma_fence 
>>>> implementations.
>>>>
>>>> So use this approach for the scheduler fences as well, in case anybody
>>>> uses them as a blueprint for their own implementation.
>>>>
>>>> This also saves about 4 bytes by dropping the external spinlock.
>>>>
>>>> Signed-off-by: Christian König <[email protected]>
>>>> ---
>>>>    drivers/gpu/drm/scheduler/sched_fence.c | 7 +++----
>>>>    include/drm/gpu_scheduler.h             | 4 ----
>>>>    2 files changed, 3 insertions(+), 8 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
>>>> index 08ccbde8b2f5..47471b9e43f9 100644
>>>> --- a/drivers/gpu/drm/scheduler/sched_fence.c
>>>> +++ b/drivers/gpu/drm/scheduler/sched_fence.c
>>>> @@ -161,7 +161,7 @@ static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
>>>>        /* If we already have an earlier deadline, keep it: */
>>>>        if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
>>>>            ktime_before(fence->deadline, deadline)) {
>>>> -        spin_unlock_irqrestore(&fence->lock, flags);
>>>> +        dma_fence_unlock_irqrestore(f, flags);
>>>
>>> Rebase error I guess. Pull into the locking helpers patch.
>>
>> No that is actually completely intentional here.
>>
>> Previously we had a separate lock which protected both the DMA-fences and
>> the deadline state.
>>
>> Now we turn that upside down by dropping the separate lock and protecting 
>> the deadline state with the dma_fence lock instead.
> 
> I don't follow. The code is currently like this:
> 
> static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
>                           ktime_t deadline)
> {
>     struct drm_sched_fence *fence = to_drm_sched_fence(f);
>     struct dma_fence *parent;
>     unsigned long flags;
> 
>     spin_lock_irqsave(&fence->lock, flags);
> 
>     /* If we already have an earlier deadline, keep it: */
>     if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
>         ktime_before(fence->deadline, deadline)) {
>         spin_unlock_irqrestore(&fence->lock, flags);
>         return;
>     }
> 
>     fence->deadline = deadline;
>     set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);
> 
>     spin_unlock_irqrestore(&fence->lock, flags);...
> 
> The diff changes one of the three lock/unlock operations. The other two are 
> changed in 3/19. Surely all three should be changed in the same patch.

We could change those spin_lock/unlock calls in patch #3, but I don't think 
that would be clean.

See, the code here currently uses fence->lock, and patch #3 would change it to 
use fence->finished->lock instead. The two might be the same pointer at the 
moment, but that is just coincidence, not design.

Only this change here on top makes it intentional that we use 
fence->finished->lock for everything.
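
To illustrate (a rough sketch only, assuming a dma_fence_lock_irqsave() 
counterpart to the dma_fence_unlock_irqrestore() helper visible in the hunk 
above; not the final code), the deadline path would look roughly like this 
once the whole series is applied:

static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
                          ktime_t deadline)
{
    struct drm_sched_fence *fence = to_drm_sched_fence(f);
    unsigned long flags;

    /*
     * f is &fence->finished here, so this takes the inline lock of the
     * finished fence, which now also protects the deadline state.
     */
    dma_fence_lock_irqsave(f, flags);

    /* If we already have an earlier deadline, keep it: */
    if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
        ktime_before(fence->deadline, deadline)) {
        dma_fence_unlock_irqrestore(f, flags);
        return;
    }

    fence->deadline = deadline;
    set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);

    dma_fence_unlock_irqrestore(f, flags);
    /* ... parent deadline propagation continues unchanged ... */
}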

Regards,
Christian.

> 
> Regards,
> 
> Tvrtko
> 
>>
>> Regards,
>> Christian.
>>
>>>
>>> Regards,
>>>
>>> Tvrtko
>>>
>>>>            return;
>>>>        }
>>>> @@ -217,7 +217,6 @@ struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
>>>>          fence->owner = owner;
>>>>        fence->drm_client_id = drm_client_id;
>>>> -    spin_lock_init(&fence->lock);
>>>>          return fence;
>>>>    }
>>>> @@ -230,9 +229,9 @@ void drm_sched_fence_init(struct drm_sched_fence *fence,
>>>>        fence->sched = entity->rq->sched;
>>>>        seq = atomic_inc_return(&entity->fence_seq);
>>>>        dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
>>>> -               &fence->lock, entity->fence_context, seq);
>>>> +               NULL, entity->fence_context, seq);
>>>>        dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
>>>> -               &fence->lock, entity->fence_context + 1, seq);
>>>> +               NULL, entity->fence_context + 1, seq);
>>>>    }
>>>>      module_init(drm_sched_fence_slab_init);
>>>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>>>> index fb88301b3c45..b77f24a783e3 100644
>>>> --- a/include/drm/gpu_scheduler.h
>>>> +++ b/include/drm/gpu_scheduler.h
>>>> @@ -297,10 +297,6 @@ struct drm_sched_fence {
>>>>             * belongs to.
>>>>             */
>>>>        struct drm_gpu_scheduler    *sched;
>>>> -        /**
>>>> -         * @lock: the lock used by the scheduled and the finished fences.
>>>> -         */
>>>> -    spinlock_t            lock;
>>>>            /**
>>>>             * @owner: job owner for debugging
>>>>             */
>>>
>>
> 
