On Thu, 2024-11-14 at 11:01 +0100, Pierre-Eric Pelloux-Prayer wrote:
> A fence uniquely identify a job, so this commits updates the places

s/identify/identifies

> where a kernel pointer was used as an identifier by:

But something like this would be better:

"Currently, the scheduler's tracing infrastructure uses the job's
dma_fence and the drm_sched_entity the job belongs to. The dma_fence
alone, however, already uniquely identifies a job.

Use the dma_fence's context and sequence number to exclusively
identify a job in debug prints like so:

> 
>    "fence=(context:%llu, seqno:%lld)"
> 
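As an aside, the (context, seqno) pair is also friendlier to tooling
than a raw kernel pointer: a consumer of the trace can key every event
off that pair alone. Rough userspace sketch, just to illustrate the
idea; the helper name is made up and this is not part of the patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct job_id {
	uint64_t context;
	int64_t seqno;
};

/*
 * Pull the job identity out of one line of trace output, e.g.
 * "... id=12, fence=(context:143, seqno:5), ring=gfx ...".
 * Returns 1 on success, 0 if the line carries no fence field.
 */
static int parse_job_id(const char *line, struct job_id *id)
{
	const char *p = strstr(line, "fence=(context:");

	if (!p)
		return 0;

	return sscanf(p, "fence=(context:%" SCNu64 ", seqno:%" SCNd64 ")",
		      &id->context, &id->seqno) == 2;
}
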
> Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
> ---
>  .../gpu/drm/scheduler/gpu_scheduler_trace.h   | 39 +++++++++++--------
>  1 file changed, 22 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> index c4ec28540656..24358c4d5bbe 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> @@ -36,28 +36,29 @@ DECLARE_EVENT_CLASS(drm_sched_job,
>           TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
>           TP_ARGS(sched_job, entity),
>           TP_STRUCT__entry(
> -                          __field(struct drm_sched_entity *, entity)
> -                          __field(struct dma_fence *, fence)
>                            __string(name, sched_job->sched->name)
>                            __field(uint64_t, id)
>                            __field(u32, job_count)
>                            __field(int, hw_job_count)
>                            __string(dev, dev_name(sched_job->sched->dev))
> +                          __field(uint64_t, fence_context)
> +                          __field(uint64_t, fence_seqno)
>                            ),
>  
>           TP_fast_assign(
> -                        __entry->entity = entity;
>                          __entry->id = sched_job->id;
> -                        __entry->fence = &sched_job->s_fence->finished;
>                          __assign_str(name);
>                          __entry->job_count = spsc_queue_count(&entity->job_queue);
>                          __entry->hw_job_count = atomic_read(
>                                  &sched_job->sched->credit_count);
>                          __assign_str(dev);
> +                        __entry->fence_context = sched_job->s_fence->finished.context;
> +                        __entry->fence_seqno = sched_job->s_fence->finished.seqno;
> +
>                          ),
> -         TP_printk("dev=%s, entity=%p, id=%llu, fence=%p,
> ring=%s, job count:%u, hw job count:%d",
> -                   __get_str(dev), __entry->entity, __entry->id,
> -                   __entry->fence, __get_str(name),
> +         TP_printk("dev=%s, id=%llu, fence=(context:%llu,
> seqno:%lld), ring=%s, job count:%u, hw job count:%d",
> +                   __get_str(dev), __entry->id,
> +                   __entry->fence_context, __entry->fence_seqno,
> __get_str(name),
>                     __entry->job_count, __entry->hw_job_count)
>  );
>  
> @@ -75,37 +76,41 @@ TRACE_EVENT(drm_sched_process_job,
>           TP_PROTO(struct drm_sched_fence *fence),
>           TP_ARGS(fence),
>           TP_STRUCT__entry(
> -                 __field(struct dma_fence *, fence)
> +                 __field(uint64_t, fence_context)
> +                 __field(uint64_t, fence_seqno)
>                   ),
>  
>           TP_fast_assign(
> -                 __entry->fence = &fence->finished;
> +                 __entry->fence_context = fence->finished.context;
> +                 __entry->fence_seqno = fence->finished.seqno;
>                   ),
> -         TP_printk("fence=%p signaled", __entry->fence)
> +         TP_printk("fence=(context:%llu, seqno:%lld) signaled",
> +                   __entry->fence_context, __entry->fence_seqno)
>  );
>  
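Side note, the run-time output becomes easier to correlate as well: the
drm_sched_job and drm_sched_process_job events of the same job now carry
the same identifier, so (with made-up values) something like

   dev=0000:0a:00.0, id=104, fence=(context:143, seqno:7), ring=gfx, job count:0, hw job count:1
   fence=(context:143, seqno:7) signaled

can be matched without keeping any pointer around.
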
>  TRACE_EVENT(drm_sched_job_wait_dep,
>           TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
>           TP_ARGS(sched_job, fence),
>           TP_STRUCT__entry(
> -                          __string(name, sched_job->sched->name)
> +                          __field(uint64_t, fence_context)
> +                          __field(uint64_t, fence_seqno)
>                            __field(uint64_t, id)
>                            __field(struct dma_fence *, fence)
>                            __field(uint64_t, ctx)
> -                          __field(unsigned, seqno)
> +                          __field(uint64_t, seqno)
>                            ),
>  
>           TP_fast_assign(
> -                        __assign_str(name);
> +                        __entry->fence_context = sched_job->s_fence->finished.context;
> +                        __entry->fence_seqno = sched_job->s_fence->finished.seqno;
>                          __entry->id = sched_job->id;
>                          __entry->fence = fence;
>                          __entry->ctx = fence->context;
>                          __entry->seqno = fence->seqno;
>                          ),
> -         TP_printk("job ring=%s, id=%llu, depends fence=%p,
> context=%llu, seq=%u",
> -                   __get_str(name), __entry->id,
> -                   __entry->fence, __entry->ctx,
> -                   __entry->seqno)
> +         TP_printk("fence=(context:%llu, seqno:%lld), id=%llu,
> dependencies:{(context:%llu, seqno:%lld)}",
> +                   __entry->fence_context, __entry->fence_seqno,
> __entry->id,
> +                   __entry->ctx, __entry->seqno)
>  );
>  
>  #endif
