On 2023-06-15 07:56, Christian König wrote:
> Instead of implementing this ourself.

Spellcheck: "ourselves".
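
As a sanity check on the conversion: the per-fence math deleted below
(scheduled/finished timestamps, with a ktime_get() fallback for jobs
still in flight) is presumably what drm_sched_entity_time_spend() now
does internally. Roughly, as a sketch of the removed logic rather than
the actual scheduler code:

	/* Sketch of the per-fence accounting this patch moves into the
	 * scheduler; mirrors the removed amdgpu_ctx_fence_time(), not
	 * necessarily the real drm_sched implementation.
	 */
	static ktime_t fence_time_spent(struct drm_sched_fence *s_fence)
	{
		/* Never scheduled: no time spent on the HW. */
		if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
			      &s_fence->scheduled.flags))
			return ns_to_ktime(0);

		/* Still running: account the time elapsed so far. */
		if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
			      &s_fence->finished.flags))
			return ktime_sub(ktime_get(),
					 s_fence->scheduled.timestamp);

		return ktime_sub(s_fence->finished.timestamp,
				 s_fence->scheduled.timestamp);
	}

As long as the scheduler side keeps those semantics, the numbers
reported by amdgpu_ctx_mgr_usage() should stay equivalent.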

Acked-by: Luben Tuikov <[email protected]>

Regards,
Luben

> 
> Signed-off-by: Christian König <[email protected]>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 52 ++++---------------------
>  1 file changed, 8 insertions(+), 44 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index 1445e030d788..f787a9b06d62 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -163,41 +163,6 @@ static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
>       return hw_prio;
>  }
>  
> -/* Calculate the time spend on the hw */
> -static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
> -{
> -     struct drm_sched_fence *s_fence;
> -
> -     if (!fence)
> -             return ns_to_ktime(0);
> -
> -     /* When the fence is not even scheduled it can't have spend time */
> -     s_fence = to_drm_sched_fence(fence);
> -     if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
> -             return ns_to_ktime(0);
> -
> -     /* When it is still running account how much already spend */
> -     if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
> -             return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);
> -
> -     return ktime_sub(s_fence->finished.timestamp,
> -                      s_fence->scheduled.timestamp);
> -}
> -
> -static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
> -                                   struct amdgpu_ctx_entity *centity)
> -{
> -     ktime_t res = ns_to_ktime(0);
> -     uint32_t i;
> -
> -     spin_lock(&ctx->ring_lock);
> -     for (i = 0; i < amdgpu_sched_jobs; i++) {
> -             res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
> -     }
> -     spin_unlock(&ctx->ring_lock);
> -     return res;
> -}
> -
>  static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
>                                 const u32 ring)
>  {
> @@ -257,16 +222,15 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
>  
>  static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
>  {
> -     ktime_t res = ns_to_ktime(0);
> +     ktime_t res;
>       int i;
>  
>       if (!entity)
> -             return res;
> +             return ns_to_ktime(0);
>  
> -     for (i = 0; i < amdgpu_sched_jobs; ++i) {
> -             res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
> +     for (i = 0; i < amdgpu_sched_jobs; ++i)
>               dma_fence_put(entity->fences[i]);
> -     }
> +     res = drm_sched_entity_time_spend(&entity->entity);
>       drm_sched_entity_destroy(&entity->entity);
>       kfree(entity);
>       return res;
> @@ -718,9 +682,6 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
>       centity->sequence++;
>       spin_unlock(&ctx->ring_lock);
>  
> -     atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
> -                  &ctx->mgr->time_spend[centity->hw_ip]);
> -
>       dma_fence_put(other);
>       return seq;
>  }
> @@ -900,12 +861,15 @@ void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
>               for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
>                       for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
>                               struct amdgpu_ctx_entity *centity;
> +                             struct drm_sched_entity *entity;
>                               ktime_t spend;
>  
>                               centity = ctx->entities[hw_ip][i];
>                               if (!centity)
>                                       continue;
> -                             spend = amdgpu_ctx_entity_time(ctx, centity);
> +
> +                             entity = &centity->entity;
> +                             spend = drm_sched_entity_time_spend(entity);
>                               usage[hw_ip] = ktime_add(usage[hw_ip], spend);
>                       }
>               }
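
For completeness, my mental model of the new helper is the sketch
below; this is just a guess with a hypothetical "time_spent"
accumulator, since I haven't cross-checked the scheduler side of
this series:

	/* Hypothetical sketch only: assumes the scheduler accumulates
	 * job time into an entity field as fences complete. Neither
	 * the field name nor the locking is verified against the
	 * actual drm_sched patch.
	 */
	ktime_t drm_sched_entity_time_spend(struct drm_sched_entity *entity)
	{
		ktime_t total;

		spin_lock(&entity->rq_lock);
		total = entity->time_spent;	/* hypothetical field */
		spin_unlock(&entity->rq_lock);

		return total;
	}

If that matches, dropping the ring_lock-protected walk over
centity->fences[] in amdgpu is a nice simplification.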
