Drawing from commit d2624d90a0b7 ("drm/panthor: assign unique names to
queues"), give scheduler queues proper names that reflect the function
of their JM slot, so that they show up when gathering DRM
scheduler tracepoints.

Signed-off-by: Adrián Larumbe <[email protected]>
---
 drivers/gpu/drm/panfrost/panfrost_drv.c | 16 ++++++----------
 drivers/gpu/drm/panfrost/panfrost_job.c |  8 +++++++-
 drivers/gpu/drm/panfrost/panfrost_job.h |  2 ++
 3 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 22350ce8a08f..607a5b8448d0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -668,23 +668,19 @@ static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
 	 * job spent on the GPU.
 	 */
 
-	static const char * const engine_names[] = {
-		"fragment", "vertex-tiler", "compute-only"
-	};
-
-	BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS);
-
 	for (i = 0; i < NUM_JOB_SLOTS - 1; i++) {
 		if (pfdev->profile_mode) {
 			drm_printf(p, "drm-engine-%s:\t%llu ns\n",
-				   engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]);
+				   panfrost_engine_names[i],
+				   panfrost_priv->engine_usage.elapsed_ns[i]);
 			drm_printf(p, "drm-cycles-%s:\t%llu\n",
-				   engine_names[i], panfrost_priv->engine_usage.cycles[i]);
+				   panfrost_engine_names[i],
+				   panfrost_priv->engine_usage.cycles[i]);
 		}
 		drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n",
-			   engine_names[i], pfdev->pfdevfreq.fast_rate);
+			   panfrost_engine_names[i], pfdev->pfdevfreq.fast_rate);
 		drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n",
-			   engine_names[i], pfdev->pfdevfreq.current_frequency);
+			   panfrost_engine_names[i], pfdev->pfdevfreq.current_frequency);
 	}
 }
 
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index c47d14eabbae..0cc80da12562 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -28,6 +28,10 @@
 #define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
 #define job_read(dev, reg) readl(dev->iomem + (reg))
 
+const char * const panfrost_engine_names[] = {
+	"fragment", "vertex-tiler", "compute-only"
+};
+
 struct panfrost_queue_state {
 	struct drm_gpu_scheduler sched;
 	u64 fence_context;
@@ -846,12 +850,13 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
 		.credit_limit = 2,
 		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
-		.name = "pan_js",
 		.dev = pfdev->dev,
 	};
 	struct panfrost_job_slot *js;
 	int ret, j;
 
+	BUILD_BUG_ON(ARRAY_SIZE(panfrost_engine_names) != NUM_JOB_SLOTS);
+
 	/* All GPUs have two entries per queue, but without jobchain
 	 * disambiguation stopping the right job in the close path is tricky,
 	 * so let's just advertise one entry in that case.
@@ -887,6 +892,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 
 	for (j = 0; j < NUM_JOB_SLOTS; j++) {
 		js->queue[j].fence_context = dma_fence_context_alloc(1);
+		args.name = panfrost_engine_names[j];
 
 		ret = drm_sched_init(&js->queue[j].sched, &args);
 		if (ret) {
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 5a30ff1503c6..458666bf684b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -53,6 +53,8 @@ struct panfrost_jm_ctx {
 	struct drm_sched_entity slot_entity[NUM_JOB_SLOTS];
 };
 
+extern const char * const panfrost_engine_names[];
+
 int panfrost_jm_ctx_create(struct drm_file *file,
 			   struct drm_panfrost_jm_ctx_create *args);
 int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle);

base-commit: 30531e9ca7cd4f8c5740babd35cdb465edf73a2d
-- 
2.51.0
