On 11/10/25 5:37 PM, Akhil P Oommen wrote:
> A8x is the next generation of Adreno GPUs, featuring a significant
> hardware design change. A major update to the design is the introduction
> of the Slice architecture. Slices are essentially mini-GPUs within the GPU
> which can process graphics and compute workloads more independently. Also,
> in addition to the BV and BR pipes we saw in A7x, the CP gains more
> concurrency through additional pipes.
> 
> From a software interface perspective, these changes have a significant
> impact on the KMD side. First, the GPU register space has been extensively
> reorganized. Second, to avoid a register space explosion caused by the
> new slice architecture and additional pipes, many registers are now
> virtualized instead of duplicated as in A7x. The KMD must configure an
> aperture register with the appropriate slice and pipe ID before accessing
> these virtualized registers.
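
Might be worth illustrating the resulting access pattern in the commit
message too, roughly along these lines (reusing the helpers this patch adds,
as in a8xx_nonctxt_config() below; "value" is just a stand-in here):

        unsigned long flags;

        a8xx_aperture_aquire(gpu, PIPE_BR, &flags);
        /* reads/writes now target the BR pipe, slice 0 */
        gpu_write(gpu, REG_A8XX_RB_GC_GMEM_PROTECT, value);
        a8xx_aperture_release(gpu, flags);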
> 
> This patch adds only skeleton support for the A8x family. Support for an
> A8x GPU will be added in an upcoming patch.
> 
> Signed-off-by: Akhil P Oommen <[email protected]>
> ---

[...]

> +static void a8xx_aperture_slice_set(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice)
> +{
> +     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> +     struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> +     u32 val;
> +
> +     val = A8XX_CP_APERTURE_CNTL_HOST_PIPEID(pipe) | A8XX_CP_APERTURE_CNTL_HOST_SLICEID(slice);

There's also a BIT(23) value here which is seemingly never set, but it may
come in useful for the bigger GPU.
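
Even an (unused) placeholder in the XML / generated header would keep it
from getting lost - the name below is a pure guess on my part:

        #define A8XX_CP_APERTURE_CNTL_HOST_UNK23	0x00800000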

> +
> +     if (a6xx_gpu->cached_aperture == val)
> +             return;
> +
> +     gpu_write(gpu, REG_A8XX_CP_APERTURE_CNTL_HOST, val);
> +
> +     a6xx_gpu->cached_aperture = val;
> +}
> +
> +static void a8xx_aperture_aquire(struct msm_gpu *gpu, enum adreno_pipe pipe, unsigned long *flags)

"acquire"

> +{
> +     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> +     struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> +
> +     spin_lock_irqsave(&a6xx_gpu->aperture_lock, *flags);
> +
> +     a8xx_aperture_slice_set(gpu, pipe, 0);

Maybe we can add "unsigned long flags[MAX_NUM_SLICES]" to a6xx_gpu
to make the API a little more ergonomic... but maybe that's too much,
IDK
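
Something like this, perhaps (completely untested sketch; the field name is
made up, and it could equally be the per-slice array):

        /* in struct a6xx_gpu */
        unsigned long aperture_flags;

        static void a8xx_aperture_aquire(struct msm_gpu *gpu, enum adreno_pipe pipe)
        {
                struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
                struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

                spin_lock_irqsave(&a6xx_gpu->aperture_lock, a6xx_gpu->aperture_flags);

                a8xx_aperture_slice_set(gpu, pipe, 0);
        }

with a8xx_aperture_release() then unlocking using the stashed flags.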

[...]

> +     a6xx_gpu->slice_mask = a6xx_llc_read(a6xx_gpu,
> +                     REG_A8XX_CX_MISC_SLICE_ENABLE_FINAL) & GENMASK(3, 0);

Please define that field in the XML
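
i.e. so this can end up as something like (bitfield name is just a guess):

        a6xx_gpu->slice_mask = a6xx_llc_read(a6xx_gpu, REG_A8XX_CX_MISC_SLICE_ENABLE_FINAL) &
                               A8XX_CX_MISC_SLICE_ENABLE_FINAL_SLICES__MASK;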

[...]

> +}
> +
> +static u32 a8xx_get_first_slice(struct a6xx_gpu *a6xx_gpu)
> +{
> +     return ffs(a6xx_gpu->slice_mask) - 1;
> +}
> +
> +static inline bool _a8xx_check_idle(struct msm_gpu *gpu)
> +{
> +     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> +     struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> +
> +     /* Check that the GMU is idle */
> +     if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
> +             return false;
> +
> +     /* Check that the CX master is idle */
> +     if (gpu_read(gpu, REG_A8XX_RBBM_STATUS) &
> +                     ~A8XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
> +             return false;
> +
> +     return !(gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS) &
> +             A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);

Odd parenthesis alignment (a couple of times in the file); checkpatch
usually mumbles about that
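
e.g. here the continuation would normally line up with the opening
parenthesis:

        return !(gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS) &
                 A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);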

[...]

> +
> +void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
> +{
> +     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> +     struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> +     uint32_t wptr;
> +     unsigned long flags;
> +
> +     spin_lock_irqsave(&ring->preempt_lock, flags);
> +
> +     /* Copy the shadow to the actual register */
> +     ring->cur = ring->next;
> +
> +     /* Make sure to wrap wptr if we need to */
> +     wptr = get_wptr(ring);
> +
> +     /* Update HW if this is the current ring and we are not in preempt*/
> +     if (!a6xx_in_preempt(a6xx_gpu)) {
> +             if (a6xx_gpu->cur_ring == ring)
> +                     gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);

I think this should use _fenced too, but I guess the preempt detail
is just a harmless copypasta

[...]

> +static void a8xx_set_hwcg(struct msm_gpu *gpu, bool state)
> +{
> +     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> +     struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> +     struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
> +     u32 val;
> +
> +     gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
> +                     state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
> +     gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
> +                     state ? 0x110111 : 0);

a840 sets this, a830 sets 0x10111, please confirm which way x2 skews

> +     gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
> +                     state ? 0x55555 : 0);
> +
> +     gpu_write(gpu, REG_A8XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
> +     gpu_write(gpu, REG_A8XX_RBBM_CGC_GLOBAL_LOAD_CMD, state ? 1 : 0);

!!state

[...]

> +static void a8xx_nonctxt_config(struct msm_gpu *gpu, u32 *gmem_protect)
> +{
> +     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> +     const struct a6xx_info *info = adreno_gpu->info->a6xx;
> +     const struct adreno_reglist_pipe *regs = info->nonctxt_reglist;
> +     unsigned int pipe_id, i;
> +     unsigned long flags;
> +
> +     for (pipe_id = PIPE_NONE; pipe_id <= PIPE_DDE_BV; pipe_id++) {
> +             /* We don't have support for LPAC yet */
> +             if (pipe_id == PIPE_LPAC)
> +                     continue;

This seems arbitrary - for one, there are no defines targeting PIPE_LPAC
specifically in the reg lists you shared, and for another, it would almost
certainly not hurt to configure these registers while otherwise not powering
up the LPAC pipeline

> +
> +             a8xx_aperture_aquire(gpu, pipe_id, &flags);
> +
> +             for (i = 0; regs[i].offset; i++) {
> +                     if (!(BIT(pipe_id) & regs[i].pipe))
> +                             continue;
> +
> +                     if (regs[i].offset == REG_A8XX_RB_GC_GMEM_PROTECT)
> +                             *gmem_protect = regs[i].value;
> +
> +                     gpu_write(gpu, regs[i].offset, regs[i].value);
> +             }
> +
> +             a8xx_aperture_release(gpu, flags);
> +     }
> +
> +     a8xx_aperture_clear(gpu);
> +}
> +
> +static int a8xx_cp_init(struct msm_gpu *gpu)
> +{
> +     struct msm_ringbuffer *ring = gpu->rb[0];
> +     u32 mask;
> +
> +     /* Disable concurrent binning before sending CP init */
> +     OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
> +     OUT_RING(ring, BIT(27));
> +
> +     OUT_PKT7(ring, CP_ME_INIT, 4);
> +
> +     /* Use multiple HW contexts */
> +     mask = BIT(0);
> +
> +     /* Enable error detection */
> +     mask |= BIT(1);
> +
> +     /* Set default reset state */
> +     mask |= BIT(3);
> +
> +     /* Disable save/restore of performance counters across preemption */
> +     mask |= BIT(6);
> +
> +     OUT_RING(ring, mask);
> +
> +     /* Enable multiple hardware contexts */
> +     OUT_RING(ring, 0x00000003);
> +
> +     /* Enable error detection */
> +     OUT_RING(ring, 0x20000000);
> +
> +     /* Operation mode mask */
> +     OUT_RING(ring, 0x00000002);

Should we include the pwrup reglist from the get-go too? I don't think
you used the ones you declared in patch 15 (or at least my ctrl-f can't
find any use of them)

[...]

> +#define A8XX_CP_INTERRUPT_STATUS_MASK_PIPE \
> +     (A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFRBWRAP | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB1WRAP | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB2WRAP | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB3WRAP | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFSDSWRAP | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFMRBWRAP | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFVSDWRAP | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_OPCODEERROR | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VSDPARITYERROR | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_REGISTERPROTECTIONERROR | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_ILLEGALINSTRUCTION | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_SMMUFAULT | \
> +      A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESP | \

kgsl also enables VBIFRESP(TYPE/READ/CLIENT)
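
i.e. presumably something like the below added to the mask, assuming the
defines follow the same naming pattern in your XML:

         A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPTYPE | \
         A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPREAD | \
         A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPCLIENT | \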

[...]

> +     /* Setup GMEM Range in UCHE */
> +     gmem_range_min = SZ_64M;

this doesn't seem to ever change, you can inline it

[...]

> +static void a8xx_dump(struct msm_gpu *gpu)
> +{
> +     DRM_DEV_INFO(&gpu->pdev->dev, "status:   %08x\n",
> +                     gpu_read(gpu, REG_A8XX_RBBM_STATUS));

This can be a single line
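
i.e.:

        DRM_DEV_INFO(&gpu->pdev->dev, "status:   %08x\n", gpu_read(gpu, REG_A8XX_RBBM_STATUS));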

Konrad
