Shashank, can you take a look as well?

Thanks,
Christian.

On 25.04.24 at 15:40, Alex Deucher wrote:
Series looks good to me.

Reviewed-by: Alex Deucher <[email protected]>

On Thu, Apr 25, 2024 at 6:07 AM Jack Xiao <[email protected]> wrote:
Enable MES to map legacy queues.

Signed-off-by: Jack Xiao <[email protected]>
Reviewed-by: Hawking Zhang <[email protected]>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 39 +++++++++++++++++++++----
  1 file changed, 34 insertions(+), 5 deletions(-)
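
For illustration only, here is a minimal standalone sketch of the control flow this change introduces: when adev->enable_mes is set, the legacy compute/gfx rings are mapped through amdgpu_mes_map_legacy_queue() and the KIQ packet path is skipped. Every type and helper in the sketch (fake_device, fake_ring, mes_map_legacy_queue, kiq_map_queues) is a made-up stand-in, not the driver's real API; the actual change is in the hunks below.

/*
 * Hypothetical stand-alone model of the new amdgpu_gfx_enable_kcq() flow.
 * None of these types or helpers exist in the driver; they only mirror the
 * shape of the real code for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_ring {
	int index;
};

struct fake_device {
	bool enable_mes;
	int num_compute_rings;
	struct fake_ring compute_ring[8];
};

/* Stand-in for amdgpu_mes_map_legacy_queue(): MES maps the queue directly. */
static int mes_map_legacy_queue(struct fake_device *adev, struct fake_ring *ring)
{
	printf("MES maps legacy compute queue %d\n", ring->index);
	return 0;
}

/* Stand-in for the existing KIQ packet submission path. */
static int kiq_map_queues(struct fake_device *adev)
{
	printf("KIQ maps %d compute queues via ring packets\n",
	       adev->num_compute_rings);
	return 0;
}

static int enable_kcq(struct fake_device *adev)
{
	int i, r;

	/* New behaviour: with MES enabled, map each legacy ring through MES
	 * and return early, skipping the KIQ packet path entirely. */
	if (adev->enable_mes) {
		for (i = 0; i < adev->num_compute_rings; i++) {
			r = mes_map_legacy_queue(adev, &adev->compute_ring[i]);
			if (r)
				return r;
		}
		return 0;
	}

	/* Old behaviour is kept unchanged for the non-MES case. */
	return kiq_map_queues(adev);
}

int main(void)
{
	struct fake_device adev = {
		.enable_mes = true,
		.num_compute_rings = 2,
		.compute_ring = { { 0 }, { 1 } },
	};

	return enable_kcq(&adev);
}
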

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index d95555dc5485..172b7ba5d0a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -622,10 +622,28 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
                 queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
         }

-       DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
-                                                       kiq_ring->queue);
         amdgpu_device_flush_hdp(adev, NULL);

+       if (adev->enable_mes)
+               queue_mask = ~0ULL;
+
+       if (adev->enable_mes) {
+               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                       j = i + xcc_id * adev->gfx.num_compute_rings;
+                       r = amdgpu_mes_map_legacy_queue(adev,
+                                                       &adev->gfx.compute_ring[j]);
+                       if (r) {
+                               DRM_ERROR("failed to map compute queue\n");
+                               return r;
+                       }
+               }
+
+               return 0;
+       }
+
+       DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
+                kiq_ring->queue);
+
         spin_lock(&kiq->ring_lock);
         r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
                                         adev->gfx.num_compute_rings +
@@ -636,9 +654,6 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
                 return r;
         }

-       if (adev->enable_mes)
-               queue_mask = ~0ULL;
-
         kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                 j = i + xcc_id * adev->gfx.num_compute_rings;
@@ -665,6 +680,20 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)

         amdgpu_device_flush_hdp(adev, NULL);

+       if (adev->enable_mes) {
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+                       j = i + xcc_id * adev->gfx.num_gfx_rings;
+                       r = amdgpu_mes_map_legacy_queue(adev,
+                                                       &adev->gfx.gfx_ring[j]);
+                       if (r) {
+                               DRM_ERROR("failed to map gfx queue\n");
+                               return r;
+                       }
+               }
+
+               return 0;
+       }
+
         spin_lock(&kiq->ring_lock);
         /* No need to map kcq on the slave */
         if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
--
2.41.0

