When submitting the MQD to the CP, set the
SDMA_RLCx_IB_CNTL/SWITCH_INSIDE_IB bit so that SDMA preemption remains
possible when a massive command buffer of long-running SDMA commands is
submitted.

Signed-off-by: Amber Lin <[email protected]>
---
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 4 ++++
 1 file changed, 4 insertions(+)
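
For reviewers less familiar with the MQD path, below is a minimal
userspace sketch (not driver code) of the read-modify-write pattern the
hunk uses: the SWITCH_INSIDE_IB bit is OR-ed into the queue's IB control
word before the MQD is handed to the CP. The struct and the mask value
here are placeholders; the real definitions come from the SDMA register
headers and kfd_mqd_manager_v9.c.

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder mask -- the real value comes from the SDMA register
	 * headers (SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK); bit 4 here is
	 * illustrative only.
	 */
	#define SWITCH_INSIDE_IB_MASK (1u << 4)

	/* Stand-in for the SDMA MQD field updated by the patch. */
	struct fake_sdma_mqd {
		uint32_t sdmax_rlcx_ib_cntl;
	};

	int main(void)
	{
		struct fake_sdma_mqd m = { .sdmax_rlcx_ib_cntl = 0 };

		/* Same pattern as update_mqd_sdma(): keep the existing bits
		 * and set the switch-inside-IB bit so the engine may be
		 * preempted in the middle of an IB.
		 */
		m.sdmax_rlcx_ib_cntl |= SWITCH_INSIDE_IB_MASK;

		printf("sdmax_rlcx_ib_cntl = 0x%08x\n", m.sdmax_rlcx_ib_cntl);
		return 0;
	}
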

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 80320a6c8854..116932a20b3f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -495,6 +495,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
        m->sdma_engine_id = q->sdma_engine_id;
        m->sdma_queue_id = q->sdma_queue_id;
        m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
+       /* Allow context switch so a massive command buffer of long-running
+        * SDMA commands doesn't starve other processes
+        */
+       m->sdmax_rlcx_ib_cntl |= SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK;
 
        q->is_active = QUEUE_IS_ACTIVE(*q);
 }
-- 
2.34.1
