From: Aurabindo Pillai <[email protected]>

Accessing DC from amdgpu_dm is usually preceded by acquiring the
dc_lock mutex. Most of the DC APIs that DM calls are made under this
lock, but a few are not. Some DC APIs called from interrupt context
end up sending DMUB commands while other threads are already using
DMUB. This was apparent from a race between the calls that
enable/disable idle optimizations and the DC API that sets vmin/vmax.

Offload the call to dc_stream_adjust_vmin_vmax() to a worker instead
of calling it directly from the interrupt handler, so that it can wait
for dc_lock.
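
For reference, the general shape of the deferral is sketched below. This
is a minimal, generic illustration of the IRQ-to-workqueue handoff; the
names (deferred_update, schedule_deferred_update) are made up for the
example, and the actual amdgpu_dm changes are in the diff that follows.

  #include <linux/slab.h>
  #include <linux/workqueue.h>

  /* Illustrative only: defer an update from IRQ context to process
   * context so that a mutex (e.g. dc_lock) can be taken safely.
   */
  struct deferred_update {
          struct work_struct work;
          /* parameters captured in the IRQ handler would go here */
  };

  static void deferred_update_handler(struct work_struct *w)
  {
          struct deferred_update *du =
                  container_of(w, struct deferred_update, work);

          /* Process context: taking a mutex and issuing DMUB commands
           * is safe here.
           */
          kfree(du);
  }

  /* Called from the IRQ handler: allocate atomically and hand off. */
  static void schedule_deferred_update(void)
  {
          struct deferred_update *du = kzalloc(sizeof(*du), GFP_ATOMIC);

          if (!du)
                  return;

          INIT_WORK(&du->work, deferred_update_handler);
          queue_work(system_wq, &du->work);
  }

The real handler additionally copies the timing adjust parameters and
takes a reference on the stream so both stay valid until the work runs.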

Reviewed-by: Nicholas Kazlauskas <[email protected]>
Signed-off-by: Aurabindo Pillai <[email protected]>
Signed-off-by: Roman Li <[email protected]>
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 55 +++++++++++++++++--
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 14 +++++
 2 files changed, 63 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c71167ffdb76..6762fc7de769 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -530,6 +530,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
                      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
 }
 
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+       struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+       struct amdgpu_device *adev = work->adev;
+       struct dc_stream_state *stream = work->stream;
+       struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+       mutex_lock(&adev->dm.dc_lock);
+       dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+       mutex_unlock(&adev->dm.dc_lock);
+
+       dc_stream_release(stream);
+       kfree(work->adjust);
+       kfree(work);
+}
+
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+       struct dc_stream_state *stream,
+       struct dc_crtc_timing_adjust *adjust)
+{
+       struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_ATOMIC);
+       if (!offload_work) {
+               drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+               return;
+       }
+
+       struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_ATOMIC);
+       if (!adjust_copy) {
+               drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+               kfree(offload_work);
+               return;
+       }
+
+       dc_stream_retain(stream);
+       memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+       INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+       offload_work->adev = adev;
+       offload_work->stream = stream;
+       offload_work->adjust = adjust_copy;
+
+       queue_work(system_wq, &offload_work->work);
+}
+
 static void dm_vupdate_high_irq(void *interrupt_params)
 {
        struct common_irq_params *irq_params = interrupt_params;
@@ -579,10 +623,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);
 
-                               dc_stream_adjust_vmin_vmax(
-                                   adev->dm.dc,
-                                   acrtc->dm_irq_params.stream,
-                                   &acrtc->dm_irq_params.vrr_params.adjust);
+                               schedule_dc_vmin_vmax(adev,
+                                       acrtc->dm_irq_params.stream,
+                                       &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
@@ -672,8 +715,8 @@ static void dm_crtc_high_irq(void *interrupt_params)
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);
 
-               dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
-                                          &acrtc->dm_irq_params.vrr_params.adjust);
+               schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+                               &acrtc->dm_irq_params.vrr_params.adjust);
        }
 
        /*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 67c3a7a967f2..94f312bae9ac 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -153,6 +153,20 @@ struct idle_workqueue {
        bool running;
 };
 
+/**
+ * struct vupdate_offload_work - Work data for deferring a vmin/vmax update out of interrupt context
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc
+ * @adjust: DC CRTC timing adjust to be applied to the crtc
+ */
+struct vupdate_offload_work {
+       struct work_struct work;
+       struct amdgpu_device *adev;
+       struct dc_stream_state *stream;
+       struct dc_crtc_timing_adjust *adjust;
+};
+
 #define MAX_LUMINANCE_DATA_POINTS 99
 
 /**
-- 
2.34.1
