From: Samson Tam <[email protected]>

[Why]
When going from the ODM 2:1 single-display case to max displays, the
second ODM pipe needs to be repurposed for one of the new single
displays. However, acquire_first_split_pipe() only handles the MPC
case, not the ODM case.

[How]
Add ODM conditions in acquire_first_split_pipe().
Call commit_minimal_transition_state() in dc_commit_streams() to
handle the ODM 2:1 exit first, and then process the new streams.
Handle the ODM condition in commit_minimal_transition_state().

Cc: Mario Limonciello <[email protected]>
Cc: Alex Deucher <[email protected]>
Cc: [email protected]
Acked-by: Stylon Wang <[email protected]>
Signed-off-by: Samson Tam <[email protected]>
Reviewed-by: Alvin Lee <[email protected]>
---
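A quick standalone sketch for reviewers (not part of the patch; all names
below are simplified stand-ins for the real DC types) of the two ODM pieces
above: detecting that an ODM 2:1 chain must be exited before every pipe is
handed to a single display, and splicing the second pipe out of the chain:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct pipe_ctx {
	int pipe_idx;
	struct pipe_ctx *prev_odm_pipe; /* ODM 2:1 links pipes into a */
	struct pipe_ctx *next_odm_pipe; /* doubly linked chain        */
};

/* Mirrors the dc_commit_streams() check: when every pipe will be needed
 * for a single display, any pipe still chained for ODM must go first. */
static bool needs_odm2to1_exit(struct pipe_ctx *pipes, int pipe_count,
			       int stream_count)
{
	int i;

	if (stream_count != pipe_count)
		return false;
	for (i = 0; i < pipe_count; i++)
		if (pipes[i].next_odm_pipe)
			return true;
	return false;
}

/* Mirrors the new acquire_first_split_pipe() branch: unlink @pipe from
 * its ODM chain so it can be reacquired; the caller guarantees that
 * prev_odm_pipe is non-NULL, as the else-if condition does in the patch. */
static void odm_unlink(struct pipe_ctx *pipe)
{
	pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
	if (pipe->next_odm_pipe)
		pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
	memset(pipe, 0, sizeof(*pipe)); /* pipe is now free to repurpose */
}

int main(void)
{
	struct pipe_ctx pipes[2] = { { .pipe_idx = 0 }, { .pipe_idx = 1 } };

	/* ODM 2:1: pipes 0 and 1 each drive half of one display. */
	pipes[0].next_odm_pipe = &pipes[1];
	pipes[1].prev_odm_pipe = &pipes[0];

	if (needs_odm2to1_exit(pipes, 2, 2))
		odm_unlink(&pipes[1]); /* repurpose pipe 1 */
	printf("pipe 0 next_odm_pipe: %p\n", (void *)pipes[0].next_odm_pipe);
	return 0;
}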
 drivers/gpu/drm/amd/display/dc/core/dc.c      | 36 ++++++++++++++++++-
 .../gpu/drm/amd/display/dc/core/dc_resource.c | 20 +++++++++++
 2 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index f3820c5e63af..2ad4293bb3e5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2008,6 +2008,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        return result;
 }
 
+static bool commit_minimal_transition_state(struct dc *dc,
+               struct dc_state *transition_base_context);
+
 /**
  * dc_commit_streams - Commit current stream state
  *
@@ -2029,6 +2032,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
        struct dc_state *context;
        enum dc_status res = DC_OK;
        struct dc_validation_set set[MAX_STREAMS] = {0};
+       struct pipe_ctx *pipe;
+       bool handle_exit_odm2to1 = false;
 
        if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
                return res;
@@ -2053,6 +2058,22 @@ enum dc_status dc_commit_streams(struct dc *dc,
                }
        }
 
+       /* Check for the case where we are going from odm 2:1 to max
+        * pipe scenario. For these cases, we will call
+        * commit_minimal_transition_state() to exit out of odm 2:1
+        * first before processing new streams
+        */
+       if (stream_count == dc->res_pool->pipe_count) {
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+                       if (pipe->next_odm_pipe)
+                               handle_exit_odm2to1 = true;
+               }
+       }
+
+       if (handle_exit_odm2to1)
+               res = commit_minimal_transition_state(dc, dc->current_state);
+
        context = dc_create_state(dc);
        if (!context)
                goto context_alloc_fail;
@@ -3912,6 +3933,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
        unsigned int i, j;
        unsigned int pipe_in_use = 0;
        bool subvp_in_use = false;
+       bool odm_in_use = false;
 
        if (!transition_context)
                return false;
@@ -3940,6 +3962,18 @@ static bool commit_minimal_transition_state(struct dc *dc,
                }
        }
 
+       /* If ODM is enabled and we are adding or removing planes from any ODM
+        * pipe, we must use the minimal transition.
+        */
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+               if (pipe->stream && pipe->next_odm_pipe) {
+                       odm_in_use = true;
+                       break;
+               }
+       }
+
        /* When the OS add a new surface if we have been used all of pipes with odm combine
         * and mpc split feature, it need use commit_minimal_transition_state to transition safely.
         * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need
@@ -3948,7 +3982,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
         * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
         * enter/exit MPO when DCN still have enough resources.
         */
-       if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
+       if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
                dc_release_state(transition_context);
                return true;
        }
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 7e1e5532f88f..c72540d37aef 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1445,6 +1445,26 @@ static int acquire_first_split_pipe(
                        split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
                        split_pipe->pipe_idx = i;
 
+                       split_pipe->stream = stream;
+                       return i;
+               } else if (split_pipe->prev_odm_pipe &&
+                               split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
+                       split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
+                       if (split_pipe->next_odm_pipe)
+                               split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
+
+                       if (split_pipe->prev_odm_pipe->plane_state)
+                               resource_build_scaling_params(split_pipe->prev_odm_pipe);
+
+                       memset(split_pipe, 0, sizeof(*split_pipe));
+                       split_pipe->stream_res.tg = pool->timing_generators[i];
+                       split_pipe->plane_res.hubp = pool->hubps[i];
+                       split_pipe->plane_res.ipp = pool->ipps[i];
+                       split_pipe->plane_res.dpp = pool->dpps[i];
+                       split_pipe->stream_res.opp = pool->opps[i];
+                       split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
+                       split_pipe->pipe_idx = i;
+
                        split_pipe->stream = stream;
                        return i;
                }
-- 
2.40.1
