On Wed, Sep 16, 2020 at 08:38:22PM +0530, Karthik B S wrote:
> On Gen 9 and Gen 10 platforms, the async address update enable bit is
> double buffered. Due to this, during the transition from async flip
> to sync flip we have to wait until this bit is updated before
> continuing with the normal commit for the sync flip.
> 
> v9: -Rename skl_toggle_async_sync() to skl_disable_async_flip_wa(). (Ville)
>     -Place the declarations appropriately as per need. (Ville)
>     -Take the lock before the reg read. (Ville)
>     -Fix comment and formatting. (Ville)
>     -Use IS_GEN_RANGE() for gen check. (Ville)
>     -Move skl_disable_async_flip_wa() to intel_pre_plane_update(). (Ville)
> 
> Signed-off-by: Karthik B S <[email protected]>
> Signed-off-by: Vandita Kulkarni <[email protected]>

Reviewed-by: Ville Syrjälä <[email protected]>
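
For readers following along: the transition this handles corresponds to
a userspace sequence like the one below (hypothetical libdrm user with
made-up fd/crtc_id/fb handles, not part of this patch), where async
flips are followed by a plain sync flip:

	/* async flip: latches immediately, tearing allowed */
	drmModePageFlip(fd, crtc_id, fb_a,
			DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
			NULL);

	/* followed by a sync flip: must latch at vblank */
	drmModePageFlip(fd, crtc_id, fb_b, DRM_MODE_PAGE_FLIP_EVENT, NULL);

Without the vblank wait below, the sync flip could be programmed while
PLANE_CTL_ASYNC_FLIP is still latched in the hardware.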

> ---
>  drivers/gpu/drm/i915/display/intel_display.c | 46 ++++++++++++++++++++
>  1 file changed, 46 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> index 0f0bcbb00c7f..6f6edc581e14 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -6562,6 +6562,43 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
>               icl_wa_scalerclkgating(dev_priv, pipe, false);
>  }
>  
> +static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
> +                                   struct intel_crtc *crtc,
> +                                   const struct intel_crtc_state *new_crtc_state)
> +{
> +     struct drm_i915_private *dev_priv = to_i915(state->base.dev);
> +     struct intel_plane *plane;
> +     struct intel_plane_state *new_plane_state;
> +     int i;
> +
> +     for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
> +             u32 update_mask = new_crtc_state->update_planes;
> +             u32 plane_ctl, surf_addr;
> +             enum plane_id plane_id;
> +             unsigned long irqflags;
> +             enum pipe pipe;
> +
> +             if (crtc->pipe != plane->pipe ||
> +                 !(update_mask & BIT(plane->id)))
> +                     continue;
> +
> +             plane_id = plane->id;
> +             pipe = plane->pipe;
> +
> +             spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
> +             plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
> +             surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));
> +
> +             plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;
> +
> +             intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
> +             intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
> +             spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
> +     }
> +
> +     intel_wait_for_vblank(dev_priv, crtc->pipe);
> +}
> +
>  static void intel_pre_plane_update(struct intel_atomic_state *state,
>                                  struct intel_crtc *crtc)
>  {
> @@ -6647,6 +6684,15 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
>        */
>       if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
>               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
> +
> +     /*
> +      * WA for platforms where async address update enable bit
> +      * is double buffered and only latched at start of vblank.
> +      */
> +     if (old_crtc_state->uapi.async_flip &&
> +         !new_crtc_state->uapi.async_flip &&
> +         IS_GEN_RANGE(dev_priv, 9, 10))
> +             skl_disable_async_flip_wa(state, crtc, new_crtc_state);
>  }
>  
>  static void intel_crtc_disable_planes(struct intel_atomic_state *state,
> -- 
> 2.22.0

-- 
Ville Syrjälä
Intel