drm/amd/display: Add minimal pipe split transition state
author    Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
          Mon, 20 Jun 2022 20:37:07 +0000 (16:37 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
          Tue, 5 Jul 2022 20:12:22 +0000 (16:12 -0400)
[WHY?]
When adding or removing a plane in some configurations, unsupported pipe
programming can occur when transitioning to the new plane configuration.
Such cases include pipe split on multi-display, with MPO, and/or ODM.

[HOW?]
Add a safe transition state that minimizes pipe usage before programming
the new configuration. When adding a plane, the current state requires the
fewest pipes, so it is applied without splitting. This must be applied
prior to updating the plane_state for a seamless transition. When removing
a plane, the new state requires the fewest pipes, so it is applied without
splitting.
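
For reference, the decision of which state receives the minimal (no pipe
split) commit can be summarized roughly as below. This is a simplified,
standalone sketch with hypothetical names, not part of the patch itself;
the real code operates on dc_state and the stream status:

	enum transition {
		NO_MINIMAL_TRANSITION,
		MINIMAL_ON_CURRENT_STATE,	/* plane addition */
		MINIMAL_ON_NEW_STATE,		/* plane removal */
	};

	static enum transition pick_transition(int cur_plane_count,
					       int new_plane_count,
					       bool split_already_avoided)
	{
		if (split_already_avoided || cur_plane_count == new_plane_count)
			return NO_MINIMAL_TRANSITION;

		/* Adding a plane: the current state needs the fewest pipes,
		 * so the no-split commit is applied to dc->current_state
		 * before the plane state is updated. */
		if (new_plane_count > cur_plane_count)
			return MINIMAL_ON_CURRENT_STATE;

		/* Removing a plane: the new state needs the fewest pipes,
		 * so the no-split commit is applied to the newly built
		 * context before full programming. */
		return MINIMAL_ON_NEW_STATE;
	}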

Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/dc_stream.h

index 05c2e17..40848ed 100644
@@ -2717,6 +2717,137 @@ static void copy_stream_update_to_stream(struct dc *dc,
        }
 }
 
+void dc_reset_state(struct dc *dc, struct dc_state *context)
+{
+       dc_resource_state_destruct(context);
+
+       /* clear the structure, but don't reset the reference count */
+       memset(context, 0, offsetof(struct dc_state, refcount));
+
+       init_state(dc, context);
+}
+
+static bool update_planes_and_stream_state(struct dc *dc,
+               struct dc_surface_update *srf_updates, int surface_count,
+               struct dc_stream_state *stream,
+               struct dc_stream_update *stream_update,
+               enum surface_update_type *new_update_type,
+               struct dc_state **new_context)
+{
+       struct dc_state *context;
+       int i, j;
+       enum surface_update_type update_type;
+       const struct dc_stream_status *stream_status;
+       struct dc_context *dc_ctx = dc->ctx;
+
+       stream_status = dc_stream_get_status(stream);
+
+       if (!stream_status) {
+               if (surface_count) /* Only an error condition if surf_count non-zero*/
+                       ASSERT(false);
+
+               return false; /* Cannot commit surface to stream that is not committed */
+       }
+
+       context = dc->current_state;
+
+       update_type = dc_check_update_surfaces_for_stream(
+                       dc, srf_updates, surface_count, stream_update, stream_status);
+
+       /* update current stream with the new updates */
+       copy_stream_update_to_stream(dc, context, stream, stream_update);
+
+       /* do not perform surface update if surface has invalid dimensions
+        * (all zero) and no scaling_info is provided
+        */
+       if (surface_count > 0) {
+               for (i = 0; i < surface_count; i++) {
+                       if ((srf_updates[i].surface->src_rect.width == 0 ||
+                                srf_updates[i].surface->src_rect.height == 0 ||
+                                srf_updates[i].surface->dst_rect.width == 0 ||
+                                srf_updates[i].surface->dst_rect.height == 0) &&
+                               (!srf_updates[i].scaling_info ||
+                                 srf_updates[i].scaling_info->src_rect.width == 0 ||
+                                 srf_updates[i].scaling_info->src_rect.height == 0 ||
+                                 srf_updates[i].scaling_info->dst_rect.width == 0 ||
+                                 srf_updates[i].scaling_info->dst_rect.height == 0)) {
+                               DC_ERROR("Invalid src/dst rects in surface update!\n");
+                               return false;
+                       }
+               }
+       }
+
+       if (update_type >= update_surface_trace_level)
+               update_surface_trace(dc, srf_updates, surface_count);
+
+       if (update_type >= UPDATE_TYPE_FULL) {
+               struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
+
+               for (i = 0; i < surface_count; i++)
+                       new_planes[i] = srf_updates[i].surface;
+
+               /* initialize scratch memory for building context */
+               context = dc_create_state(dc);
+               if (context == NULL) {
+                       DC_ERROR("Failed to allocate new validate context!\n");
+                       return false;
+               }
+
+               dc_resource_state_copy_construct(
+                               dc->current_state, context);
+
+               /*remove old surfaces from context */
+               if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+
+                       BREAK_TO_DEBUGGER();
+                       goto fail;
+               }
+
+               /* add surface to context */
+               if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+
+                       BREAK_TO_DEBUGGER();
+                       goto fail;
+               }
+       }
+
+       /* save update parameters into surface */
+       for (i = 0; i < surface_count; i++) {
+               struct dc_plane_state *surface = srf_updates[i].surface;
+
+               copy_surface_update_to_plane(surface, &srf_updates[i]);
+
+               if (update_type >= UPDATE_TYPE_MED) {
+                       for (j = 0; j < dc->res_pool->pipe_count; j++) {
+                               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+                               if (pipe_ctx->plane_state != surface)
+                                       continue;
+
+                               resource_build_scaling_params(pipe_ctx);
+                       }
+               }
+       }
+
+       if (update_type == UPDATE_TYPE_FULL) {
+               if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+                       BREAK_TO_DEBUGGER();
+                       goto fail;
+               }
+       }
+
+       *new_context = context;
+       *new_update_type = update_type;
+
+       return true;
+
+fail:
+       dc_release_state(context);
+
+       return false;
+
+}
+
 static void commit_planes_do_stream_update(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_stream_update *stream_update,
@@ -3264,6 +3395,152 @@ static void commit_planes_for_stream(struct dc *dc,
        }
 }
 
+static bool commit_minimal_transition_state(struct dc *dc,
+               struct dc_state *transition_base_context)
+{
+       struct dc_state *transition_context = dc_create_state(dc);
+       enum pipe_split_policy tmp_policy;
+       enum dc_status ret = DC_ERROR_UNEXPECTED;
+       unsigned int i, j;
+
+       if (!transition_context)
+               return false;
+
+       tmp_policy = dc->debug.pipe_split_policy;
+       dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+
+       dc_resource_state_copy_construct(transition_base_context, transition_context);
+
+       //commit minimal state
+       if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
+               for (i = 0; i < transition_context->stream_count; i++) {
+                       struct dc_stream_status *stream_status = &transition_context->stream_status[i];
+
+                       for (j = 0; j < stream_status->plane_count; j++) {
+                               struct dc_plane_state *plane_state = stream_status->plane_states[j];
+
+                               /* force vsync flip when reconfiguring pipes to prevent underflow
+                                * and corruption
+                                */
+                               plane_state->flip_immediate = false;
+                       }
+               }
+
+               ret = dc_commit_state_no_check(dc, transition_context);
+       }
+
+       //always release as dc_commit_state_no_check retains in good case
+       dc_release_state(transition_context);
+
+       //restore previous pipe split policy
+       dc->debug.pipe_split_policy = tmp_policy;
+
+       if (ret != DC_OK) {
+               //this should never happen
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+
+       //force full surface update
+       for (i = 0; i < dc->current_state->stream_count; i++) {
+               for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+                       dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
+               }
+       }
+
+       return true;
+}
+
+bool dc_update_planes_and_stream(struct dc *dc,
+               struct dc_surface_update *srf_updates, int surface_count,
+               struct dc_stream_state *stream,
+               struct dc_stream_update *stream_update)
+{
+       struct dc_state *context;
+       enum surface_update_type update_type;
+       int i;
+
+       /* In cases where MPO and split or ODM are used transitions can
+        * cause underflow. Apply stream configuration with minimal pipe
+        * split first to avoid unsupported transitions for active pipes.
+        */
+       bool force_minimal_pipe_splitting = false;
+       bool is_plane_addition = false;
+
+       struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+
+       if (cur_stream_status &&
+                       dc->current_state->stream_count > 0 &&
+                       dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
+               /* determine if minimal transition is required */
+               if (cur_stream_status->plane_count > surface_count) {
+                       force_minimal_pipe_splitting = true;
+               } else if (cur_stream_status->plane_count < surface_count) {
+                       force_minimal_pipe_splitting = true;
+                       is_plane_addition = true;
+               }
+       }
+
+       /* on plane addition, minimal state is the current one */
+       if (force_minimal_pipe_splitting && is_plane_addition &&
+               !commit_minimal_transition_state(dc, dc->current_state))
+                               return false;
+
+       if (!update_planes_and_stream_state(
+                       dc,
+                       srf_updates,
+                       surface_count,
+                       stream,
+                       stream_update,
+                       &update_type,
+                       &context))
+               return false;
+
+       /* on plane addition, minimal state is the new one */
+       if (force_minimal_pipe_splitting && !is_plane_addition) {
+               if (!commit_minimal_transition_state(dc, context)) {
+                       dc_release_state(context);
+                       return false;
+               }
+
+               update_type = UPDATE_TYPE_FULL;
+       }
+
+       commit_planes_for_stream(
+                       dc,
+                       srf_updates,
+                       surface_count,
+                       stream,
+                       stream_update,
+                       update_type,
+                       context);
+
+       if (dc->current_state != context) {
+
+               /* Since memory free requires elevated IRQL, an interrupt
+                * request is generated by mem free. If this happens
+                * between freeing and reassigning the context, our vsync
+                * interrupt will call into dc and cause a memory
+                * corruption BSOD. Hence, we first reassign the context,
+                * then free the old context.
+                */
+
+               struct dc_state *old = dc->current_state;
+
+               dc->current_state = context;
+               dc_release_state(old);
+
+               // clear any forced full updates
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+                       if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+                               pipe_ctx->plane_state->force_full_update = false;
+               }
+       }
+       return true;
+}
+
 void dc_commit_updates_for_stream(struct dc *dc,
                struct dc_surface_update *srf_updates,
                int surface_count,
index 1820c19..2a2f719 100644
@@ -324,6 +324,9 @@ bool dc_is_stream_scaling_unchanged(
        struct dc_stream_state *old_stream, struct dc_stream_state *stream);
 
 /*
+ * Set up stream attributes; if no stream updates are provided,
+ * there will be no impact on the stream parameters.
+ *
  * Set up surface attributes and associate to a stream
  * The surfaces parameter is an absolute set of all surface active for the stream.
  * If no surfaces are provided, the stream will be blanked; no memory read.
@@ -332,8 +335,23 @@ bool dc_is_stream_scaling_unchanged(
  * After this call:
  *   Surfaces attributes are programmed and configured to be composed into stream.
  *   This does not trigger a flip.  No surface address is programmed.
+ *
  */
+bool dc_update_planes_and_stream(struct dc *dc,
+               struct dc_surface_update *surface_updates, int surface_count,
+               struct dc_stream_state *dc_stream,
+               struct dc_stream_update *stream_update);
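
A hypothetical caller sketch (illustrative only, not part of this patch):
the surface_updates array is the absolute set of planes that should remain
active on the stream after the call; optional per-plane fields such as the
flip address or scaling info are left NULL when unchanged.

	struct dc_surface_update srf_updates[MAX_SURFACES] = { 0 };
	int surface_count = 0;

	/* keep one plane active; additional planes would be appended here */
	srf_updates[surface_count++].surface = plane_state;

	if (!dc_update_planes_and_stream(dc, srf_updates, surface_count,
					 stream, &stream_update))
		DRM_ERROR("dc_update_planes_and_stream failed\n");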
 
+/*
+ * Set up surface attributes and associate to a stream
+ * The surfaces parameter is an absolute set of all surface active for the stream.
+ * If no surfaces are provided, the stream will be blanked; no memory read.
+ * Any flip related attribute changes must be done through this interface.
+ *
+ * After this call:
+ *   Surfaces attributes are programmed and configured to be composed into stream.
+ *   This does not trigger a flip.  No surface address is programmed.
+ */
 void dc_commit_updates_for_stream(struct dc *dc,
                struct dc_surface_update *srf_updates,
                int surface_count,