We resync the FIFO after each pipe update in apply_ctx_to_hw.
However, this means that some pipes (in hardware) are based on the
new context and some are based on the current_state (since the pipes
are updated one at a time). In this case we must use the
pipe_ctx that's currently still configured in hardware when turning
OTGs off / on and reconfiguring ODM during the resync.
Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Signed-off-by: Jerry Zuo <jerry.zuo@amd.com>
Signed-off-by: Alvin Lee <alvin.lee2@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
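
A minimal sketch (not part of the patch; the helper name and the
simplified types below are illustrative only) of the state-selection
rule the resync helpers now apply, assuming pipes 0..current_pipe_idx
have already been programmed from the new context while the remaining
pipes still reflect dc->current_state:

	/* Simplified stand-ins for struct dc_state / struct pipe_ctx. */
	struct sketch_pipe_ctx { int otg_inst; };
	struct sketch_state { struct sketch_pipe_ctx pipe_ctx[6]; };

	/*
	 * apply_ctx_to_hw() programs pipes one at a time, so when the
	 * resync runs after pipe 'current_pipe_idx', indices up to and
	 * including current_pipe_idx match the new context in hardware
	 * while higher indices still match the old state.  Pick the
	 * software state that mirrors what the hardware is running.
	 */
	static struct sketch_pipe_ctx *pipe_in_hw(struct sketch_state *new_ctx,
						  struct sketch_state *cur_state,
						  unsigned int i,
						  unsigned int current_pipe_idx)
	{
		return (i <= current_pipe_idx) ? &new_ctx->pipe_ctx[i]
					       : &cur_state->pipe_ctx[i];
	}

This is why the resync helpers and the hw_sequencer_private function
pointer below gain the extra current_pipe_idx argument.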
#ifdef CONFIG_DRM_AMD_DC_FP
if (hws->funcs.resync_fifo_dccg_dio)
- hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context, i);
#endif
}
}
}
-void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context)
+void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
unsigned int i;
struct pipe_ctx *pipe = NULL;
bool otg_disabled[MAX_PIPES] = {false};
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ } else {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ }
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx)
+ pipe = &context->res_ctx.pipe_ctx[i];
+ else
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (otg_disabled[i]) {
int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst };
void dcn314_calculate_pix_rate_divider(struct dc *dc, struct dc_state *context, const struct dc_stream_state *stream);
-void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context);
+void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx);
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
}
}
-void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context)
+void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
unsigned int i;
struct pipe_ctx *pipe = NULL;
bool otg_disabled[MAX_PIPES] = {false};
+ struct dc_state *dc_state = NULL;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc_state = context;
+ } else {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc_state = dc->current_state;
+ }
if (!resource_is_pipe_type(pipe, OTG_MASTER))
continue;
if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
- && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) {
+ && dc_state_get_pipe_subvp_type(dc_state, pipe) != SUBVP_PHANTOM) {
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
otg_disabled[i] = true;
hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx)
+ pipe = &context->res_ctx.pipe_ctx[i];
+ else
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (otg_disabled[i]) {
int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst };
#ifdef CONFIG_DRM_AMD_DC_FP
if (hws->funcs.resync_fifo_dccg_dio)
- hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context, i);
#endif
}
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
-void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context);
+void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx);
void dcn32_subvp_pipe_control_lock(struct dc *dc,
struct dc_state *context,
unsigned int *k1_div,
unsigned int *k2_div);
void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ unsigned int current_pipe_idx);
enum dc_status (*apply_single_controller_ctx_to_hw)(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,