/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
/* No-op in this file: the DTN logging macros take dc_ctx/log_ctx directly. */
#define DC_LOGGER_INIT(logger)

/* Expand a register field name into its (shift, mask) pair from the hwseq tables. */
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

/*print is 17 wide, first two characters are spaces*/
#define DTN_INFO_MICRO_SEC(ref_cycle) \
	print_microsec(dc_ctx, log_ctx, ref_cycle)

/* Number of hardware points programmed into the gamma LUT. */
#define GAMMA_HW_POINTS_NUM 256
72 void print_microsec(struct dc_context *dc_ctx,
73 struct dc_log_buffer_ctx *log_ctx,
76 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
77 static const unsigned int frac = 1000;
78 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
80 DTN_INFO(" %11d.%03d",
85 void dcn10_lock_all_pipes(struct dc *dc,
86 struct dc_state *context,
89 struct pipe_ctx *pipe_ctx;
90 struct timing_generator *tg;
93 for (i = 0; i < dc->res_pool->pipe_count; i++) {
94 pipe_ctx = &context->res_ctx.pipe_ctx[i];
95 tg = pipe_ctx->stream_res.tg;
98 * Only lock the top pipe's tg to prevent redundant
99 * (un)locking. Also skip if pipe is disabled.
101 if (pipe_ctx->top_pipe ||
102 !pipe_ctx->stream || !pipe_ctx->plane_state ||
103 !tg->funcs->is_tg_enabled(tg))
107 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
109 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
113 static void log_mpc_crc(struct dc *dc,
114 struct dc_log_buffer_ctx *log_ctx)
116 struct dc_context *dc_ctx = dc->ctx;
117 struct dce_hwseq *hws = dc->hwseq;
119 if (REG(MPC_CRC_RESULT_GB))
120 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
121 REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
122 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
123 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
124 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
127 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
129 struct dc_context *dc_ctx = dc->ctx;
130 struct dcn_hubbub_wm wm;
133 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
134 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
136 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
137 " sr_enter sr_exit dram_clk_change\n");
139 for (i = 0; i < 4; i++) {
140 struct dcn_hubbub_wm_set *s;
143 DTN_INFO("WM_Set[%d]:", s->wm_set);
144 DTN_INFO_MICRO_SEC(s->data_urgent);
145 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
146 DTN_INFO_MICRO_SEC(s->sr_enter);
147 DTN_INFO_MICRO_SEC(s->sr_exit);
148 DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
/*
 * Dump per-pipe HUBP state to the DTN log: surface attributes plus the RQ
 * (request), DLG (delay generator) and TTU (time-to-underflow) register
 * groups, one formatted row per pipe.
 * NOTE(review): log_ctx is void * here while sibling loggers take
 * struct dc_log_buffer_ctx * — presumably equivalent; confirm.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;

	/* --- Surface state section --- */
		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		/* Snapshot buffer embedded in the DCN10 HUBP object. */
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Refresh the snapshot from hardware before printing. */
		hubp->funcs->hubp_read_state(hubp);

		DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
				s->underflow_status);
		/* Watermarks are printed in microseconds. */
		DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
		DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
		DTN_INFO_MICRO_SEC(s->qos_level_high_wm);

	/* --- RQ (request) registers, luma (L:) then chroma (C:) --- */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
			pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
			rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
			rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
			rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
			rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
			rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
			rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
			rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
			rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);

	/* --- DLG (delay generator) registers --- */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
		" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
		" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
		" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
		" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
		" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
		" x_rp_dlay x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
			"% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
			" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
			pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
			dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
			dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
			dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
			dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
			dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
			dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
			dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
			dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
			dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
			dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
			dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
			dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
			dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
			dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
			dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
			dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
			dlg_regs->xfc_reg_remote_surface_flip_latency);

	/* --- TTU (time-to-underflow) registers --- */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
		" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
		" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
			pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
			ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
			ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
			ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
			ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
			ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
			ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
/*
 * Top-level DTN hardware-state dump: HUBBUB watermarks, HUBP state, DPP
 * gamma/gamut state, MPCC tree, OTG timing, DSC, stream/link encoders,
 * calculated clocks and finally the MPC CRCs.
 */
void dcn10_log_hw_state(struct dc *dc,
		struct dc_log_buffer_ctx *log_ctx)
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* --- DPP gamma/gamut remap state --- */
	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
		" GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
		"C31 C32 C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		/* LUT mode fields are decoded to human-readable names. */
		DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
			"%8x %08xh %08xh %08xh %08xh %08xh %08xh",
			(s.igam_lut_mode == 0) ? "BypassFixed" :
				((s.igam_lut_mode == 1) ? "BypassFloat" :
				((s.igam_lut_mode == 2) ? "RAM" :
				((s.igam_lut_mode == 3) ? "RAM" :
			(s.dgam_lut_mode == 0) ? "Bypass" :
				((s.dgam_lut_mode == 1) ? "sRGB" :
				((s.dgam_lut_mode == 2) ? "Ycc" :
				((s.dgam_lut_mode == 3) ? "RAM" :
				((s.dgam_lut_mode == 4) ? "RAM" :
			(s.rgam_lut_mode == 0) ? "Bypass" :
				((s.rgam_lut_mode == 1) ? "sRGB" :
				((s.rgam_lut_mode == 2) ? "Ycc" :
				((s.rgam_lut_mode == 3) ? "RAM" :
				((s.rgam_lut_mode == 4) ? "RAM" :
			s.gamut_remap_c11_c12,
			s.gamut_remap_c13_c14,
			s.gamut_remap_c21_c22,
			s.gamut_remap_c23_c24,
			s.gamut_remap_c31_c32,
			s.gamut_remap_c33_c34);

	/* --- MPCC blending tree, one row per MPCC instance --- */
	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
			i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
			s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,

	/* --- OTG timing / underflow state --- */
	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
			s.underflow_occurred_status,

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);

	/* --- DSC state --- */
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
			s.dsc_bytes_per_pixel);

	/* --- Stream encoder state (only encoders exposing enc_read_state) --- */
	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
			" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);

	/* --- Link encoder state (only encoders exposing read_state) --- */
	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);

	/* --- Calculated (software) clocks from the current BW context --- */
	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
		"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);
462 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
464 struct hubp *hubp = pipe_ctx->plane_res.hubp;
465 struct timing_generator *tg = pipe_ctx->stream_res.tg;
467 if (tg->funcs->is_optc_underflow_occurred(tg)) {
468 tg->funcs->clear_optc_underflow(tg);
472 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
473 hubp->funcs->hubp_clear_underflow(hubp);
479 void dcn10_enable_power_gating_plane(
480 struct dce_hwseq *hws,
483 bool force_on = true; /* disable power gating */
489 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
490 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
491 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
492 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
495 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
496 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
497 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
498 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
501 void dcn10_disable_vga(
502 struct dce_hwseq *hws)
504 unsigned int in_vga1_mode = 0;
505 unsigned int in_vga2_mode = 0;
506 unsigned int in_vga3_mode = 0;
507 unsigned int in_vga4_mode = 0;
509 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
510 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
511 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
512 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
514 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
515 in_vga3_mode == 0 && in_vga4_mode == 0)
518 REG_WRITE(D1VGA_CONTROL, 0);
519 REG_WRITE(D2VGA_CONTROL, 0);
520 REG_WRITE(D3VGA_CONTROL, 0);
521 REG_WRITE(D4VGA_CONTROL, 0);
523 /* HW Engineer's Notes:
524 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
525 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
527 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
528 * VGA_TEST_ENABLE, to leave it in the same state as before.
530 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
531 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
534 void dcn10_dpp_pg_control(
535 struct dce_hwseq *hws,
536 unsigned int dpp_inst,
539 uint32_t power_gate = power_on ? 0 : 1;
540 uint32_t pwr_status = power_on ? 0 : 2;
542 if (hws->ctx->dc->debug.disable_dpp_power_gate)
544 if (REG(DOMAIN1_PG_CONFIG) == 0)
549 REG_UPDATE(DOMAIN1_PG_CONFIG,
550 DOMAIN1_POWER_GATE, power_gate);
552 REG_WAIT(DOMAIN1_PG_STATUS,
553 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
557 REG_UPDATE(DOMAIN3_PG_CONFIG,
558 DOMAIN3_POWER_GATE, power_gate);
560 REG_WAIT(DOMAIN3_PG_STATUS,
561 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
565 REG_UPDATE(DOMAIN5_PG_CONFIG,
566 DOMAIN5_POWER_GATE, power_gate);
568 REG_WAIT(DOMAIN5_PG_STATUS,
569 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
573 REG_UPDATE(DOMAIN7_PG_CONFIG,
574 DOMAIN7_POWER_GATE, power_gate);
576 REG_WAIT(DOMAIN7_PG_STATUS,
577 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
586 void dcn10_hubp_pg_control(
587 struct dce_hwseq *hws,
588 unsigned int hubp_inst,
591 uint32_t power_gate = power_on ? 0 : 1;
592 uint32_t pwr_status = power_on ? 0 : 2;
594 if (hws->ctx->dc->debug.disable_hubp_power_gate)
596 if (REG(DOMAIN0_PG_CONFIG) == 0)
600 case 0: /* DCHUBP0 */
601 REG_UPDATE(DOMAIN0_PG_CONFIG,
602 DOMAIN0_POWER_GATE, power_gate);
604 REG_WAIT(DOMAIN0_PG_STATUS,
605 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
608 case 1: /* DCHUBP1 */
609 REG_UPDATE(DOMAIN2_PG_CONFIG,
610 DOMAIN2_POWER_GATE, power_gate);
612 REG_WAIT(DOMAIN2_PG_STATUS,
613 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
616 case 2: /* DCHUBP2 */
617 REG_UPDATE(DOMAIN4_PG_CONFIG,
618 DOMAIN4_POWER_GATE, power_gate);
620 REG_WAIT(DOMAIN4_PG_STATUS,
621 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
624 case 3: /* DCHUBP3 */
625 REG_UPDATE(DOMAIN6_PG_CONFIG,
626 DOMAIN6_POWER_GATE, power_gate);
628 REG_WAIT(DOMAIN6_PG_STATUS,
629 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
638 static void power_on_plane(
639 struct dce_hwseq *hws,
642 DC_LOGGER_INIT(hws->ctx->logger);
643 if (REG(DC_IP_REQUEST_CNTL)) {
644 REG_SET(DC_IP_REQUEST_CNTL, 0,
646 hws->funcs.dpp_pg_control(hws, plane_id, true);
647 hws->funcs.hubp_pg_control(hws, plane_id, true);
648 REG_SET(DC_IP_REQUEST_CNTL, 0,
651 "Un-gated front end for pipe %d\n", plane_id);
655 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
657 struct dce_hwseq *hws = dc->hwseq;
658 struct hubp *hubp = dc->res_pool->hubps[0];
660 if (!hws->wa_state.DEGVIDCN10_253_applied)
663 hubp->funcs->set_blank(hubp, true);
665 REG_SET(DC_IP_REQUEST_CNTL, 0,
668 hws->funcs.hubp_pg_control(hws, 0, false);
669 REG_SET(DC_IP_REQUEST_CNTL, 0,
672 hws->wa_state.DEGVIDCN10_253_applied = false;
675 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
677 struct dce_hwseq *hws = dc->hwseq;
678 struct hubp *hubp = dc->res_pool->hubps[0];
681 if (dc->debug.disable_stutter)
684 if (!hws->wa.DEGVIDCN10_253)
687 for (i = 0; i < dc->res_pool->pipe_count; i++) {
688 if (!dc->res_pool->hubps[i]->power_gated)
692 /* all pipe power gated, apply work around to enable stutter. */
694 REG_SET(DC_IP_REQUEST_CNTL, 0,
697 hws->funcs.hubp_pg_control(hws, 0, true);
698 REG_SET(DC_IP_REQUEST_CNTL, 0,
701 hubp->funcs->set_hubp_blank_en(hubp, false);
702 hws->wa_state.DEGVIDCN10_253_applied = true;
705 void dcn10_bios_golden_init(struct dc *dc)
707 struct dce_hwseq *hws = dc->hwseq;
708 struct dc_bios *bp = dc->ctx->dc_bios;
710 bool allow_self_fresh_force_enable = true;
712 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
715 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
716 allow_self_fresh_force_enable =
717 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
720 /* WA for making DF sleep when idle after resume from S0i3.
721 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
722 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
723 * before calling command table and it changed to 1 after,
724 * it should be set back to 0.
727 /* initialize dcn global */
728 bp->funcs->enable_disp_power_gating(bp,
729 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
731 for (i = 0; i < dc->res_pool->pipe_count; i++) {
732 /* initialize dcn per pipe */
733 bp->funcs->enable_disp_power_gating(bp,
734 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
737 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
738 if (allow_self_fresh_force_enable == false &&
739 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
740 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
744 static void false_optc_underflow_wa(
746 const struct dc_stream_state *stream,
747 struct timing_generator *tg)
752 if (!dc->hwseq->wa.false_optc_underflow)
755 underflow = tg->funcs->is_optc_underflow_occurred(tg);
757 for (i = 0; i < dc->res_pool->pipe_count; i++) {
758 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
760 if (old_pipe_ctx->stream != stream)
763 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
766 if (tg->funcs->set_blank_data_double_buffer)
767 tg->funcs->set_blank_data_double_buffer(tg, true);
769 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
770 tg->funcs->clear_optc_underflow(tg);
/*
 * Program the OTG/back end timing for a stream: enable the OPTC clock,
 * program the pixel clock PLL and OTG timing, set the blank color, blank
 * the CRTC, then enable it.  Returns DC_ERROR_UNEXPECTED on PLL or CRTC
 * enable failure.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	/* Program the pixel clock PLL; failure aborts the enable. */
	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			&pipe_ctx->pll_settings)) {
		return DC_ERROR_UNEXPECTED;

	/* Program OTG timing from the stream's DLG parameters. */
	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			pipe_ctx->pipe_dlg_param.vready_offset,
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
			pipe_ctx->stream_res.opp,
			&stream->bit_depth_params,

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,

	/* Blank the CRTC before enabling it; also run the false-underflow WA. */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		return DC_ERROR_UNEXPECTED;

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */
/*
 * Tear down the back end of a pipe: disable the stream/link and audio,
 * then (for the top pipe only, which owns the shared back end) disable
 * the CRTC, the OPTC clock and DRR.  Finally detach the stream from the
 * pipe_ctx.
 */
static void dcn10_reset_back_end_for_pipe(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	/* No stream encoder means nothing was enabled; just detach. */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may already disable or */
		/* dpms_off status is incorrect due to fastboot
		 * feature. When system resume from S4 with second
		 * screen only, the dpms_off would be true but
		 * VBIOS lit up eDP, so check link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		if (pipe_ctx->stream_res.audio) {
			/*disable az_endpoint*/
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			if (dc->caps.dynamic_audio == true) {
				/*we have to dynamic arbitrate the audio endpoints*/
				/*we free the resource, need reset is_audio_acquired*/
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* NULL timing disables DRR (variable refresh). */
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);

	/* Only log/detach when this pipe_ctx belongs to the current state. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)

	if (i == dc->res_pool->pipe_count)

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
/*
 * Force-recovery workaround: when any HUBP reports underflow, reset every
 * pipe by blanking all HUBPs, soft-resetting HUBBUB, toggling HUBP
 * disable, then releasing the resets.  Gated by debug.recovery_enabled.
 */
static bool dcn10_hw_wa_force_recovery(struct dc *dc)
	bool need_recover = true;

	if (!dc->debug.recovery_enabled)

	/* Scan all pipes for an underflow indication. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
				&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
					/* one pipe underflow, we will reset all the pipes*/

	/* Recovery sequence applied below:
	 * DCHUBP_CNTL:HUBP_BLANK_EN=1
	 * DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
	 * DCHUBP_CNTL:HUBP_DISABLE=1
	 * DCHUBP_CNTL:HUBP_DISABLE=0
	 * DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
	 * DCSURF_PRIMARY_SURFACE_ADDRESS
	 * DCHUBP_CNTL:HUBP_BLANK_EN=0
	 */

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
				&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, true);

	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
	hubbub1_soft_reset(dc->res_pool->hubbub, true);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
				&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, true);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
				&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
			/* NOTE(review): comment says HUBP_DISABLE=0 but 'true'
			 * is passed — confirm hubp_disable_control polarity.
			 */
			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, true);

	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
	hubbub1_soft_reset(dc->res_pool->hubbub, false);
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
				&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
			/* NOTE(review): comment says HUBP_BLANK_EN=0 but 'true'
			 * is passed — confirm set_hubp_blank_en polarity here.
			 */
			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, true);
/*
 * Sanity check that HUBBUB still reports "allow p-state change" high.
 * On failure, optionally log HW state, break into the debugger, attempt
 * the force-recovery workaround and re-verify.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
		if (should_log_hw_state) {
			dcn10_log_hw_state(dc, NULL);

		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
				BREAK_TO_DEBUGGER();
1029 /* trigger HW to start disconnect plane from stream on the next vsync */
1030 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1032 struct dce_hwseq *hws = dc->hwseq;
1033 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1034 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1035 struct mpc *mpc = dc->res_pool->mpc;
1036 struct mpc_tree *mpc_tree_params;
1037 struct mpcc *mpcc_to_remove = NULL;
1038 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1040 mpc_tree_params = &(opp->mpc_tree_params);
1041 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1044 if (mpcc_to_remove == NULL)
1047 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1049 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1051 dc->optimized_required = true;
1053 if (hubp->funcs->hubp_disconnect)
1054 hubp->funcs->hubp_disconnect(hubp);
1056 if (dc->debug.sanity_checks)
1057 hws->funcs.verify_allow_pstate_change_high(dc);
1060 void dcn10_plane_atomic_power_down(struct dc *dc,
1064 struct dce_hwseq *hws = dc->hwseq;
1065 DC_LOGGER_INIT(dc->ctx->logger);
1067 if (REG(DC_IP_REQUEST_CNTL)) {
1068 REG_SET(DC_IP_REQUEST_CNTL, 0,
1070 hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1071 hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1072 dpp->funcs->dpp_reset(dpp);
1073 REG_SET(DC_IP_REQUEST_CNTL, 0,
1076 "Power gated front end %d\n", hubp->inst);
1080 /* disable HW used by plane.
1081 * note: cannot disable until disconnect is complete
 *
 * Waits for the pending MPCC disconnect, gates the HUBP/DPP clocks, gates
 * the OPP pipe clock when its MPC tree is empty, power gates the front end,
 * then clears the pipe_ctx bookkeeping so the pipe reads as free.
 * NOTE(review): truncated chunk — braces and some lines elided; code tokens
 * preserved byte-for-byte.
 */
1083 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1085 struct dce_hwseq *hws = dc->hwseq;
1086 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1087 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1088 int opp_id = hubp->opp_id;
/* Must not disable hardware while the MPCC is still mid-disconnect. */
1090 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1092 hubp->funcs->hubp_clk_cntl(hubp, false);
1094 dpp->funcs->dpp_dppclk_control(dpp, false, false);
/* Gate the OPP pipe clock only if the hubp was attached to a valid OPP
 * (0xf presumably means "invalid/unassigned" — confirm against OPP_ID_INVALID)
 * and no MPCCs remain in that OPP's tree. */
1096 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1097 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1098 pipe_ctx->stream_res.opp,
1101 hubp->power_gated = true;
1102 dc->optimized_required = false; /* We're powering off, no need to optimize */
1104 hws->funcs.plane_atomic_power_down(dc,
1105 pipe_ctx->plane_res.dpp,
1106 pipe_ctx->plane_res.hubp);
/* Scrub the pipe context so the pipe no longer appears in use. */
1108 pipe_ctx->stream = NULL;
1109 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1110 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1111 pipe_ctx->top_pipe = NULL;
1112 pipe_ctx->bottom_pipe = NULL;
1113 pipe_ctx->plane_state = NULL;
/*
 * dcn10_disable_plane - fully power down a plane's front end if it is not
 * already gated, then apply the DEGVIDCN10-253 workaround.
 * No-ops (early return presumably elided) when the hubp is absent or already
 * power gated.
 */
1116 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1118 struct dce_hwseq *hws = dc->hwseq;
1119 DC_LOGGER_INIT(dc->ctx->logger);
1121 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1124 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
/* Hardware workaround applied after any plane disable. */
1126 apply_DEGVIDCN10_253_wa(dc);
1128 DC_LOG_DC("Power down front end %d\n",
1129 pipe_ctx->pipe_idx);
/*
 * dcn10_init_pipes - bring all pipes to a known-disabled state at init.
 * Three passes over the resource pool:
 *   1. blank/lock every enabled timing generator,
 *   2. reset each MPC mux instance,
 *   3. wire a temporary pipe_ctx per pipe, disconnect and disable its plane,
 *      then clear the bookkeeping and re-init the TG.
 * Pipes carrying a stream are left alone when seamless boot optimization is
 * requested, so the firmware-lit display is not disturbed.
 * NOTE(review): truncated chunk — loop braces, 'continue'/'return' lines and
 * some statements are elided; code tokens preserved byte-for-byte.
 */
1132 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1135 struct dce_hwseq *hws = dc->hwseq;
1136 bool can_apply_seamless_boot = false;
/* Seamless boot applies if any stream in the incoming context asks for it. */
1138 for (i = 0; i < context->stream_count; i++) {
1139 if (context->streams[i]->apply_seamless_boot_optimization) {
1140 can_apply_seamless_boot = true;
1145 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1146 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1147 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1149 /* There is assumption that pipe_ctx is not mapping irregularly
1150 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1151 * we will use the pipe, so don't disable
1153 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1156 /* Blank controller using driver code instead of
 * (comment continuation elided by truncation)
1159 if (tg->funcs->is_tg_enabled(tg)) {
/* Prefer the hw-specific init_blank when provided; otherwise fall back
 * to set_blank + wait. Either way the TG ends up locked. */
1160 if (hws->funcs.init_blank != NULL) {
1161 hws->funcs.init_blank(dc, tg);
1162 tg->funcs->lock(tg);
1164 tg->funcs->lock(tg);
1165 tg->funcs->set_blank(tg, true);
1166 hwss_wait_for_blank_complete(tg);
1171 /* num_opp will be equal to number of mpcc */
1172 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1173 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1175 /* Cannot reset the MPC mux if seamless boot */
1176 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1179 dc->res_pool->mpc->funcs->mpc_init_single_inst(
1180 dc->res_pool->mpc, i);
1183 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1184 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1185 struct hubp *hubp = dc->res_pool->hubps[i];
1186 struct dpp *dpp = dc->res_pool->dpps[i];
1187 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1189 /* There is assumption that pipe_ctx is not mapping irregularly
1190 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1191 * we will use the pipe, so don't disable
1193 if (can_apply_seamless_boot &&
1194 pipe_ctx->stream != NULL &&
1195 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1196 pipe_ctx->stream_res.tg)) {
1197 // Enable double buffering for OTG_BLANK no matter if
1198 // seamless boot is enabled or not to suppress global sync
1199 // signals when OTG blanked. This is to prevent pipe from
1200 // requesting data while in PSR.
1201 tg->funcs->tg_init(tg);
1205 /* Disable on the current state so the new one isn't cleared. */
1206 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1208 dpp->funcs->dpp_reset(dpp);
/* Build a minimal pipe_ctx describing hw defaults so the disconnect and
 * disable paths below operate on a coherent view of the pipe. */
1210 pipe_ctx->stream_res.tg = tg;
1211 pipe_ctx->pipe_idx = i;
1213 pipe_ctx->plane_res.hubp = hubp;
1214 pipe_ctx->plane_res.dpp = dpp;
1215 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1216 hubp->mpcc_id = dpp->inst;
1217 hubp->opp_id = OPP_ID_INVALID;
1218 hubp->power_gated = false;
1220 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1221 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1222 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1223 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1225 hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
/* TG was locked in pass 1; unlock before disabling the plane. */
1227 if (tg->funcs->is_tg_enabled(tg))
1228 tg->funcs->unlock(tg);
1230 dc->hwss.disable_plane(dc, pipe_ctx);
1232 pipe_ctx->stream_res.tg = NULL;
1233 pipe_ctx->plane_res.hubp = NULL;
1235 tg->funcs->tg_init(tg);
/*
 * dcn10_init_hw - one-time hardware initialization for the DCN10 display core.
 * Order of operations: clock manager / DCCG init, FPGA-vs-ASIC specific
 * setup (VGA disable, BIOS golden init, reference-clock discovery), link
 * encoder init, DSC power gating, optional boot-time DP receiver power-down,
 * pipe init, audio init, ABM/DMCU init, and final clock-gating enable.
 * NOTE(review): truncated chunk — braces, 'return'/'else' lines and some
 * statements are elided; code tokens preserved byte-for-byte.
 */
1239 void dcn10_init_hw(struct dc *dc)
1242 struct abm *abm = dc->res_pool->abm;
1243 struct dmcu *dmcu = dc->res_pool->dmcu;
1244 struct dce_hwseq *hws = dc->hwseq;
1245 struct dc_bios *dcb = dc->ctx->dc_bios;
1246 struct resource_pool *res_pool = dc->res_pool;
1248 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1249 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1251 // Initialize the dccg
1252 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1253 dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
/* FPGA (Maximus) emulation takes a reduced init path; the ASIC path
 * (presumably the else branch, elided) starts at the is_accelerated_mode
 * check below. */
1255 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1257 REG_WRITE(REFCLK_CNTL, 0);
1258 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1259 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1261 if (!dc->debug.disable_clock_gate) {
1262 /* enable all DCN clock gating */
1263 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1265 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1267 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1270 //Enable ability to power gate / don't force power on permanently
1271 if (hws->funcs.enable_power_gating_plane)
1272 hws->funcs.enable_power_gating_plane(hws, true);
/* ASIC path: if VBIOS did not leave us in accelerated mode, tear down VGA. */
1277 if (!dcb->funcs->is_accelerated_mode(dcb))
1278 hws->funcs.disable_vga(dc->hwseq);
1280 hws->funcs.bios_golden_init(dc);
/* Derive reference clocks from VBIOS firmware info when available. */
1281 if (dc->ctx->dc_bios->fw_info_valid) {
1282 res_pool->ref_clocks.xtalin_clock_inKhz =
1283 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1285 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1286 if (res_pool->dccg && res_pool->hubbub) {
1288 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1289 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1290 &res_pool->ref_clocks.dccg_ref_clock_inKhz);
1292 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1293 res_pool->ref_clocks.dccg_ref_clock_inKhz,
1294 &res_pool->ref_clocks.dchub_ref_clock_inKhz);
1296 // Not all ASICs have DCCG sw component
1297 res_pool->ref_clocks.dccg_ref_clock_inKhz =
1298 res_pool->ref_clocks.xtalin_clock_inKhz;
1299 res_pool->ref_clocks.dchub_ref_clock_inKhz =
1300 res_pool->ref_clocks.xtalin_clock_inKhz;
/* fw_info invalid is unexpected on real hardware (else branch elided). */
1304 ASSERT_CRITICAL(false);
1306 for (i = 0; i < dc->link_count; i++) {
1307 /* Power up AND update implementation according to the
1308 * required signal (which may be different from the
1309 * default signal on connector).
1311 struct dc_link *link = dc->links[i];
1313 link->link_enc->funcs->hw_init(link->link_enc);
1315 /* Check for enabled DIG to identify enabled display */
1316 if (link->link_enc->funcs->is_dig_enabled &&
1317 link->link_enc->funcs->is_dig_enabled(link->link_enc))
1318 link->link_status.link_active = true;
1321 /* Power gate DSCs */
1322 for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1323 if (hws->funcs.dsc_pg_control != NULL)
1324 hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1326 /* we want to turn off all dp displays before doing detection */
1327 if (dc->config.power_down_display_on_boot) {
1328 uint8_t dpcd_power_state = '\0';
1329 enum dc_status status = DC_ERROR_UNEXPECTED;
/* Only DP connectors are considered here (non-DP 'continue' elided). */
1331 for (i = 0; i < dc->link_count; i++) {
1332 if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
1336 * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
1337 * which needs to read dpcd info with the help of aconnector.
1338 * If aconnector (dc->links[i]->prev) is NULL, then dpcd status
1341 if (dc->links[i]->priv) {
1342 /* if any of the displays are lit up turn them off */
1343 status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
1344 &dpcd_power_state, sizeof(dpcd_power_state));
1345 if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
1346 dp_receiver_power_ctrl(dc->links[i], false);
1351 /* If taking control over from VBIOS, we may want to optimize our first
1352 * mode set, so we need to skip powering down pipes until we know which
1353 * pipes we want to use.
1354 * Otherwise, if taking control is not possible, we need to power
 * (comment continuation elided by truncation)
1357 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1358 hws->funcs.init_pipes(dc, dc->current_state);
1359 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1360 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1361 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1364 for (i = 0; i < res_pool->audio_count; i++) {
1365 struct audio *audio = res_pool->audios[i];
1367 audio->funcs->hw_init(audio);
/* ABM/DMCU init — guarding NULL-checks for abm appear elided here. */
1371 abm->funcs->init_backlight(abm);
1372 abm->funcs->abm_init(abm);
1375 if (dmcu != NULL && !dmcu->auto_load_dmcu)
1376 dmcu->funcs->dmcu_init(dmcu);
1378 if (abm != NULL && dmcu != NULL)
1379 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1381 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1382 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1384 if (!dc->debug.disable_clock_gate) {
1385 /* enable all DCN clock gating */
1386 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1388 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1390 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1392 if (hws->funcs.enable_power_gating_plane)
1393 hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1395 if (dc->clk_mgr->funcs->notify_wm_ranges)
1396 dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
/*
 * dcn10_reset_hw_ctx_wrap - reset back ends for pipes that are going away or
 * need reprogramming in the new context.
 * Iterates pipes in reverse so bottom pipes are handled before their tops;
 * skips pipes with no current stream and non-top (split) pipes.
 * NOTE(review): truncated chunk — 'continue' lines and braces elided; code
 * tokens preserved byte-for-byte.
 */
1400 void dcn10_reset_hw_ctx_wrap(
1402 struct dc_state *context)
1405 struct dce_hwseq *hws = dc->hwseq;
1408 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1409 struct pipe_ctx *pipe_ctx_old =
1410 &dc->current_state->res_ctx.pipe_ctx[i];
1411 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1413 if (!pipe_ctx_old->stream)
1416 if (pipe_ctx_old->top_pipe)
/* Reset when the pipe loses its stream or needs full reprogramming. */
1419 if (!pipe_ctx->stream ||
1420 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1421 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1423 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1424 if (hws->funcs.enable_stream_gating)
1425 hws->funcs.enable_stream_gating(dc, pipe_ctx);
/* Power down the old clock source (NULL guard presumably elided). */
1427 old_clk->funcs->cs_power_down(old_clk);
/*
 * patch_address_for_sbs_tb_stereo - for side-by-side / top-and-bottom stereo,
 * make the secondary (split) pipe scan the right-eye address.
 * Saves the original left address into *addr (caller restores it later in
 * dcn10_update_plane_addr) and, per the visible code, returns whether the
 * address was patched (return statements elided by truncation).
 */
1432 static bool patch_address_for_sbs_tb_stereo(
1433 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1435 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
/* Secondary split pipe: same plane_state shared with the top pipe. */
1436 bool sec_split = pipe_ctx->top_pipe &&
1437 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1438 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1439 (pipe_ctx->stream->timing.timing_3d_format ==
1440 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1441 pipe_ctx->stream->timing.timing_3d_format ==
1442 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
/* Stash left address, substitute the right-eye address for this pipe. */
1443 *addr = plane_state->address.grph_stereo.left_addr;
1444 plane_state->address.grph_stereo.left_addr =
1445 plane_state->address.grph_stereo.right_addr;
/* 3D view with a non-stereo address: promote to stereo, mirroring the
 * left address into the right slot. */
1448 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1449 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1450 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1451 plane_state->address.grph_stereo.right_addr =
1452 plane_state->address.grph_stereo.left_addr;
/*
 * dcn10_update_plane_addr - program the plane's surface address into the
 * HUBP (flip), handling the SBS/TB stereo address patch around the call.
 * Updates the plane's status addresses; for immediate flips the current
 * address is updated right away.
 */
1458 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1460 bool addr_patched = false;
1461 PHYSICAL_ADDRESS_LOC addr;
1462 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
/* No plane bound to this pipe: nothing to flip (early return elided). */
1464 if (plane_state == NULL)
1467 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1469 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1470 pipe_ctx->plane_res.hubp,
1471 &plane_state->address,
1472 plane_state->flip_immediate);
1474 plane_state->status.requested_address = plane_state->address;
1476 if (plane_state->flip_immediate)
1477 plane_state->status.current_address = plane_state->address;
/* Restore the left-eye address saved by the stereo patch (the guarding
 * 'if (addr_patched)' presumably elided by truncation). */
1480 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
/*
 * dcn10_set_input_transfer_func - program the DPP input (degamma) transfer
 * function for a plane: optional input LUT, then degamma mode selection by
 * transfer-function type (bypass / predefined hw curves / user PWL).
 * NOTE(review): truncated chunk — braces, 'break'/'return' lines and the
 * distributed-points branch header are elided; code tokens preserved
 * byte-for-byte.
 */
1483 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1484 const struct dc_plane_state *plane_state)
1486 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1487 const struct dc_transfer_func *tf = NULL;
1490 if (dpp_base == NULL)
1493 if (plane_state->in_transfer_func)
1494 tf = plane_state->in_transfer_func;
/* Program the input LUT only when a usable, non-identity gamma is given
 * and the surface format supports LUT processing. */
1496 if (plane_state->gamma_correction &&
1497 !dpp_base->ctx->dc->debug.always_use_regamma
1498 && !plane_state->gamma_correction->is_identity
1499 && dce_use_lut(plane_state->format))
1500 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
/* tf == NULL -> bypass degamma (the guarding 'if (tf == NULL)' is
 * presumably on an elided line). */
1503 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1504 else if (tf->type == TF_TYPE_PREDEFINED) {
1506 case TRANSFER_FUNCTION_SRGB:
1507 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1509 case TRANSFER_FUNCTION_BT709:
1510 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1512 case TRANSFER_FUNCTION_LINEAR:
1513 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1515 case TRANSFER_FUNCTION_PQ:
/* PQ has no fixed hw curve: translate to a user PWL and program it. */
1516 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1517 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1518 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1525 } else if (tf->type == TF_TYPE_BYPASS) {
1526 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
/* Remaining case (presumably distributed points) also goes through the
 * user PWL path. */
1528 cm_helper_translate_curve_to_degamma_hw_format(tf,
1529 &dpp_base->degamma_params);
1530 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1531 &dpp_base->degamma_params);
1538 #define MAX_NUM_HW_POINTS 0x200
/*
 * log_tf - dump a transfer function's point values to the gamma log.
 * First loop logs the hw points; second loop logs the remaining points up to
 * MAX_NUM_HW_POINTS at the more verbose "all gamma" level. Red goes to the
 * default gamma log, green/blue to the all-channels log.
 */
1540 static void log_tf(struct dc_context *ctx,
1541 struct dc_transfer_func *tf, uint32_t hw_points_num)
1543 // DC_LOG_GAMMA is default logging of all hw points
1544 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1545 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1548 DC_LOGGER_INIT(ctx->logger);
1549 DC_LOG_GAMMA("Gamma Correction TF");
1550 DC_LOG_ALL_GAMMA("Logging all tf points...");
1551 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1553 for (i = 0; i < hw_points_num; i++) {
1554 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1555 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1556 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1559 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1560 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1561 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1562 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/*
 * dcn10_set_output_transfer_func - program the DPP regamma (output transfer
 * function) for a stream: predefined sRGB fast path, otherwise translate the
 * curve to a hw PWL; bypass if translation fails. Logs the resulting TF.
 * NOTE(review): truncated chunk — braces/early-return lines elided; code
 * tokens preserved byte-for-byte.
 */
1566 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1567 const struct dc_stream_state *stream)
1569 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1574 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
/* Fast path: predefined sRGB uses the hardware's built-in curve. */
1576 if (stream->out_transfer_func &&
1577 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1578 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1579 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1581 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
 * (comment continuation elided by truncation)
1584 else if (cm_helper_translate_curve_to_hw_format(
1585 stream->out_transfer_func,
1586 &dpp->regamma_params, false)) {
1587 dpp->funcs->dpp_program_regamma_pwl(
1589 &dpp->regamma_params, OPP_REGAMMA_USER);
/* Translation failed or no TF: bypass regamma (else branch header elided). */
1591 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1593 if (stream != NULL && stream->ctx != NULL &&
1594 stream->out_transfer_func != NULL) {
1596 stream->out_transfer_func,
1597 dpp->regamma_params.hw_points_num);
/*
 * dcn10_pipe_control_lock - lock or unlock a pipe's timing generator so
 * register updates latch atomically (the lock/unlock selector parameter is
 * on an elided signature line). Only the top pipe of a TG needs locking.
 */
1603 void dcn10_pipe_control_lock(
1605 struct pipe_ctx *pipe,
1608 struct dce_hwseq *hws = dc->hwseq;
1610 /* use TG master update lock to lock everything on the TG
1611 * therefore only top pipe need to lock
 * (comment continuation / early return elided by truncation)
1613 if (!pipe || pipe->top_pipe)
1616 if (dc->debug.sanity_checks)
1617 hws->funcs.verify_allow_pstate_change_high(dc);
/* Lock vs unlock selected by the elided boolean parameter. */
1620 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1622 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1624 if (dc->debug.sanity_checks)
1625 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * dcn10_cursor_lock - lock/unlock cursor updates for the MPCC tree that the
 * pipe's OPP drives; only one lock per stream (top pipe) is required.
 */
1628 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1630 /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1631 if (!pipe || pipe->top_pipe)
1634 dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1635 pipe->stream_res.opp->inst, lock);
/*
 * wait_for_reset_trigger_to_occur - poll a timing generator for up to 10
 * frames until its triggered reset fires; per the visible code, returns
 * success/failure (the return statements are elided by truncation).
 * Bails out early if the TG counter is not moving at all.
 */
1638 static bool wait_for_reset_trigger_to_occur(
1639 struct dc_context *dc_ctx,
1640 struct timing_generator *tg)
1644 /* To avoid endless loop we wait at most
1645 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1646 const uint32_t frames_to_wait_on_triggered_reset = 10;
1649 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
/* A frozen counter means waiting frame-by-frame would hang. */
1651 if (!tg->funcs->is_counter_moving(tg)) {
1652 DC_ERROR("TG counter is not moving!\n");
1656 if (tg->funcs->did_triggered_reset_occur(tg)) {
1658 /* usually occurs at i=1 */
1659 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1664 /* Wait for one frame. */
1665 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1666 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1670 DC_ERROR("GSL: Timeout on reset trigger!\n");
/*
 * dcn10_enable_timing_synchronization - synchronize a group of pipes' OTGs:
 * arm a reset trigger on every follower TG sourced from the first pipe's TG,
 * wait for one follower to observe the reset, then disarm all triggers.
 * (The group_size parameter line is elided by truncation.)
 */
1675 void dcn10_enable_timing_synchronization(
1679 struct pipe_ctx *grouped_pipes[])
1681 struct dc_context *dc_ctx = dc->ctx;
1684 DC_SYNC_INFO("Setting up OTG reset trigger\n");
/* Pipe 0 is the master; followers (i >= 1) reset off its TG instance. */
1686 for (i = 1; i < group_size; i++)
1687 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
1688 grouped_pipes[i]->stream_res.tg,
1689 grouped_pipes[0]->stream_res.tg->inst);
1691 DC_SYNC_INFO("Waiting for trigger\n");
1693 /* Need to get only check 1 pipe for having reset as all the others are
1694 * synchronized. Look at last pipe programmed to reset.
 * (comment end elided by truncation)
1697 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
1698 for (i = 1; i < group_size; i++)
1699 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
1700 grouped_pipes[i]->stream_res.tg);
1702 DC_SYNC_INFO("Sync complete\n");
/*
 * dcn10_enable_per_frame_crtc_position_reset - enable per-frame CRTC position
 * reset on every pipe in the group (tied to each stream's triggered_crtc_reset
 * params), then wait for each pipe's trigger to occur.
 * (The group_size parameter line and one argument line are elided.)
 */
1705 void dcn10_enable_per_frame_crtc_position_reset(
1708 struct pipe_ctx *grouped_pipes[])
1710 struct dc_context *dc_ctx = dc->ctx;
1713 DC_SYNC_INFO("Setting up\n")
1714 for (i = 0; i < group_size; i++)
1715 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1716 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1717 grouped_pipes[i]->stream_res.tg,
1719 &grouped_pipes[i]->stream->triggered_crtc_reset);
1721 DC_SYNC_INFO("Waiting for trigger\n");
/* Unlike timing sync, every pipe here is waited on individually. */
1723 for (i = 0; i < group_size; i++)
1724 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1726 DC_SYNC_INFO("Multi-display sync is complete\n");
1729 /*static void print_rq_dlg_ttu(
1731 struct pipe_ctx *pipe_ctx)
1733 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1734 "\n============== DML TTU Output parameters [%d] ==============\n"
1735 "qos_level_low_wm: %d, \n"
1736 "qos_level_high_wm: %d, \n"
1737 "min_ttu_vblank: %d, \n"
1738 "qos_level_flip: %d, \n"
1739 "refcyc_per_req_delivery_l: %d, \n"
1740 "qos_level_fixed_l: %d, \n"
1741 "qos_ramp_disable_l: %d, \n"
1742 "refcyc_per_req_delivery_pre_l: %d, \n"
1743 "refcyc_per_req_delivery_c: %d, \n"
1744 "qos_level_fixed_c: %d, \n"
1745 "qos_ramp_disable_c: %d, \n"
1746 "refcyc_per_req_delivery_pre_c: %d\n"
1747 "=============================================================\n",
1749 pipe_ctx->ttu_regs.qos_level_low_wm,
1750 pipe_ctx->ttu_regs.qos_level_high_wm,
1751 pipe_ctx->ttu_regs.min_ttu_vblank,
1752 pipe_ctx->ttu_regs.qos_level_flip,
1753 pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
1754 pipe_ctx->ttu_regs.qos_level_fixed_l,
1755 pipe_ctx->ttu_regs.qos_ramp_disable_l,
1756 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
1757 pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
1758 pipe_ctx->ttu_regs.qos_level_fixed_c,
1759 pipe_ctx->ttu_regs.qos_ramp_disable_c,
1760 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1763 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1764 "\n============== DML DLG Output parameters [%d] ==============\n"
1765 "refcyc_h_blank_end: %d, \n"
1766 "dlg_vblank_end: %d, \n"
1767 "min_dst_y_next_start: %d, \n"
1768 "refcyc_per_htotal: %d, \n"
1769 "refcyc_x_after_scaler: %d, \n"
1770 "dst_y_after_scaler: %d, \n"
1771 "dst_y_prefetch: %d, \n"
1772 "dst_y_per_vm_vblank: %d, \n"
1773 "dst_y_per_row_vblank: %d, \n"
1774 "ref_freq_to_pix_freq: %d, \n"
1775 "vratio_prefetch: %d, \n"
1776 "refcyc_per_pte_group_vblank_l: %d, \n"
1777 "refcyc_per_meta_chunk_vblank_l: %d, \n"
1778 "dst_y_per_pte_row_nom_l: %d, \n"
1779 "refcyc_per_pte_group_nom_l: %d, \n",
1781 pipe_ctx->dlg_regs.refcyc_h_blank_end,
1782 pipe_ctx->dlg_regs.dlg_vblank_end,
1783 pipe_ctx->dlg_regs.min_dst_y_next_start,
1784 pipe_ctx->dlg_regs.refcyc_per_htotal,
1785 pipe_ctx->dlg_regs.refcyc_x_after_scaler,
1786 pipe_ctx->dlg_regs.dst_y_after_scaler,
1787 pipe_ctx->dlg_regs.dst_y_prefetch,
1788 pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
1789 pipe_ctx->dlg_regs.dst_y_per_row_vblank,
1790 pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
1791 pipe_ctx->dlg_regs.vratio_prefetch,
1792 pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
1793 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
1794 pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
1795 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1798 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1799 "\ndst_y_per_meta_row_nom_l: %d, \n"
1800 "refcyc_per_meta_chunk_nom_l: %d, \n"
1801 "refcyc_per_line_delivery_pre_l: %d, \n"
1802 "refcyc_per_line_delivery_l: %d, \n"
1803 "vratio_prefetch_c: %d, \n"
1804 "refcyc_per_pte_group_vblank_c: %d, \n"
1805 "refcyc_per_meta_chunk_vblank_c: %d, \n"
1806 "dst_y_per_pte_row_nom_c: %d, \n"
1807 "refcyc_per_pte_group_nom_c: %d, \n"
1808 "dst_y_per_meta_row_nom_c: %d, \n"
1809 "refcyc_per_meta_chunk_nom_c: %d, \n"
1810 "refcyc_per_line_delivery_pre_c: %d, \n"
1811 "refcyc_per_line_delivery_c: %d \n"
1812 "========================================================\n",
1813 pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
1814 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
1815 pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
1816 pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
1817 pipe_ctx->dlg_regs.vratio_prefetch_c,
1818 pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
1819 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
1820 pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
1821 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
1822 pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
1823 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
1824 pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
1825 pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
1828 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1829 "\n============== DML RQ Output parameters [%d] ==============\n"
1831 "min_chunk_size: %d \n"
1832 "meta_chunk_size: %d \n"
1833 "min_meta_chunk_size: %d \n"
1834 "dpte_group_size: %d \n"
1835 "mpte_group_size: %d \n"
1836 "swath_height: %d \n"
1837 "pte_row_height_linear: %d \n"
1838 "========================================================\n",
1840 pipe_ctx->rq_regs.rq_regs_l.chunk_size,
1841 pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
1842 pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
1843 pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
1844 pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
1845 pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
1846 pipe_ctx->rq_regs.rq_regs_l.swath_height,
1847 pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
/*
 * mmhub_read_vm_system_aperture_settings - read the MC VM system aperture
 * registers (default physical page, low/high logical bounds) and convert
 * them into byte addresses in *apt.
 * Temporary: values are read back from registers rather than supplied by
 * the kernel mode driver (see the note on the vm_context0 variant below).
 */
1852 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
1853 struct vm_system_aperture_param *apt,
1854 struct dce_hwseq *hws)
1856 PHYSICAL_ADDRESS_LOC physical_page_number;
1857 uint32_t logical_addr_low;
1858 uint32_t logical_addr_high;
1860 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
1861 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
1862 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
1863 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
1865 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1866 LOGICAL_ADDR, &logical_addr_low);
1868 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1869 LOGICAL_ADDR, &logical_addr_high);
/* Page number -> byte address: << 12 (4 KiB pages); the logical bounds
 * use << 18 — presumably a coarser register granularity, confirm against
 * the register spec. */
1871 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
1872 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
1873 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
1876 /* Temporary read settings, future will get values from kmd directly */
/*
 * mmhub_read_vm_context0_settings - read VM context 0 page-table registers
 * (PTE base/start/end, fault default address) into *vm0, then rebase the
 * PTE base address from UMA space into the DCN view using the SDPIF
 * FB base/offset (see the in-body comment at 1910-1913).
 */
1877 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
1878 struct vm_context0_param *vm0,
1879 struct dce_hwseq *hws)
1881 PHYSICAL_ADDRESS_LOC fb_base;
1882 PHYSICAL_ADDRESS_LOC fb_offset;
1883 uint32_t fb_base_value;
1884 uint32_t fb_offset_value;
1886 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
1887 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
1889 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
1890 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
1891 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
1892 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
1894 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
1895 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
1896 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
1897 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
1899 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
1900 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
1901 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
1902 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
1904 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
1905 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
1906 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
1907 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
1910 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
1911 * Therefore we need to do
1912 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
1913 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
/* FB base/offset registers hold the address in 16 MiB units (<< 24 —
 * presumably; confirm against the SDPIF register spec). */
1915 fb_base.quad_part = (uint64_t)fb_base_value << 24;
1916 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
1917 vm0->pte_base.quad_part += fb_base.quad_part;
1918 vm0->pte_base.quad_part -= fb_offset.quad_part;
/*
 * dcn10_program_pte_vm - read the system aperture and VM context 0 settings
 * back from MMHUB registers and program them into the HUBP, so the plane's
 * PTE-based addressing works for GPU-VM surfaces.
 */
1922 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
1924 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
1925 struct vm_system_aperture_param apt = { {{ 0 } } };
1926 struct vm_context0_param vm0 = { { { 0 } } };
1928 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
1929 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
1931 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
1932 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
/*
 * dcn10_enable_plane - power up and clock-enable the hardware for a plane:
 * undo the DEGVIDCN10-253 workaround, power on the plane's HUBP/DPP, enable
 * the DCHUB and OPP pipe clocks, log the plane geometry, and program PTE VM
 * settings when GPU VM is supported.
 * NOTE(review): truncated chunk — braces and some log-format lines elided;
 * the DC_LOG_DC calls reference 'plane_state', whose declaration line is
 * presumably elided. Code tokens preserved byte-for-byte.
 */
1935 static void dcn10_enable_plane(
1937 struct pipe_ctx *pipe_ctx,
1938 struct dc_state *context)
1940 struct dce_hwseq *hws = dc->hwseq;
1942 if (dc->debug.sanity_checks) {
1943 hws->funcs.verify_allow_pstate_change_high(dc);
/* Re-enabling a plane invalidates the 253 workaround state. */
1946 undo_DEGVIDCN10_253_wa(dc);
1948 power_on_plane(dc->hwseq,
1949 pipe_ctx->plane_res.hubp->inst);
1951 /* enable DCFCLK current DCHUB */
1952 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
1954 /* make sure OPP_PIPE_CLOCK_EN = 1 */
1955 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1956 pipe_ctx->stream_res.opp,
1959 /* TODO: enable/disable in dm as per update type.
 * (comment continuation elided by truncation) */
1961 DC_LOG_DC(dc->ctx->logger,
1962 "Pipe:%d 0x%x: addr hi:0x%x, "
1965 " %d; dst: %d, %d, %d, %d;\n",
1968 plane_state->address.grph.addr.high_part,
1969 plane_state->address.grph.addr.low_part,
1970 plane_state->src_rect.x,
1971 plane_state->src_rect.y,
1972 plane_state->src_rect.width,
1973 plane_state->src_rect.height,
1974 plane_state->dst_rect.x,
1975 plane_state->dst_rect.y,
1976 plane_state->dst_rect.width,
1977 plane_state->dst_rect.height);
1979 DC_LOG_DC(dc->ctx->logger,
1980 "Pipe %d: width, height, x, y format:%d\n"
1981 "viewport:%d, %d, %d, %d\n"
1982 "recout: %d, %d, %d, %d\n",
1984 plane_state->format,
1985 pipe_ctx->plane_res.scl_data.viewport.width,
1986 pipe_ctx->plane_res.scl_data.viewport.height,
1987 pipe_ctx->plane_res.scl_data.viewport.x,
1988 pipe_ctx->plane_res.scl_data.viewport.y,
1989 pipe_ctx->plane_res.scl_data.recout.width,
1990 pipe_ctx->plane_res.scl_data.recout.height,
1991 pipe_ctx->plane_res.scl_data.recout.x,
1992 pipe_ctx->plane_res.scl_data.recout.y);
1993 print_rq_dlg_ttu(dc, pipe_ctx);
/* GPU VM surfaces need the aperture / context-0 PTE setup on this hubp. */
1996 if (dc->config.gpu_vm_support)
1997 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
1999 if (dc->debug.sanity_checks) {
2000 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * dcn10_program_gamut_remap - program the DPP gamut remap matrix.
 * Stream-level remap takes precedence over plane-level remap; when neither
 * is enabled the adjustment stays in BYPASS mode.
 */
2004 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2007 struct dpp_grph_csc_adjustment adjust;
2008 memset(&adjust, 0, sizeof(adjust));
2009 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2012 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2013 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2014 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2015 adjust.temperature_matrix[i] =
2016 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2017 } else if (pipe_ctx->plane_state &&
2018 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2019 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2020 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2021 adjust.temperature_matrix[i] =
2022 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2025 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
/*
 * dcn10_is_rear_mpo_fix_required - decide whether the rear-plane RGB-bias MPO
 * fix applies: the pipe must carry a rear plane (layer_index > 0), the output
 * colorspace must be RGB, and the top-most pipe's plane must be the visible
 * front plane (layer_index == 0). Returns true in that case ('return false'
 * paths elided by truncation).
 */
2029 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2031 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2032 if (pipe_ctx->top_pipe) {
2033 struct pipe_ctx *top = pipe_ctx->top_pipe;
2035 while (top->top_pipe)
2036 top = top->top_pipe; // Traverse to top pipe_ctx
2037 if (top->plane_state && top->plane_state->layer_index == 0)
2038 return true; // Front MPO plane not hidden
/*
 * dcn10_set_csc_adjustment_rgb_mpo_fix - program the output CSC with the RGB
 * bias entries (matrix[3/7/11]) zeroed to avoid double-adding the brightness
 * offset during MPC blending (the zeroing statements are elided between
 * 2047 and 2052), then restore the caller's matrix in place afterwards.
 */
2044 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2046 // Override rear plane RGB bias to fix MPO brightness
2047 uint16_t rgb_bias = matrix[3];
2052 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
/* Undo the in-place override so the caller's matrix is unchanged. */
2053 matrix[3] = rgb_bias;
2054 matrix[7] = rgb_bias;
2055 matrix[11] = rgb_bias;
/*
 * dcn10_program_output_csc - program the output color space conversion:
 * a caller-supplied adjustment matrix when enabled (with the DCN1 rear-plane
 * MPO brightness fix when required), otherwise the default matrix for the
 * colorspace. (The 'uint16_t *matrix' parameter line is elided.)
 */
2058 void dcn10_program_output_csc(struct dc *dc,
2059 struct pipe_ctx *pipe_ctx,
2060 enum dc_color_space colorspace,
2064 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2065 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2067 /* MPO is broken with RGB colorspaces when OCSC matrix
2068 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2069 * Blending adds offsets from front + rear to rear plane
 * (comment continuation elided by truncation)
2071 * Fix is to set RGB bias to 0 on rear plane, top plane
2072 * black value pixels add offset instead of rear + front
 * (comment end elided by truncation)
2075 int16_t rgb_bias = matrix[3];
2076 // matrix[3/7/11] are all the same offset value
2078 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2079 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
/* Fix not required: program the matrix as-is (else branch header elided). */
2081 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2085 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2086 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
/*
 * dcn10_get_surface_visual_confirm_color - pick a debug border color keyed to
 * the plane's pixel format so the active format can be confirmed visually:
 * ARGB8888=red, ARGB2101010=blue, 420BPP8=green, 420BPP10=yellow, FP16=white.
 * ('break' lines between cases are elided by truncation.)
 */
2090 void dcn10_get_surface_visual_confirm_color(
2091 const struct pipe_ctx *pipe_ctx,
2092 struct tg_color *color)
2094 uint32_t color_value = MAX_TG_COLOR_VALUE;
2096 switch (pipe_ctx->plane_res.scl_data.format) {
2097 case PIXEL_FORMAT_ARGB8888:
2098 /* set boarder color to red */
2099 color->color_r_cr = color_value;
2102 case PIXEL_FORMAT_ARGB2101010:
2103 /* set boarder color to blue */
2104 color->color_b_cb = color_value;
2106 case PIXEL_FORMAT_420BPP8:
2107 /* set boarder color to green */
2108 color->color_g_y = color_value;
2110 case PIXEL_FORMAT_420BPP10:
2111 /* set boarder color to yellow */
2112 color->color_g_y = color_value;
2113 color->color_r_cr = color_value;
2115 case PIXEL_FORMAT_FP16:
2116 /* set boarder color to white */
2117 color->color_r_cr = color_value;
2118 color->color_b_cb = color_value;
2119 color->color_g_y = color_value;
/*
 * dcn10_get_hdr_visual_confirm_color - pick a debug border color encoding the
 * HDR signaling of the top-most (desktop) plane: PQ/ARGB2101010=red,
 * GAMMA22/ARGB2101010=pink, PQ/FP16=blue, GAMMA22/FP16=green, SDR=gray.
 * ('break' and 'default:' lines are elided by truncation.)
 */
2126 void dcn10_get_hdr_visual_confirm_color(
2127 struct pipe_ctx *pipe_ctx,
2128 struct tg_color *color)
2130 uint32_t color_value = MAX_TG_COLOR_VALUE;
2132 // Determine the overscan color based on the top-most (desktop) plane's context
2133 struct pipe_ctx *top_pipe_ctx = pipe_ctx;
2135 while (top_pipe_ctx->top_pipe != NULL)
2136 top_pipe_ctx = top_pipe_ctx->top_pipe;
2138 switch (top_pipe_ctx->plane_res.scl_data.format) {
2139 case PIXEL_FORMAT_ARGB2101010:
2140 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2141 /* HDR10, ARGB2101010 - set boarder color to red */
2142 color->color_r_cr = color_value;
2143 } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2144 /* FreeSync 2 ARGB2101010 - set boarder color to pink */
2145 color->color_r_cr = color_value;
2146 color->color_b_cb = color_value;
2149 case PIXEL_FORMAT_FP16:
2150 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2151 /* HDR10, FP16 - set boarder color to blue */
2152 color->color_b_cb = color_value;
2153 } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2154 /* FreeSync 2 HDR - set boarder color to green */
2155 color->color_g_y = color_value;
2159 /* SDR - set boarder color to Gray */
2160 color->color_r_cr = color_value/2;
2161 color->color_b_cb = color_value/2;
2162 color->color_g_y = color_value/2;
/*
 * Reprogram the DPP for a plane: input CSC (with zero expansion mode)
 * followed by the per-plane bias/scale registers derived from the
 * plane state.
 */
2167 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2169 struct dc_bias_and_scale bns_params = {0};
2171 // program the input csc
2172 dpp->funcs->dpp_setup(dpp,
2173 plane_state->format,
2174 EXPANSION_MODE_ZERO,
2175 plane_state->input_csc_color_matrix,
2176 plane_state->color_space,
2179 //set scale and bias registers
2180 build_prescale_params(&bns_params, plane_state);
2181 if (dpp->funcs->dpp_program_bias_and_scale)
2182 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
/*
 * (Re)insert this pipe's plane into the MPC blending tree and program
 * its blend configuration (alpha mode, global alpha, background/black
 * color — possibly overridden by visual-confirm debug colors).
 * On partial updates only the blend config is refreshed; on full
 * updates the MPCC is removed and re-inserted into the tree.
 */
2185 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2187 struct dce_hwseq *hws = dc->hwseq;
2188 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2189 struct mpcc_blnd_cfg blnd_cfg = {{0}};
2190 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2192 struct mpcc *new_mpcc;
2193 struct mpc *mpc = dc->res_pool->mpc;
2194 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
/* Background color: debug visual-confirm color if enabled, else the
 * stream colorspace's black level. */
2196 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2197 hws->funcs.get_hdr_visual_confirm_color(
2198 pipe_ctx, &blnd_cfg.black_color);
2199 } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2200 hws->funcs.get_surface_visual_confirm_color(
2201 pipe_ctx, &blnd_cfg.black_color);
2203 color_space_to_black_color(
2204 dc, pipe_ctx->stream->output_color_space,
2205 &blnd_cfg.black_color);
2208 if (per_pixel_alpha)
2209 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2211 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2213 blnd_cfg.overlap_only = false;
2214 blnd_cfg.global_gain = 0xff;
2216 if (pipe_ctx->plane_state->global_alpha)
2217 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2219 blnd_cfg.global_alpha = 0xff;
2221 /* DCN1.0 has output CM before MPC which seems to screw with
2222 * pre-multiplied alpha.
2224 blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2225 pipe_ctx->stream->output_color_space)
2231 * Note: currently there is a bug in init_hw such that
2232 * on resume from hibernate, BIOS sets up MPCC0, and
2233 * we do mpcc_remove but the mpcc cannot go to idle
2234 * after remove. This cause us to pick mpcc1 here,
2235 * which causes a pstate hang for yet unknown reason.
2237 mpcc_id = hubp->inst;
2239 /* If there is no full update, don't need to touch MPC tree*/
2240 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2241 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2245 /* check if this MPCC is already being used */
2246 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2247 /* remove MPCC if being used */
2248 if (new_mpcc != NULL)
2249 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2251 if (dc->debug.sanity_checks)
2252 mpc->funcs->assert_mpcc_idle_before_connect(
2253 dc->res_pool->mpc, mpcc_id);
2255 /* Call MPC to insert new plane */
2256 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2264 ASSERT(new_mpcc != NULL);
/* Record the OPP/MPCC routing on the HUBP for later lookups. */
2266 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2267 hubp->mpcc_id = mpcc_id;
/*
 * Push the pipe's precomputed scaler parameters to the DPP, enabling
 * line-buffer alpha only when the plane actually blends with a pipe
 * below it. Line-buffer depth is fixed at 30bpp on DCN10.
 */
2270 static void update_scaler(struct pipe_ctx *pipe_ctx)
2272 bool per_pixel_alpha =
2273 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2275 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2276 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2277 /* scaler configuration */
2278 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2279 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
/*
 * Core per-pipe front-end programming: DPP clock, HUBP request/DLG/TTU
 * setup, input CSC, MPC blending, scaler, viewport, cursor, gamut
 * remap, output CSC, surface config and finally the surface address.
 * Each step is gated on the plane's update_flags so partial flips only
 * touch the hardware that actually changed.
 */
2282 static void dcn10_update_dchubp_dpp(
2284 struct pipe_ctx *pipe_ctx,
2285 struct dc_state *context)
2287 struct dce_hwseq *hws = dc->hwseq;
2288 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2289 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2290 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2291 struct plane_size size = plane_state->plane_size;
2292 unsigned int compat_level = 0;
2294 /* depends on DML calculation, DPP clock value may change dynamically */
2295 /* If request max dpp clk is lower than current dispclk, no need to
2298 if (plane_state->update_flags.bits.full_update) {
2299 bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2300 dc->clk_mgr->clks.dispclk_khz / 2;
2302 dpp->funcs->dpp_dppclk_control(
2304 should_divided_by_2,
2307 if (dc->res_pool->dccg)
2308 dc->res_pool->dccg->funcs->update_dpp_dto(
2311 pipe_ctx->plane_res.bw.dppclk_khz);
/* Track the effective DPP clock (dispclk or dispclk/2) in the clk_mgr. */
2313 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2314 dc->clk_mgr->clks.dispclk_khz / 2 :
2315 dc->clk_mgr->clks.dispclk_khz;
2318 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2319 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2320 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2322 if (plane_state->update_flags.bits.full_update) {
2323 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2325 hubp->funcs->hubp_setup(
2327 &pipe_ctx->dlg_regs,
2328 &pipe_ctx->ttu_regs,
2330 &pipe_ctx->pipe_dlg_param);
2331 hubp->funcs->hubp_setup_interdependent(
2333 &pipe_ctx->dlg_regs,
2334 &pipe_ctx->ttu_regs);
2337 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2339 if (plane_state->update_flags.bits.full_update ||
2340 plane_state->update_flags.bits.bpp_change)
2341 dcn10_update_dpp(dpp, plane_state);
2343 if (plane_state->update_flags.bits.full_update ||
2344 plane_state->update_flags.bits.per_pixel_alpha_change ||
2345 plane_state->update_flags.bits.global_alpha_change)
2346 hws->funcs.update_mpcc(dc, pipe_ctx);
2348 if (plane_state->update_flags.bits.full_update ||
2349 plane_state->update_flags.bits.per_pixel_alpha_change ||
2350 plane_state->update_flags.bits.global_alpha_change ||
2351 plane_state->update_flags.bits.scaling_change ||
2352 plane_state->update_flags.bits.position_change) {
2353 update_scaler(pipe_ctx);
2356 if (plane_state->update_flags.bits.full_update ||
2357 plane_state->update_flags.bits.scaling_change ||
2358 plane_state->update_flags.bits.position_change) {
2359 hubp->funcs->mem_program_viewport(
2361 &pipe_ctx->plane_res.scl_data.viewport,
2362 &pipe_ctx->plane_res.scl_data.viewport_c);
/* Cursor attributes are only valid once an address has been set. */
2365 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2366 dc->hwss.set_cursor_position(pipe_ctx);
2367 dc->hwss.set_cursor_attribute(pipe_ctx);
2369 if (dc->hwss.set_cursor_sdr_white_level)
2370 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2373 if (plane_state->update_flags.bits.full_update) {
2375 dc->hwss.program_gamut_remap(pipe_ctx);
2377 dc->hwss.program_output_csc(dc,
2379 pipe_ctx->stream->output_color_space,
2380 pipe_ctx->stream->csc_color_matrix.matrix,
2381 pipe_ctx->stream_res.opp->inst);
2384 if (plane_state->update_flags.bits.full_update ||
2385 plane_state->update_flags.bits.pixel_format_change ||
2386 plane_state->update_flags.bits.horizontal_mirror_change ||
2387 plane_state->update_flags.bits.rotation_change ||
2388 plane_state->update_flags.bits.swizzle_change ||
2389 plane_state->update_flags.bits.dcc_change ||
2390 plane_state->update_flags.bits.bpp_change ||
2391 plane_state->update_flags.bits.scaling_change ||
2392 plane_state->update_flags.bits.plane_size_change) {
2393 hubp->funcs->hubp_program_surface_config(
2395 plane_state->format,
2396 &plane_state->tiling_info,
2398 plane_state->rotation,
2400 plane_state->horizontal_mirror,
2404 hubp->power_gated = false;
/* Program the surface address last so the flip picks up all of the above. */
2406 hws->funcs.update_plane_addr(dc, pipe_ctx);
2408 if (is_pipe_tree_visible(pipe_ctx))
2409 hubp->funcs->set_blank(hubp, false);
/*
 * Blank or unblank the stream's pixel data at the timing generator,
 * programming the blank color from the output colorspace first, and
 * enable/disable ABM accordingly (ABM is force-disabled while blanked).
 */
2412 void dcn10_blank_pixel_data(
2414 struct pipe_ctx *pipe_ctx,
2417 enum dc_color_space color_space;
2418 struct tg_color black_color = {0};
2419 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2420 struct dc_stream_state *stream = pipe_ctx->stream;
2422 /* program otg blank color */
2423 color_space = stream->output_color_space;
2424 color_space_to_black_color(dc, color_space, &black_color);
2427 * The way 420 is packed, 2 channels carry Y component, 1 channel
2428 * alternate between Cb and Cr, so both channels need the pixel
2431 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2432 black_color.color_r_cr = black_color.color_g_y;
2435 if (stream_res->tg->funcs->set_blank_color)
2436 stream_res->tg->funcs->set_blank_color(
2441 if (stream_res->tg->funcs->set_blank)
2442 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
/* Unblank path: re-enable ABM on this TG at the stream's level. */
2443 if (stream_res->abm) {
2444 stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
2445 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
/* Blank path: disable ABM immediately before blanking the TG. */
2448 if (stream_res->abm)
2449 stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
2450 if (stream_res->tg->funcs->set_blank)
2451 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
/*
 * Program the DPP HDR multiplier from the plane's fixed-point hdr_mult,
 * converted to the hardware's 6e/12m custom float format. A zero
 * multiplier keeps the 1.0 default (0x1f000) instead of scaling to black.
 */
2455 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2457 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2458 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2459 struct custom_float_format fmt;
2461 fmt.exponenta_bits = 6;
2462 fmt.mantissa_bits = 12;
2466 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2467 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2469 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2470 pipe_ctx->plane_res.dpp, hw_mult);
/*
 * Program a single pipe's plane: enable the plane on full updates,
 * program HUBP/DPP state, HDR multiplier, and the input/output transfer
 * functions (gamma) as dictated by the plane's update_flags.
 */
2473 void dcn10_program_pipe(
2475 struct pipe_ctx *pipe_ctx,
2476 struct dc_state *context)
2478 struct dce_hwseq *hws = dc->hwseq;
2480 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2481 dcn10_enable_plane(dc, pipe_ctx, context);
2483 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2485 hws->funcs.set_hdr_multiplier(pipe_ctx);
2487 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2488 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2489 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2490 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2492 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2493 * only do gamma programming for full update.
2494 * TODO: This can be further optimized/cleaned up
2495 * Always call this for now since it does memcmp inside before
2496 * doing heavy calculation and programming
2498 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2499 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream)
/*
 * Recursively program a pipe and all pipes below it in the blending
 * tree. Only the top pipe programs the stream-level state (global sync,
 * VTG params, vupdate interrupt, blanking); every pipe with a plane
 * then programs its own plane state.
 */
2502 static void dcn10_program_all_pipe_in_tree(
2504 struct pipe_ctx *pipe_ctx,
2505 struct dc_state *context)
2507 struct dce_hwseq *hws = dc->hwseq;
2509 if (pipe_ctx->top_pipe == NULL) {
2510 bool blank = !is_pipe_tree_visible(pipe_ctx);
2512 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2513 pipe_ctx->stream_res.tg,
2514 pipe_ctx->pipe_dlg_param.vready_offset,
2515 pipe_ctx->pipe_dlg_param.vstartup_start,
2516 pipe_ctx->pipe_dlg_param.vupdate_offset,
2517 pipe_ctx->pipe_dlg_param.vupdate_width);
2519 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2520 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
2522 if (hws->funcs.setup_vupdate_interrupt)
2523 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2525 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2528 if (pipe_ctx->plane_state != NULL)
2529 hws->funcs.program_pipe(dc, pipe_ctx, context);
/* Recurse down the blending tree (guard against self-referencing pipe). */
2531 if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
2532 dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
/*
 * Find the top pipe (no top_pipe, no prev_odm_pipe) in the new context
 * that drives the given stream, skipping pipes with no plane in either
 * the old or the new state.
 */
2535 static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
2537 struct dc_state *context,
2538 const struct dc_stream_state *stream)
2542 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2543 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2544 struct pipe_ctx *old_pipe_ctx =
2545 &dc->current_state->res_ctx.pipe_ctx[i];
2547 if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
2550 if (pipe_ctx->stream != stream)
2553 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
/*
 * Apply a new surface configuration for one stream: run the optional
 * underflow-assert debug check, blank the OTG when all planes are
 * removed, disconnect MPCCs that are no longer used, program the whole
 * pipe tree, then (on full updates) refresh interdependent DLG/TTU
 * settings on the other active streams sharing this TG.
 */
2559 void dcn10_apply_ctx_for_surface(
2561 const struct dc_stream_state *stream,
2563 struct dc_state *context)
2565 struct dce_hwseq *hws = dc->hwseq;
2567 struct timing_generator *tg;
2568 uint32_t underflow_check_delay_us;
2569 bool interdependent_update = false;
2570 struct pipe_ctx *top_pipe_to_program =
2571 dcn10_find_top_pipe_for_stream(dc, context, stream);
2572 DC_LOGGER_INIT(dc->ctx->logger);
2574 // Clear pipe_ctx flag
2575 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2576 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2577 pipe_ctx->update_flags.raw = 0;
2580 if (!top_pipe_to_program)
2583 tg = top_pipe_to_program->stream_res.tg;
2585 interdependent_update = top_pipe_to_program->plane_state &&
2586 top_pipe_to_program->plane_state->update_flags.bits.full_update;
/* Debug aid: assert-on-underflow before and after an optional delay. */
2588 underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
2590 if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2591 ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2593 if (underflow_check_delay_us != 0xFFFFFFFF)
2594 udelay(underflow_check_delay_us);
2596 if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2597 ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2599 if (num_planes == 0) {
2600 /* OTG blank before remove all front end */
2601 hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
2604 /* Disconnect unused mpcc */
2605 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2606 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2607 struct pipe_ctx *old_pipe_ctx =
2608 &dc->current_state->res_ctx.pipe_ctx[i];
2610 if ((!pipe_ctx->plane_state ||
2611 pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
2612 old_pipe_ctx->plane_state &&
2613 old_pipe_ctx->stream_res.tg == tg) {
2615 hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
2616 pipe_ctx->update_flags.bits.disable = 1;
2618 DC_LOG_DC("Reset mpcc for pipe %d\n",
2619 old_pipe_ctx->pipe_idx);
2624 dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
2626 /* Program secondary blending tree and writeback pipes */
2627 if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
2628 hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
2629 if (interdependent_update)
2630 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2631 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2632 /* Skip inactive pipes and ones already updated */
2633 if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
2634 !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
2637 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2638 pipe_ctx->plane_res.hubp,
2639 &pipe_ctx->dlg_regs,
2640 &pipe_ctx->ttu_regs);
/*
 * Front-end programming that must run after the pipes are unlocked:
 * apply the false-underflow OPTC workaround on newly-planeless streams,
 * disable planes flagged for removal, re-optimize bandwidth if anything
 * was disabled, and apply the DEGVIDCN10-254 watermark workaround.
 */
2644 void dcn10_post_unlock_program_front_end(
2646 struct dc_state *context)
2650 DC_LOGGER_INIT(dc->ctx->logger);
2652 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2653 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2655 if (!pipe_ctx->top_pipe &&
2656 !pipe_ctx->prev_odm_pipe &&
2658 struct timing_generator *tg = pipe_ctx->stream_res.tg;
2660 if (context->stream_status[i].plane_count == 0)
2661 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
2665 for (i = 0; i < dc->res_pool->pipe_count; i++)
2666 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2667 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/* One pipe disabled is enough to require a bandwidth re-optimization. */
2669 for (i = 0; i < dc->res_pool->pipe_count; i++)
2670 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2671 dc->hwss.optimize_bandwidth(dc, context);
2675 if (dc->hwseq->wa.DEGVIDCN10_254)
2676 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
/*
 * Workaround: when any stream uses HW frame-packed stereo, disallow
 * HUBBUB self-refresh (stutter) to avoid issues with that timing mode.
 */
2679 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2683 for (i = 0; i < context->stream_count; i++) {
2684 if (context->streams[i]->timing.timing_3d_format
2685 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2689 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
/*
 * Raise clocks and program watermarks *before* a mode/surface change so
 * the new configuration has enough bandwidth. Records whether a later
 * optimize pass is required (wm_optimized_required) and applies the
 * stereo frame-packing stutter workaround.
 */
2695 void dcn10_prepare_bandwidth(
2697 struct dc_state *context)
2699 struct dce_hwseq *hws = dc->hwseq;
2700 struct hubbub *hubbub = dc->res_pool->hubbub;
2702 if (dc->debug.sanity_checks)
2703 hws->funcs.verify_allow_pstate_change_high(dc);
2705 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2706 if (context->stream_count == 0)
2707 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2709 dc->clk_mgr->funcs->update_clocks(
2715 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
2716 &context->bw_ctx.bw.dcn.watermarks,
2717 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2719 dcn10_stereo_hw_frame_pack_wa(dc, context);
2721 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2722 dcn_bw_notify_pplib_of_wm_ranges(dc);
2724 if (dc->debug.sanity_checks)
2725 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Lower clocks and tighten watermarks *after* a change has taken effect
 * — the counterpart of dcn10_prepare_bandwidth. Same sequence: update
 * clocks, program watermarks, stereo stutter workaround, optional pplib
 * watermark-range notification.
 */
2728 void dcn10_optimize_bandwidth(
2730 struct dc_state *context)
2732 struct dce_hwseq *hws = dc->hwseq;
2733 struct hubbub *hubbub = dc->res_pool->hubbub;
2735 if (dc->debug.sanity_checks)
2736 hws->funcs.verify_allow_pstate_change_high(dc);
2738 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2739 if (context->stream_count == 0)
2740 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2742 dc->clk_mgr->funcs->update_clocks(
2748 hubbub->funcs->program_watermarks(hubbub,
2749 &context->bw_ctx.bw.dcn.watermarks,
2750 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2753 dcn10_stereo_hw_frame_pack_wa(dc, context);
2755 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2756 dcn_bw_notify_pplib_of_wm_ranges(dc);
2758 if (dc->debug.sanity_checks)
2759 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Program dynamic refresh rate (DRR / variable vtotal) parameters on
 * each pipe's timing generator, and when DRR is active (vmin/vmax both
 * non-zero) arm the manual-trigger static-screen event (OTG_TRIG_A).
 */
2762 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
2763 int num_pipes, unsigned int vmin, unsigned int vmax,
2764 unsigned int vmid, unsigned int vmid_frame_number)
2767 struct drr_params params = {0};
2768 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2769 unsigned int event_triggers = 0x800;
2770 // Note DRR trigger events are generated regardless of whether num frames met.
2771 unsigned int num_frames = 2;
2773 params.vertical_total_max = vmax;
2774 params.vertical_total_min = vmin;
2775 params.vertical_total_mid = vmid;
2776 params.vertical_total_mid_frame_num = vmid_frame_number;
2778 /* TODO: If multiple pipes are to be supported, you need
2779 * some GSL stuff. Static screen triggers may be programmed differently
2782 for (i = 0; i < num_pipes; i++) {
2783 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2784 pipe_ctx[i]->stream_res.tg, &params);
2785 if (vmax != 0 && vmin != 0)
2786 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
2787 pipe_ctx[i]->stream_res.tg,
2788 event_triggers, num_frames);
/*
 * Query the current CRTC scan position from each pipe's timing
 * generator. With multiple pipes the last one queried wins (see TODO).
 */
2792 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
2794 struct crtc_position *position)
2798 /* TODO: handle pipes > 1
2800 for (i = 0; i < num_pipes; i++)
2801 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
/*
 * Translate the generic static-screen trigger flags into the DCN10
 * trigger bitmask and program it on every pipe's timing generator.
 */
2804 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
2805 int num_pipes, const struct dc_static_screen_params *params)
2808 unsigned int triggers = 0;
2810 if (params->triggers.surface_update)
2812 if (params->triggers.cursor_update)
2814 if (params->triggers.force_trigger)
2817 for (i = 0; i < num_pipes; i++)
2818 pipe_ctx[i]->stream_res.tg->funcs->
2819 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
2820 triggers, params->num_frames);
/*
 * Derive CRTC stereo flags from the stream's 3D timing and view format:
 * enable stereo programming only for frame-sequential viewing on a true
 * stereo timing, disable DP stereo sync through passive dongles, and
 * mark HW frame packing.
 */
2823 static void dcn10_config_stereo_parameters(
2824 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
2826 enum view_3d_format view_format = stream->view_format;
2827 enum dc_timing_3d_format timing_3d_format =\
2828 stream->timing.timing_3d_format;
2829 bool non_stereo_timing = false;
2831 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
2832 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
2833 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
2834 non_stereo_timing = true;
2836 if (non_stereo_timing == false &&
2837 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
2839 flags->PROGRAM_STEREO = 1;
2840 flags->PROGRAM_POLARITY = 1;
2841 if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
2842 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
2843 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2844 enum display_dongle_type dongle = \
2845 stream->link->ddc->dongle_type;
2846 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
2847 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
2848 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
2849 flags->DISABLE_STEREO_DP_SYNC = 1;
2851 flags->RIGHT_EYE_POLARITY =\
2852 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
2853 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
2854 flags->FRAME_PACKED = 1;
/*
 * Program stereo output for a pipe: compute the stereo flags, drive the
 * sideband-FA GPIO when applicable (falling back to off if the enable
 * fails), then program stereo on the OPP and the timing generator.
 */
2860 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
2862 struct crtc_stereo_flags flags = { 0 };
2863 struct dc_stream_state *stream = pipe_ctx->stream;
2865 dcn10_config_stereo_parameters(stream, &flags);
2867 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2868 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
2869 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
2871 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
2874 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
2875 pipe_ctx->stream_res.opp,
2876 flags.PROGRAM_STEREO == 1 ? true:false,
2879 pipe_ctx->stream_res.tg->funcs->program_stereo(
2880 pipe_ctx->stream_res.tg,
/* Look up the HUBP in the resource pool whose instance matches mpcc_inst. */
2887 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
2891 for (i = 0; i < res_pool->pipe_count; i++) {
2892 if (res_pool->hubps[i]->inst == mpcc_inst)
2893 return res_pool->hubps[i];
/*
 * For every MPCC flagged as disconnect-pending on this pipe's OPP, wait
 * for the MPC to report idle, clear the pending flag, and blank the
 * corresponding HUBP. Sanity checks verify pstate-change allowance
 * before and after.
 */
2899 void dcn10_wait_for_mpcc_disconnect(
2901 struct resource_pool *res_pool,
2902 struct pipe_ctx *pipe_ctx)
2904 struct dce_hwseq *hws = dc->hwseq;
2907 if (dc->debug.sanity_checks) {
2908 hws->funcs.verify_allow_pstate_change_high(dc);
2911 if (!pipe_ctx->stream_res.opp)
2914 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
2915 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
2916 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
2918 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
2919 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
2920 hubp->funcs->set_blank(hubp, true);
2924 if (dc->debug.sanity_checks) {
2925 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * No-op power-gating stub for DCN10 (display power gating is handled
 * elsewhere); kept to satisfy the hwseq interface.
 */
2930 bool dcn10_dummy_display_power_gating(
2932 uint8_t controller_id,
2933 struct dc_bios *dcb,
2934 enum pipe_gating_control power_gating)
2939 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2941 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2942 struct timing_generator *tg = pipe_ctx->stream_res.tg;
2944 struct dc *dc = plane_state->ctx->dc;
2946 if (plane_state == NULL)
2949 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
2950 pipe_ctx->plane_res.hubp);
2952 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
2955 plane_state->status.current_address = plane_state->status.requested_address;
2957 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
2958 tg->funcs->is_stereo_left_eye) {
2959 plane_state->status.is_right_eye =
2960 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
2963 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
2964 struct dce_hwseq *hwseq = dc->hwseq;
2965 struct timing_generator *tg = dc->res_pool->timing_generators[0];
2966 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
2968 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
2969 struct hubbub *hubbub = dc->res_pool->hubbub;
2971 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
2972 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
/*
 * Forward DCHUB init data to HUBBUB, which owns this programming
 * sequence on DCN (unlike DCE).
 */
2977 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
2979 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
2981 /* In DCN, this programming sequence is owned by the hubbub */
2982 hubbub->funcs->update_dchub(hubbub, dh_data);
/*
 * Return true if the HW cursor on this pipe should be disabled because
 * a visible pipe above it fully covers this pipe's recout — avoids a
 * double cursor and cursor scaling artifacts with MPO.
 */
2985 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
2987 struct pipe_ctx *test_pipe;
2988 const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
2989 int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
2992 * Disable the cursor if there's another pipe above this with a
2993 * plane that contains this pipe's viewport to prevent double cursor
2994 * and incorrect scaling artifacts.
2996 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
2997 test_pipe = test_pipe->top_pipe) {
2998 if (!test_pipe->plane_state->visible)
3001 r2 = &test_pipe->plane_res.scl_data.recout;
3002 r2_r = r2->x + r2->width;
3003 r2_b = r2->y + r2->height;
/* r1 fully contained within r2 => the upper pipe covers this one. */
3005 if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
/*
 * Translate the stream-space cursor position into plane space and
 * program it on HUBP and DPP. Handles plane scaling, source-viewport
 * translation (opt-in), negative positions via hotspot shifting,
 * 90/180/270-degree rotation remapping (with pipe-split adjustments),
 * and disables the cursor for video planes or fully-covered pipes.
 * NOTE(review): the rotation/pipe-split arithmetic is order-sensitive;
 * this listing appears truncated, so treat the visible math as partial.
 */
3012 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3014 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3015 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3016 struct dpp *dpp = pipe_ctx->plane_res.dpp;
3017 struct dc_cursor_mi_param param = {
3018 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3019 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3020 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3021 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3022 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3023 .rotation = pipe_ctx->plane_state->rotation,
3024 .mirror = pipe_ctx->plane_state->horizontal_mirror
3026 bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
3027 (pipe_ctx->bottom_pipe != NULL);
3029 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3030 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3031 int x_pos = pos_cpy.x;
3032 int y_pos = pos_cpy.y;
3035 * DC cursor is stream space, HW cursor is plane space and drawn
3036 * as part of the framebuffer.
3038 * Cursor position can't be negative, but hotspot can be used to
3039 * shift cursor out of the plane bounds. Hotspot must be smaller
3040 * than the cursor size.
3044 * Translate cursor from stream space to plane space.
3046 * If the cursor is scaled then we need to scale the position
3047 * to be in the approximately correct place. We can't do anything
3048 * about the actual size being incorrect, that's a limitation of
3051 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3052 pipe_ctx->plane_state->dst_rect.width;
3053 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3054 pipe_ctx->plane_state->dst_rect.height;
3057 * If the cursor's source viewport is clipped then we need to
3058 * translate the cursor to appear in the correct position on
3061 * This translation isn't affected by scaling so it needs to be
3062 * done *after* we adjust the position for the scale factor.
3064 * This is only done by opt-in for now since there are still
3065 * some usecases like tiled display that might enable the
3066 * cursor on both streams while expecting dc to clip it.
3068 if (pos_cpy.translate_by_source) {
3069 x_pos += pipe_ctx->plane_state->src_rect.x;
3070 y_pos += pipe_ctx->plane_state->src_rect.y;
3074 * If the position is negative then we need to add to the hotspot
3075 * to shift the cursor outside the plane.
3079 pos_cpy.x_hotspot -= x_pos;
3084 pos_cpy.y_hotspot -= y_pos;
3088 pos_cpy.x = (uint32_t)x_pos;
3089 pos_cpy.y = (uint32_t)y_pos;
/* HW cursor is unsupported on progressive-video planes. */
3091 if (pipe_ctx->plane_state->address.type
3092 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3093 pos_cpy.enable = false;
3095 if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3096 pos_cpy.enable = false;
3098 // Swap axis and mirror horizontally
3099 if (param.rotation == ROTATION_ANGLE_90) {
3100 uint32_t temp_x = pos_cpy.x;
3102 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3103 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3106 // Swap axis and mirror vertically
3107 else if (param.rotation == ROTATION_ANGLE_270) {
3108 uint32_t temp_y = pos_cpy.y;
3109 int viewport_height =
3110 pipe_ctx->plane_res.scl_data.viewport.height;
3112 if (pipe_split_on) {
3113 if (pos_cpy.x > viewport_height) {
3114 pos_cpy.x = pos_cpy.x - viewport_height;
3115 pos_cpy.y = viewport_height - pos_cpy.x;
3117 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3120 pos_cpy.y = viewport_height - pos_cpy.x;
3123 // Mirror horizontally and vertically
3124 else if (param.rotation == ROTATION_ANGLE_180) {
3125 int viewport_width =
3126 pipe_ctx->plane_res.scl_data.viewport.width;
3128 pipe_ctx->plane_res.scl_data.viewport.x;
3130 if (pipe_split_on) {
3131 if (pos_cpy.x >= viewport_width + viewport_x) {
3132 pos_cpy.x = 2 * viewport_width
3133 - pos_cpy.x + 2 * viewport_x;
3135 uint32_t temp_x = pos_cpy.x;
3137 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3138 if (temp_x >= viewport_x +
3139 (int)hubp->curs_attr.width || pos_cpy.x
3140 <= (int)hubp->curs_attr.width +
3141 pipe_ctx->plane_state->src_rect.x) {
3142 pos_cpy.x = temp_x + viewport_width;
3146 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3148 pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3151 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3152 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
/*
 * Push the stream's cursor attributes (address, size, format, …) to
 * both the HUBP and the DPP for this pipe.
 */
3155 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3157 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3159 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3160 pipe_ctx->plane_res.hubp, attributes);
3161 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3162 pipe_ctx->plane_res.dpp, attributes);
/*
 * Scale the cursor for HDR output: convert sdr_white_level/80 into the
 * DPP's 5e/10m custom float scale (default 1.0 = 0x3c00 when the level
 * is at or below the 80-nit SDR reference).
 */
3165 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3167 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3168 struct fixed31_32 multiplier;
3169 struct dpp_cursor_attributes opt_attr = { 0 };
3170 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3171 struct custom_float_format fmt;
3173 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3176 fmt.exponenta_bits = 5;
3177 fmt.mantissa_bits = 10;
3180 if (sdr_white_level > 80) {
3181 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3182 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3185 opt_attr.scale = hw_scale;
3188 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3189 pipe_ctx->plane_res.dpp, &opt_attr);
3193 * apply_front_porch_workaround TODO FPGA still need?
3195 * This is a workaround for a bug that has existed since R5xx and has not been
3196 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3198 static void apply_front_porch_workaround(
3199 struct dc_crtc_timing *timing)
3201 if (timing->flags.INTERLACE == 1) {
3202 if (timing->v_front_porch < 2)
3203 timing->v_front_porch = 2;
3205 if (timing->v_front_porch < 1)
3206 timing->v_front_porch = 1;
3210 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3212 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3213 struct dc_crtc_timing patched_crtc_timing;
3214 int vesa_sync_start;
3216 int interlace_factor;
3217 int vertical_line_start;
3219 patched_crtc_timing = *dc_crtc_timing;
3220 apply_front_porch_workaround(&patched_crtc_timing);
3222 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3224 vesa_sync_start = patched_crtc_timing.v_addressable +
3225 patched_crtc_timing.v_border_bottom +
3226 patched_crtc_timing.v_front_porch;
3228 asic_blank_end = (patched_crtc_timing.v_total -
3230 patched_crtc_timing.v_border_top)
3233 vertical_line_start = asic_blank_end -
3234 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3236 return vertical_line_start;
3239 static void dcn10_calc_vupdate_position(
3241 struct pipe_ctx *pipe_ctx,
3242 uint32_t *start_line,
3245 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3246 int vline_int_offset_from_vupdate =
3247 pipe_ctx->stream->periodic_interrupt0.lines_offset;
3248 int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3251 if (vline_int_offset_from_vupdate > 0)
3252 vline_int_offset_from_vupdate--;
3253 else if (vline_int_offset_from_vupdate < 0)
3254 vline_int_offset_from_vupdate++;
3256 start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3258 if (start_position >= 0)
3259 *start_line = start_position;
3261 *start_line = dc_crtc_timing->v_total + start_position - 1;
3263 *end_line = *start_line + 2;
3265 if (*end_line >= dc_crtc_timing->v_total)
3269 static void dcn10_cal_vline_position(
3271 struct pipe_ctx *pipe_ctx,
3272 enum vline_select vline,
3273 uint32_t *start_line,
3276 enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3278 if (vline == VLINE0)
3279 ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3280 else if (vline == VLINE1)
3281 ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3283 switch (ref_point) {
3284 case START_V_UPDATE:
3285 dcn10_calc_vupdate_position(
3292 // Suppose to do nothing because vsync is 0;
3300 void dcn10_setup_periodic_interrupt(
3302 struct pipe_ctx *pipe_ctx,
3303 enum vline_select vline)
3305 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3307 if (vline == VLINE0) {
3308 uint32_t start_line = 0;
3309 uint32_t end_line = 0;
3311 dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3313 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3315 } else if (vline == VLINE1) {
3316 pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3318 pipe_ctx->stream->periodic_interrupt1.lines_offset);
3322 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3324 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3325 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3327 if (start_line < 0) {
3332 if (tg->funcs->setup_vertical_interrupt2)
3333 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3336 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3337 struct dc_link_settings *link_settings)
3339 struct encoder_unblank_param params = { { 0 } };
3340 struct dc_stream_state *stream = pipe_ctx->stream;
3341 struct dc_link *link = stream->link;
3342 struct dce_hwseq *hws = link->dc->hwseq;
3344 /* only 3 items below are used by unblank */
3345 params.timing = pipe_ctx->stream->timing;
3347 params.link_settings.link_rate = link_settings->link_rate;
3349 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3350 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3351 params.timing.pix_clk_100hz /= 2;
3352 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms);
3355 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3356 hws->funcs.edp_backlight_control(link, true);
3360 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3361 const uint8_t *custom_sdp_message,
3362 unsigned int sdp_message_size)
3364 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3365 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3366 pipe_ctx->stream_res.stream_enc,
3371 enum dc_status dcn10_set_clock(struct dc *dc,
3372 enum dc_clock_type clock_type,
3376 struct dc_state *context = dc->current_state;
3377 struct dc_clock_config clock_cfg = {0};
3378 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3380 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3381 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3382 context, clock_type, &clock_cfg);
3384 if (!dc->clk_mgr->funcs->get_clock)
3385 return DC_FAIL_UNSUPPORTED_1;
3387 if (clk_khz > clock_cfg.max_clock_khz)
3388 return DC_FAIL_CLK_EXCEED_MAX;
3390 if (clk_khz < clock_cfg.min_clock_khz)
3391 return DC_FAIL_CLK_BELOW_MIN;
3393 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3394 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3396 /*update internal request clock for update clock use*/
3397 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3398 current_clocks->dispclk_khz = clk_khz;
3399 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3400 current_clocks->dppclk_khz = clk_khz;
3402 return DC_ERROR_UNEXPECTED;
3404 if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3405 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3411 void dcn10_get_clock(struct dc *dc,
3412 enum dc_clock_type clock_type,
3413 struct dc_clock_config *clock_cfg)
3415 struct dc_state *context = dc->current_state;
3417 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3418 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);