2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
54 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dce/dmub_outbox.h"
57 #include "inc/dc_link_dp.h"
58 #include "inc/link_dpcd.h"
60 #define DC_LOGGER_INIT(logger)
68 #define FN(reg_name, field_name) \
69 hws->shifts->field_name, hws->masks->field_name
71 /*print is 17 wide, first two characters are spaces*/
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 print_microsec(dc_ctx, log_ctx, ref_cycle)
75 #define GAMMA_HW_POINTS_NUM 256
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
/*
 * print_microsec() - log a DCHUB refclk cycle count as microseconds.
 *
 * Converts a reference-clock cycle count to microseconds using the
 * hubbub reference clock (kHz -> MHz) and prints it into the DTN log
 * with three fractional digits.
 */
80 void print_microsec(struct dc_context *dc_ctx,
81 struct dc_log_buffer_ctx *log_ctx,
84 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
/* scale by 1000 so the fractional part survives the integer divide */
85 static const unsigned int frac = 1000;
86 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
88 DTN_INFO(" %11d.%03d",
/*
 * dcn10_lock_all_pipes() - (un)lock register double buffering on every
 * active top pipe in @context.
 *
 * Bottom pipes are skipped because they share the timing generator with
 * their top pipe; disabled pipes (no stream/plane, TG off) are skipped
 * too.
 */
93 void dcn10_lock_all_pipes(struct dc *dc,
94 struct dc_state *context,
97 struct pipe_ctx *pipe_ctx;
98 struct timing_generator *tg;
101 for (i = 0; i < dc->res_pool->pipe_count; i++) {
102 pipe_ctx = &context->res_ctx.pipe_ctx[i];
103 tg = pipe_ctx->stream_res.tg;
106 * Only lock the top pipe's tg to prevent redundant
107 * (un)locking. Also skip if pipe is disabled.
109 if (pipe_ctx->top_pipe ||
110 !pipe_ctx->stream || !pipe_ctx->plane_state ||
111 !tg->funcs->is_tg_enabled(tg))
/* NOTE(review): both the lock and unlock calls are visible below; the
 * branch choosing between them (presumably a "lock" parameter) is not
 * shown in this excerpt — confirm against the full file. */
115 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
117 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
/*
 * log_mpc_crc() - dump MPC and DPP CRC result registers to the DTN log.
 *
 * Each register is read only if it exists on this ASIC (REG(...) check),
 * so the function is safe across DCN register maps.
 */
121 static void log_mpc_crc(struct dc *dc,
122 struct dc_log_buffer_ctx *log_ctx)
124 struct dc_context *dc_ctx = dc->ctx;
125 struct dce_hwseq *hws = dc->hwseq;
127 if (REG(MPC_CRC_RESULT_GB))
128 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
129 REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
130 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
131 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
132 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
/*
 * dcn10_log_hubbub_state() - dump the four HUBBUB watermark sets.
 *
 * Reads the watermark state from hardware via the hubbub hook and prints
 * each set's urgency/self-refresh/dram-clock-change watermarks in
 * microseconds.
 */
135 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
137 struct dc_context *dc_ctx = dc->ctx;
138 struct dcn_hubbub_wm wm;
141 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
142 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
144 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
145 " sr_enter sr_exit dram_clk_change\n");
147 for (i = 0; i < 4; i++) {
148 struct dcn_hubbub_wm_set *s;
151 DTN_INFO("WM_Set[%d]:", s->wm_set);
152 DTN_INFO_MICRO_SEC(s->data_urgent);
153 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
154 DTN_INFO_MICRO_SEC(s->sr_enter);
155 DTN_INFO_MICRO_SEC(s->sr_exit);
/* NOTE(review): "chanage" is the actual spelling of this struct field
 * (typo lives in the dcn_hubbub_wm_set declaration) — do not "fix" it
 * here without renaming the field at its definition. */
156 DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
/*
 * dcn10_log_hubp_states() - dump per-pipe HUBP state to the DTN log.
 *
 * Reads each HUBP's cached register state and prints four tables:
 * general HUBP config, RQ (request) registers, DLG (delay/latency
 * generator) registers and TTU (time to underflow) registers.
 */
163 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
165 struct dc_context *dc_ctx = dc->ctx;
166 struct resource_pool *pool = dc->res_pool;
170 "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
171 for (i = 0; i < pool->pipe_count; i++) {
172 struct hubp *hubp = pool->hubps[i];
173 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
/* refresh the cached state from hardware before printing */
175 hubp->funcs->hubp_read_state(hubp);
178 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
191 s->underflow_status);
192 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
193 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
194 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
/* RQ (request) register table */
199 DTN_INFO("\n=========RQ========\n");
200 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
201 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
202 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
203 for (i = 0; i < pool->pipe_count; i++) {
204 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
205 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
208 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
209 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
210 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
211 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
212 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
213 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
214 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
215 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
216 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
217 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
/* DLG (delay/latency generator) register table */
220 DTN_INFO("========DLG========\n");
221 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
222 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
223 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
224 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
225 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
226 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
227 " x_rp_dlay x_rr_sfl\n");
228 for (i = 0; i < pool->pipe_count; i++) {
229 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
230 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
233 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
/* NOTE(review): "% 8xh" below (space flag on %x) looks like a typo for
 * " %8xh" — harmless for output alignment only; confirm before changing
 * since golden-log parsers may depend on the exact format. */
234 "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
235 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
236 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
237 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
238 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
239 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
240 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
241 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
242 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
243 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
244 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
245 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
246 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
247 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
248 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
249 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
250 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
251 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
252 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
253 dlg_regs->xfc_reg_remote_surface_flip_latency);
/* TTU (time to underflow) register table */
256 DTN_INFO("========TTU========\n");
257 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
258 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
259 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
260 for (i = 0; i < pool->pipe_count; i++) {
261 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
262 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
265 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
266 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
267 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
268 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
269 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
270 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
271 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
272 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
/*
 * dcn10_log_hw_state() - dump the full DCN display HW state to the DTN
 * log: HUBBUB watermarks, HUBP tables, DPP/gamma state, MPCC tree, OTG
 * timing, DSC, stream/link encoders, calculated clocks and MPC CRCs.
 *
 * Read-mostly debug helper; the only HW writes are the sticky OTG
 * underflow clears noted below.
 */
277 void dcn10_log_hw_state(struct dc *dc,
278 struct dc_log_buffer_ctx *log_ctx)
280 struct dc_context *dc_ctx = dc->ctx;
281 struct resource_pool *pool = dc->res_pool;
286 dcn10_log_hubbub_state(dc, log_ctx);
288 dcn10_log_hubp_states(dc, log_ctx);
/* DPP table: gamma LUT modes and 3x4 gamut remap matrix per pipe */
290 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
291 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
292 "C31 C32 C33 C34\n");
293 for (i = 0; i < pool->pipe_count; i++) {
294 struct dpp *dpp = pool->dpps[i];
295 struct dcn_dpp_state s = {0};
297 dpp->funcs->dpp_read_state(dpp, &s);
302 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
303 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
/* LUT mode values 2 and 3 (and 4 for DGAM/RGAM) both print "RAM":
 * they select RAM bank A/B, which this log does not distinguish */
306 (s.igam_lut_mode == 0) ? "BypassFixed" :
307 ((s.igam_lut_mode == 1) ? "BypassFloat" :
308 ((s.igam_lut_mode == 2) ? "RAM" :
309 ((s.igam_lut_mode == 3) ? "RAM" :
311 (s.dgam_lut_mode == 0) ? "Bypass" :
312 ((s.dgam_lut_mode == 1) ? "sRGB" :
313 ((s.dgam_lut_mode == 2) ? "Ycc" :
314 ((s.dgam_lut_mode == 3) ? "RAM" :
315 ((s.dgam_lut_mode == 4) ? "RAM" :
317 (s.rgam_lut_mode == 0) ? "Bypass" :
318 ((s.rgam_lut_mode == 1) ? "sRGB" :
319 ((s.rgam_lut_mode == 2) ? "Ycc" :
320 ((s.rgam_lut_mode == 3) ? "RAM" :
321 ((s.rgam_lut_mode == 4) ? "RAM" :
324 s.gamut_remap_c11_c12,
325 s.gamut_remap_c13_c14,
326 s.gamut_remap_c21_c22,
327 s.gamut_remap_c23_c24,
328 s.gamut_remap_c31_c32,
329 s.gamut_remap_c33_c34);
/* MPCC blending tree state per pipe */
334 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
335 for (i = 0; i < pool->pipe_count; i++) {
336 struct mpcc_state s = {0};
338 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
340 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
341 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
342 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
/* OTG timing-generator state per timing generator */
347 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
349 for (i = 0; i < pool->timing_generator_count; i++) {
350 struct timing_generator *tg = pool->timing_generators[i];
351 struct dcn_otg_state s = {0};
352 /* Read shared OTG state registers for all DCNx */
353 optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
356 * For DCN2 and greater, a register on the OPP is used to
357 * determine if the CRTC is blanked instead of the OTG. So use
358 * dpg_is_blanked() if exists, otherwise fallback on otg.
360 * TODO: Implement DCN-specific read_otg_state hooks.
362 if (pool->opps[i]->funcs->dpg_is_blanked)
363 s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
365 s.blank_enabled = tg->funcs->is_blanked(tg);
367 //only print if OTG master is enabled
368 if ((s.otg_enabled & 1) == 0)
371 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
389 s.underflow_occurred_status,
392 // Clear underflow for debug purposes
393 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
394 // This function is called only from Windows or Diags test environment, hence it's safe to clear
395 // it from here without affecting the original intent.
396 tg->funcs->clear_optc_underflow(tg);
400 // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
401 // TODO: Update golden log header to reflect this name change
402 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
403 for (i = 0; i < pool->res_cap->num_dsc; i++) {
404 struct display_stream_compressor *dsc = pool->dscs[i];
405 struct dcn_dsc_state s = {0};
407 dsc->funcs->dsc_read_state(dsc, &s);
408 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
412 s.dsc_bits_per_pixel);
/* stream encoder state (read hook is optional per encoder) */
417 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
418 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
419 for (i = 0; i < pool->stream_enc_count; i++) {
420 struct stream_encoder *enc = pool->stream_enc[i];
421 struct enc_state s = {0};
423 if (enc->funcs->enc_read_state) {
424 enc->funcs->enc_read_state(enc, &s);
425 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
428 s.sec_gsp_pps_line_num,
429 s.vbid6_line_reference,
431 s.sec_gsp_pps_enable,
432 s.sec_stream_enable);
/* link encoder DPHY/FEC/training state (read hook optional) */
438 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
439 for (i = 0; i < dc->link_count; i++) {
440 struct link_encoder *lenc = dc->links[i]->link_enc;
442 struct link_enc_state s = {0};
444 if (lenc->funcs->read_state) {
445 lenc->funcs->read_state(lenc, &s);
446 DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
449 s.dphy_fec_ready_shadow,
450 s.dphy_fec_active_status,
451 s.dp_link_training_complete);
/* calculated (software) clock values from the current BW context */
457 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
458 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
459 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
460 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
461 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
462 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
463 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
464 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
465 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
467 log_mpc_crc(dc, log_ctx);
/*
 * dcn10_did_underflow_occur() - check (and clear) underflow on a pipe.
 *
 * Checks the OTG and the HUBP sticky underflow status; whichever is set
 * is cleared so subsequent checks start fresh.  Returns whether an
 * underflow was observed (return statements fall outside this excerpt).
 */
472 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
474 struct hubp *hubp = pipe_ctx->plane_res.hubp;
475 struct timing_generator *tg = pipe_ctx->stream_res.tg;
477 if (tg->funcs->is_optc_underflow_occurred(tg)) {
478 tg->funcs->clear_optc_underflow(tg);
482 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
483 hubp->funcs->hubp_clear_underflow(hubp);
/*
 * dcn10_enable_power_gating_plane() - force DOMAIN0..7 (HUBP/DPP power
 * domains) on or allow them to gate.
 *
 * force_on == true keeps every domain powered (power gating disabled).
 * NOTE(review): force_on is initialized true here; the branch that
 * presumably clears it based on the "enable" parameter is not visible in
 * this excerpt — confirm against the full file.
 */
489 void dcn10_enable_power_gating_plane(
490 struct dce_hwseq *hws,
493 bool force_on = true; /* disable power gating */
/* DOMAIN0/2/4/6 are the HUBP domains */
499 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
500 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
501 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
502 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
/* DOMAIN1/3/5/7 are the DPP domains */
505 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
506 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
507 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
508 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
/*
 * dcn10_disable_vga() - take the display controllers out of VGA mode.
 *
 * No-op when none of D1..D4 VGA is enabled; otherwise clears all four
 * VGA controls and kicks the VGA test renderer so DCHUBP timing is
 * updated correctly (see HW engineer's note below).
 */
511 void dcn10_disable_vga(
512 struct dce_hwseq *hws)
514 unsigned int in_vga1_mode = 0;
515 unsigned int in_vga2_mode = 0;
516 unsigned int in_vga3_mode = 0;
517 unsigned int in_vga4_mode = 0;
519 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
520 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
521 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
522 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
/* nothing to do when already out of VGA mode everywhere */
524 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
525 in_vga3_mode == 0 && in_vga4_mode == 0)
528 REG_WRITE(D1VGA_CONTROL, 0);
529 REG_WRITE(D2VGA_CONTROL, 0);
530 REG_WRITE(D3VGA_CONTROL, 0);
531 REG_WRITE(D4VGA_CONTROL, 0);
533 /* HW Engineer's Notes:
534 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
535 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
537 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
538 * VGA_TEST_ENABLE, to leave it in the same state as before.
540 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
541 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
545 * dcn10_dpp_pg_control - DPP power gate control.
547 * @hws: dce_hwseq reference.
548 * @dpp_inst: DPP instance reference.
549 * @power_on: true to power the DPP up (release its power gate), false
 *            to power-gate it (power_gate = power_on ? 0 : 1 below).
551 * Enable or disable power gate in the specific DPP instance.
553 void dcn10_dpp_pg_control(
554 struct dce_hwseq *hws,
555 unsigned int dpp_inst,
558 uint32_t power_gate = power_on ? 0 : 1;
559 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
/* honor the debug override, and bail if this ASIC has no DPP domains */
561 if (hws->ctx->dc->debug.disable_dpp_power_gate)
563 if (REG(DOMAIN1_PG_CONFIG) == 0)
/* DPP instances map to the odd power domains: 0->DOMAIN1, 1->DOMAIN3,
 * 2->DOMAIN5, 3->DOMAIN7; each update is followed by a wait for the
 * PGFSM to report the requested power state */
568 REG_UPDATE(DOMAIN1_PG_CONFIG,
569 DOMAIN1_POWER_GATE, power_gate);
571 REG_WAIT(DOMAIN1_PG_STATUS,
572 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
576 REG_UPDATE(DOMAIN3_PG_CONFIG,
577 DOMAIN3_POWER_GATE, power_gate);
579 REG_WAIT(DOMAIN3_PG_STATUS,
580 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
584 REG_UPDATE(DOMAIN5_PG_CONFIG,
585 DOMAIN5_POWER_GATE, power_gate);
587 REG_WAIT(DOMAIN5_PG_STATUS,
588 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
592 REG_UPDATE(DOMAIN7_PG_CONFIG,
593 DOMAIN7_POWER_GATE, power_gate);
595 REG_WAIT(DOMAIN7_PG_STATUS,
596 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
606 * dcn10_hubp_pg_control - HUBP power gate control.
608 * @hws: dce_hwseq reference.
609 * @hubp_inst: HUBP instance reference.
610 * @power_on: true to power the HUBP up (release its power gate), false
 *            to power-gate it (power_gate = power_on ? 0 : 1 below).
612 * Enable or disable power gate in the specific HUBP instance.
614 void dcn10_hubp_pg_control(
615 struct dce_hwseq *hws,
616 unsigned int hubp_inst,
619 uint32_t power_gate = power_on ? 0 : 1;
620 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
/* honor the debug override, and bail if this ASIC has no HUBP domains */
622 if (hws->ctx->dc->debug.disable_hubp_power_gate)
624 if (REG(DOMAIN0_PG_CONFIG) == 0)
/* HUBP instances map to the even power domains: 0->DOMAIN0, 1->DOMAIN2,
 * 2->DOMAIN4, 3->DOMAIN6; each update waits for the PGFSM status */
628 case 0: /* DCHUBP0 */
629 REG_UPDATE(DOMAIN0_PG_CONFIG,
630 DOMAIN0_POWER_GATE, power_gate);
632 REG_WAIT(DOMAIN0_PG_STATUS,
633 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
636 case 1: /* DCHUBP1 */
637 REG_UPDATE(DOMAIN2_PG_CONFIG,
638 DOMAIN2_POWER_GATE, power_gate);
640 REG_WAIT(DOMAIN2_PG_STATUS,
641 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
644 case 2: /* DCHUBP2 */
645 REG_UPDATE(DOMAIN4_PG_CONFIG,
646 DOMAIN4_POWER_GATE, power_gate);
648 REG_WAIT(DOMAIN4_PG_STATUS,
649 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
652 case 3: /* DCHUBP3 */
653 REG_UPDATE(DOMAIN6_PG_CONFIG,
654 DOMAIN6_POWER_GATE, power_gate);
656 REG_WAIT(DOMAIN6_PG_STATUS,
657 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
/*
 * power_on_plane() - un-gate the front end (DPP + HUBP) for a plane.
 *
 * Power-gate requests are normally blocked; this briefly opens the
 * DC_IP_REQUEST_CNTL window, powers the DPP and HUBP domains up via the
 * hwseq hooks, then closes the window again.
 */
666 static void power_on_plane(
667 struct dce_hwseq *hws,
670 DC_LOGGER_INIT(hws->ctx->logger);
671 if (REG(DC_IP_REQUEST_CNTL)) {
/* open the power-gate request window */
672 REG_SET(DC_IP_REQUEST_CNTL, 0,
675 if (hws->funcs.dpp_pg_control)
676 hws->funcs.dpp_pg_control(hws, plane_id, true);
678 if (hws->funcs.hubp_pg_control)
679 hws->funcs.hubp_pg_control(hws, plane_id, true);
/* close the window again */
681 REG_SET(DC_IP_REQUEST_CNTL, 0,
684 "Un-gated front end for pipe %d\n", plane_id);
/*
 * undo_DEGVIDCN10_253_wa() - revert the DEGVIDCN10-253 stutter
 * workaround: re-blank HUBP0 and power-gate it again.
 *
 * No-op unless the workaround was previously applied.
 */
688 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
690 struct dce_hwseq *hws = dc->hwseq;
691 struct hubp *hubp = dc->res_pool->hubps[0];
693 if (!hws->wa_state.DEGVIDCN10_253_applied)
696 hubp->funcs->set_blank(hubp, true);
/* open the power-gate request window, gate HUBP0, close the window */
698 REG_SET(DC_IP_REQUEST_CNTL, 0,
701 hws->funcs.hubp_pg_control(hws, 0, false);
702 REG_SET(DC_IP_REQUEST_CNTL, 0,
705 hws->wa_state.DEGVIDCN10_253_applied = false;
/*
 * apply_DEGVIDCN10_253_wa() - DEGVIDCN10-253 workaround: when every
 * HUBP is power gated, power HUBP0 back up (unblanked) so stutter can
 * still be entered.
 *
 * Skipped when stutter is disabled via debug option or the workaround
 * flag is not set for this ASIC.
 */
708 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
710 struct dce_hwseq *hws = dc->hwseq;
711 struct hubp *hubp = dc->res_pool->hubps[0];
714 if (dc->debug.disable_stutter)
717 if (!hws->wa.DEGVIDCN10_253)
/* only applies when every pipe is power gated */
720 for (i = 0; i < dc->res_pool->pipe_count; i++) {
721 if (!dc->res_pool->hubps[i]->power_gated)
725 /* all pipe power gated, apply work around to enable stutter. */
727 REG_SET(DC_IP_REQUEST_CNTL, 0,
730 hws->funcs.hubp_pg_control(hws, 0, true);
731 REG_SET(DC_IP_REQUEST_CNTL, 0,
734 hubp->funcs->set_hubp_blank_en(hubp, false);
735 hws->wa_state.DEGVIDCN10_253_applied = true;
/*
 * dcn10_bios_golden_init() - run the VBIOS golden init sequence for DCN.
 *
 * Snapshots the self-refresh force-enable state before calling the BIOS
 * command table, initializes DCN globally and per pipe, then restores
 * the self-refresh setting if the command table flipped it (S0i3 DF
 * sleep workaround described inline).
 */
738 void dcn10_bios_golden_init(struct dc *dc)
740 struct dce_hwseq *hws = dc->hwseq;
741 struct dc_bios *bp = dc->ctx->dc_bios;
743 bool allow_self_fresh_force_enable = true;
/* some ASICs handle this entirely in a s0i3 workaround hook */
745 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
748 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
749 allow_self_fresh_force_enable =
750 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
753 /* WA for making DF sleep when idle after resume from S0i3.
754 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
755 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
756 * before calling command table and it changed to 1 after,
757 * it should be set back to 0.
760 /* initialize dcn global */
761 bp->funcs->enable_disp_power_gating(bp,
762 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
764 for (i = 0; i < dc->res_pool->pipe_count; i++) {
765 /* initialize dcn per pipe */
766 bp->funcs->enable_disp_power_gating(bp,
767 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
/* restore self-refresh control if the command table changed it */
770 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
771 if (allow_self_fresh_force_enable == false &&
772 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
773 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
774 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
/*
 * false_optc_underflow_wa() - workaround for a spurious OTG underflow.
 *
 * Snapshots the underflow status, waits for MPCC disconnects on every
 * pipe of @stream, re-arms blank-data double buffering, and clears the
 * underflow bit only if it was newly raised by this sequence (so a real
 * pre-existing underflow stays visible).
 */
778 static void false_optc_underflow_wa(
780 const struct dc_stream_state *stream,
781 struct timing_generator *tg)
786 if (!dc->hwseq->wa.false_optc_underflow)
789 underflow = tg->funcs->is_optc_underflow_occurred(tg);
791 for (i = 0; i < dc->res_pool->pipe_count; i++) {
792 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
794 if (old_pipe_ctx->stream != stream)
797 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
800 if (tg->funcs->set_blank_data_double_buffer)
801 tg->funcs->set_blank_data_double_buffer(tg, true);
/* only clear if the underflow appeared during this sequence */
803 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
804 tg->funcs->clear_optc_underflow(tg);
/*
 * dcn10_enable_stream_timing() - program and start the OTG for a stream.
 *
 * Top-pipe only (children share the back end).  Sequence: enable OPTC
 * clock, program the pixel clock PLL, program OTG timing, set the blank
 * color, blank the OTG (with the false-underflow workaround), then
 * enable the CRTC.  Returns DC_ERROR_UNEXPECTED on PLL or CRTC enable
 * failure (success return falls outside this excerpt).
 */
807 enum dc_status dcn10_enable_stream_timing(
808 struct pipe_ctx *pipe_ctx,
809 struct dc_state *context,
812 struct dc_stream_state *stream = pipe_ctx->stream;
813 enum dc_color_space color_space;
814 struct tg_color black_color = {0};
816 /* by upper caller loop, pipe0 is parent pipe and be called first.
817 * back end is set up by for pipe0. Other children pipe share back end
818 * with pipe 0. No program is needed.
820 if (pipe_ctx->top_pipe != NULL)
823 /* TODO check if timing_changed, disable stream if timing changed */
825 /* HW program guide assume display already disable
826 * by unplug sequence. OTG assume stop.
828 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
830 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
831 pipe_ctx->clock_source,
832 &pipe_ctx->stream_res.pix_clk_params,
833 &pipe_ctx->pll_settings)) {
835 return DC_ERROR_UNEXPECTED;
838 pipe_ctx->stream_res.tg->funcs->program_timing(
839 pipe_ctx->stream_res.tg,
841 pipe_ctx->pipe_dlg_param.vready_offset,
842 pipe_ctx->pipe_dlg_param.vstartup_start,
843 pipe_ctx->pipe_dlg_param.vupdate_offset,
844 pipe_ctx->pipe_dlg_param.vupdate_width,
845 pipe_ctx->stream->signal,
848 #if 0 /* move to after enable_crtc */
849 /* TODO: OPP FMT, ABM. etc. should be done here. */
850 /* or FPGA now. instance 0 only. TODO: move to opp.c */
852 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
854 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
855 pipe_ctx->stream_res.opp,
856 &stream->bit_depth_params,
859 /* program otg blank color */
860 color_space = stream->output_color_space;
861 color_space_to_black_color(dc, color_space, &black_color);
864 * The way 420 is packed, 2 channels carry Y component, 1 channel
865 * alternate between Cb and Cr, so both channels need the pixel
868 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
869 black_color.color_r_cr = black_color.color_g_y;
871 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
872 pipe_ctx->stream_res.tg->funcs->set_blank_color(
873 pipe_ctx->stream_res.tg,
/* blank the OTG before enabling the CRTC, and run the false-underflow
 * workaround once blanking completes */
876 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
877 !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
878 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
879 hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
880 false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
883 /* VTG is within DCHUB command block. DCFCLK is always on */
884 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
886 return DC_ERROR_UNEXPECTED;
889 /* TODO program crtc source select for non-virtual signal*/
890 /* TODO program FMT */
891 /* TODO setup link_enc */
892 /* TODO set stream attributes */
893 /* TODO program audio */
894 /* TODO enable stream if timing changed */
895 /* TODO unblank stream if DP */
/*
 * dcn10_reset_back_end_for_pipe() - tear down the back end of a pipe.
 *
 * Disables the stream (or at least its audio), releases the audio
 * endpoint, and — for the top pipe only, since children share the back
 * end — disables ABM, the CRTC, the OPTC clock and DRR.  Finally clears
 * pipe_ctx->stream once the pipe is confirmed to belong to
 * dc->current_state.
 */
900 static void dcn10_reset_back_end_for_pipe(
902 struct pipe_ctx *pipe_ctx,
903 struct dc_state *context)
906 struct dc_link *link;
907 DC_LOGGER_INIT(dc->ctx->logger);
/* nothing to tear down if no stream encoder was ever attached */
908 if (pipe_ctx->stream_res.stream_enc == NULL) {
909 pipe_ctx->stream = NULL;
913 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
914 link = pipe_ctx->stream->link;
915 /* DPMS may already disable or */
916 /* dpms_off status is incorrect due to fastboot
917 * feature. When system resume from S4 with second
918 * screen only, the dpms_off would be true but
919 * VBIOS lit up eDP, so check link status too.
921 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
922 core_link_disable_stream(pipe_ctx);
923 else if (pipe_ctx->stream_res.audio)
924 dc->hwss.disable_audio_stream(pipe_ctx);
926 if (pipe_ctx->stream_res.audio) {
927 /*disable az_endpoint*/
928 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
931 if (dc->caps.dynamic_audio == true) {
932 /*we have to dynamic arbitrate the audio endpoints*/
933 /*we free the resource, need reset is_audio_acquired*/
934 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
935 pipe_ctx->stream_res.audio, false);
936 pipe_ctx->stream_res.audio = NULL;
941 /* by upper caller loop, parent pipe: pipe0, will be reset last.
942 * back end share by all pipes and will be disable only when disable
945 if (pipe_ctx->top_pipe == NULL) {
947 if (pipe_ctx->stream_res.abm)
948 dc->hwss.set_abm_immediate_disable(pipe_ctx)
950 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
952 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
953 if (pipe_ctx->stream_res.tg->funcs->set_drr)
954 pipe_ctx->stream_res.tg->funcs->set_drr(
955 pipe_ctx->stream_res.tg, NULL);
/* only clear the stream pointer for pipes in the current state */
958 for (i = 0; i < dc->res_pool->pipe_count; i++)
959 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
962 if (i == dc->res_pool->pipe_count)
965 pipe_ctx->stream = NULL;
966 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
967 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
/*
 * dcn10_hw_wa_force_recovery() - attempt HW recovery after a HUBP
 * underflow.
 *
 * If recovery is enabled and any pipe reports an underflow, execute the
 * documented sequence: blank every HUBP, soft-reset DCHUBBUB, pulse
 * HUBP_DISABLE (1 then 0), release the soft reset, then unblank.
 *
 * FIX(review): the two final loops previously passed true where their
 * own comments require 0 — hubp_disable_control(hubp, true) in the
 * "HUBP_DISABLE=0" loop and set_hubp_blank_en(hubp, true) in the
 * "HUBP_BLANK_EN=0" loop — leaving every HUBP disabled and blanked
 * after "recovery".  Both now pass false to complete the sequence.
 */
970 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
974 bool need_recover = true;
976 if (!dc->debug.recovery_enabled)
/* scan all pipes: one underflow triggers a reset of every pipe */
979 for (i = 0; i < dc->res_pool->pipe_count; i++) {
980 struct pipe_ctx *pipe_ctx =
981 &dc->current_state->res_ctx.pipe_ctx[i];
982 if (pipe_ctx != NULL) {
983 hubp = pipe_ctx->plane_res.hubp;
984 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
985 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
986 /* one pipe underflow, we will reset all the pipes*/
995 DCHUBP_CNTL:HUBP_BLANK_EN=1
996 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
997 DCHUBP_CNTL:HUBP_DISABLE=1
998 DCHUBP_CNTL:HUBP_DISABLE=0
999 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1000 DCSURF_PRIMARY_SURFACE_ADDRESS
1001 DCHUBP_CNTL:HUBP_BLANK_EN=0
1004 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1005 struct pipe_ctx *pipe_ctx =
1006 &dc->current_state->res_ctx.pipe_ctx[i];
1007 if (pipe_ctx != NULL) {
1008 hubp = pipe_ctx->plane_res.hubp;
1009 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1010 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1011 hubp->funcs->set_hubp_blank_en(hubp, true);
1014 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1015 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1017 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1018 struct pipe_ctx *pipe_ctx =
1019 &dc->current_state->res_ctx.pipe_ctx[i];
1020 if (pipe_ctx != NULL) {
1021 hubp = pipe_ctx->plane_res.hubp;
1022 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1023 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1024 hubp->funcs->hubp_disable_control(hubp, true);
1027 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1028 struct pipe_ctx *pipe_ctx =
1029 &dc->current_state->res_ctx.pipe_ctx[i];
1030 if (pipe_ctx != NULL) {
1031 hubp = pipe_ctx->plane_res.hubp;
1032 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1033 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1034 hubp->funcs->hubp_disable_control(hubp, false);
1037 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1038 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1039 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1040 struct pipe_ctx *pipe_ctx =
1041 &dc->current_state->res_ctx.pipe_ctx[i];
1042 if (pipe_ctx != NULL) {
1043 hubp = pipe_ctx->plane_res.hubp;
1044 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1045 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1046 hubp->funcs->set_hubp_blank_en(hubp, false);
/*
 * dcn10_verify_allow_pstate_change_high() - sanity check that HUBBUB
 * still allows p-state change; on failure, optionally log HW state,
 * break to the debugger and attempt forced recovery, re-verifying after.
 */
1053 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1055 static bool should_log_hw_state; /* prevent hw state log by default */
1057 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
/* should_log_hw_state defaults to false; flip it in a debugger to get
 * a full dcn10_log_hw_state dump on failure */
1060 if (should_log_hw_state)
1061 dcn10_log_hw_state(dc, NULL);
1063 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1064 BREAK_TO_DEBUGGER();
1065 if (dcn10_hw_wa_force_recovery(dc)) {
/* re-check after forced recovery; break again if still stuck */
1067 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
1068 BREAK_TO_DEBUGGER();
1073 /* trigger HW to start disconnect plane from stream on the next vsync */
/*
 * Removes the pipe's MPCC from the OPP blending tree, marks the
 * disconnect as pending (completed at the next vsync), flags the state
 * as needing optimization, and asks the HUBP to disconnect.
 */
1074 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1076 struct dce_hwseq *hws = dc->hwseq;
1077 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1078 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1079 struct mpc *mpc = dc->res_pool->mpc;
1080 struct mpc_tree *mpc_tree_params;
1081 struct mpcc *mpcc_to_remove = NULL;
1082 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1084 mpc_tree_params = &(opp->mpc_tree_params);
1085 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
/* nothing to disconnect if this DPP has no MPCC in the tree */
1088 if (mpcc_to_remove == NULL)
1091 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1093 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1095 dc->optimized_required = true;
1097 if (hubp->funcs->hubp_disconnect)
1098 hubp->funcs->hubp_disconnect(hubp);
1100 if (dc->debug.sanity_checks)
1101 hws->funcs.verify_allow_pstate_change_high(dc);
1105 * dcn10_plane_atomic_power_down - Power down plane components.
1107 * @dc: dc struct reference. used for grab hwseq.
1108 * @dpp: dpp struct reference.
1109 * @hubp: hubp struct reference.
1111 * Keep in mind that this operation requires a power gate configuration;
1112 * however, requests for switch power gate are precisely controlled to avoid
1113 * problems. For this reason, power gate request is usually disabled. This
1114 * function first needs to enable the power gate request before disabling DPP
1115 * and HUBP. Finally, it disables the power gate request again.
1117 void dcn10_plane_atomic_power_down(struct dc *dc,
1121 struct dce_hwseq *hws = dc->hwseq;
1122 DC_LOGGER_INIT(dc->ctx->logger);
/* Only ASICs that expose DC_IP_REQUEST_CNTL support this power gating path. */
1124 if (REG(DC_IP_REQUEST_CNTL)) {
/* Open the power-gate request window before gating DPP/HUBP. */
1125 REG_SET(DC_IP_REQUEST_CNTL, 0,
1128 if (hws->funcs.dpp_pg_control)
1129 hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1131 if (hws->funcs.hubp_pg_control)
1132 hws->funcs.hubp_pg_control(hws, hubp->inst, false);
/* Reset DPP state so it comes back clean on the next power-up. */
1134 dpp->funcs->dpp_reset(dpp);
/* Close the power-gate request window again. */
1135 REG_SET(DC_IP_REQUEST_CNTL, 0,
1138 "Power gated front end %d\n", hubp->inst);
1142 /* disable HW used by plane.
1143 * note: cannot disable until disconnect is complete
1145 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1147 struct dce_hwseq *hws = dc->hwseq;
1148 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1149 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1150 int opp_id = hubp->opp_id;
/* MPCC disconnect (armed by plane_atomic_disconnect) must finish first. */
1152 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
/* Gate per-pipe clocks now that the pipe is out of the blending tree. */
1154 hubp->funcs->hubp_clk_cntl(hubp, false);
1156 dpp->funcs->dpp_dppclk_control(dpp, false, false);
/* Gate the OPP pipe clock only when no MPCC remains attached to it. */
1158 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1159 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1160 pipe_ctx->stream_res.opp,
1163 hubp->power_gated = true;
1164 dc->optimized_required = false; /* We're powering off, no need to optimize */
1166 hws->funcs.plane_atomic_power_down(dc,
1167 pipe_ctx->plane_res.dpp,
1168 pipe_ctx->plane_res.hubp);
/* Clear SW pipe state so the pipe reads as fully free. */
1170 pipe_ctx->stream = NULL;
1171 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1172 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1173 pipe_ctx->top_pipe = NULL;
1174 pipe_ctx->bottom_pipe = NULL;
1175 pipe_ctx->plane_state = NULL;
/*
 * dcn10_disable_plane - disable a plane's front-end HW if it is still active.
 *
 * Skips pipes with no HUBP or an already power-gated HUBP, then runs the
 * atomic disable sequence and applies the DEGVIDCN10-253 workaround.
 */
1178 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1180 struct dce_hwseq *hws = dc->hwseq;
1181 DC_LOGGER_INIT(dc->ctx->logger);
/* Nothing to do when the front end is absent or already gated. */
1183 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1186 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1188 apply_DEGVIDCN10_253_wa(dc);
1190 DC_LOG_DC("Power down front end %d\n",
1191 pipe_ctx->pipe_idx);
/*
 * dcn10_init_pipes - bring all pipes to a known-disabled state at init.
 *
 * Blanks any timing generators left running (e.g. by VBIOS/GOP), resets the
 * MPC muxes, then disconnects and disables every front-end pipe that is not
 * needed for a seamless boot. Pipes carrying a seamless-boot stream are left
 * untouched so the pre-lit display does not glitch.
 */
1194 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1197 struct dce_hwseq *hws = dc->hwseq;
1198 bool can_apply_seamless_boot = false;
1200 for (i = 0; i < context->stream_count; i++) {
1201 if (context->streams[i]->apply_seamless_boot_optimization) {
1202 can_apply_seamless_boot = true;
1207 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1208 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1209 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1211 /* There is assumption that pipe_ctx is not mapping irregularly
1212 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1213 * we will use the pipe, so don't disable
1215 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1218 /* Blank controller using driver code instead of
1221 if (tg->funcs->is_tg_enabled(tg)) {
/* Prefer the hw-specific init_blank hook when the platform provides one. */
1222 if (hws->funcs.init_blank != NULL) {
1223 hws->funcs.init_blank(dc, tg);
1224 tg->funcs->lock(tg);
1226 tg->funcs->lock(tg);
1227 tg->funcs->set_blank(tg, true);
1228 hwss_wait_for_blank_complete(tg);
1233 /* num_opp will be equal to number of mpcc */
1234 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1235 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1237 /* Cannot reset the MPC mux if seamless boot */
1238 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1241 dc->res_pool->mpc->funcs->mpc_init_single_inst(
1242 dc->res_pool->mpc, i);
1245 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1246 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1247 struct hubp *hubp = dc->res_pool->hubps[i];
1248 struct dpp *dpp = dc->res_pool->dpps[i];
1249 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1251 /* There is assumption that pipe_ctx is not mapping irregularly
1252 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1253 * we will use the pipe, so don't disable
1255 if (can_apply_seamless_boot &&
1256 pipe_ctx->stream != NULL &&
1257 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1258 pipe_ctx->stream_res.tg)) {
1259 // Enable double buffering for OTG_BLANK no matter if
1260 // seamless boot is enabled or not to suppress global sync
1261 // signals when OTG blanked. This is to prevent pipe from
1262 // requesting data while in PSR.
1263 tg->funcs->tg_init(tg);
1264 hubp->power_gated = true;
1268 /* Disable on the current state so the new one isn't cleared. */
1269 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1271 dpp->funcs->dpp_reset(dpp);
/* Build a minimal pipe_ctx so the disconnect/disable helpers can run. */
1273 pipe_ctx->stream_res.tg = tg;
1274 pipe_ctx->pipe_idx = i;
1276 pipe_ctx->plane_res.hubp = hubp;
1277 pipe_ctx->plane_res.dpp = dpp;
1278 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1279 hubp->mpcc_id = dpp->inst;
1280 hubp->opp_id = OPP_ID_INVALID;
1281 hubp->power_gated = false;
1283 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1284 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1285 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1286 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1288 hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1290 if (tg->funcs->is_tg_enabled(tg))
1291 tg->funcs->unlock(tg);
1293 dc->hwss.disable_plane(dc, pipe_ctx);
1295 pipe_ctx->stream_res.tg = NULL;
1296 pipe_ctx->plane_res.hubp = NULL;
1298 tg->funcs->tg_init(tg);
/*
 * dcn10_init_hw - one-time hardware initialization for DCN10.
 *
 * Initializes clocks/DCCG, performs BIOS golden init, powers up link
 * encoders, power-gates unused DSCs, powers down any DP displays lit by
 * VBIOS (when power_down_display_on_boot), initializes pipes, audio, panel
 * backlight, ABM/DMCU, and finally re-enables clock gating and plane power
 * gating.
 */
1302 void dcn10_init_hw(struct dc *dc)
1305 struct abm *abm = dc->res_pool->abm;
1306 struct dmcu *dmcu = dc->res_pool->dmcu;
1307 struct dce_hwseq *hws = dc->hwseq;
1308 struct dc_bios *dcb = dc->ctx->dc_bios;
1309 struct resource_pool *res_pool = dc->res_pool;
1310 uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1311 bool is_optimized_init_done = false;
1313 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1314 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1316 // Initialize the dccg
1317 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1318 dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
/* FPGA (Maximus) environments take a reduced init path. */
1320 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1322 REG_WRITE(REFCLK_CNTL, 0);
1323 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1324 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1326 if (!dc->debug.disable_clock_gate) {
1327 /* enable all DCN clock gating */
1328 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1330 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1332 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1335 //Enable ability to power gate / don't force power on permanently
1336 if (hws->funcs.enable_power_gating_plane)
1337 hws->funcs.enable_power_gating_plane(hws, true);
1342 if (!dcb->funcs->is_accelerated_mode(dcb))
1343 hws->funcs.disable_vga(dc->hwseq);
1345 hws->funcs.bios_golden_init(dc);
/* Derive reference clocks from VBIOS firmware info when available. */
1347 if (dc->ctx->dc_bios->fw_info_valid) {
1348 res_pool->ref_clocks.xtalin_clock_inKhz =
1349 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1351 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1352 if (res_pool->dccg && res_pool->hubbub) {
1354 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1355 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1356 &res_pool->ref_clocks.dccg_ref_clock_inKhz);
1358 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1359 res_pool->ref_clocks.dccg_ref_clock_inKhz,
1360 &res_pool->ref_clocks.dchub_ref_clock_inKhz);
1362 // Not all ASICs have DCCG sw component
1363 res_pool->ref_clocks.dccg_ref_clock_inKhz =
1364 res_pool->ref_clocks.xtalin_clock_inKhz;
1365 res_pool->ref_clocks.dchub_ref_clock_inKhz =
1366 res_pool->ref_clocks.xtalin_clock_inKhz;
/* fw_info is expected to be valid on real HW; missing info is fatal. */
1370 ASSERT_CRITICAL(false);
1372 for (i = 0; i < dc->link_count; i++) {
1373 /* Power up AND update implementation according to the
1374 * required signal (which may be different from the
1375 * default signal on connector).
1377 struct dc_link *link = dc->links[i];
1379 if (!is_optimized_init_done)
1380 link->link_enc->funcs->hw_init(link->link_enc);
1382 /* Check for enabled DIG to identify enabled display */
1383 if (link->link_enc->funcs->is_dig_enabled &&
1384 link->link_enc->funcs->is_dig_enabled(link->link_enc))
1385 link->link_status.link_active = true;
1388 /* Power gate DSCs */
1389 if (!is_optimized_init_done) {
1390 for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1391 if (hws->funcs.dsc_pg_control != NULL)
1392 hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1395 /* Enable outbox notification feature of dmub */
1396 if (dc->debug.enable_dmub_aux_for_legacy_ddc)
1397 dmub_enable_outbox_notification(dc);
1399 /* we want to turn off all dp displays before doing detection */
1400 if (dc->config.power_down_display_on_boot) {
1401 uint8_t dpcd_power_state = '\0';
1402 enum dc_status status = DC_ERROR_UNEXPECTED;
1404 for (i = 0; i < dc->link_count; i++) {
1405 if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
1408 /* DP 2.0 requires that LTTPR Caps be read first */
1409 dp_retrieve_lttpr_cap(dc->links[i]);
1412 * If any of the displays are lit up turn them off.
1413 * The reason is that some MST hubs cannot be turned off
1414 * completely until we tell them to do so.
1415 * If not turned off, then displays connected to MST hub
1418 status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
1419 &dpcd_power_state, sizeof(dpcd_power_state));
1420 if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
1421 /* blank dp stream before power off receiver*/
1422 if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
1423 unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
/* Blank the stream encoder that feeds this link's DIG front end. */
1425 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1426 if (fe == dc->res_pool->stream_enc[j]->id) {
1427 dc->res_pool->stream_enc[j]->funcs->dp_blank(
1428 dc->res_pool->stream_enc[j]);
1433 dp_receiver_power_ctrl(dc->links[i], false);
1438 /* If taking control over from VBIOS, we may want to optimize our first
1439 * mode set, so we need to skip powering down pipes until we know which
1440 * pipes we want to use.
1441 * Otherwise, if taking control is not possible, we need to power
1444 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1445 if (!is_optimized_init_done) {
1446 hws->funcs.init_pipes(dc, dc->current_state);
1447 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1448 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1449 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1453 if (!is_optimized_init_done) {
1455 for (i = 0; i < res_pool->audio_count; i++) {
1456 struct audio *audio = res_pool->audios[i];
1458 audio->funcs->hw_init(audio);
1461 for (i = 0; i < dc->link_count; i++) {
1462 struct dc_link *link = dc->links[i];
/* Let the panel control HW report the current backlight level. */
1464 if (link->panel_cntl)
1465 backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1469 abm->funcs->abm_init(abm, backlight);
1471 if (dmcu != NULL && !dmcu->auto_load_dmcu)
1472 dmcu->funcs->dmcu_init(dmcu);
1475 if (abm != NULL && dmcu != NULL)
1476 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1478 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1479 if (!is_optimized_init_done)
1480 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1482 if (!dc->debug.disable_clock_gate) {
1483 /* enable all DCN clock gating */
1484 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1486 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1488 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1490 if (hws->funcs.enable_power_gating_plane)
1491 hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1493 if (dc->clk_mgr->funcs->notify_wm_ranges)
1494 dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1497 /* In headless boot cases, DIG may be turned
1498 * on which causes HW/SW discrepancies.
1499 * To avoid this, power down hardware on boot
1500 * if DIG is turned on
1502 void dcn10_power_down_on_boot(struct dc *dc)
1504 struct dc_link *edp_links[MAX_NUM_EDP];
1505 struct dc_link *edp_link = NULL;
1509 get_edp_links(dc, edp_links, &edp_num);
1511 edp_link = edp_links[0];
/*
 * eDP path: if its DIG is enabled and all required hooks exist,
 * turn off backlight, power down HW, then cut eDP panel power.
 */
1513 if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1514 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1515 dc->hwseq->funcs.edp_backlight_control &&
1516 dc->hwss.power_down &&
1517 dc->hwss.edp_power_control) {
1518 dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1519 dc->hwss.power_down(dc);
1520 dc->hwss.edp_power_control(edp_link, false);
/* Non-eDP path: power down if any link's DIG is found enabled. */
1522 for (i = 0; i < dc->link_count; i++) {
1523 struct dc_link *link = dc->links[i];
1525 if (link->link_enc->funcs->is_dig_enabled &&
1526 link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1527 dc->hwss.power_down) {
1528 dc->hwss.power_down(dc);
1536 * Call update_clocks with empty context
1537 * to send DISPLAY_OFF
1538 * Otherwise DISPLAY_OFF may not be asserted
1540 if (dc->clk_mgr->funcs->set_low_power_state)
1541 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
/*
 * dcn10_reset_hw_ctx_wrap - reset back ends whose pipes are removed or
 * need reprogramming in the new state.
 *
 * Walks pipes in reverse order, skipping unused pipes and non-top pipes
 * (the tree head handles its whole MPC tree), and resets the back end when
 * the pipe is gone from the new context or must be reprogrammed.
 */
1544 void dcn10_reset_hw_ctx_wrap(
1546 struct dc_state *context)
1549 struct dce_hwseq *hws = dc->hwseq;
1552 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1553 struct pipe_ctx *pipe_ctx_old =
1554 &dc->current_state->res_ctx.pipe_ctx[i];
1555 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1557 if (!pipe_ctx_old->stream)
1560 if (pipe_ctx_old->top_pipe)
1563 if (!pipe_ctx->stream ||
1564 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1565 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1567 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1568 if (hws->funcs.enable_stream_gating)
1569 hws->funcs.enable_stream_gating(dc, pipe_ctx);
/* Power down the old pipe's clock source once it is no longer used. */
1571 old_clk->funcs->cs_power_down(old_clk);
/*
 * patch_address_for_sbs_tb_stereo - adjust the surface address for
 * side-by-side / top-and-bottom stereo.
 *
 * For the secondary split pipe of an SBS/TAB stereo plane, swap in the
 * right-eye address (saving the original left address in *addr so the
 * caller can restore it). For non-stereo-address planes on a 3D stream,
 * mirror the left address/meta into the right-eye fields.
 * Return value indicates whether *addr was patched; the caller restores
 * the left address afterwards when it was.
 */
1576 static bool patch_address_for_sbs_tb_stereo(
1577 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1579 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
/* Secondary split pipe shares the plane_state with its top pipe. */
1580 bool sec_split = pipe_ctx->top_pipe &&
1581 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1582 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1583 (pipe_ctx->stream->timing.timing_3d_format ==
1584 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1585 pipe_ctx->stream->timing.timing_3d_format ==
1586 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1587 *addr = plane_state->address.grph_stereo.left_addr;
1588 plane_state->address.grph_stereo.left_addr =
1589 plane_state->address.grph_stereo.right_addr;
1592 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1593 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1594 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1595 plane_state->address.grph_stereo.right_addr =
1596 plane_state->address.grph_stereo.left_addr;
1597 plane_state->address.grph_stereo.right_meta_addr =
1598 plane_state->address.grph_stereo.left_meta_addr;
/*
 * dcn10_update_plane_addr - program the plane's surface address (flip).
 *
 * Applies the SBS/TAB stereo address patch if needed, programs the HUBP
 * flip address, records the requested address in the plane status, and
 * restores the original left-eye address when it was patched.
 */
1604 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1606 bool addr_patched = false;
1607 PHYSICAL_ADDRESS_LOC addr;
1608 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1610 if (plane_state == NULL)
1613 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1615 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1616 pipe_ctx->plane_res.hubp,
1617 &plane_state->address,
1618 plane_state->flip_immediate);
1620 plane_state->status.requested_address = plane_state->address;
/* Immediate flips take effect right away, so current == requested. */
1622 if (plane_state->flip_immediate)
1623 plane_state->status.current_address = plane_state->address;
/* Undo the stereo patch so plane_state keeps its original left address. */
1626 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
/*
 * dcn10_set_input_transfer_func - program the DPP degamma (input transfer
 * function) and optional input LUT for a plane.
 *
 * Chooses between HW predefined degamma curves (sRGB, BT709), bypass,
 * or a user PWL translated from the transfer function's curve.
 */
1629 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1630 const struct dc_plane_state *plane_state)
1632 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1633 const struct dc_transfer_func *tf = NULL;
1636 if (dpp_base == NULL)
1639 if (plane_state->in_transfer_func)
1640 tf = plane_state->in_transfer_func;
/* Program the legacy input LUT only when it is usable for this format. */
1642 if (plane_state->gamma_correction &&
1643 !dpp_base->ctx->dc->debug.always_use_regamma
1644 && !plane_state->gamma_correction->is_identity
1645 && dce_use_lut(plane_state->format))
1646 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
/* No transfer function supplied: leave degamma in bypass. */
1649 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1650 else if (tf->type == TF_TYPE_PREDEFINED) {
1652 case TRANSFER_FUNCTION_SRGB:
1653 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1655 case TRANSFER_FUNCTION_BT709:
1656 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1658 case TRANSFER_FUNCTION_LINEAR:
1659 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
/* PQ has no HW curve on DCN10; translate it into a user PWL instead. */
1661 case TRANSFER_FUNCTION_PQ:
1662 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1663 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1664 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1671 } else if (tf->type == TF_TYPE_BYPASS) {
1672 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
/* Arbitrary curve: translate points into the degamma PWL format. */
1674 cm_helper_translate_curve_to_degamma_hw_format(tf,
1675 &dpp_base->degamma_params);
1676 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1677 &dpp_base->degamma_params);
1684 #define MAX_NUM_HW_POINTS 0x200
/*
 * log_tf - dump transfer function points to the gamma log channels.
 *
 * Logs the first hw_points_num points on the regular gamma channel and the
 * remaining points (up to MAX_NUM_HW_POINTS) on the "all gamma" channel;
 * green/blue channels go to the per-channel log.
 */
1686 static void log_tf(struct dc_context *ctx,
1687 struct dc_transfer_func *tf, uint32_t hw_points_num)
1689 // DC_LOG_GAMMA is default logging of all hw points
1690 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1691 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1694 DC_LOGGER_INIT(ctx->logger);
1695 DC_LOG_GAMMA("Gamma Correction TF");
1696 DC_LOG_ALL_GAMMA("Logging all tf points...");
1697 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1699 for (i = 0; i < hw_points_num; i++) {
1700 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1701 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1702 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1705 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1706 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1707 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1708 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/*
 * dcn10_set_output_transfer_func - program the DPP regamma (output
 * transfer function) for a stream.
 *
 * Uses the HW sRGB curve when the stream's TF is predefined sRGB;
 * otherwise translates the curve into a user regamma PWL, falling back to
 * bypass if translation fails. The programmed TF is logged afterwards.
 */
1712 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1713 const struct dc_stream_state *stream)
1715 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1720 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1722 if (stream->out_transfer_func &&
1723 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1724 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1725 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1727 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1730 else if (cm_helper_translate_curve_to_hw_format(
1731 stream->out_transfer_func,
1732 &dpp->regamma_params, false)) {
1733 dpp->funcs->dpp_program_regamma_pwl(
1735 &dpp->regamma_params, OPP_REGAMMA_USER);
/* Translation failed: leave regamma in bypass rather than program junk. */
1737 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1739 if (stream != NULL && stream->ctx != NULL &&
1740 stream->out_transfer_func != NULL) {
1742 stream->out_transfer_func,
1743 dpp->regamma_params.hw_points_num);
/*
 * dcn10_pipe_control_lock - lock/unlock a pipe's double-buffered registers
 * via the timing generator's master update lock.
 */
1749 void dcn10_pipe_control_lock(
1751 struct pipe_ctx *pipe,
1754 struct dce_hwseq *hws = dc->hwseq;
1756 /* use TG master update lock to lock everything on the TG
1757 * therefore only top pipe need to lock
1759 if (!pipe || pipe->top_pipe)
1762 if (dc->debug.sanity_checks)
1763 hws->funcs.verify_allow_pstate_change_high(dc);
1766 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1768 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1770 if (dc->debug.sanity_checks)
1771 hws->funcs.verify_allow_pstate_change_high(dc);
1775 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1777 * Software keepout workaround to prevent cursor update locking from stalling
1778 * out cursor updates indefinitely or from old values from being retained in
1779 * the case where the viewport changes in the same frame as the cursor.
1781 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1782 * too close to VUPDATE, then stall out until VUPDATE finishes.
1784 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1785 * to avoid the need for this workaround.
1787 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1789 struct dc_stream_state *stream = pipe_ctx->stream;
1790 struct crtc_position position;
1791 uint32_t vupdate_start, vupdate_end;
1792 unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1793 unsigned int us_per_line, us_vupdate;
/* Bail out if the HW hooks needed for the calculation are absent. */
1795 if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1798 if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1801 dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1804 dc->hwss.get_position(&pipe_ctx, 1, &position);
1805 vpos = position.vertical_count;
1807 /* Avoid wraparound calculation issues */
1808 vupdate_start += stream->timing.v_total;
1809 vupdate_end += stream->timing.v_total;
1810 vpos += stream->timing.v_total;
1812 if (vpos <= vupdate_start) {
1813 /* VPOS is in VACTIVE or back porch. */
1814 lines_to_vupdate = vupdate_start - vpos;
1815 } else if (vpos > vupdate_end) {
1816 /* VPOS is in the front porch. */
1819 /* VPOS is in VUPDATE. */
1820 lines_to_vupdate = 0;
1823 /* Calculate time until VUPDATE in microseconds. */
1825 stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1826 us_to_vupdate = lines_to_vupdate * us_per_line;
1828 /* 70 us is a conservative estimate of cursor update time*/
1829 if (us_to_vupdate > 70)
1832 /* Stall out until the cursor update completes. */
1833 if (vupdate_end < vupdate_start)
1834 vupdate_end += stream->timing.v_total;
1835 us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1836 udelay(us_to_vupdate + us_vupdate);
/*
 * dcn10_cursor_lock - lock/unlock cursor updates for a pipe's MPCC tree.
 *
 * Uses the DMUB HW lock manager when the link supports it (e.g. for PSR
 * coordination), otherwise the MPC cursor lock. Before locking, stalls the
 * update if it is too close to VUPDATE (see delay_cursor_until_vupdate).
 */
1839 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1841 /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1842 if (!pipe || pipe->top_pipe)
1845 /* Prevent cursor lock from stalling out cursor updates. */
1847 delay_cursor_until_vupdate(dc, pipe)
1849 if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1850 union dmub_hw_lock_flags hw_locks = { 0 };
1851 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1853 hw_locks.bits.lock_cursor = 1;
1854 inst_flags.opp_inst = pipe->stream_res.opp->inst;
1856 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1861 dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1862 pipe->stream_res.opp->inst, lock);
/*
 * wait_for_reset_trigger_to_occur - poll a TG until its triggered reset
 * fires, bounded by a fixed number of frames.
 *
 * Returns whether the reset was observed (error-logged timeout otherwise).
 */
1865 static bool wait_for_reset_trigger_to_occur(
1866 struct dc_context *dc_ctx,
1867 struct timing_generator *tg)
1871 /* To avoid endless loop we wait at most
1872 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1873 const uint32_t frames_to_wait_on_triggered_reset = 10;
1876 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
/* A stuck frame counter means the TG is not running; abort early. */
1878 if (!tg->funcs->is_counter_moving(tg)) {
1879 DC_ERROR("TG counter is not moving!\n");
1883 if (tg->funcs->did_triggered_reset_occur(tg)) {
1885 /* usually occurs at i=1 */
1886 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1891 /* Wait for one frame. */
1892 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1893 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1897 DC_ERROR("GSL: Timeout on reset trigger!\n");
/*
 * reduceSizeAndFraction - reduce a 64-bit fraction in place by dividing
 * out small prime factors.
 *
 * @numerator/@denominator: fraction to reduce (updated in place).
 * @checkUint32Bounary: when true, succeed only once both values fit in
 * 32 bits (the DTO phase/modulo registers are 32-bit).
 *
 * Repeatedly divides both terms by each prime < 1000 while both divide
 * evenly. Return value reflects whether the 32-bit bound was met (always
 * "success" when the bound is not requested).
 */
1902 uint64_t reduceSizeAndFraction(
1903 uint64_t *numerator,
1904 uint64_t *denominator,
1905 bool checkUint32Bounary)
/* Without the 32-bit constraint any reduction counts as success. */
1908 bool ret = checkUint32Bounary == false;
1909 uint64_t max_int32 = 0xffffffff;
1910 uint64_t num, denom;
1911 static const uint16_t prime_numbers[] = {
1912 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
1913 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
1914 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
1915 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
1916 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
1917 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
1918 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
1919 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
1920 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
1921 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
1922 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
1923 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
1924 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
1925 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
1926 941, 947, 953, 967, 971, 977, 983, 991, 997};
1927 int count = ARRAY_SIZE(prime_numbers);
1930 denom = *denominator;
1931 for (i = 0; i < count; i++) {
1932 uint32_t num_remainder, denom_remainder;
1933 uint64_t num_result, denom_result;
/* Done early once both terms already fit in 32 bits. */
1934 if (checkUint32Bounary &&
1935 num <= max_int32 && denom <= max_int32) {
/* 64-bit division via kernel helpers (no native u64 '/' on 32-bit). */
1940 num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
1941 denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
/* Keep dividing by the same prime while it divides both exactly. */
1942 if (num_remainder == 0 && denom_remainder == 0) {
1944 denom = denom_result;
1946 } while (num_remainder == 0 && denom_remainder == 0);
1949 *denominator = denom;
/*
 * is_low_refresh_rate - true when the pipe's refresh rate is <= 30 Hz.
 *
 * Refresh rate is derived from pixel clock (100 Hz units, hence *100)
 * divided by total line and frame sizes; integer math truncates.
 */
1953 bool is_low_refresh_rate(struct pipe_ctx *pipe)
1955 uint32_t master_pipe_refresh_rate =
1956 pipe->stream->timing.pix_clk_100hz * 100 /
1957 pipe->stream->timing.h_total /
1958 pipe->stream->timing.v_total;
1959 return master_pipe_refresh_rate <= 30;
/*
 * get_clock_divider - compute the effective pixel-clock divider for a pipe.
 *
 * Accounts for low refresh rate (when requested), YCbCr 4:2:0 encoding,
 * and the number of ODM-combined pipes in the chain.
 */
1962 uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
1964 uint32_t clock_divider = 1;
1965 uint32_t numpipes = 1;
1967 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
1970 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
/* Count pipes in the ODM chain; each ODM slice shares the clock. */
1973 while (pipe->next_odm_pipe) {
1974 pipe = pipe->next_odm_pipe;
1977 clock_divider *= numpipes;
1979 return clock_divider;
/*
 * dcn10_align_pixel_clocks - align DP DTO pixel clocks across a sync group.
 *
 * When vblank_alignment_dto_params is configured, derives per-pipe DTO
 * phase/modulo from the embedded panel's timing, reduces the fractions to
 * fit the 32-bit DTO registers, and overrides each non-embedded pipe's DP
 * pixel clock so all pipes tick in lockstep with the embedded display.
 * Pipes whose fraction cannot be reduced are flagged non-synchronizable.
 * Returns the index of the master pipe for subsequent vblank alignment.
 */
1982 int dcn10_align_pixel_clocks(
1985 struct pipe_ctx *grouped_pipes[])
1987 struct dc_context *dc_ctx = dc->ctx;
1988 int i, master = -1, embedded = -1;
1989 struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
1990 uint64_t phase[MAX_PIPES];
1991 uint64_t modulo[MAX_PIPES];
1994 uint32_t embedded_pix_clk_100hz;
1995 uint16_t embedded_h_total;
1996 uint16_t embedded_v_total;
1997 bool clamshell_closed = false;
1998 uint32_t dp_ref_clk_100hz =
1999 dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
2001 if (dc->config.vblank_alignment_dto_params &&
2002 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
/* Unpack the packed 64-bit DTO parameter blob from dc->config. */
2004 (dc->config.vblank_alignment_dto_params >> 63);
2006 (dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2008 (dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2009 embedded_pix_clk_100hz =
2010 dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
2012 for (i = 0; i < group_size; i++) {
2013 grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2014 grouped_pipes[i]->stream_res.tg,
2015 &hw_crtc_timing[i]);
2016 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2017 dc->res_pool->dp_clock_source,
2018 grouped_pipes[i]->stream_res.tg->inst,
2020 hw_crtc_timing[i].pix_clk_100hz = pclk;
2021 if (dc_is_embedded_signal(
2022 grouped_pipes[i]->stream->signal)) {
/* Embedded panel keeps its own clock; DTO = pix_clk / dp_ref_clk. */
2025 phase[i] = embedded_pix_clk_100hz*100;
2026 modulo[i] = dp_ref_clk_100hz*100;
/* Other pipes scale by the embedded panel's frame size ratio. */
2029 phase[i] = (uint64_t)embedded_pix_clk_100hz*
2030 hw_crtc_timing[i].h_total*
2031 hw_crtc_timing[i].v_total;
2032 phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2033 modulo[i] = (uint64_t)dp_ref_clk_100hz*
/* Reduce so phase/modulo fit in the 32-bit DTO registers. */
2037 if (reduceSizeAndFraction(&phase[i],
2038 &modulo[i], true) == false) {
2040 * this will help to stop reporting
2041 * this timing synchronizable
2043 DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2044 grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
2049 for (i = 0; i < group_size; i++) {
2050 if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2051 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2052 dc->res_pool->dp_clock_source,
2053 grouped_pipes[i]->stream_res.tg->inst,
2054 phase[i], modulo[i]);
/* Read back the programmed clock and record the effective pixel clock. */
2055 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2056 dc->res_pool->dp_clock_source,
2057 grouped_pipes[i]->stream_res.tg->inst, &pclk);
2058 grouped_pipes[i]->stream->timing.pix_clk_100hz =
2059 pclk*get_clock_divider(grouped_pipes[i], false);
/*
 * dcn10_enable_vblanks_synchronization - align vblanks of a group of pipes.
 *
 * Temporarily doubles the DPG vertical size on the slave pipes (so the
 * align operation has headroom), aligns DP DTO pixel clocks, then aligns
 * each slave TG's vblank to the master's and restores the DPG dimensions.
 */
2069 void dcn10_enable_vblanks_synchronization(
2073 struct pipe_ctx *grouped_pipes[])
2075 struct dc_context *dc_ctx = dc->ctx;
2076 struct output_pixel_processor *opp;
2077 struct timing_generator *tg;
2078 int i, width, height, master;
/* Enlarge slave DPG windows before shifting their timing. */
2080 for (i = 1; i < group_size; i++) {
2081 opp = grouped_pipes[i]->stream_res.opp;
2082 tg = grouped_pipes[i]->stream_res.tg;
2083 tg->funcs->get_otg_active_size(tg, &width, &height);
2084 if (opp->funcs->opp_program_dpg_dimensions)
2085 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Reset sync bookkeeping on every stream in the group. */
2088 for (i = 0; i < group_size; i++) {
2089 if (grouped_pipes[i]->stream == NULL)
2091 grouped_pipes[i]->stream->vblank_synchronized = false;
2092 grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2095 DC_SYNC_INFO("Aligning DP DTOs\n");
2097 master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2099 DC_SYNC_INFO("Synchronizing VBlanks\n");
2102 for (i = 0; i < group_size; i++) {
2103 if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2104 grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2105 grouped_pipes[master]->stream_res.tg,
2106 grouped_pipes[i]->stream_res.tg,
2107 grouped_pipes[master]->stream->timing.pix_clk_100hz,
2108 grouped_pipes[i]->stream->timing.pix_clk_100hz,
2109 get_clock_divider(grouped_pipes[master], false),
2110 get_clock_divider(grouped_pipes[i], false));
2111 grouped_pipes[i]->stream->vblank_synchronized = true;
2113 grouped_pipes[master]->stream->vblank_synchronized = true;
2114 DC_SYNC_INFO("Sync complete\n");
/* Restore the original DPG dimensions on the slave pipes. */
2117 for (i = 1; i < group_size; i++) {
2118 opp = grouped_pipes[i]->stream_res.opp;
2119 tg = grouped_pipes[i]->stream_res.tg;
2120 tg->funcs->get_otg_active_size(tg, &width, &height);
2121 if (opp->funcs->opp_program_dpg_dimensions)
2122 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
/*
 * dcn10_enable_timing_synchronization - synchronize a group of TGs via the
 * OTG reset trigger (GSL).
 *
 * Slaves arm a reset trigger referenced to pipe 0's TG, the code waits for
 * one slave's trigger to fire (all are synchronized once any fires), then
 * disarms the triggers. DPG dimensions on slaves are doubled for the
 * duration, as in vblank synchronization, and restored afterwards.
 */
2126 void dcn10_enable_timing_synchronization(
2130 struct pipe_ctx *grouped_pipes[])
2132 struct dc_context *dc_ctx = dc->ctx;
2133 struct output_pixel_processor *opp;
2134 struct timing_generator *tg;
2135 int i, width, height;
2137 DC_SYNC_INFO("Setting up OTG reset trigger\n");
2139 for (i = 1; i < group_size; i++) {
2140 opp = grouped_pipes[i]->stream_res.opp;
2141 tg = grouped_pipes[i]->stream_res.tg;
2142 tg->funcs->get_otg_active_size(tg, &width, &height);
2143 if (opp->funcs->opp_program_dpg_dimensions)
2144 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Reset sync flags; skip entries with no stream attached. */
2147 for (i = 0; i < group_size; i++) {
2148 if (grouped_pipes[i]->stream == NULL)
2150 grouped_pipes[i]->stream->vblank_synchronized = false;
/* Arm each slave's reset trigger against the group leader (pipe 0). */
2153 for (i = 1; i < group_size; i++)
2154 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2155 grouped_pipes[i]->stream_res.tg,
2156 grouped_pipes[0]->stream_res.tg->inst);
2158 DC_SYNC_INFO("Waiting for trigger\n");
2160 /* Need to get only check 1 pipe for having reset as all the others are
2161 * synchronized. Look at last pipe programmed to reset.
2164 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2165 for (i = 1; i < group_size; i++)
2166 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2167 grouped_pipes[i]->stream_res.tg);
/* Restore the slaves' original DPG dimensions. */
2169 for (i = 1; i < group_size; i++) {
2170 opp = grouped_pipes[i]->stream_res.opp;
2171 tg = grouped_pipes[i]->stream_res.tg;
2172 tg->funcs->get_otg_active_size(tg, &width, &height);
2173 if (opp->funcs->opp_program_dpg_dimensions)
2174 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2177 DC_SYNC_INFO("Sync complete\n");
/*
 * Arm a per-frame CRTC position reset on every pipe in the group, then block
 * until each pipe's reset trigger has fired.  Used for multi-display
 * synchronization where each CRTC realigns its position every frame.
 */
void dcn10_enable_per_frame_crtc_position_reset(
	struct dc *dc,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i;

	DC_SYNC_INFO("Setting up\n");
	for (i = 0; i < group_size; i++)
		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
					grouped_pipes[i]->stream_res.tg,
					/* NOTE(review): source OTG instance argument was elided in
					 * this view; taken from the stream's triggered_crtc_reset
					 * event source — confirm against upstream. */
					grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
					&grouped_pipes[i]->stream->triggered_crtc_reset);

	DC_SYNC_INFO("Waiting for trigger\n");

	for (i = 0; i < group_size; i++)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);

	DC_SYNC_INFO("Multi-display sync is complete\n");
}
/*
 * Read the MMHUB system-aperture registers and convert them into byte
 * addresses for later programming into the HUBP.
 *
 * The default-address register holds a physical page number, hence the
 * << 12 (4 KiB pages).  The low/high aperture registers hold the upper bits
 * of a logical address, hence the << 18 — NOTE(review): shift widths are
 * taken from the register layout implied here; confirm against the ASIC
 * register spec.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* page number -> byte address; aperture bounds -> byte addresses. */
	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
/* Temporary read settings, future will get values from kmd directly */
/*
 * Read the GPUVM context-0 page-table registers (base, start, end, fault
 * default) from MMHUB and translate the page-table base from UMA space into
 * the address space DCN expects.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
/*
 * Snapshot the system-aperture and GPUVM context-0 settings from MMHUB and
 * mirror them into the given pipe's HUBP so DCN page-table walks match the
 * rest of the GPU.
 */
void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
	struct vm_system_aperture_param apt = { {{ 0 } } };
	struct vm_context0_param vm0 = { { { 0 } } };

	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);

	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
}
/*
 * Power up and clock a plane's pipe hardware (HUBP + OPP) before it is
 * programmed.  The ordering here matters: undo the DEGVIDCN10_253 workaround,
 * power on the plane, enable the HUBP DCHUB clock, then the OPP pipe clock.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Re-enable the flip interrupt on the top pipe if the plane asked for it. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
}
/*
 * Program the DPP gamut-remap (color temperature) matrix for a pipe.
 * The stream-level matrix takes priority over the plane-level matrix;
 * if neither is enabled the DPP is put in bypass.
 */
void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
	int i = 0;
	struct dpp_grph_csc_adjustment adjust;
	memset(&adjust, 0, sizeof(adjust));
	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;


	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
		/* Stream-level remap wins. */
		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			adjust.temperature_matrix[i] =
				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
	} else if (pipe_ctx->plane_state &&
		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
		/* Fall back to the plane-level remap. */
		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			adjust.temperature_matrix[i] =
				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
	}

	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
}
/*
 * Returns true when the rear-plane MPO brightness fix must be applied:
 * this pipe holds a rear plane (layer_index > 0) in an RGB colorspace and
 * the front-most MPO plane (layer_index == 0) is present and not hidden.
 */
static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
{
	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
		if (pipe_ctx->top_pipe) {
			struct pipe_ctx *top = pipe_ctx->top_pipe;

			while (top->top_pipe)
				top = top->top_pipe; // Traverse to top pipe_ctx
			if (top->plane_state && top->plane_state->layer_index == 0)
				return true; // Front MPO plane not hidden
		}
	}
	return false;
}
/*
 * Program the output CSC with the RGB bias entries (matrix[3/7/11]) forced
 * to zero, then restore the caller's matrix.  This is the rear-plane half of
 * the DCN1 MPO brightness workaround (see dcn10_program_output_csc).
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	// Override rear plane RGB bias to fix MPO brightness
	uint16_t rgb_bias = matrix[3];

	matrix[3] = 0;
	matrix[7] = 0;
	matrix[11] = 0;
	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);

	// Restore the original bias so the caller's matrix is left unmodified
	matrix[3] = rgb_bias;
	matrix[7] = rgb_bias;
	matrix[11] = rgb_bias;
}
/*
 * Program the output color-space conversion for a pipe.  When the stream
 * supplies an adjustment matrix it is programmed into the DPP OCSC —
 * with a workaround for rear MPO planes in RGB colorspaces — otherwise the
 * default matrix for the colorspace is used.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {

			/* MPO is broken with RGB colorspaces when OCSC matrix
			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
			 * Blending adds offsets from front + rear to rear plane
			 *
			 * Fix is to set RGB bias to 0 on rear plane, top plane
			 * black value pixels add offset instead of rear + front
			 */

			int16_t rgb_bias = matrix[3];
			// matrix[3/7/11] are all the same offset value

			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
			} else {
				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
			}
		}
	} else {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
	}
}
/*
 * Program the per-plane DPP state: input CSC (pixel format, expansion mode,
 * input color matrix / colorspace) and the prescale bias-and-scale values
 * derived from the plane state.
 */
static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
	struct dc_bias_and_scale bns_params = {0};

	// program the input csc
	dpp->funcs->dpp_setup(dpp,
			plane_state->format,
			EXPANSION_MODE_ZERO,
			plane_state->input_csc_color_matrix,
			plane_state->color_space,
			NULL); /* NOTE(review): trailing arg elided in this view; default CNV values — confirm */

	//set scale and bias registers
	build_prescale_params(&bns_params, plane_state);
	if (dpp->funcs->dpp_program_bias_and_scale)
		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
2430 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2432 struct mpc *mpc = dc->res_pool->mpc;
2434 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2435 get_hdr_visual_confirm_color(pipe_ctx, color);
2436 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2437 get_surface_visual_confirm_color(pipe_ctx, color);
2438 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2439 get_surface_tile_visual_confirm_color(pipe_ctx, color);
2441 color_space_to_black_color(
2442 dc, pipe_ctx->stream->output_color_space, color);
2444 if (mpc->funcs->set_bg_color)
2445 mpc->funcs->set_bg_color(mpc, color, mpcc_id);
/*
 * (Re)program the MPCC blending configuration for a pipe and, on a full
 * update, rebuild the pipe's position in the MPC tree (remove any MPCC the
 * DPP was using, then insert the plane fresh).
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {{0}};
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	/* Per-pixel alpha only applies when there is a bottom pipe to blend with. */
	if (per_pixel_alpha)
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
	else
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/* DCN1.0 has output CM before MPC which seems to screw with
	 * pre-multiplied alpha.
	 */
	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
			pipe_ctx->stream->output_color_space)
					&& per_pixel_alpha;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);

	ASSERT(new_mpcc != NULL);
	/* Record the OPP/MPCC routing on the HUBP for later teardown. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2522 static void update_scaler(struct pipe_ctx *pipe_ctx)
2524 bool per_pixel_alpha =
2525 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2527 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2528 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2529 /* scaler configuration */
2530 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2531 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
/*
 * Core per-plane programming path: updates DPP clock dividers, HUBP DLG/TTU
 * registers, input CSC, MPCC blending, scaler, viewport, cursor, gamut remap,
 * output CSC and the surface configuration — each guarded by the plane's
 * update flags so only the affected hardware is touched.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp, change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn_validate_bandwidth calculate
		 * dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 */
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Reprogram cursor state if a cursor surface is attached. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	/* Unblank only if something in this pipe tree is actually visible. */
	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
/*
 * Blank or unblank a pipe's pixel data.  Programs the OTG blank color for
 * the stream's colorspace first, then either unblanks and restores the ABM
 * level, or disables ABM and blanks after waiting for VBLANK.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* Unblank immediately and restore the requested ABM level. */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		/* Disable ABM, then blank at the next VBLANK to avoid tearing. */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
/*
 * Program the DPP HDR multiplier from the plane's fixed-point hdr_mult.
 * The hardware takes a signed custom float with 6 exponent / 12 mantissa
 * bits; a zero multiplier falls back to the 1.0 default (0x1f000).
 */
void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
	struct custom_float_format fmt;

	fmt.exponenta_bits = 6;
	fmt.mantissa_bits = 12;
	fmt.sign = true;


	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);

	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
			pipe_ctx->plane_res.dpp, hw_mult);
}
/*
 * Program a single pipe: top pipes get their global sync / VTG / blank state
 * set first, then the plane path (HUBP+DPP), the HDR multiplier, and the
 * input/output transfer functions as dictated by the plane's update flags.
 */
void dcn10_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				pipe_ctx->pipe_dlg_param.vready_offset,
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	/* Power/clock the plane hardware only on a full update. */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
/*
 * Block until every enabled top pipe has gone through VBLANK and back into
 * VACTIVE, guaranteeing a VUPDATE has occurred since the last programming.
 */
void dcn10_wait_for_pending_cleared(struct dc *dc,
		struct dc_state *context)
{
	struct pipe_ctx *pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only wait for top pipe's tg pending bit
		 * Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
		    !tg->funcs->is_tg_enabled(tg))
			continue;

		/*
		 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
		 * For some reason waiting for OTG_UPDATE_PENDING cleared
		 * seems to not trigger the update right away, and if we
		 * lock again before VUPDATE then we don't get a separated
		 * operation.
		 */
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
	}
}
/*
 * Post-unlock front-end cleanup: apply the false-OTG-underflow workaround to
 * planeless streams, tear down planes flagged for disable, let bandwidth be
 * re-optimized once if anything was disabled, and apply the DEGVIDCN10_254
 * watermark workaround when required.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			/* Streams with no planes can hit a false underflow. */
			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* Optimize bandwidth at most once if any pipe was disabled. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
/*
 * Workaround: disable HUBBUB self-refresh (stutter) whenever any active
 * stream uses HW frame-packed stereo timing.
 */
static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
{
	int i;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->timing.timing_3d_format
			== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
			/*
			 * Disable stutter
			 */
			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
			break;
		}
	}
}
/*
 * Raise clocks/watermarks ahead of a mode or plane change: update clocks
 * (non-FPGA only), program safe watermarks, apply the stereo frame-pack
 * workaround, and optionally re-report watermark ranges to pplib.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		/* Raise clocks before programming; not lowering yet. */
		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				false);
	}

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
/*
 * Lower clocks/watermarks after programming has settled — the counterpart to
 * dcn10_prepare_bandwidth (here update_clocks may decrease clocks).
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		/* Allow clocks to drop now that programming is complete. */
		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				true);
	}

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
2956 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
2957 int num_pipes, struct dc_crtc_timing_adjust adjust)
2960 struct drr_params params = {0};
2961 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2962 unsigned int event_triggers = 0x800;
2963 // Note DRR trigger events are generated regardless of whether num frames met.
2964 unsigned int num_frames = 2;
2966 params.vertical_total_max = adjust.v_total_max;
2967 params.vertical_total_min = adjust.v_total_min;
2968 params.vertical_total_mid = adjust.v_total_mid;
2969 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
2970 /* TODO: If multiple pipes are to be supported, you need
2971 * some GSL stuff. Static screen triggers may be programmed differently
2974 for (i = 0; i < num_pipes; i++) {
2975 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2976 pipe_ctx[i]->stream_res.tg, ¶ms);
2977 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
2978 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
2979 pipe_ctx[i]->stream_res.tg,
2980 event_triggers, num_frames);
/*
 * Read the current CRTC scanout position from each pipe's timing generator.
 * Note: with num_pipes > 1 the same *position is overwritten each iteration,
 * so only the last pipe's position survives (see TODO).
 */
void dcn10_get_position(struct pipe_ctx **pipe_ctx,
		int num_pipes,
		struct crtc_position *position)
{
	int i = 0;

	/* TODO: handle pipes > 1
	 */
	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
}
/*
 * Translate the generic static-screen trigger flags into the OTG trigger
 * bitmask and program it, with the frame-count threshold, on every pipe.
 * NOTE(review): the individual trigger bit values were elided in this view
 * and are restored from the DCN10 OTG event mapping — confirm against OTC
 * register definitions.
 */
void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
		int num_pipes, const struct dc_static_screen_params *params)
{
	unsigned int i;
	unsigned int triggers = 0;

	if (params->triggers.surface_update)
		triggers |= 0x80;
	if (params->triggers.cursor_update)
		triggers |= 0x2;
	if (params->triggers.force_trigger)
		triggers |= 0x1;

	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->
			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
					triggers, params->num_frames);
}
/*
 * Derive CRTC stereo flags from a stream's 3D timing format and view format.
 * Stereo programming only occurs for frame-sequential output on a genuinely
 * stereo timing; frame-alternate formats through passive DP converters also
 * disable the DP stereo sync signal.
 */
static void dcn10_config_stereo_parameters(
		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
{
	enum view_3d_format view_format = stream->view_format;
	enum dc_timing_3d_format timing_3d_format =\
			stream->timing.timing_3d_format;
	bool non_stereo_timing = false;

	/* SBS / TAB carry both eyes inside one frame, so the timing itself
	 * is not stereo from the CRTC's point of view. */
	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
		non_stereo_timing = true;

	if (non_stereo_timing == false &&
		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {

		flags->PROGRAM_STEREO         = 1;
		flags->PROGRAM_POLARITY       = 1;
		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
			enum display_dongle_type dongle = \
					stream->link->ddc->dongle_type;
			/* Passive converters cannot relay the stereo sync. */
			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
				flags->DISABLE_STEREO_DP_SYNC = 1;
		}
		flags->RIGHT_EYE_POLARITY =\
				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
			flags->FRAME_PACKED = 1;
	}

	return;
}
/*
 * Apply stereo configuration for a pipe: compute the stereo flags, drive the
 * sideband stereo GPIO when required, then program the OPP and OTG stereo
 * state.
 */
void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
{
	struct crtc_stereo_flags flags = { 0 };
	struct dc_stream_state *stream = pipe_ctx->stream;

	dcn10_config_stereo_parameters(stream, &flags);

	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
		/* Try asserting the stereo GPIO; fall back to deasserted on failure. */
		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
	} else {
		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
	}

	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
		pipe_ctx->stream_res.opp,
		flags.PROGRAM_STEREO == 1,
		&stream->timing);

	pipe_ctx->stream_res.tg->funcs->program_stereo(
		pipe_ctx->stream_res.tg,
		&stream->timing,
		&flags);

	return;
}
/*
 * Find the HUBP in @res_pool whose hardware instance equals @mpcc_inst.
 * Returns NULL (after asserting) if no such HUBP exists.
 */
static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
{
	int i;

	for (i = 0; i < res_pool->pipe_count; i++) {
		if (res_pool->hubps[i]->inst == mpcc_inst)
			return res_pool->hubps[i];
	}
	ASSERT(false);
	return NULL;
}
/*
 * For every MPCC the pipe's OPP has flagged as disconnect-pending, wait for
 * the MPCC to go idle, clear the pending flag, and blank the matching HUBP.
 */
void dcn10_wait_for_mpcc_disconnect(
		struct dc *dc,
		struct resource_pool *res_pool,
		struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	int mpcc_inst;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Nothing to wait on if the pipe has no OPP assigned. */
	if (!pipe_ctx->stream_res.opp)
		return;

	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);

			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
			hubp->funcs->set_blank(hubp, true);
		}
	}

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

}
/*
 * No-op display power-gating stub for DCN10 (gating is handled elsewhere);
 * always reports success so generic callers proceed normally.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3131 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3133 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3134 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3136 struct dc *dc = plane_state->ctx->dc;
3138 if (plane_state == NULL)
3141 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3142 pipe_ctx->plane_res.hubp);
3144 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3147 plane_state->status.current_address = plane_state->status.requested_address;
3149 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3150 tg->funcs->is_stereo_left_eye) {
3151 plane_state->status.is_right_eye =
3152 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3155 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3156 struct dce_hwseq *hwseq = dc->hwseq;
3157 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3158 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3160 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3161 struct hubbub *hubbub = dc->res_pool->hubbub;
3163 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3164 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
/* Forward DCHUB initialization data to the HUBBUB, which owns this
 * programming sequence on DCN. */
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;

	/* In DCN, this programming sequence is owned by the hubbub */
	hubbub->funcs->update_dchub(hubbub, dh_data);
}
/*
 * Decide whether this pipe may disable its HW cursor: true when a visible
 * plane above fully contains this pipe's recout (the upper plane will draw
 * the cursor), or when this plane is scaled and any upper plane exists.
 */
static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *test_pipe;
	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
	const struct rect *r1 = &scl_data->recout, *r2;
	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
	int cur_layer = pipe_ctx->plane_state->layer_index;
	bool upper_pipe_exists = false;
	struct fixed31_32 one = dc_fixpt_from_int(1);

	/**
	 * Disable the cursor if there's another pipe above this with a
	 * plane that contains this pipe's viewport to prevent double cursor
	 * and incorrect scaling artifacts.
	 */
	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
	     test_pipe = test_pipe->top_pipe) {
		if (!test_pipe->plane_state->visible)
			continue;

		r2 = &test_pipe->plane_res.scl_data.recout;
		r2_r = r2->x + r2->width;
		r2_b = r2->y + r2->height;

		/* r1 fully inside r2: the upper plane covers us. */
		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
			return true;

		if (test_pipe->plane_state->layer_index < cur_layer)
			upper_pipe_exists = true;
	}

	// if plane scaled, assume an upper plane can handle cursor if it exists.
	if (upper_pipe_exists &&
			(scl_data->ratios.horz.value != one.value ||
			scl_data->ratios.vert.value != one.value))
		return true;

	return false;
}
/*
 * dcn10_set_cursor_position() - Program the HW cursor position for a pipe.
 *
 * Translates the stream-space cursor position into plane space (accounting
 * for plane scaling, optional source-viewport translation, negative
 * positions via hotspot shifting, and 90/180/270 degree rotation with
 * pipe-split / ODM-combine adjustments), then programs both HUBP and DPP.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	};
	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
		(pipe_ctx->bottom_pipe != NULL);
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/*
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/*
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		/* 90/270: src width/height are swapped relative to dst */
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
	} else {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;
	}

	/*
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/*
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */
	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* HW cursor is unsupported on video-progressive (underlay) surfaces */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* An upper plane may draw the cursor instead of this pipe */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	// Swap axis and mirror horizontally
	if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/*
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 * pos_cpy.y as the 180 degree rotation case below,
		 * but use pos_cpy.x as our input because we are rotating
		 * 270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			/* normalize by the smaller of the two viewport.y values */
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (pipe_split_on || odm_combine_on) {
			if (pos_cpy.x >= viewport_width + viewport_x) {
				pos_cpy.x = 2 * viewport_width
						- pos_cpy.x + 2 * viewport_x;
			} else {
				uint32_t temp_x = pos_cpy.x;

				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
				if (temp_x >= viewport_x +
					(int)hubp->curs_attr.width || pos_cpy.x
					<= (int)hubp->curs_attr.width +
					pipe_ctx->plane_state->src_rect.x) {
					pos_cpy.x = temp_x + viewport_width;
				}
			}
		} else {
			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
		}

		/*
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	/* Program both blocks; DPP needs the cursor dimensions as well */
	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
3419 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3421 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3423 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3424 pipe_ctx->plane_res.hubp, attributes);
3425 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3426 pipe_ctx->plane_res.dpp, attributes);
3429 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3431 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3432 struct fixed31_32 multiplier;
3433 struct dpp_cursor_attributes opt_attr = { 0 };
3434 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3435 struct custom_float_format fmt;
3437 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3440 fmt.exponenta_bits = 5;
3441 fmt.mantissa_bits = 10;
3444 if (sdr_white_level > 80) {
3445 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3446 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3449 opt_attr.scale = hw_scale;
3452 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3453 pipe_ctx->plane_res.dpp, &opt_attr);
3457 * apply_front_porch_workaround TODO FPGA still need?
3459 * This is a workaround for a bug that has existed since R5xx and has not been
3460 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3462 static void apply_front_porch_workaround(
3463 struct dc_crtc_timing *timing)
3465 if (timing->flags.INTERLACE == 1) {
3466 if (timing->v_front_porch < 2)
3467 timing->v_front_porch = 2;
3469 if (timing->v_front_porch < 1)
3470 timing->v_front_porch = 1;
3474 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3476 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3477 struct dc_crtc_timing patched_crtc_timing;
3478 int vesa_sync_start;
3480 int interlace_factor;
3481 int vertical_line_start;
3483 patched_crtc_timing = *dc_crtc_timing;
3484 apply_front_porch_workaround(&patched_crtc_timing);
3486 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3488 vesa_sync_start = patched_crtc_timing.v_addressable +
3489 patched_crtc_timing.v_border_bottom +
3490 patched_crtc_timing.v_front_porch;
3492 asic_blank_end = (patched_crtc_timing.v_total -
3494 patched_crtc_timing.v_border_top)
3497 vertical_line_start = asic_blank_end -
3498 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3500 return vertical_line_start;
3503 void dcn10_calc_vupdate_position(
3505 struct pipe_ctx *pipe_ctx,
3506 uint32_t *start_line,
3509 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3510 int vline_int_offset_from_vupdate =
3511 pipe_ctx->stream->periodic_interrupt0.lines_offset;
3512 int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3515 if (vline_int_offset_from_vupdate > 0)
3516 vline_int_offset_from_vupdate--;
3517 else if (vline_int_offset_from_vupdate < 0)
3518 vline_int_offset_from_vupdate++;
3520 start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3522 if (start_position >= 0)
3523 *start_line = start_position;
3525 *start_line = dc_crtc_timing->v_total + start_position - 1;
3527 *end_line = *start_line + 2;
3529 if (*end_line >= dc_crtc_timing->v_total)
3533 static void dcn10_cal_vline_position(
3535 struct pipe_ctx *pipe_ctx,
3536 enum vline_select vline,
3537 uint32_t *start_line,
3540 enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3542 if (vline == VLINE0)
3543 ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3544 else if (vline == VLINE1)
3545 ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3547 switch (ref_point) {
3548 case START_V_UPDATE:
3549 dcn10_calc_vupdate_position(
3556 // Suppose to do nothing because vsync is 0;
3564 void dcn10_setup_periodic_interrupt(
3566 struct pipe_ctx *pipe_ctx,
3567 enum vline_select vline)
3569 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3571 if (vline == VLINE0) {
3572 uint32_t start_line = 0;
3573 uint32_t end_line = 0;
3575 dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3577 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3579 } else if (vline == VLINE1) {
3580 pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3582 pipe_ctx->stream->periodic_interrupt1.lines_offset);
3586 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3588 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3589 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3591 if (start_line < 0) {
3596 if (tg->funcs->setup_vertical_interrupt2)
3597 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3600 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3601 struct dc_link_settings *link_settings)
3603 struct encoder_unblank_param params = { { 0 } };
3604 struct dc_stream_state *stream = pipe_ctx->stream;
3605 struct dc_link *link = stream->link;
3606 struct dce_hwseq *hws = link->dc->hwseq;
3608 /* only 3 items below are used by unblank */
3609 params.timing = pipe_ctx->stream->timing;
3611 params.link_settings.link_rate = link_settings->link_rate;
3613 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3614 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3615 params.timing.pix_clk_100hz /= 2;
3616 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms);
3619 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3620 hws->funcs.edp_backlight_control(link, true);
3624 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3625 const uint8_t *custom_sdp_message,
3626 unsigned int sdp_message_size)
3628 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3629 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3630 pipe_ctx->stream_res.stream_enc,
3635 enum dc_status dcn10_set_clock(struct dc *dc,
3636 enum dc_clock_type clock_type,
3640 struct dc_state *context = dc->current_state;
3641 struct dc_clock_config clock_cfg = {0};
3642 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3644 if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3645 return DC_FAIL_UNSUPPORTED_1;
3647 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3648 context, clock_type, &clock_cfg);
3650 if (clk_khz > clock_cfg.max_clock_khz)
3651 return DC_FAIL_CLK_EXCEED_MAX;
3653 if (clk_khz < clock_cfg.min_clock_khz)
3654 return DC_FAIL_CLK_BELOW_MIN;
3656 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3657 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3659 /*update internal request clock for update clock use*/
3660 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3661 current_clocks->dispclk_khz = clk_khz;
3662 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3663 current_clocks->dppclk_khz = clk_khz;
3665 return DC_ERROR_UNEXPECTED;
3667 if (dc->clk_mgr->funcs->update_clocks)
3668 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3674 void dcn10_get_clock(struct dc *dc,
3675 enum dc_clock_type clock_type,
3676 struct dc_clock_config *clock_cfg)
3678 struct dc_state *context = dc->current_state;
3680 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3681 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3685 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3687 struct resource_pool *pool = dc->res_pool;
3690 for (i = 0; i < pool->pipe_count; i++) {
3691 struct hubp *hubp = pool->hubps[i];
3692 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3694 hubp->funcs->hubp_read_state(hubp);
3697 dcc_en_bits[i] = s->dcc_en ? 1 : 0;