2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
54 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dce/dmub_outbox.h"
57 #include "inc/dc_link_dp.h"
/* Logger init is a no-op in this file; DTN logging goes through dc_ctx/log_ctx. */
59 #define DC_LOGGER_INIT(logger)
/* Expand a register field into its (shift, mask) pair from the hwseq tables;
 * relies on a local `hws` (struct dce_hwseq *) being in scope at the use site. */
67 #define FN(reg_name, field_name) \
68 hws->shifts->field_name, hws->masks->field_name
70 /*print is 17 wide, first two characters are spaces*/
/* Print a refclk cycle count as microseconds; needs locals dc_ctx and log_ctx. */
71 #define DTN_INFO_MICRO_SEC(ref_cycle) \
72 print_microsec(dc_ctx, log_ctx, ref_cycle)
/* Number of points programmed into the HW gamma LUT. */
74 #define GAMMA_HW_POINTS_NUM 256
/* PGFSM power-status register values: 0 = powered on, 2 = powered off. */
76 #define PGFSM_POWER_ON 0
77 #define PGFSM_POWER_OFF 2
/* Convert a DCHUB reference-clock cycle count to microseconds and log it
 * right-aligned in a 17-character-wide column (see DTN_INFO_MICRO_SEC). */
79 void print_microsec(struct dc_context *dc_ctx,
80 struct dc_log_buffer_ctx *log_ctx,
/* DCHUB refclk in MHz; assumes dchub_ref_clock_inKhz is non-zero — TODO confirm callers. */
83 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
/* Scale factor to keep three fractional digits without floating point. */
84 static const unsigned int frac = 1000;
/* Microseconds x1000; integer math only (kernel context, no FP). */
85 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
87 DTN_INFO(" %11d.%03d",
/* Lock or unlock the timing generators of all active top pipes in `context`.
 * Locking the top pipe covers its bottom (split) pipes, so non-top pipes and
 * disabled pipes are skipped to avoid redundant (un)locking. */
92 void dcn10_lock_all_pipes(struct dc *dc,
93 struct dc_state *context,
96 struct pipe_ctx *pipe_ctx;
97 struct timing_generator *tg;
100 for (i = 0; i < dc->res_pool->pipe_count; i++) {
101 pipe_ctx = &context->res_ctx.pipe_ctx[i];
102 tg = pipe_ctx->stream_res.tg;
105 * Only lock the top pipe's tg to prevent redundant
106 * (un)locking. Also skip if pipe is disabled.
/* Skip: not a top pipe, no stream/plane attached, or TG not enabled. */
108 if (pipe_ctx->top_pipe ||
109 !pipe_ctx->stream || !pipe_ctx->plane_state ||
110 !tg->funcs->is_tg_enabled(tg))
114 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
116 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
/* Dump MPC and DPP CRC result registers to the debug log, when those
 * registers exist on this ASIC (REG() yields 0 for absent registers). */
120 static void log_mpc_crc(struct dc *dc,
121 struct dc_log_buffer_ctx *log_ctx)
123 struct dc_context *dc_ctx = dc->ctx;
124 struct dce_hwseq *hws = dc->hwseq;
126 if (REG(MPC_CRC_RESULT_GB))
127 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
128 REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
129 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
130 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
131 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
/* Read the four HUBBUB watermark sets from hardware and log each watermark
 * (in microseconds) via DTN_INFO_MICRO_SEC. */
134 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
136 struct dc_context *dc_ctx = dc->ctx;
137 struct dcn_hubbub_wm wm;
140 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
141 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
143 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
144 " sr_enter sr_exit dram_clk_change\n");
/* Four watermark sets (A-D) — fixed count in struct dcn_hubbub_wm. */
146 for (i = 0; i < 4; i++) {
147 struct dcn_hubbub_wm_set *s;
150 DTN_INFO("WM_Set[%d]:", s->wm_set);
151 DTN_INFO_MICRO_SEC(s->data_urgent);
152 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
153 DTN_INFO_MICRO_SEC(s->sr_enter);
154 DTN_INFO_MICRO_SEC(s->sr_exit);
/* "chanage" is the actual (misspelled) field name in struct dcn_hubbub_wm_set;
 * do not "fix" it here without renaming the struct member. */
155 DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
/* Snapshot and log per-HUBP hardware state for every pipe: the general HUBP
 * registers, then the RQ (request), DLG (delay), and TTU (time-to-underflow)
 * register groups, one table per group. */
162 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
164 struct dc_context *dc_ctx = dc->ctx;
165 struct resource_pool *pool = dc->res_pool;
169 "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
170 for (i = 0; i < pool->pipe_count; i++) {
171 struct hubp *hubp = pool->hubps[i];
172 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
/* Latch current register values into the cached state struct before printing. */
174 hubp->funcs->hubp_read_state(hubp);
177 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
190 s->underflow_status);
191 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
192 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
193 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
198 DTN_INFO("\n=========RQ========\n");
199 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
200 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
201 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
202 for (i = 0; i < pool->pipe_count; i++) {
203 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
204 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
207 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
208 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
209 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
210 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
211 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
212 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
213 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
214 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
215 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
216 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
219 DTN_INFO("========DLG========\n");
220 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
221 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
222 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
223 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
224 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
225 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
226 " x_rp_dlay x_rr_sfl\n");
227 for (i = 0; i < pool->pipe_count; i++) {
228 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
229 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
232 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
/* NOTE(review): "% 8xh" below has the space inside the conversion spec rather
 * than between columns ("%8xh"); output still prints but column alignment of
 * this one field differs — confirm against the golden log format. */
233 "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
234 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
235 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
236 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
237 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
238 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
239 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
240 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
241 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
242 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
243 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
244 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
245 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
246 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
247 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
248 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
249 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
250 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
251 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
252 dlg_regs->xfc_reg_remote_surface_flip_latency);
255 DTN_INFO("========TTU========\n");
256 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
257 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
258 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
259 for (i = 0; i < pool->pipe_count; i++) {
260 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
261 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
264 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
265 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
266 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
267 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
268 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
269 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
270 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
271 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
/* Top-level debug dump of DCN10 hardware state: HUBBUB watermarks, HUBP state,
 * DPP gamma/gamut setup, MPCC tree, OTG timing, DSC, stream/link encoders,
 * calculated clocks, and finally the MPC CRCs. Read-mostly, but it clears the
 * OTG underflow sticky bit as a debug aid (see comment below). */
276 void dcn10_log_hw_state(struct dc *dc,
277 struct dc_log_buffer_ctx *log_ctx)
279 struct dc_context *dc_ctx = dc->ctx;
280 struct resource_pool *pool = dc->res_pool;
285 dcn10_log_hubbub_state(dc, log_ctx);
287 dcn10_log_hubp_states(dc, log_ctx);
289 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
290 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
291 "C31 C32 C33 C34\n");
292 for (i = 0; i < pool->pipe_count; i++) {
293 struct dpp *dpp = pool->dpps[i];
294 struct dcn_dpp_state s = {0};
296 dpp->funcs->dpp_read_state(dpp, &s);
301 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
302 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
/* Decode LUT mode enums into readable names; modes 2 and 3 (and 3/4 for
 * dgam/rgam) both map to "RAM" — presumably RAM A/RAM B banks, confirm. */
305 (s.igam_lut_mode == 0) ? "BypassFixed" :
306 ((s.igam_lut_mode == 1) ? "BypassFloat" :
307 ((s.igam_lut_mode == 2) ? "RAM" :
308 ((s.igam_lut_mode == 3) ? "RAM" :
310 (s.dgam_lut_mode == 0) ? "Bypass" :
311 ((s.dgam_lut_mode == 1) ? "sRGB" :
312 ((s.dgam_lut_mode == 2) ? "Ycc" :
313 ((s.dgam_lut_mode == 3) ? "RAM" :
314 ((s.dgam_lut_mode == 4) ? "RAM" :
316 (s.rgam_lut_mode == 0) ? "Bypass" :
317 ((s.rgam_lut_mode == 1) ? "sRGB" :
318 ((s.rgam_lut_mode == 2) ? "Ycc" :
319 ((s.rgam_lut_mode == 3) ? "RAM" :
320 ((s.rgam_lut_mode == 4) ? "RAM" :
323 s.gamut_remap_c11_c12,
324 s.gamut_remap_c13_c14,
325 s.gamut_remap_c21_c22,
326 s.gamut_remap_c23_c24,
327 s.gamut_remap_c31_c32,
328 s.gamut_remap_c33_c34);
333 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
334 for (i = 0; i < pool->pipe_count; i++) {
335 struct mpcc_state s = {0};
337 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
339 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
340 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
341 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
346 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
348 for (i = 0; i < pool->timing_generator_count; i++) {
349 struct timing_generator *tg = pool->timing_generators[i];
350 struct dcn_otg_state s = {0};
351 /* Read shared OTG state registers for all DCNx */
352 optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
355 * For DCN2 and greater, a register on the OPP is used to
356 * determine if the CRTC is blanked instead of the OTG. So use
357 * dpg_is_blanked() if exists, otherwise fallback on otg.
359 * TODO: Implement DCN-specific read_otg_state hooks.
361 if (pool->opps[i]->funcs->dpg_is_blanked)
362 s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
364 s.blank_enabled = tg->funcs->is_blanked(tg);
366 //only print if OTG master is enabled
367 if ((s.otg_enabled & 1) == 0)
370 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
388 s.underflow_occurred_status,
391 // Clear underflow for debug purposes
392 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
393 // This function is called only from Windows or Diags test environment, hence it's safe to clear
394 // it from here without affecting the original intent.
395 tg->funcs->clear_optc_underflow(tg);
399 // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
400 // TODO: Update golden log header to reflect this name change
401 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
402 for (i = 0; i < pool->res_cap->num_dsc; i++) {
403 struct display_stream_compressor *dsc = pool->dscs[i];
404 struct dcn_dsc_state s = {0};
406 dsc->funcs->dsc_read_state(dsc, &s);
407 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
411 s.dsc_bits_per_pixel);
416 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
417 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
418 for (i = 0; i < pool->stream_enc_count; i++) {
419 struct stream_encoder *enc = pool->stream_enc[i];
420 struct enc_state s = {0};
/* enc_read_state is optional per encoder type; skip silently when absent. */
422 if (enc->funcs->enc_read_state) {
423 enc->funcs->enc_read_state(enc, &s);
424 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
427 s.sec_gsp_pps_line_num,
428 s.vbid6_line_reference,
430 s.sec_gsp_pps_enable,
431 s.sec_stream_enable);
437 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
438 for (i = 0; i < dc->link_count; i++) {
/* NOTE(review): link_enc is dereferenced without a NULL check; with dynamic
 * link-encoder assignment it can be NULL — confirm callers guarantee it. */
439 struct link_encoder *lenc = dc->links[i]->link_enc;
441 struct link_enc_state s = {0};
443 if (lenc->funcs->read_state) {
444 lenc->funcs->read_state(lenc, &s);
445 DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
448 s.dphy_fec_ready_shadow,
449 s.dphy_fec_active_status,
450 s.dp_link_training_complete);
456 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
457 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
458 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
459 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
460 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
461 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
462 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
463 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
464 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
466 log_mpc_crc(dc, log_ctx);
/* Check-and-clear underflow status for one pipe: first the OTG (OPTC) sticky
 * bit, then the HUBP status. Clearing on detection makes each call report
 * only new underflows since the previous check. */
471 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
473 struct hubp *hubp = pipe_ctx->plane_res.hubp;
474 struct timing_generator *tg = pipe_ctx->stream_res.tg;
476 if (tg->funcs->is_optc_underflow_occurred(tg)) {
477 tg->funcs->clear_optc_underflow(tg);
481 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
482 hubp->funcs->hubp_clear_underflow(hubp);
/* Force all plane power domains on (FORCEON=1), i.e. disable PGFSM power
 * gating for DCHUBP domains 0/2/4/6 and DPP domains 1/3/5/7. */
488 void dcn10_enable_power_gating_plane(
489 struct dce_hwseq *hws,
492 bool force_on = true; /* disable power gating */
/* Even-numbered domains: DCHUBP instances. */
498 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
499 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
500 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
501 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
/* Odd-numbered domains: DPP instances. */
504 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
505 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
506 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
507 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
/* Take the display out of legacy VGA mode. No-op if none of the four VGA
 * controllers is enabled; otherwise disable all four and kick the VGA test
 * render sequence so DCHUBP timing is refreshed (see HW note below). */
510 void dcn10_disable_vga(
511 struct dce_hwseq *hws)
513 unsigned int in_vga1_mode = 0;
514 unsigned int in_vga2_mode = 0;
515 unsigned int in_vga3_mode = 0;
516 unsigned int in_vga4_mode = 0;
518 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
519 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
520 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
521 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
/* Nothing to do when no controller is in VGA mode. */
523 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
524 in_vga3_mode == 0 && in_vga4_mode == 0)
527 REG_WRITE(D1VGA_CONTROL, 0);
528 REG_WRITE(D2VGA_CONTROL, 0);
529 REG_WRITE(D3VGA_CONTROL, 0);
530 REG_WRITE(D4VGA_CONTROL, 0);
532 /* HW Engineer's Notes:
533 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
534 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
536 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
537 * VGA_TEST_ENABLE, to leave it in the same state as before.
539 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
540 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
544 * dcn10_dpp_pg_control - DPP power gate control.
546 * @hws: dce_hwseq reference.
547 * @dpp_inst: DPP instance reference.
548 * @power_on: true if we want to enable power gate, false otherwise.
550 * Enable or disable power gate in the specific DPP instance.
/* NOTE(review): per the code below, power_on == true actually CLEARS the
 * POWER_GATE bit (power_gate = 0) and waits for PGFSM_POWER_ON, i.e. it
 * powers the DPP up; the kernel-doc wording above is inverted — confirm. */
552 void dcn10_dpp_pg_control(
553 struct dce_hwseq *hws,
554 unsigned int dpp_inst,
557 uint32_t power_gate = power_on ? 0 : 1;
558 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
/* Respect the debug override; also bail on ASICs without PG registers. */
560 if (hws->ctx->dc->debug.disable_dpp_power_gate)
562 if (REG(DOMAIN1_PG_CONFIG) == 0)
/* DPP instance N maps to PG domain 2N+1; program the gate then poll status. */
567 REG_UPDATE(DOMAIN1_PG_CONFIG,
568 DOMAIN1_POWER_GATE, power_gate);
570 REG_WAIT(DOMAIN1_PG_STATUS,
571 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
575 REG_UPDATE(DOMAIN3_PG_CONFIG,
576 DOMAIN3_POWER_GATE, power_gate);
578 REG_WAIT(DOMAIN3_PG_STATUS,
579 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
583 REG_UPDATE(DOMAIN5_PG_CONFIG,
584 DOMAIN5_POWER_GATE, power_gate);
586 REG_WAIT(DOMAIN5_PG_STATUS,
587 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
591 REG_UPDATE(DOMAIN7_PG_CONFIG,
592 DOMAIN7_POWER_GATE, power_gate);
594 REG_WAIT(DOMAIN7_PG_STATUS,
595 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
605 * dcn10_hubp_pg_control - HUBP power gate control.
607 * @hws: dce_hwseq reference.
608 * @hubp_inst: HUBP instance reference.
609 * @power_on: true if we want to enable power gate, false otherwise.
611 * Enable or disable power gate in the specific HUBP instance.
/* NOTE(review): as in dcn10_dpp_pg_control, power_on == true clears the gate
 * (powers the HUBP up); the @power_on wording above is inverted — confirm. */
613 void dcn10_hubp_pg_control(
614 struct dce_hwseq *hws,
615 unsigned int hubp_inst,
618 uint32_t power_gate = power_on ? 0 : 1;
619 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
/* Respect the debug override; also bail on ASICs without PG registers. */
621 if (hws->ctx->dc->debug.disable_hubp_power_gate)
623 if (REG(DOMAIN0_PG_CONFIG) == 0)
/* HUBP instance N maps to PG domain 2N; program the gate then poll status. */
627 case 0: /* DCHUBP0 */
628 REG_UPDATE(DOMAIN0_PG_CONFIG,
629 DOMAIN0_POWER_GATE, power_gate);
631 REG_WAIT(DOMAIN0_PG_STATUS,
632 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
635 case 1: /* DCHUBP1 */
636 REG_UPDATE(DOMAIN2_PG_CONFIG,
637 DOMAIN2_POWER_GATE, power_gate);
639 REG_WAIT(DOMAIN2_PG_STATUS,
640 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
643 case 2: /* DCHUBP2 */
644 REG_UPDATE(DOMAIN4_PG_CONFIG,
645 DOMAIN4_POWER_GATE, power_gate);
647 REG_WAIT(DOMAIN4_PG_STATUS,
648 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
651 case 3: /* DCHUBP3 */
652 REG_UPDATE(DOMAIN6_PG_CONFIG,
653 DOMAIN6_POWER_GATE, power_gate);
655 REG_WAIT(DOMAIN6_PG_STATUS,
656 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
/* Un-gate the front end (DPP + HUBP) for one plane: open the IP request
 * window, power both blocks up via the optional pg_control hooks, then close
 * the window again. No-op on ASICs without DC_IP_REQUEST_CNTL. */
665 static void power_on_plane(
666 struct dce_hwseq *hws,
669 DC_LOGGER_INIT(hws->ctx->logger);
670 if (REG(DC_IP_REQUEST_CNTL)) {
/* Enable power-gating requests for the duration of the programming. */
671 REG_SET(DC_IP_REQUEST_CNTL, 0,
674 if (hws->funcs.dpp_pg_control)
675 hws->funcs.dpp_pg_control(hws, plane_id, true);
677 if (hws->funcs.hubp_pg_control)
678 hws->funcs.hubp_pg_control(hws, plane_id, true);
680 REG_SET(DC_IP_REQUEST_CNTL, 0,
683 "Un-gated front end for pipe %d\n", plane_id);
/* Revert the DEGVIDCN10_253 stutter workaround: re-blank HUBP0 and power it
 * back down. Only runs if apply_DEGVIDCN10_253_wa() previously marked the
 * workaround as applied. */
687 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
689 struct dce_hwseq *hws = dc->hwseq;
690 struct hubp *hubp = dc->res_pool->hubps[0];
692 if (!hws->wa_state.DEGVIDCN10_253_applied)
695 hubp->funcs->set_blank(hubp, true);
/* Open IP request window, gate HUBP0, close the window. */
697 REG_SET(DC_IP_REQUEST_CNTL, 0,
700 hws->funcs.hubp_pg_control(hws, 0, false);
701 REG_SET(DC_IP_REQUEST_CNTL, 0,
704 hws->wa_state.DEGVIDCN10_253_applied = false;
/* DEGVIDCN10_253 workaround: when every HUBP is power-gated, stutter cannot
 * engage; power HUBP0 back up (unblanked) so stutter works. Skipped when
 * stutter is disabled via debug option or the WA flag is not set. */
707 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
709 struct dce_hwseq *hws = dc->hwseq;
710 struct hubp *hubp = dc->res_pool->hubps[0];
713 if (dc->debug.disable_stutter)
716 if (!hws->wa.DEGVIDCN10_253)
/* Bail out if any pipe is still powered — WA only applies when all are gated. */
719 for (i = 0; i < dc->res_pool->pipe_count; i++) {
720 if (!dc->res_pool->hubps[i]->power_gated)
724 /* all pipe power gated, apply work around to enable stutter. */
726 REG_SET(DC_IP_REQUEST_CNTL, 0,
729 hws->funcs.hubp_pg_control(hws, 0, true);
730 REG_SET(DC_IP_REQUEST_CNTL, 0,
733 hubp->funcs->set_hubp_blank_en(hubp, false);
734 hws->wa_state.DEGVIDCN10_253_applied = true;
/* Run the VBIOS golden init sequence: global DCN init followed by per-pipe
 * disable, then undo any self-refresh force-enable the command table may
 * have set (S0i3 resume workaround, see comment below). */
737 void dcn10_bios_golden_init(struct dc *dc)
739 struct dce_hwseq *hws = dc->hwseq;
740 struct dc_bios *bp = dc->ctx->dc_bios;
742 bool allow_self_fresh_force_enable = true;
/* ASIC-specific S0i3 WA hook may fully handle golden init; if so, stop here. */
744 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
/* Capture self-refresh force-enable state BEFORE the command table runs. */
747 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
748 allow_self_fresh_force_enable =
749 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
752 /* WA for making DF sleep when idle after resume from S0i3.
753 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
754 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
755 * before calling command table and it changed to 1 after,
756 * it should be set back to 0.
759 /* initialize dcn global */
760 bp->funcs->enable_disp_power_gating(bp,
761 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
763 for (i = 0; i < dc->res_pool->pipe_count; i++) {
764 /* initialize dcn per pipe */
765 bp->funcs->enable_disp_power_gating(bp,
766 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
/* If the command table flipped the bit 0 -> 1, restore the pre-call state. */
769 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
770 if (allow_self_fresh_force_enable == false &&
771 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
772 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
773 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
/* Workaround for a spurious OPTC underflow: wait for MPCC disconnects on all
 * pipes of this stream, re-arm blank-data double buffering, then clear the
 * underflow bit — but only if it was NOT already set before we started, so a
 * pre-existing (real) underflow indication is preserved. */
777 static void false_optc_underflow_wa(
779 const struct dc_stream_state *stream,
780 struct timing_generator *tg)
785 if (!dc->hwseq->wa.false_optc_underflow)
/* Remember prior underflow state so we only clear one we caused ourselves. */
788 underflow = tg->funcs->is_optc_underflow_occurred(tg);
790 for (i = 0; i < dc->res_pool->pipe_count; i++) {
791 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
793 if (old_pipe_ctx->stream != stream)
796 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
799 if (tg->funcs->set_blank_data_double_buffer)
800 tg->funcs->set_blank_data_double_buffer(tg, true);
802 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
803 tg->funcs->clear_optc_underflow(tg);
/* Program and start the OTG/CRTC timing for a stream's top pipe: enable the
 * OPTC clock, program the pixel clock PLL, program timing, set the blank
 * color, blank the CRTC, then enable it. Child (bottom) pipes share the top
 * pipe's back end and return immediately. Returns DC_ERROR_UNEXPECTED if the
 * pixel clock or CRTC enable fails. */
806 enum dc_status dcn10_enable_stream_timing(
807 struct pipe_ctx *pipe_ctx,
808 struct dc_state *context,
811 struct dc_stream_state *stream = pipe_ctx->stream;
812 enum dc_color_space color_space;
813 struct tg_color black_color = {0};
815 /* by upper caller loop, pipe0 is parent pipe and be called first.
816 * back end is set up by for pipe0. Other children pipe share back end
817 * with pipe 0. No program is needed.
819 if (pipe_ctx->top_pipe != NULL)
822 /* TODO check if timing_changed, disable stream if timing changed */
824 /* HW program guide assume display already disable
825 * by unplug sequence. OTG assume stop.
827 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
829 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
830 pipe_ctx->clock_source,
831 &pipe_ctx->stream_res.pix_clk_params,
832 &pipe_ctx->pll_settings)) {
834 return DC_ERROR_UNEXPECTED;
837 pipe_ctx->stream_res.tg->funcs->program_timing(
838 pipe_ctx->stream_res.tg,
840 pipe_ctx->pipe_dlg_param.vready_offset,
841 pipe_ctx->pipe_dlg_param.vstartup_start,
842 pipe_ctx->pipe_dlg_param.vupdate_offset,
843 pipe_ctx->pipe_dlg_param.vupdate_width,
844 pipe_ctx->stream->signal,
847 #if 0 /* move to after enable_crtc */
848 /* TODO: OPP FMT, ABM. etc. should be done here. */
849 /* or FPGA now. instance 0 only. TODO: move to opp.c */
851 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
853 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
854 pipe_ctx->stream_res.opp,
855 &stream->bit_depth_params,
858 /* program otg blank color */
859 color_space = stream->output_color_space;
860 color_space_to_black_color(dc, color_space, &black_color);
863 * The way 420 is packed, 2 channels carry Y component, 1 channel
864 * alternate between Cb and Cr, so both channels need the pixel
867 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
868 black_color.color_r_cr = black_color.color_g_y;
870 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
871 pipe_ctx->stream_res.tg->funcs->set_blank_color(
872 pipe_ctx->stream_res.tg,
/* Blank the CRTC before enabling it; apply the false-underflow WA afterwards. */
875 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
876 !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
877 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
878 hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
879 false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
882 /* VTG is within DCHUB command block. DCFCLK is always on */
883 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
885 return DC_ERROR_UNEXPECTED;
888 /* TODO program crtc source select for non-virtual signal*/
889 /* TODO program FMT */
890 /* TODO setup link_enc */
891 /* TODO set stream attributes */
892 /* TODO program audio */
893 /* TODO enable stream if timing changed */
894 /* TODO unblank stream if DP */
/* Tear down the back end for one pipe: disable the stream/link (or just
 * audio), release the audio endpoint, and — for the top pipe only, which owns
 * the shared back end — disable the CRTC, OPTC clock and DRR. Finally detach
 * the stream from the pipe context. */
899 static void dcn10_reset_back_end_for_pipe(
901 struct pipe_ctx *pipe_ctx,
902 struct dc_state *context)
905 struct dc_link *link;
906 DC_LOGGER_INIT(dc->ctx->logger);
/* No stream encoder means nothing was ever enabled on this back end. */
907 if (pipe_ctx->stream_res.stream_enc == NULL) {
908 pipe_ctx->stream = NULL;
912 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
913 link = pipe_ctx->stream->link;
914 /* DPMS may already disable or */
915 /* dpms_off status is incorrect due to fastboot
916 * feature. When system resume from S4 with second
917 * screen only, the dpms_off would be true but
918 * VBIOS lit up eDP, so check link status too.
920 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
921 core_link_disable_stream(pipe_ctx);
922 else if (pipe_ctx->stream_res.audio)
923 dc->hwss.disable_audio_stream(pipe_ctx);
925 if (pipe_ctx->stream_res.audio) {
926 /*disable az_endpoint*/
927 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
930 if (dc->caps.dynamic_audio == true) {
931 /*we have to dynamic arbitrate the audio endpoints*/
932 /*we free the resource, need reset is_audio_acquired*/
933 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
934 pipe_ctx->stream_res.audio, false);
935 pipe_ctx->stream_res.audio = NULL;
940 /* by upper caller loop, parent pipe: pipe0, will be reset last.
941 * back end share by all pipes and will be disable only when disable
944 if (pipe_ctx->top_pipe == NULL) {
946 if (pipe_ctx->stream_res.abm)
947 dc->hwss.set_abm_immediate_disable(pipe_ctx);
949 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
951 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
952 if (pipe_ctx->stream_res.tg->funcs->set_drr)
953 pipe_ctx->stream_res.tg->funcs->set_drr(
954 pipe_ctx->stream_res.tg, NULL);
/* Only log when this pipe_ctx belongs to the current state. */
957 for (i = 0; i < dc->res_pool->pipe_count; i++)
958 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
961 if (i == dc->res_pool->pipe_count)
964 pipe_ctx->stream = NULL;
965 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
966 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
/* Forced underflow recovery: if any HUBP reports underflow (and recovery is
 * enabled via debug option), reset all pipes with the documented sequence —
 * blank all HUBPs, global HUBBUB soft reset, HUBP disable/re-enable, release
 * soft reset, unblank. */
969 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
973 bool need_recover = true;
975 if (!dc->debug.recovery_enabled)
978 for (i = 0; i < dc->res_pool->pipe_count; i++) {
979 struct pipe_ctx *pipe_ctx =
980 &dc->current_state->res_ctx.pipe_ctx[i];
981 if (pipe_ctx != NULL) {
982 hubp = pipe_ctx->plane_res.hubp;
983 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
984 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
985 /* one pipe underflow, we will reset all the pipes*/
/* Documented HW recovery sequence — the loops below implement it in order: */
994 DCHUBP_CNTL:HUBP_BLANK_EN=1
995 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
996 DCHUBP_CNTL:HUBP_DISABLE=1
997 DCHUBP_CNTL:HUBP_DISABLE=0
998 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
999 DCSURF_PRIMARY_SURFACE_ADDRESS
1000 DCHUBP_CNTL:HUBP_BLANK_EN=0
1003 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1004 struct pipe_ctx *pipe_ctx =
1005 &dc->current_state->res_ctx.pipe_ctx[i];
1006 if (pipe_ctx != NULL) {
1007 hubp = pipe_ctx->plane_res.hubp;
1008 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1009 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1010 hubp->funcs->set_hubp_blank_en(hubp, true);
1013 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1014 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1016 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1017 struct pipe_ctx *pipe_ctx =
1018 &dc->current_state->res_ctx.pipe_ctx[i];
1019 if (pipe_ctx != NULL) {
1020 hubp = pipe_ctx->plane_res.hubp;
1021 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1022 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1023 hubp->funcs->hubp_disable_control(hubp, true);
1026 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1027 struct pipe_ctx *pipe_ctx =
1028 &dc->current_state->res_ctx.pipe_ctx[i];
1029 if (pipe_ctx != NULL) {
1030 hubp = pipe_ctx->plane_res.hubp;
1031 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
/* NOTE(review): BUG — this step is documented as HUBP_DISABLE=0 but passes
 * `true` to hubp_disable_control, leaving the HUBP disabled; it should
 * almost certainly pass `false` here. Confirm against the HW sequence. */
1032 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1033 hubp->funcs->hubp_disable_control(hubp, true);
1036 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1037 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1038 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1039 struct pipe_ctx *pipe_ctx =
1040 &dc->current_state->res_ctx.pipe_ctx[i];
1041 if (pipe_ctx != NULL) {
1042 hubp = pipe_ctx->plane_res.hubp;
1043 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
/* NOTE(review): BUG — this step is documented as HUBP_BLANK_EN=0 (unblank)
 * but passes `true` to set_hubp_blank_en, keeping pipes blanked after
 * recovery; it should almost certainly pass `false`. Confirm. */
1044 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1045 hubp->funcs->set_hubp_blank_en(hubp, true);
/* Sanity check that HUBBUB reports "allow p-state change" high; on failure,
 * optionally dump HW state, trace pipes, break to debugger, attempt forced
 * recovery, and re-verify once after recovery. */
1052 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1054 static bool should_log_hw_state; /* prevent hw state log by default */
1056 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
1059 if (should_log_hw_state)
1060 dcn10_log_hw_state(dc, NULL);
1062 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1063 BREAK_TO_DEBUGGER();
1064 if (dcn10_hw_wa_force_recovery(dc)) {
/* Recovery ran; if the condition still fails, stop for the debugger again. */
1066 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
1067 BREAK_TO_DEBUGGER();
1072 /* trigger HW to start disconnect plane from stream on the next vsync */
1073 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1075 struct dce_hwseq *hws = dc->hwseq;
1076 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1077 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1078 struct mpc *mpc = dc->res_pool->mpc;
1079 struct mpc_tree *mpc_tree_params;
1080 struct mpcc *mpcc_to_remove = NULL;
1081 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1083 mpc_tree_params = &(opp->mpc_tree_params);
1084 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
/* DPP not in this OPP's blending tree — nothing to disconnect. */
1087 if (mpcc_to_remove == NULL)
1090 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
/* Mark disconnect pending; completion is waited on at the next flip/disable. */
1092 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1094 dc->optimized_required = true;
1096 if (hubp->funcs->hubp_disconnect)
1097 hubp->funcs->hubp_disconnect(hubp);
/* Optional debug-only p-state sanity check after the disconnect request. */
1099 if (dc->debug.sanity_checks)
1100 hws->funcs.verify_allow_pstate_change_high(dc);
1104 * dcn10_plane_atomic_power_down - Power down plane components.
1106 * @dc: dc struct reference. Used to grab the hwseq.
1107 * @dpp: dpp struct reference.
1108 * @hubp: hubp struct reference.
1110 * Keep in mind that this operation requires a power gate configuration;
1111 * however, requests for switch power gate are precisely controlled to avoid
1112 * problems. For this reason, power gate request is usually disabled. This
1113 * function first needs to enable the power gate request before disabling DPP
1114 * and HUBP. Finally, it disables the power gate request again.
1116 void dcn10_plane_atomic_power_down(struct dc *dc,
1120 struct dce_hwseq *hws = dc->hwseq;
1121 DC_LOGGER_INIT(dc->ctx->logger);
/*
 * Power gating requests only take effect while DC_IP_REQUEST_CNTL is
 * enabled: enable it, gate DPP and HUBP, then disable it again below.
 */
1123 if (REG(DC_IP_REQUEST_CNTL)) {
1124 REG_SET(DC_IP_REQUEST_CNTL, 0,
1127 if (hws->funcs.dpp_pg_control)
1128 hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1130 if (hws->funcs.hubp_pg_control)
1131 hws->funcs.hubp_pg_control(hws, hubp->inst, false);
/* Leave the DPP in its reset state before closing the power gate request. */
1133 dpp->funcs->dpp_reset(dpp);
1134 REG_SET(DC_IP_REQUEST_CNTL, 0,
1137 "Power gated front end %d\n", hubp->inst);
1141 /* disable HW used by plane.
1142 * note: cannot disable until disconnect is complete
1144 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1146 struct dce_hwseq *hws = dc->hwseq;
1147 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1148 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1149 int opp_id = hubp->opp_id;
/* MPCC disconnect (requested earlier) must finish before clocks go away. */
1151 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1153 hubp->funcs->hubp_clk_cntl(hubp, false);
1155 dpp->funcs->dpp_dppclk_control(dpp, false, false);
/* Gate the OPP pipe clock only when no MPCC remains attached to this OPP. */
1157 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1158 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1159 pipe_ctx->stream_res.opp,
1162 hubp->power_gated = true;
1163 dc->optimized_required = false; /* We're powering off, no need to optimize */
1165 hws->funcs.plane_atomic_power_down(dc,
1166 pipe_ctx->plane_res.dpp,
1167 pipe_ctx->plane_res.hubp);
/* Scrub the pipe_ctx so the pipe reads back as fully unused. */
1169 pipe_ctx->stream = NULL;
1170 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1171 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1172 pipe_ctx->top_pipe = NULL;
1173 pipe_ctx->bottom_pipe = NULL;
1174 pipe_ctx->plane_state = NULL;
/* Disable a plane's front end unless it is already power gated. */
1177 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1179 struct dce_hwseq *hws = dc->hwseq;
1180 DC_LOGGER_INIT(dc->ctx->logger);
1182 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1185 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
/* Apply the DEGVIDCN10-253 hardware workaround after the plane goes down. */
1187 apply_DEGVIDCN10_253_wa(dc);
1189 DC_LOG_DC("Power down front end %d\n",
1190 pipe_ctx->pipe_idx);
/*
 * Bring every pipe to a known disabled state at init time, while preserving
 * any pipe that is carrying a seamless-boot stream programmed by VBIOS/GOP.
 */
1193 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1196 struct dce_hwseq *hws = dc->hwseq;
1197 bool can_apply_seamless_boot = false;
1199 for (i = 0; i < context->stream_count; i++) {
1200 if (context->streams[i]->apply_seamless_boot_optimization) {
1201 can_apply_seamless_boot = true;
/* First pass: blank every enabled timing generator we are allowed to touch. */
1206 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1207 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1208 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1210 /* There is assumption that pipe_ctx is not mapping irregularly
1211 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1212 * we will use the pipe, so don't disable
1214 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1217 /* Blank controller using driver code instead of
1220 if (tg->funcs->is_tg_enabled(tg)) {
1221 if (hws->funcs.init_blank != NULL) {
1222 hws->funcs.init_blank(dc, tg);
1223 tg->funcs->lock(tg);
1225 tg->funcs->lock(tg);
1226 tg->funcs->set_blank(tg, true);
1227 hwss_wait_for_blank_complete(tg);
1232 /* num_opp will be equal to number of mpcc */
1233 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1234 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1236 /* Cannot reset the MPC mux if seamless boot */
1237 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1240 dc->res_pool->mpc->funcs->mpc_init_single_inst(
1241 dc->res_pool->mpc, i);
/* Second pass: disconnect and power down each front end. */
1244 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1245 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1246 struct hubp *hubp = dc->res_pool->hubps[i];
1247 struct dpp *dpp = dc->res_pool->dpps[i];
1248 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1250 /* There is assumption that pipe_ctx is not mapping irregularly
1251 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1252 * we will use the pipe, so don't disable
1254 if (can_apply_seamless_boot &&
1255 pipe_ctx->stream != NULL &&
1256 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1257 pipe_ctx->stream_res.tg)) {
1258 // Enable double buffering for OTG_BLANK no matter if
1259 // seamless boot is enabled or not to suppress global sync
1260 // signals when OTG blanked. This is to prevent pipe from
1261 // requesting data while in PSR.
1262 tg->funcs->tg_init(tg);
1263 hubp->power_gated = true;
1267 /* Disable on the current state so the new one isn't cleared. */
1268 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1270 dpp->funcs->dpp_reset(dpp);
/* Build a minimal pipe_ctx so the disconnect/disable helpers can run. */
1272 pipe_ctx->stream_res.tg = tg;
1273 pipe_ctx->pipe_idx = i;
1275 pipe_ctx->plane_res.hubp = hubp;
1276 pipe_ctx->plane_res.dpp = dpp;
1277 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1278 hubp->mpcc_id = dpp->inst;
1279 hubp->opp_id = OPP_ID_INVALID;
1280 hubp->power_gated = false;
1282 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1283 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1284 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1285 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1287 hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1289 if (tg->funcs->is_tg_enabled(tg))
1290 tg->funcs->unlock(tg);
1292 dc->hwss.disable_plane(dc, pipe_ctx);
1294 pipe_ctx->stream_res.tg = NULL;
1295 pipe_ctx->plane_res.hubp = NULL;
1297 tg->funcs->tg_init(tg);
/*
 * One-time hardware init: clocks, power gating, link encoders, audio,
 * ABM/DMCU, and (when taking over from VBIOS) powering down unused pipes.
 */
1301 void dcn10_init_hw(struct dc *dc)
1304 struct abm *abm = dc->res_pool->abm;
1305 struct dmcu *dmcu = dc->res_pool->dmcu;
1306 struct dce_hwseq *hws = dc->hwseq;
1307 struct dc_bios *dcb = dc->ctx->dc_bios;
1308 struct resource_pool *res_pool = dc->res_pool;
1309 uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1310 bool is_optimized_init_done = false;
1312 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1313 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1315 // Initialize the dccg
1316 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1317 dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
/* FPGA platforms take a reduced init path. */
1319 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1321 REG_WRITE(REFCLK_CNTL, 0);
1322 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1323 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1325 if (!dc->debug.disable_clock_gate) {
1326 /* enable all DCN clock gating */
1327 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1329 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1331 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1334 //Enable ability to power gate / don't force power on permanently
1335 if (hws->funcs.enable_power_gating_plane)
1336 hws->funcs.enable_power_gating_plane(hws, true);
1341 if (!dcb->funcs->is_accelerated_mode(dcb))
1342 hws->funcs.disable_vga(dc->hwseq);
1344 hws->funcs.bios_golden_init(dc);
/* Derive reference clocks from VBIOS firmware info when it is valid. */
1346 if (dc->ctx->dc_bios->fw_info_valid) {
1347 res_pool->ref_clocks.xtalin_clock_inKhz =
1348 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1350 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1351 if (res_pool->dccg && res_pool->hubbub) {
1353 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1354 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1355 &res_pool->ref_clocks.dccg_ref_clock_inKhz);
1357 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1358 res_pool->ref_clocks.dccg_ref_clock_inKhz,
1359 &res_pool->ref_clocks.dchub_ref_clock_inKhz);
1361 // Not all ASICs have DCCG sw component
1362 res_pool->ref_clocks.dccg_ref_clock_inKhz =
1363 res_pool->ref_clocks.xtalin_clock_inKhz;
1364 res_pool->ref_clocks.dchub_ref_clock_inKhz =
1365 res_pool->ref_clocks.xtalin_clock_inKhz;
1369 ASSERT_CRITICAL(false);
1371 for (i = 0; i < dc->link_count; i++) {
1372 /* Power up AND update implementation according to the
1373 * required signal (which may be different from the
1374 * default signal on connector).
1376 struct dc_link *link = dc->links[i];
1378 if (!is_optimized_init_done)
1379 link->link_enc->funcs->hw_init(link->link_enc);
1381 /* Check for enabled DIG to identify enabled display */
1382 if (link->link_enc->funcs->is_dig_enabled &&
1383 link->link_enc->funcs->is_dig_enabled(link->link_enc))
1384 link->link_status.link_active = true;
1387 /* Power gate DSCs */
1388 if (!is_optimized_init_done) {
1389 for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1390 if (hws->funcs.dsc_pg_control != NULL)
1391 hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1394 /* Enable outbox notification feature of dmub */
1395 if (dc->debug.enable_dmub_aux_for_legacy_ddc)
1396 dmub_enable_outbox_notification(dc);
1398 /* we want to turn off all dp displays before doing detection */
1399 if (dc->config.power_down_display_on_boot) {
1400 uint8_t dpcd_power_state = '\0';
1401 enum dc_status status = DC_ERROR_UNEXPECTED;
1403 for (i = 0; i < dc->link_count; i++) {
1404 if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
1407 /* DP 2.0 requires that LTTPR Caps be read first */
1408 dp_retrieve_lttpr_cap(dc->links[i]);
1411 * If any of the displays are lit up turn them off.
1412 * The reason is that some MST hubs cannot be turned off
1413 * completely until we tell them to do so.
1414 * If not turned off, then displays connected to MST hub
1417 status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
1418 &dpcd_power_state, sizeof(dpcd_power_state));
1419 if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
1420 /* blank dp stream before power off receiver*/
1421 if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
1422 unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
/* Blank the stream encoder driving this link's DIG front end. */
1424 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1425 if (fe == dc->res_pool->stream_enc[j]->id) {
1426 dc->res_pool->stream_enc[j]->funcs->dp_blank(
1427 dc->res_pool->stream_enc[j]);
1432 dp_receiver_power_ctrl(dc->links[i], false);
1437 /* If taking control over from VBIOS, we may want to optimize our first
1438 * mode set, so we need to skip powering down pipes until we know which
1439 * pipes we want to use.
1440 * Otherwise, if taking control is not possible, we need to power
1443 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1444 if (!is_optimized_init_done) {
1445 hws->funcs.init_pipes(dc, dc->current_state);
1446 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1447 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1448 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1452 if (!is_optimized_init_done) {
1454 for (i = 0; i < res_pool->audio_count; i++) {
1455 struct audio *audio = res_pool->audios[i];
1457 audio->funcs->hw_init(audio);
1460 for (i = 0; i < dc->link_count; i++) {
1461 struct dc_link *link = dc->links[i];
/* Remember the panel-control backlight level so ABM starts from it. */
1463 if (link->panel_cntl)
1464 backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1468 abm->funcs->abm_init(abm, backlight);
1470 if (dmcu != NULL && !dmcu->auto_load_dmcu)
1471 dmcu->funcs->dmcu_init(dmcu);
1474 if (abm != NULL && dmcu != NULL)
1475 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1477 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1478 if (!is_optimized_init_done)
1479 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1481 if (!dc->debug.disable_clock_gate) {
1482 /* enable all DCN clock gating */
1483 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1485 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1487 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1489 if (hws->funcs.enable_power_gating_plane)
1490 hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1492 if (dc->clk_mgr->funcs->notify_wm_ranges)
1493 dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1496 /* In headless boot cases, DIG may be turned
1497 * on which causes HW/SW discrepancies.
1498 * To avoid this, power down hardware on boot
1499 * if DIG is turned on
1501 void dcn10_power_down_on_boot(struct dc *dc)
1503 struct dc_link *edp_links[MAX_NUM_EDP];
1504 struct dc_link *edp_link;
1508 get_edp_links(dc, edp_links, &edp_num);
/* eDP first: backlight off, HW power down, then panel power off. */
1511 for (i = 0; i < edp_num; i++) {
1512 edp_link = edp_links[i];
1513 if (edp_link->link_enc->funcs->is_dig_enabled &&
1514 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1515 dc->hwseq->funcs.edp_backlight_control &&
1516 dc->hwss.power_down &&
1517 dc->hwss.edp_power_control) {
1518 dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1519 dc->hwss.power_down(dc);
1520 dc->hwss.edp_power_control(edp_link, false);
/* Any other link with an enabled DIG also forces a HW power down. */
1524 for (i = 0; i < dc->link_count; i++) {
1525 struct dc_link *link = dc->links[i];
1527 if (link->link_enc->funcs->is_dig_enabled &&
1528 link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1529 dc->hwss.power_down) {
1530 dc->hwss.power_down(dc);
1538 * Call update_clocks with empty context
1539 * to send DISPLAY_OFF
1540 * Otherwise DISPLAY_OFF may not be asserted
1542 if (dc->clk_mgr->funcs->set_low_power_state)
1543 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr)
/*
 * Reset the back end for pipes that are going away or need reprogramming
 * when transitioning from the current state to @context.
 */
1546 void dcn10_reset_hw_ctx_wrap(
1548 struct dc_state *context)
1551 struct dce_hwseq *hws = dc->hwseq;
/* Walk pipes from the highest index down; only top pipes are handled. */
1554 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1555 struct pipe_ctx *pipe_ctx_old =
1556 &dc->current_state->res_ctx.pipe_ctx[i];
1557 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1559 if (!pipe_ctx_old->stream)
1562 if (pipe_ctx_old->top_pipe)
/* Stream removed or timing changed: tear down the old back end. */
1565 if (!pipe_ctx->stream ||
1566 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1567 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1569 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1570 if (hws->funcs.enable_stream_gating)
1571 hws->funcs.enable_stream_gating(dc, pipe_ctx);
1573 old_clk->funcs->cs_power_down(old_clk);
/*
 * For side-by-side / top-and-bottom stereo, the secondary (split) pipe must
 * scan out the right-eye address: save the left address in *addr (so the
 * caller can restore it) and swap the right address into its place.
 */
1578 static bool patch_address_for_sbs_tb_stereo(
1579 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1581 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1582 bool sec_split = pipe_ctx->top_pipe &&
1583 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1584 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1585 (pipe_ctx->stream->timing.timing_3d_format ==
1586 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1587 pipe_ctx->stream->timing.timing_3d_format ==
1588 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1589 *addr = plane_state->address.grph_stereo.left_addr;
1590 plane_state->address.grph_stereo.left_addr =
1591 plane_state->address.grph_stereo.right_addr;
/* 3D stream with a non-stereo surface: mirror left eye into right. */
1594 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1595 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1596 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1597 plane_state->address.grph_stereo.right_addr =
1598 plane_state->address.grph_stereo.left_addr;
1599 plane_state->address.grph_stereo.right_meta_addr =
1600 plane_state->address.grph_stereo.left_meta_addr;
/* Program the plane's surface address (and flip) into the HUBP. */
1606 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1608 bool addr_patched = false;
1609 PHYSICAL_ADDRESS_LOC addr;
1610 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1612 if (plane_state == NULL)
/* Temporarily swap in the right-eye address for SBS/TB stereo pipes. */
1615 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1617 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1618 pipe_ctx->plane_res.hubp,
1619 &plane_state->address,
1620 plane_state->flip_immediate);
1622 plane_state->status.requested_address = plane_state->address;
/* An immediate flip takes effect right away; record it as current. */
1624 if (plane_state->flip_immediate)
1625 plane_state->status.current_address = plane_state->address;
/* Restore the left-eye address saved by the stereo patch above. */
1628 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
/*
 * Program the DPP degamma (and optionally the input LUT) for a plane,
 * from the plane state's input transfer function.
 */
1631 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1632 const struct dc_plane_state *plane_state)
1634 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1635 const struct dc_transfer_func *tf = NULL;
1638 if (dpp_base == NULL)
1641 if (plane_state->in_transfer_func)
1642 tf = plane_state->in_transfer_func;
/* Program the input LUT only for a real (non-identity) gamma on a LUT-able format. */
1644 if (plane_state->gamma_correction &&
1645 !dpp_base->ctx->dc->debug.always_use_regamma
1646 && !plane_state->gamma_correction->is_identity
1647 && dce_use_lut(plane_state->format))
1648 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
/* No transfer function: bypass degamma. */
1651 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1652 else if (tf->type == TF_TYPE_PREDEFINED) {
1654 case TRANSFER_FUNCTION_SRGB:
1655 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1657 case TRANSFER_FUNCTION_BT709:
1658 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1660 case TRANSFER_FUNCTION_LINEAR:
1661 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1663 case TRANSFER_FUNCTION_PQ:
/* PQ has no fixed HW curve: translate to a user PWL and program it. */
1664 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1665 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1666 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1673 } else if (tf->type == TF_TYPE_BYPASS) {
1674 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
/* Arbitrary curve: translate to the degamma PWL format and program it. */
1676 cm_helper_translate_curve_to_degamma_hw_format(tf,
1677 &dpp_base->degamma_params);
1678 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1679 &dpp_base->degamma_params);
1686 #define MAX_NUM_HW_POINTS 0x200
/*
 * Dump transfer-function points to the gamma logs: HW points always,
 * the remaining points only via the DC_LOG_ALL_* channels.
 */
1688 static void log_tf(struct dc_context *ctx,
1689 struct dc_transfer_func *tf, uint32_t hw_points_num)
1691 // DC_LOG_GAMMA is default logging of all hw points
1692 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1693 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1696 DC_LOGGER_INIT(ctx->logger);
1697 DC_LOG_GAMMA("Gamma Correction TF");
1698 DC_LOG_ALL_GAMMA("Logging all tf points...");
1699 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1701 for (i = 0; i < hw_points_num; i++) {
1702 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1703 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1704 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/* Points beyond the HW range are only emitted on the "all" channels. */
1707 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1708 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1709 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1710 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/* Program the regamma (output transfer function) for a stream on its DPP. */
1714 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1715 const struct dc_stream_state *stream)
1717 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1722 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
/* Predefined sRGB can use the fixed HW curve directly. */
1724 if (stream->out_transfer_func &&
1725 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1726 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1727 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1729 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1732 else if (cm_helper_translate_curve_to_hw_format(
1733 stream->out_transfer_func,
1734 &dpp->regamma_params, false)) {
1735 dpp->funcs->dpp_program_regamma_pwl(
1737 &dpp->regamma_params, OPP_REGAMMA_USER);
/* Otherwise fall back to regamma bypass. */
1739 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1741 if (stream != NULL && stream->ctx != NULL &&
1742 stream->out_transfer_func != NULL) {
1744 stream->out_transfer_func,
1745 dpp->regamma_params.hw_points_num);
/* Lock or unlock a pipe's TG master update lock (top pipe only). */
1751 void dcn10_pipe_control_lock(
1753 struct pipe_ctx *pipe,
1756 struct dce_hwseq *hws = dc->hwseq;
1758 /* use TG master update lock to lock everything on the TG
1759 * therefore only top pipe need to lock
1761 if (!pipe || pipe->top_pipe)
/* Sanity check p-state before and after taking/releasing the lock. */
1764 if (dc->debug.sanity_checks)
1765 hws->funcs.verify_allow_pstate_change_high(dc);
1768 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1770 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1772 if (dc->debug.sanity_checks)
1773 hws->funcs.verify_allow_pstate_change_high(dc);
1777 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1779 * Software keepout workaround to prevent cursor update locking from stalling
1780 * out cursor updates indefinitely or from old values from being retained in
1781 * the case where the viewport changes in the same frame as the cursor.
1783 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1784 * too close to VUPDATE, then stall out until VUPDATE finishes.
1786 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1787 * to avoid the need for this workaround.
1789 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1791 struct dc_stream_state *stream = pipe_ctx->stream;
1792 struct crtc_position position;
1793 uint32_t vupdate_start, vupdate_end;
1794 unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1795 unsigned int us_per_line, us_vupdate;
/* Bail out when the hooks needed for the position math are unavailable. */
1797 if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1800 if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1803 dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1806 dc->hwss.get_position(&pipe_ctx, 1, &position);
1807 vpos = position.vertical_count;
1809 /* Avoid wraparound calculation issues */
1810 vupdate_start += stream->timing.v_total;
1811 vupdate_end += stream->timing.v_total;
1812 vpos += stream->timing.v_total;
1814 if (vpos <= vupdate_start) {
1815 /* VPOS is in VACTIVE or back porch. */
1816 lines_to_vupdate = vupdate_start - vpos;
1817 } else if (vpos > vupdate_end) {
1818 /* VPOS is in the front porch. */
1821 /* VPOS is in VUPDATE. */
1822 lines_to_vupdate = 0;
1825 /* Calculate time until VUPDATE in microseconds. */
1827 stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1828 us_to_vupdate = lines_to_vupdate * us_per_line;
1830 /* 70 us is a conservative estimate of cursor update time*/
1831 if (us_to_vupdate > 70)
1834 /* Stall out until the cursor update completes. */
1835 if (vupdate_end < vupdate_start)
1836 vupdate_end += stream->timing.v_total;
1837 us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1838 udelay(us_to_vupdate + us_vupdate);
1841 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1843 /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1844 if (!pipe || pipe->top_pipe)
1847 /* Prevent cursor lock from stalling out cursor updates. */
1849 delay_cursor_until_vupdate(dc, pipe);
/* Route the lock through the DMUB HW lock manager when the link uses it. */
1851 if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1852 union dmub_hw_lock_flags hw_locks = { 0 };
1853 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1855 hw_locks.bits.lock_cursor = 1;
1856 inst_flags.opp_inst = pipe->stream_res.opp->inst;
1858 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
/* Otherwise lock/unlock the cursor directly through the MPC. */
1863 dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1864 pipe->stream_res.opp->inst, lock);
/*
 * Poll for a GSL triggered reset on @tg, waiting at most a fixed number of
 * frames.  Logs an error if the TG counter is stuck or the trigger times out.
 */
1867 static bool wait_for_reset_trigger_to_occur(
1868 struct dc_context *dc_ctx,
1869 struct timing_generator *tg)
1873 /* To avoid endless loop we wait at most
1874 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1875 const uint32_t frames_to_wait_on_triggered_reset = 10;
1878 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
/* A frozen counter means the TG never advances; give up immediately. */
1880 if (!tg->funcs->is_counter_moving(tg)) {
1881 DC_ERROR("TG counter is not moving!\n");
1885 if (tg->funcs->did_triggered_reset_occur(tg)) {
1887 /* usually occurs at i=1 */
1888 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1893 /* Wait for one frame. */
1894 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1895 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1899 DC_ERROR("GSL: Timeout on reset trigger!\n");
/*
 * Reduce *numerator / *denominator by dividing out small prime factors
 * common to both.  When checkUint32Bounary is set, the goal is to get
 * both values to fit in 32 bits; otherwise all listed primes are tried.
 */
1904 uint64_t reduceSizeAndFraction(
1905 uint64_t *numerator,
1906 uint64_t *denominator,
1907 bool checkUint32Bounary)
1910 bool ret = checkUint32Bounary == false;
1911 uint64_t max_int32 = 0xffffffff;
1912 uint64_t num, denom;
1913 static const uint16_t prime_numbers[] = {
1914 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
1915 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
1916 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
1917 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
1918 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
1919 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
1920 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
1921 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
1922 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
1923 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
1924 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
1925 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
1926 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
1927 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
1928 941, 947, 953, 967, 971, 977, 983, 991, 997};
1929 int count = ARRAY_SIZE(prime_numbers);
1932 denom = *denominator;
1933 for (i = 0; i < count; i++) {
1934 uint32_t num_remainder, denom_remainder;
1935 uint64_t num_result, denom_result;
/* Stop early once both values are within uint32 range, if requested. */
1936 if (checkUint32Bounary &&
1937 num <= max_int32 && denom <= max_int32) {
/* Repeatedly divide out the current prime while it divides both. */
1942 num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
1943 denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
1944 if (num_remainder == 0 && denom_remainder == 0) {
1946 denom = denom_result;
1948 } while (num_remainder == 0 && denom_remainder == 0);
1951 *denominator = denom;
/* True when the pipe's stream refresh rate is 30 Hz or lower. */
1955 bool is_low_refresh_rate(struct pipe_ctx *pipe)
1957 uint32_t master_pipe_refresh_rate =
1958 pipe->stream->timing.pix_clk_100hz * 100 /
1959 pipe->stream->timing.h_total /
1960 pipe->stream->timing.v_total;
1961 return master_pipe_refresh_rate <= 30;
/*
 * Compute the pixel-clock divider used for DTO programming.  Accounts for
 * low refresh rate (when requested), YCbCr 4:2:0 encoding, and scales by
 * the number of pipes in the ODM chain.
 */
1964 uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
1966 uint32_t clock_divider = 1;
1967 uint32_t numpipes = 1;
1969 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
1972 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
/* Count every pipe in the ODM chain. */
1975 while (pipe->next_odm_pipe) {
1976 pipe = pipe->next_odm_pipe;
1979 clock_divider *= numpipes;
1981 return clock_divider;
/*
 * Align DP DTO phase/modulo across a synchronization group so the pixel
 * clocks can later be vblank-aligned.  The caller treats the return value
 * as the index of the master pipe.
 */
1984 int dcn10_align_pixel_clocks(
1987 struct pipe_ctx *grouped_pipes[])
1989 struct dc_context *dc_ctx = dc->ctx;
1990 int i, master = -1, embedded = -1;
1991 struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
1992 uint64_t phase[MAX_PIPES];
1993 uint64_t modulo[MAX_PIPES];
1996 uint32_t embedded_pix_clk_100hz;
1997 uint16_t embedded_h_total;
1998 uint16_t embedded_v_total;
1999 bool clamshell_closed = false;
2000 uint32_t dp_ref_clk_100hz =
2001 dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
/* DTO override requires platform-provided vblank alignment parameters. */
2003 if (dc->config.vblank_alignment_dto_params &&
2004 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
/* Unpack the clamshell flag, embedded h/v totals, and the embedded pixel
 * clock (100 Hz units) from the packed 64-bit vblank_alignment_dto_params. */
2006 (dc->config.vblank_alignment_dto_params >> 63);
2008 (dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2010 (dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2011 embedded_pix_clk_100hz =
2012 dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
/* Capture each pipe's HW timing and actual pixel clock. */
2014 for (i = 0; i < group_size; i++) {
2015 grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2016 grouped_pipes[i]->stream_res.tg,
2017 &hw_crtc_timing[i]);
2018 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2019 dc->res_pool->dp_clock_source,
2020 grouped_pipes[i]->stream_res.tg->inst,
2022 hw_crtc_timing[i].pix_clk_100hz = pclk;
2023 if (dc_is_embedded_signal(
2024 grouped_pipes[i]->stream->signal)) {
2027 phase[i] = embedded_pix_clk_100hz*100;
2028 modulo[i] = dp_ref_clk_100hz*100;
/* Non-embedded pipes: derive phase/modulo from the embedded clock and
 * this pipe's h_total*v_total, scaled by its clock divider. */
2031 phase[i] = (uint64_t)embedded_pix_clk_100hz*
2032 hw_crtc_timing[i].h_total*
2033 hw_crtc_timing[i].v_total;
2034 phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2035 modulo[i] = (uint64_t)dp_ref_clk_100hz*
2039 if (reduceSizeAndFraction(&phase[i],
2040 &modulo[i], true) == false) {
2042 * this will help to stop reporting
2043 * this timing synchronizable
2045 DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2046 grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
/* Apply the DTO override to every synchronizable, non-embedded pipe and
 * read back the resulting pixel clock into the stream timing. */
2051 for (i = 0; i < group_size; i++) {
2052 if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2053 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2054 dc->res_pool->dp_clock_source,
2055 grouped_pipes[i]->stream_res.tg->inst,
2056 phase[i], modulo[i]);
2057 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2058 dc->res_pool->dp_clock_source,
2059 grouped_pipes[i]->stream_res.tg->inst, &pclk);
2060 grouped_pipes[i]->stream->timing.pix_clk_100hz =
2061 pclk*get_clock_divider(grouped_pipes[i], false);
/*
 * Synchronize the vblanks of all pipes in the group to the master pipe's TG
 * after aligning their DP DTOs.  DPG dimensions are enlarged (2*h + 1)
 * around the alignment and restored afterwards.
 */
2071 void dcn10_enable_vblanks_synchronization(
2075 struct pipe_ctx *grouped_pipes[])
2077 struct dc_context *dc_ctx = dc->ctx;
2078 struct output_pixel_processor *opp;
2079 struct timing_generator *tg;
2080 int i, width, height, master;
2082 for (i = 1; i < group_size; i++) {
2083 opp = grouped_pipes[i]->stream_res.opp;
2084 tg = grouped_pipes[i]->stream_res.tg;
2085 tg->funcs->get_otg_active_size(tg, &width, &height);
2086 if (opp->funcs->opp_program_dpg_dimensions)
2087 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Clear sync state on every stream before starting. */
2090 for (i = 0; i < group_size; i++) {
2091 if (grouped_pipes[i]->stream == NULL)
2093 grouped_pipes[i]->stream->vblank_synchronized = false;
2094 grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2097 DC_SYNC_INFO("Aligning DP DTOs\n");
2099 master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2101 DC_SYNC_INFO("Synchronizing VBlanks\n");
/* Align each slave TG's vblank to the master, skipping pipes whose pixel
 * clock could not be made synchronizable. */
2104 for (i = 0; i < group_size; i++) {
2105 if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2106 grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2107 grouped_pipes[master]->stream_res.tg,
2108 grouped_pipes[i]->stream_res.tg,
2109 grouped_pipes[master]->stream->timing.pix_clk_100hz,
2110 grouped_pipes[i]->stream->timing.pix_clk_100hz,
2111 get_clock_divider(grouped_pipes[master], false),
2112 get_clock_divider(grouped_pipes[i], false));
2113 grouped_pipes[i]->stream->vblank_synchronized = true;
2115 grouped_pipes[master]->stream->vblank_synchronized = true;
2116 DC_SYNC_INFO("Sync complete\n");
/* Restore the original DPG dimensions. */
2119 for (i = 1; i < group_size; i++) {
2120 opp = grouped_pipes[i]->stream_res.opp;
2121 tg = grouped_pipes[i]->stream_res.tg;
2122 tg->funcs->get_otg_active_size(tg, &width, &height);
2123 if (opp->funcs->opp_program_dpg_dimensions)
2124 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
/*
 * Synchronize the timing generators in the group by arming a reset trigger
 * on each slave TG off pipe 0's TG and waiting for the trigger to fire.
 */
2128 void dcn10_enable_timing_synchronization(
2132 struct pipe_ctx *grouped_pipes[])
2134 struct dc_context *dc_ctx = dc->ctx;
2135 struct output_pixel_processor *opp;
2136 struct timing_generator *tg;
2137 int i, width, height;
2139 DC_SYNC_INFO("Setting up OTG reset trigger\n");
/* Enlarge DPG dimensions (2*h + 1) while syncing; restored below. */
2141 for (i = 1; i < group_size; i++) {
2142 opp = grouped_pipes[i]->stream_res.opp;
2143 tg = grouped_pipes[i]->stream_res.tg;
2144 tg->funcs->get_otg_active_size(tg, &width, &height);
2145 if (opp->funcs->opp_program_dpg_dimensions)
2146 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2149 for (i = 0; i < group_size; i++) {
2150 if (grouped_pipes[i]->stream == NULL)
2152 grouped_pipes[i]->stream->vblank_synchronized = false;
/* Arm each slave TG to reset off pipe 0's TG. */
2155 for (i = 1; i < group_size; i++)
2156 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2157 grouped_pipes[i]->stream_res.tg,
2158 grouped_pipes[0]->stream_res.tg->inst);
2160 DC_SYNC_INFO("Waiting for trigger\n");
2162 /* Only need to check one pipe for the reset, as all the others are
2163 * synchronized. Look at last pipe programmed to reset.
2166 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2167 for (i = 1; i < group_size; i++)
2168 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2169 grouped_pipes[i]->stream_res.tg);
/* Restore the original DPG dimensions. */
2171 for (i = 1; i < group_size; i++) {
2172 opp = grouped_pipes[i]->stream_res.opp;
2173 tg = grouped_pipes[i]->stream_res.tg;
2174 tg->funcs->get_otg_active_size(tg, &width, &height);
2175 if (opp->funcs->opp_program_dpg_dimensions)
2176 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2179 DC_SYNC_INFO("Sync complete\n");
/*
 * dcn10_enable_per_frame_crtc_position_reset - keep grouped CRTCs aligned.
 *
 * Enables the per-frame CRTC position reset on every pipe in the group
 * (when the timing generator supports it) and then waits until each pipe's
 * reset trigger has actually occurred.
 *
 * NOTE(review): parameter lines are elided in this extract (numbering gap
 * 2182-2185); verify the full signature against the complete file.
 */
2182 void dcn10_enable_per_frame_crtc_position_reset(
2185 struct pipe_ctx *grouped_pipes[])
2187 struct dc_context *dc_ctx = dc->ctx;
2190 DC_SYNC_INFO("Setting up\n");
/* Optional hook: only program pipes whose TG implements enable_crtc_reset. */
2191 for (i = 0; i < group_size; i++)
2192 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2193 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2194 grouped_pipes[i]->stream_res.tg,
2196 &grouped_pipes[i]->stream->triggered_crtc_reset);
2198 DC_SYNC_INFO("Waiting for trigger\n");
/* Block until every pipe in the group has seen its reset trigger. */
2200 for (i = 0; i < group_size; i++)
2201 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2203 DC_SYNC_INFO("Multi-display sync is complete\n");
/*
 * mmhub_read_vm_system_aperture_settings - snapshot MC_VM aperture registers.
 *
 * Reads the system-aperture default address (page number, MSB/LSB) and the
 * low/high logical aperture bounds from the MMHUB registers and converts
 * them into byte addresses in @apt.
 */
2206 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2207 struct vm_system_aperture_param *apt,
2208 struct dce_hwseq *hws)
2210 PHYSICAL_ADDRESS_LOC physical_page_number;
2211 uint32_t logical_addr_low;
2212 uint32_t logical_addr_high;
2214 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2215 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2216 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2217 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2219 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2220 LOGICAL_ADDR, &logical_addr_low);
2222 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2223 LOGICAL_ADDR, &logical_addr_high);
/* << 12: page number -> byte address (presumably 4 KiB pages — verify). */
2225 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
/* << 18: register granularity of the aperture bounds — confirm vs. HW spec. */
2226 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
2227 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
2230 /* Temporary read settings, future will get values from kmd directly */
/*
 * mmhub_read_vm_context0_settings - snapshot VM context0 page-table state.
 *
 * Reads the context0 page-table base/start/end addresses and the protection
 * fault default address from MMHUB registers into @vm0, then rebases the
 * page-table base from UMA space into DCN's view using the frame-buffer
 * base/offset (see the comment block below).
 */
2231 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2232 struct vm_context0_param *vm0,
2233 struct dce_hwseq *hws)
2235 PHYSICAL_ADDRESS_LOC fb_base;
2236 PHYSICAL_ADDRESS_LOC fb_offset;
2237 uint32_t fb_base_value;
2238 uint32_t fb_offset_value;
2240 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2241 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2243 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2244 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2245 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2246 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2248 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2249 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2250 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2251 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2253 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2254 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2255 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2256 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2258 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2259 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2260 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2261 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2264 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2265 * Therefore we need to do
2266 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2267 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
/* << 24: FB base/offset registers hold bits [47:24] of the byte address. */
2269 fb_base.quad_part = (uint64_t)fb_base_value << 24;
2270 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2271 vm0->pte_base.quad_part += fb_base.quad_part;
2272 vm0->pte_base.quad_part -= fb_offset.quad_part;
2276 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2278 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2279 struct vm_system_aperture_param apt = { {{ 0 } } };
2280 struct vm_context0_param vm0 = { { { 0 } } };
2282 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2283 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2285 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2286 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
/*
 * dcn10_enable_plane - power up and clock a pipe's plane hardware.
 *
 * Undoes the DEGVIDCN10_253 workaround, powers on the plane's HUBP/DPP,
 * enables the DCHUB and OPP pipe clocks, optionally programs the PTE VM
 * settings (when GPU VM is supported), and re-enables the flip interrupt
 * for a visible top pipe.  Sanity checks bracket the sequence when enabled.
 */
2289 static void dcn10_enable_plane(
2291 struct pipe_ctx *pipe_ctx,
2292 struct dc_state *context)
2294 struct dce_hwseq *hws = dc->hwseq;
2296 if (dc->debug.sanity_checks) {
2297 hws->funcs.verify_allow_pstate_change_high(dc);
2300 undo_DEGVIDCN10_253_wa(dc);
2302 power_on_plane(dc->hwseq,
2303 pipe_ctx->plane_res.hubp->inst);
2305 /* enable DCFCLK current DCHUB */
2306 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2308 /* make sure OPP_PIPE_CLOCK_EN = 1 */
2309 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2310 pipe_ctx->stream_res.opp,
/* GPU VM: program address-translation (PTE) settings for this HUBP. */
2313 if (dc->config.gpu_vm_support)
2314 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2316 if (dc->debug.sanity_checks) {
2317 hws->funcs.verify_allow_pstate_change_high(dc);
/* Only the top pipe with a plane that wants flip interrupts re-arms them. */
2320 if (!pipe_ctx->top_pipe
2321 && pipe_ctx->plane_state
2322 && pipe_ctx->plane_state->flip_int_enabled
2323 && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2324 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2328 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2331 struct dpp_grph_csc_adjustment adjust;
2332 memset(&adjust, 0, sizeof(adjust));
2333 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2336 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2337 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2338 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2339 adjust.temperature_matrix[i] =
2340 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2341 } else if (pipe_ctx->plane_state &&
2342 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2343 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2344 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2345 adjust.temperature_matrix[i] =
2346 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2349 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2353 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2355 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2356 if (pipe_ctx->top_pipe) {
2357 struct pipe_ctx *top = pipe_ctx->top_pipe;
2359 while (top->top_pipe)
2360 top = top->top_pipe; // Traverse to top pipe_ctx
2361 if (top->plane_state && top->plane_state->layer_index == 0)
2362 return true; // Front MPO plane not hidden
2368 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2370 // Override rear plane RGB bias to fix MPO brightness
2371 uint16_t rgb_bias = matrix[3];
2376 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2377 matrix[3] = rgb_bias;
2378 matrix[7] = rgb_bias;
2379 matrix[11] = rgb_bias;
/*
 * dcn10_program_output_csc - program the output color-space conversion.
 *
 * When the stream supplies an adjustment matrix, programs it via
 * dpp_set_csc_adjustment — applying the rear-plane RGB bias workaround when
 * required (see dcn10_is_rear_mpo_fix_required); otherwise falls back to the
 * default CSC for @colorspace.
 *
 * NOTE(review): trailing parameter lines (matrix/opp_id) are elided in this
 * extract — verify the full signature against the complete file.
 */
2382 void dcn10_program_output_csc(struct dc *dc,
2383 struct pipe_ctx *pipe_ctx,
2384 enum dc_color_space colorspace,
2388 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2389 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2391 /* MPO is broken with RGB colorspaces when OCSC matrix
2392 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2393 * Blending adds offsets from front + rear to rear plane
2395 * Fix is to set RGB bias to 0 on rear plane, top plane
2396 * black value pixels add offset instead of rear + front
2399 int16_t rgb_bias = matrix[3];
2400 // matrix[3/7/11] are all the same offset value
2402 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2403 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2405 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
/* No stream adjustment: fall back to the default CSC for this colorspace. */
2409 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2410 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2414 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2416 struct dc_bias_and_scale bns_params = {0};
2418 // program the input csc
2419 dpp->funcs->dpp_setup(dpp,
2420 plane_state->format,
2421 EXPANSION_MODE_ZERO,
2422 plane_state->input_csc_color_matrix,
2423 plane_state->color_space,
2426 //set scale and bias registers
2427 build_prescale_params(&bns_params, plane_state);
2428 if (dpp->funcs->dpp_program_bias_and_scale)
2429 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2432 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2434 struct mpc *mpc = dc->res_pool->mpc;
2436 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2437 get_hdr_visual_confirm_color(pipe_ctx, color);
2438 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2439 get_surface_visual_confirm_color(pipe_ctx, color);
2440 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2441 get_surface_tile_visual_confirm_color(pipe_ctx, color);
2443 color_space_to_black_color(
2444 dc, pipe_ctx->stream->output_color_space, color);
2446 if (mpc->funcs->set_bg_color)
2447 mpc->funcs->set_bg_color(mpc, color, mpcc_id);
/*
 * dcn10_update_mpcc - (re)build the MPC blending configuration for a pipe.
 *
 * Builds the blend config (per-pixel vs. global alpha, pre-multiplied alpha
 * disabled for RGB due to the OCSC-before-MPC ordering), then either updates
 * blending in place (no full update) or removes and re-inserts the plane in
 * the MPC tree, finally recording the OPP/MPCC ids on the HUBP.
 *
 * NOTE(review): several lines are elided in this extract (blend-config
 * fields, insert_plane arguments); verify against the full file.
 */
2450 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2452 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2453 struct mpcc_blnd_cfg blnd_cfg = {{0}};
2454 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2456 struct mpcc *new_mpcc;
2457 struct mpc *mpc = dc->res_pool->mpc;
2458 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2460 if (per_pixel_alpha)
2461 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2463 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2465 blnd_cfg.overlap_only = false;
2466 blnd_cfg.global_gain = 0xff;
2468 if (pipe_ctx->plane_state->global_alpha)
2469 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2471 blnd_cfg.global_alpha = 0xff;
2473 /* DCN1.0 has output CM before MPC which seems to screw with
2474 * pre-multiplied alpha.
2476 blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2477 pipe_ctx->stream->output_color_space)
2483 * Note: currently there is a bug in init_hw such that
2484 * on resume from hibernate, BIOS sets up MPCC0, and
2485 * we do mpcc_remove but the mpcc cannot go to idle
2486 * after remove. This cause us to pick mpcc1 here,
2487 * which causes a pstate hang for yet unknown reason.
2489 mpcc_id = hubp->inst;
2491 /* If there is no full update, don't need to touch MPC tree*/
2492 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2493 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2494 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2498 /* check if this MPCC is already being used */
2499 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2500 /* remove MPCC if being used */
2501 if (new_mpcc != NULL)
2502 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2504 if (dc->debug.sanity_checks)
2505 mpc->funcs->assert_mpcc_idle_before_connect(
2506 dc->res_pool->mpc, mpcc_id);
2508 /* Call MPC to insert new plane */
2509 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2516 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2518 ASSERT(new_mpcc != NULL);
/* Remember where this plane was connected for later teardown. */
2520 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2521 hubp->mpcc_id = mpcc_id;
2524 static void update_scaler(struct pipe_ctx *pipe_ctx)
2526 bool per_pixel_alpha =
2527 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2529 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2530 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2531 /* scaler configuration */
2532 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2533 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
/*
 * dcn10_update_dchubp_dpp - apply a plane's state to its HUBP and DPP.
 *
 * On a full update: manages the DPP clock (including the keep-dppclk==dispclk
 * workaround described inline), selects the VTG, and programs the HUBP
 * DLG/TTU registers; otherwise only the interdependent HUBP registers are
 * refreshed.  Then, gated on the relevant update flags: input CSC/bias,
 * MPCC blending, scaler, viewport, cursor, gamut remap, output CSC and the
 * surface config are (re)programmed, the flip address is committed, and the
 * pipe is unblanked if its tree is visible.
 *
 * NOTE(review): this extract has many elided lines (arguments, braces);
 * verify details against the full file.
 */
2536 static void dcn10_update_dchubp_dpp(
2538 struct pipe_ctx *pipe_ctx,
2539 struct dc_state *context)
2541 struct dce_hwseq *hws = dc->hwseq;
2542 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2543 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2544 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2545 struct plane_size size = plane_state->plane_size;
2546 unsigned int compat_level = 0;
2547 bool should_divided_by_2 = false;
2549 /* depends on DML calculation, DPP clock value may change dynamically */
2550 /* If request max dpp clk is lower than current dispclk, no need to
2553 if (plane_state->update_flags.bits.full_update) {
2555 /* new calculated dispclk, dppclk are stored in
2556 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2557 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2558 * dcn_validate_bandwidth compute new dispclk, dppclk.
2559 * dispclk will put in use after optimize_bandwidth when
2560 * ramp_up_dispclk_with_dpp is called.
2561 * there are two places for dppclk be put in use. One location
2562 * is the same as the location as dispclk. Another is within
2563 * update_dchubp_dpp which happens between pre_bandwidth and
2564 * optimize_bandwidth.
2565 * dppclk updated within update_dchubp_dpp will cause new
2566 * clock values of dispclk and dppclk not be in use at the same
2567 * time. when clocks are decreased, this may cause dppclk is
2568 * lower than previous configuration and let pipe stuck.
2569 * for example, eDP + external dp, change resolution of DP from
2570 * 1920x1080x144hz to 1280x960x60hz.
2571 * before change: dispclk = 337889 dppclk = 337889
2572 * change mode, dcn_validate_bandwidth calculate
2573 * dispclk = 143122 dppclk = 143122
2574 * update_dchubp_dpp be executed before dispclk be updated,
2575 * dispclk = 337889, but dppclk use new value dispclk /2 =
2576 * 168944. this will cause pipe pstate warning issue.
2577 * solution: between pre_bandwidth and optimize_bandwidth, while
2578 * dispclk is going to be decreased, keep dppclk = dispclk
2580 if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2581 dc->clk_mgr->clks.dispclk_khz)
2582 should_divided_by_2 = false;
2584 should_divided_by_2 =
2585 context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2586 dc->clk_mgr->clks.dispclk_khz / 2;
2588 dpp->funcs->dpp_dppclk_control(
2590 should_divided_by_2,
/* Keep the DCCG's DPP DTO in sync with the chosen dppclk. */
2593 if (dc->res_pool->dccg)
2594 dc->res_pool->dccg->funcs->update_dpp_dto(
2597 pipe_ctx->plane_res.bw.dppclk_khz);
2599 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2600 dc->clk_mgr->clks.dispclk_khz / 2 :
2601 dc->clk_mgr->clks.dispclk_khz;
2604 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2605 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2606 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2608 if (plane_state->update_flags.bits.full_update) {
2609 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2611 hubp->funcs->hubp_setup(
2613 &pipe_ctx->dlg_regs,
2614 &pipe_ctx->ttu_regs,
2616 &pipe_ctx->pipe_dlg_param);
/* Non-full update: only refresh the interdependent DLG/TTU registers. */
2617 hubp->funcs->hubp_setup_interdependent(
2619 &pipe_ctx->dlg_regs,
2620 &pipe_ctx->ttu_regs);
2623 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2625 if (plane_state->update_flags.bits.full_update ||
2626 plane_state->update_flags.bits.bpp_change)
2627 dcn10_update_dpp(dpp, plane_state);
2629 if (plane_state->update_flags.bits.full_update ||
2630 plane_state->update_flags.bits.per_pixel_alpha_change ||
2631 plane_state->update_flags.bits.global_alpha_change)
2632 hws->funcs.update_mpcc(dc, pipe_ctx);
2634 if (plane_state->update_flags.bits.full_update ||
2635 plane_state->update_flags.bits.per_pixel_alpha_change ||
2636 plane_state->update_flags.bits.global_alpha_change ||
2637 plane_state->update_flags.bits.scaling_change ||
2638 plane_state->update_flags.bits.position_change) {
2639 update_scaler(pipe_ctx);
2642 if (plane_state->update_flags.bits.full_update ||
2643 plane_state->update_flags.bits.scaling_change ||
2644 plane_state->update_flags.bits.position_change) {
2645 hubp->funcs->mem_program_viewport(
2647 &pipe_ctx->plane_res.scl_data.viewport,
2648 &pipe_ctx->plane_res.scl_data.viewport_c);
/* Cursor has an address: reprogram position/attributes (and SDR level). */
2651 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2652 dc->hwss.set_cursor_position(pipe_ctx);
2653 dc->hwss.set_cursor_attribute(pipe_ctx);
2655 if (dc->hwss.set_cursor_sdr_white_level)
2656 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2659 if (plane_state->update_flags.bits.full_update) {
2661 dc->hwss.program_gamut_remap(pipe_ctx);
2663 dc->hwss.program_output_csc(dc,
2665 pipe_ctx->stream->output_color_space,
2666 pipe_ctx->stream->csc_color_matrix.matrix,
2667 pipe_ctx->stream_res.opp->inst);
2670 if (plane_state->update_flags.bits.full_update ||
2671 plane_state->update_flags.bits.pixel_format_change ||
2672 plane_state->update_flags.bits.horizontal_mirror_change ||
2673 plane_state->update_flags.bits.rotation_change ||
2674 plane_state->update_flags.bits.swizzle_change ||
2675 plane_state->update_flags.bits.dcc_change ||
2676 plane_state->update_flags.bits.bpp_change ||
2677 plane_state->update_flags.bits.scaling_change ||
2678 plane_state->update_flags.bits.plane_size_change) {
2679 hubp->funcs->hubp_program_surface_config(
2681 plane_state->format,
2682 &plane_state->tiling_info,
2684 plane_state->rotation,
2686 plane_state->horizontal_mirror,
2690 hubp->power_gated = false;
/* Commit the plane's framebuffer address (flip). */
2692 hws->funcs.update_plane_addr(dc, pipe_ctx);
2694 if (is_pipe_tree_visible(pipe_ctx))
2695 hubp->funcs->set_blank(hubp, false);
/*
 * dcn10_blank_pixel_data - blank/unblank the OTG output for a pipe.
 *
 * Programs the OTG blank color to the stream colorspace's black, then either
 * blanks (also programming ABM) or unblanks (disabling ABM immediately and
 * waiting for VBLANK first) via the timing generator.
 *
 * NOTE(review): the blank/unblank branch structure is partially elided in
 * this extract (numbering gaps); verify against the full file.
 */
2698 void dcn10_blank_pixel_data(
2700 struct pipe_ctx *pipe_ctx,
2703 enum dc_color_space color_space;
2704 struct tg_color black_color = {0};
2705 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2706 struct dc_stream_state *stream = pipe_ctx->stream;
2708 /* program otg blank color */
2709 color_space = stream->output_color_space;
2710 color_space_to_black_color(dc, color_space, &black_color);
2713 * The way 420 is packed, 2 channels carry Y component, 1 channel
2714 * alternate between Cb and Cr, so both channels need the pixel
2717 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2718 black_color.color_r_cr = black_color.color_g_y;
2721 if (stream_res->tg->funcs->set_blank_color)
2722 stream_res->tg->funcs->set_blank_color(
/* Blank path: blank the TG, then program ABM for the (blanked) pipe. */
2727 if (stream_res->tg->funcs->set_blank)
2728 stream_res->tg->funcs->set_blank(stream_res->tg, blank)
2729 if (stream_res->abm) {
2730 dc->hwss.set_pipe(pipe_ctx);
2731 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
/* Unblank path: drop ABM immediately and unblank after the next VBLANK. */
2734 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2735 if (stream_res->tg->funcs->set_blank) {
2736 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2737 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2742 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2744 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2745 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2746 struct custom_float_format fmt;
2748 fmt.exponenta_bits = 6;
2749 fmt.mantissa_bits = 12;
2753 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2754 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2756 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2757 pipe_ctx->plane_res.dpp, hw_mult);
/*
 * dcn10_program_pipe - program one pipe's timing, plane and color pipeline.
 *
 * For a top pipe: programs global sync and VTG parameters, sets up the
 * vupdate interrupt, and blanks/unblanks based on tree visibility.  Then
 * enables the plane on a full update, updates HUBP/DPP state, the HDR
 * multiplier, and the input/output transfer functions as flagged.
 */
2760 void dcn10_program_pipe(
2762 struct pipe_ctx *pipe_ctx,
2763 struct dc_state *context)
2765 struct dce_hwseq *hws = dc->hwseq;
2767 if (pipe_ctx->top_pipe == NULL) {
2768 bool blank = !is_pipe_tree_visible(pipe_ctx);
2770 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2771 pipe_ctx->stream_res.tg,
2772 pipe_ctx->pipe_dlg_param.vready_offset,
2773 pipe_ctx->pipe_dlg_param.vstartup_start,
2774 pipe_ctx->pipe_dlg_param.vupdate_offset,
2775 pipe_ctx->pipe_dlg_param.vupdate_width);
2777 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2778 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2780 if (hws->funcs.setup_vupdate_interrupt)
2781 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2783 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
/* Plane power-up is only needed when the plane is (re)created. */
2786 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2787 dcn10_enable_plane(dc, pipe_ctx, context);
2789 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2791 hws->funcs.set_hdr_multiplier(pipe_ctx);
2793 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2794 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2795 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2796 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2798 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2799 * only do gamma programming for full update.
2800 * TODO: This can be further optimized/cleaned up
2801 * Always call this for now since it does memcmp inside before
2802 * doing heavy calculation and programming
2804 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2805 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream)
/*
 * dcn10_wait_for_pending_cleared - wait for each top pipe's update to land.
 *
 * For every enabled top pipe with a stream and plane, waits for VBLANK and
 * then VACTIVE so a VUPDATE is guaranteed to have occurred before the caller
 * proceeds (see the inline rationale).
 */
2808 void dcn10_wait_for_pending_cleared(struct dc *dc,
2809 struct dc_state *context)
2811 struct pipe_ctx *pipe_ctx;
2812 struct timing_generator *tg;
2815 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2816 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2817 tg = pipe_ctx->stream_res.tg;
2820 * Only wait for top pipe's tg penindg bit
2821 * Also skip if pipe is disabled.
2823 if (pipe_ctx->top_pipe ||
2824 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2825 !tg->funcs->is_tg_enabled(tg))
2829 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2830 * For some reason waiting for OTG_UPDATE_PENDING cleared
2831 * seems to not trigger the update right away, and if we
2832 * lock again before VUPDATE then we don't get a separated
2835 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2836 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
/*
 * dcn10_post_unlock_program_front_end - front-end cleanup after pipe unlock.
 *
 * Applies the false-OPTC-underflow workaround on plane-less top pipes,
 * disables planes flagged for removal, optimizes bandwidth when any pipe
 * was disabled, and applies the DEGVIDCN10_254 watermark workaround.
 */
2840 void dcn10_post_unlock_program_front_end(
2842 struct dc_state *context)
2846 DC_LOGGER_INIT(dc->ctx->logger);
2848 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2849 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2851 if (!pipe_ctx->top_pipe &&
2852 !pipe_ctx->prev_odm_pipe &&
2854 struct timing_generator *tg = pipe_ctx->stream_res.tg;
/* Stream with no planes left: apply the false-underflow workaround. */
2856 if (context->stream_status[i].plane_count == 0)
2857 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
2861 for (i = 0; i < dc->res_pool->pipe_count; i++)
2862 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2863 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/* Any disabled pipe means bandwidth can be re-optimized. */
2865 for (i = 0; i < dc->res_pool->pipe_count; i++)
2866 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2867 dc->hwss.optimize_bandwidth(dc, context);
2871 if (dc->hwseq->wa.DEGVIDCN10_254)
2872 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
/*
 * dcn10_stereo_hw_frame_pack_wa - disable self-refresh for HW frame packing.
 *
 * If any stream uses HW frame-packed stereo, turn off hubbub self-refresh
 * (stutter) as a workaround.
 */
2875 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2879 for (i = 0; i < context->stream_count; i++) {
2880 if (context->streams[i]->timing.timing_3d_format
2881 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2885 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
/*
 * dcn10_prepare_bandwidth - raise clocks/watermarks before a mode change.
 *
 * Updates clocks for the new context (phyclk forced to 0 with no streams),
 * programs hubbub watermarks (recording whether a later optimize pass is
 * required), applies the stereo frame-pack workaround, and optionally
 * notifies pplib of watermark ranges.  Sanity checks bracket the sequence.
 *
 * NOTE(review): update_clocks/program_watermarks argument lines are elided
 * in this extract; verify against the full file.
 */
2891 void dcn10_prepare_bandwidth(
2893 struct dc_state *context)
2895 struct dce_hwseq *hws = dc->hwseq;
2896 struct hubbub *hubbub = dc->res_pool->hubbub;
2898 if (dc->debug.sanity_checks)
2899 hws->funcs.verify_allow_pstate_change_high(dc);
2901 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2902 if (context->stream_count == 0)
2903 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2905 dc->clk_mgr->funcs->update_clocks(
/* Watermark programming reports whether optimize_bandwidth must follow. */
2911 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
2912 &context->bw_ctx.bw.dcn.watermarks,
2913 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2915 dcn10_stereo_hw_frame_pack_wa(dc, context);
2917 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2918 dcn_bw_notify_pplib_of_wm_ranges(dc);
2920 if (dc->debug.sanity_checks)
2921 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * dcn10_optimize_bandwidth - lower clocks/watermarks after a mode change.
 *
 * Mirror of dcn10_prepare_bandwidth for the post-update direction: update
 * clocks, reprogram hubbub watermarks, apply the stereo frame-pack
 * workaround, and optionally notify pplib.  Sanity checks bracket the
 * sequence.
 */
2924 void dcn10_optimize_bandwidth(
2926 struct dc_state *context)
2928 struct dce_hwseq *hws = dc->hwseq;
2929 struct hubbub *hubbub = dc->res_pool->hubbub;
2931 if (dc->debug.sanity_checks)
2932 hws->funcs.verify_allow_pstate_change_high(dc);
2934 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2935 if (context->stream_count == 0)
2936 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2938 dc->clk_mgr->funcs->update_clocks(
2944 hubbub->funcs->program_watermarks(hubbub,
2945 &context->bw_ctx.bw.dcn.watermarks,
2946 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2949 dcn10_stereo_hw_frame_pack_wa(dc, context);
2951 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2952 dcn_bw_notify_pplib_of_wm_ranges(dc);
2954 if (dc->debug.sanity_checks)
2955 hws->funcs.verify_allow_pstate_change_high(dc);
2958 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
2959 int num_pipes, struct dc_crtc_timing_adjust adjust)
2962 struct drr_params params = {0};
2963 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2964 unsigned int event_triggers = 0x800;
2965 // Note DRR trigger events are generated regardless of whether num frames met.
2966 unsigned int num_frames = 2;
2968 params.vertical_total_max = adjust.v_total_max;
2969 params.vertical_total_min = adjust.v_total_min;
2970 params.vertical_total_mid = adjust.v_total_mid;
2971 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
2972 /* TODO: If multiple pipes are to be supported, you need
2973 * some GSL stuff. Static screen triggers may be programmed differently
2976 for (i = 0; i < num_pipes; i++) {
2977 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2978 pipe_ctx[i]->stream_res.tg, ¶ms);
2979 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
2980 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
2981 pipe_ctx[i]->stream_res.tg,
2982 event_triggers, num_frames);
/*
 * dcn10_get_position - read the current CRTC position from each pipe's TG.
 *
 * NOTE(review): the num_pipes parameter line is elided in this extract
 * (numbering gap 2986-2988); verify the full signature in the complete file.
 */
2986 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
2988 struct crtc_position *position)
2992 /* TODO: handle pipes > 1
2994 for (i = 0; i < num_pipes; i++)
2995 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
/*
 * dcn10_set_static_screen_control - program static-screen event triggers.
 *
 * Builds a trigger bitmask from the requested event sources (surface update,
 * cursor update, force trigger) and programs it with the frame count into
 * every pipe's timing generator.
 *
 * NOTE(review): the per-event bit assignments are elided in this extract
 * (gaps after lines 3004/3006/3008); verify the mask values in the full file.
 */
2998 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
2999 int num_pipes, const struct dc_static_screen_params *params)
3002 unsigned int triggers = 0;
3004 if (params->triggers.surface_update)
3006 if (params->triggers.cursor_update)
3008 if (params->triggers.force_trigger)
3011 for (i = 0; i < num_pipes; i++)
3012 pipe_ctx[i]->stream_res.tg->funcs->
3013 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3014 triggers, params->num_frames);
/*
 * dcn10_config_stereo_parameters - derive CRTC stereo flags for a stream.
 *
 * Enables stereo programming only for frame-sequential view formats on a
 * true stereo timing; disables DP stereo sync on passive converter dongles
 * and marks frame-packed timings.
 */
3017 static void dcn10_config_stereo_parameters(
3018 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3020 enum view_3d_format view_format = stream->view_format;
3021 enum dc_timing_3d_format timing_3d_format =\
3022 stream->timing.timing_3d_format;
3023 bool non_stereo_timing = false;
/* NONE / side-by-side / top-and-bottom are not "true" stereo timings. */
3025 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3026 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3027 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3028 non_stereo_timing = true;
3030 if (non_stereo_timing == false &&
3031 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3033 flags->PROGRAM_STEREO = 1;
3034 flags->PROGRAM_POLARITY = 1;
3035 if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3036 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3037 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3038 enum display_dongle_type dongle = \
3039 stream->link->ddc->dongle_type;
/* Converter dongles cannot pass DP stereo sync through. */
3040 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3041 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3042 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3043 flags->DISABLE_STEREO_DP_SYNC = 1;
3045 flags->RIGHT_EYE_POLARITY =\
3046 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3047 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3048 flags->FRAME_PACKED = 1;
/*
 * dcn10_setup_stereo - apply stereo configuration to OPP and TG.
 *
 * Derives the stereo flags for the pipe's stream, drives the sideband GPIO
 * for sideband frame-alternate stereo, and programs the OPP and timing
 * generator accordingly.
 *
 * NOTE(review): trailing call arguments are elided in this extract
 * (numbering gaps after 3070/3074); verify against the full file.
 */
3054 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3056 struct crtc_stereo_flags flags = { 0 };
3057 struct dc_stream_state *stream = pipe_ctx->stream;
3059 dcn10_config_stereo_parameters(stream, &flags);
/* Sideband FA: toggle the stereo GPIO (fall back to off on failure). */
3061 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3062 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3063 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3065 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3068 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3069 pipe_ctx->stream_res.opp,
3070 flags.PROGRAM_STEREO == 1,
3073 pipe_ctx->stream_res.tg->funcs->program_stereo(
3074 pipe_ctx->stream_res.tg,
/*
 * get_hubp_by_inst - find the HUBP whose instance id matches @mpcc_inst.
 *
 * NOTE(review): the not-found tail of this function is elided in this
 * extract (numbering gap after 3087); verify its behavior in the full file.
 */
3081 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3085 for (i = 0; i < res_pool->pipe_count; i++) {
3086 if (res_pool->hubps[i]->inst == mpcc_inst)
3087 return res_pool->hubps[i];
/*
 * dcn10_wait_for_mpcc_disconnect - drain pending MPCC disconnects for a pipe.
 *
 * For every MPCC the pipe's OPP has flagged as disconnect-pending, waits for
 * the MPCC to go idle, clears the pending flag, and blanks the matching
 * HUBP.  Sanity checks bracket the sequence when enabled.
 */
3093 void dcn10_wait_for_mpcc_disconnect(
3095 struct resource_pool *res_pool,
3096 struct pipe_ctx *pipe_ctx)
3098 struct dce_hwseq *hws = dc->hwseq;
3101 if (dc->debug.sanity_checks) {
3102 hws->funcs.verify_allow_pstate_change_high(dc);
/* Nothing to do for a pipe with no OPP (no stream resources). */
3105 if (!pipe_ctx->stream_res.opp)
3108 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3109 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3110 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3112 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3113 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3114 hubp->funcs->set_blank(hubp, true);
3118 if (dc->debug.sanity_checks) {
3119 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * dcn10_dummy_display_power_gating - no-op display power-gating stub.
 *
 * NOTE(review): the body is elided in this extract (numbering gap after
 * 3128); verify the returned value in the full file.
 */
3124 bool dcn10_dummy_display_power_gating(
3126 uint8_t controller_id,
3127 struct dc_bios *dcb,
3128 enum pipe_gating_control power_gating)
3133 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3135 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3136 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3138 struct dc *dc = plane_state->ctx->dc;
3140 if (plane_state == NULL)
3143 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3144 pipe_ctx->plane_res.hubp);
3146 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3149 plane_state->status.current_address = plane_state->status.requested_address;
3151 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3152 tg->funcs->is_stereo_left_eye) {
3153 plane_state->status.is_right_eye =
3154 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3157 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3158 struct dce_hwseq *hwseq = dc->hwseq;
3159 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3160 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3162 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3163 struct hubbub *hubbub = dc->res_pool->hubbub;
3165 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3166 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3171 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3173 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3175 /* In DCN, this programming sequence is owned by the hubbub */
3176 hubbub->funcs->update_dchub(hubbub, dh_data);
3179 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
/*
 * Decide whether this pipe's HW cursor may be disabled: a pipe blended
 * above this one with a visible plane whose recout fully contains this
 * pipe's recout will draw the cursor itself, so drawing it here too would
 * produce a double cursor and scaling artifacts.
 */
3181 struct pipe_ctx *test_pipe;
3182 const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
/* Pre-compute right/bottom edges of this pipe's recout for the containment test. */
3183 int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
3186 * Disable the cursor if there's another pipe above this with a
3187 * plane that contains this pipe's viewport to prevent double cursor
3188 * and incorrect scaling artifacts.
/* Walk every pipe blended above this one. */
3190 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3191 test_pipe = test_pipe->top_pipe) {
/* An invisible plane cannot display the cursor; skip it. */
3192 if (!test_pipe->plane_state->visible)
3195 r2 = &test_pipe->plane_res.scl_data.recout;
3196 r2_r = r2->x + r2->width;
3197 r2_b = r2->y + r2->height;
/* r2 fully contains r1: the pipe above covers this pipe's output region. */
3199 if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
3206 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
/*
 * Program the HW cursor position for this pipe: translate the stream-space
 * cursor position into plane space (accounting for scaling, source-rect
 * clipping, rotation and mirroring, plus pipe-split/ODM layouts) and hand
 * the result to both the HUBP and DPP cursor hooks.
 */
3208 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3209 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3210 struct dpp *dpp = pipe_ctx->plane_res.dpp;
/* Snapshot of the clock/viewport/scale/rotation state the cursor HW needs. */
3211 struct dc_cursor_mi_param param = {
3212 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3213 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3214 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3215 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3216 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3217 .rotation = pipe_ctx->plane_state->rotation,
3218 .mirror = pipe_ctx->plane_state->horizontal_mirror
/* A top or bottom pipe means this plane is split across two pipes. */
3220 bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
3221 (pipe_ctx->bottom_pipe != NULL);
3222 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3223 (pipe_ctx->prev_odm_pipe != NULL);
3225 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3226 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3227 int x_pos = pos_cpy.x;
3228 int y_pos = pos_cpy.y;
3231 * DC cursor is stream space, HW cursor is plane space and drawn
3232 * as part of the framebuffer.
3234 * Cursor position can't be negative, but hotspot can be used to
3235 * shift cursor out of the plane bounds. Hotspot must be smaller
3236 * than the cursor size.
3240 * Translate cursor from stream space to plane space.
3242 * If the cursor is scaled then we need to scale the position
3243 * to be in the approximately correct place. We can't do anything
3244 * about the actual size being incorrect, that's a limitation of
3247 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3248 pipe_ctx->plane_state->dst_rect.width;
3249 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3250 pipe_ctx->plane_state->dst_rect.height;
3253 * If the cursor's source viewport is clipped then we need to
3254 * translate the cursor to appear in the correct position on
3257 * This translation isn't affected by scaling so it needs to be
3258 * done *after* we adjust the position for the scale factor.
3260 * This is only done by opt-in for now since there are still
3261 * some usecases like tiled display that might enable the
3262 * cursor on both streams while expecting dc to clip it.
3264 if (pos_cpy.translate_by_source) {
3265 x_pos += pipe_ctx->plane_state->src_rect.x;
3266 y_pos += pipe_ctx->plane_state->src_rect.y;
3270 * If the position is negative then we need to add to the hotspot
3271 * to shift the cursor outside the plane.
3275 pos_cpy.x_hotspot -= x_pos;
3280 pos_cpy.y_hotspot -= y_pos;
/* HW position registers are unsigned; negatives were folded into the hotspot above. */
3284 pos_cpy.x = (uint32_t)x_pos;
3285 pos_cpy.y = (uint32_t)y_pos;
/* NOTE(review): cursor is force-disabled for video-progressive surfaces here —
 * presumably because the HW cursor cannot overlay that surface type; confirm. */
3287 if (pipe_ctx->plane_state->address.type
3288 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3289 pos_cpy.enable = false;
/* A fully-covering pipe above will draw the cursor instead (see helper). */
3291 if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3292 pos_cpy.enable = false;
3294 // Swap axis and mirror horizontally
3295 if (param.rotation == ROTATION_ANGLE_90) {
3296 uint32_t temp_x = pos_cpy.x;
3298 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3299 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3302 // Swap axis and mirror vertically
3303 else if (param.rotation == ROTATION_ANGLE_270) {
3304 uint32_t temp_y = pos_cpy.y;
3305 int viewport_height =
3306 pipe_ctx->plane_res.scl_data.viewport.height;
3308 pipe_ctx->plane_res.scl_data.viewport.y;
3311 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3312 * For pipe split cases:
3313 * - apply offset of viewport.y to normalize pos_cpy.x
3314 * - calculate the pos_cpy.y as before
3315 * - shift pos_cpy.y back by same offset to get final value
3316 * - since we iterate through both pipes, use the lower
3317 * viewport.y for offset
3318 * For non pipe split cases, use the same calculation for
3319 * pos_cpy.y as the 180 degree rotation case below,
3320 * but use pos_cpy.x as our input because we are rotating
3323 if (pipe_split_on || odm_combine_on) {
3324 int pos_cpy_x_offset;
3325 int other_pipe_viewport_y;
/* Pick the sibling pipe's viewport.y: bottom/top for split, next/prev for ODM. */
3327 if (pipe_split_on) {
3328 if (pipe_ctx->bottom_pipe) {
3329 other_pipe_viewport_y =
3330 pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3332 other_pipe_viewport_y =
3333 pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3336 if (pipe_ctx->next_odm_pipe) {
3337 other_pipe_viewport_y =
3338 pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3340 other_pipe_viewport_y =
3341 pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
/* Use the lower of the two viewport.y values as the normalization offset. */
3344 pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3345 other_pipe_viewport_y : viewport_y;
3346 pos_cpy.x -= pos_cpy_x_offset;
3347 if (pos_cpy.x > viewport_height) {
3348 pos_cpy.x = pos_cpy.x - viewport_height;
3349 pos_cpy.y = viewport_height - pos_cpy.x;
3351 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3353 pos_cpy.y += pos_cpy_x_offset;
3355 pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3359 // Mirror horizontally and vertically
3360 else if (param.rotation == ROTATION_ANGLE_180) {
3361 int viewport_width =
3362 pipe_ctx->plane_res.scl_data.viewport.width;
3364 pipe_ctx->plane_res.scl_data.viewport.x;
3366 if (pipe_split_on || odm_combine_on) {
3367 if (pos_cpy.x >= viewport_width + viewport_x) {
/* Cursor is on the right half: mirror within that half. */
3368 pos_cpy.x = 2 * viewport_width
3369 - pos_cpy.x + 2 * viewport_x;
3371 uint32_t temp_x = pos_cpy.x;
3373 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
/* Keep the cursor from straddling the split boundary by its own width. */
3374 if (temp_x >= viewport_x +
3375 (int)hubp->curs_attr.width || pos_cpy.x
3376 <= (int)hubp->curs_attr.width +
3377 pipe_ctx->plane_state->src_rect.x) {
3378 pos_cpy.x = temp_x + viewport_width;
3382 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3386 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3388 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3389 * pos_cpy.y_new = viewport.y + delta_from_bottom
3391 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3393 pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3394 pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
/* Program the final plane-space position into both HW blocks. */
3397 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3398 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3401 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3403 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3405 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3406 pipe_ctx->plane_res.hubp, attributes);
3407 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3408 pipe_ctx->plane_res.dpp, attributes);
3411 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
/*
 * Scale cursor brightness for HDR streams: when the stream's SDR white
 * level exceeds the 80-nit reference, program a cursor multiplier of
 * sdr_white_level/80 (converted to FP16 custom-float); otherwise keep the
 * 1.0 default.
 */
3413 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3414 struct fixed31_32 multiplier;
3415 struct dpp_cursor_attributes opt_attr = { 0 };
3416 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3417 struct custom_float_format fmt;
/* Optional hook: bail out on DPP revisions that do not implement it. */
3419 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
/* FP16 layout: 5 exponent bits, 10 mantissa bits. */
3422 fmt.exponenta_bits = 5;
3423 fmt.mantissa_bits = 10;
/* 80 nits is the nominal SDR white reference. */
3426 if (sdr_white_level > 80) {
3427 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3428 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3431 opt_attr.scale = hw_scale;
3434 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3435 pipe_ctx->plane_res.dpp, &opt_attr);
3439 * apply_front_porch_workaround TODO FPGA still need?
3441 * This is a workaround for a bug that has existed since R5xx and has not been
3442 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3444 static void apply_front_porch_workaround(
3445 struct dc_crtc_timing *timing)
3447 if (timing->flags.INTERLACE == 1) {
3448 if (timing->v_front_porch < 2)
3449 timing->v_front_porch = 2;
3451 if (timing->v_front_porch < 1)
3452 timing->v_front_porch = 1;
3456 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
/*
 * Compute the scanline offset (relative to vsync) at which VUPDATE occurs.
 * Works on a patched copy of the stream timing so that the front-porch
 * workaround never modifies the caller's timing. The result can be
 * negative (callers such as dcn10_setup_vupdate_interrupt check for this).
 */
3458 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3459 struct dc_crtc_timing patched_crtc_timing;
3460 int vesa_sync_start;
3462 int interlace_factor;
3463 int vertical_line_start;
/* Copy, then clamp the front porch to the HW-required minimum. */
3465 patched_crtc_timing = *dc_crtc_timing;
3466 apply_front_porch_workaround(&patched_crtc_timing);
/* Interlaced timings count line positions twice. */
3468 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3470 vesa_sync_start = patched_crtc_timing.v_addressable +
3471 patched_crtc_timing.v_border_bottom +
3472 patched_crtc_timing.v_front_porch;
/* Blank end measured from vsync, derived from the patched timing. */
3474 asic_blank_end = (patched_crtc_timing.v_total -
3476 patched_crtc_timing.v_border_top)
/* VUPDATE fires vstartup_start lines before blank end (+1 line). */
3479 vertical_line_start = asic_blank_end -
3480 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3482 return vertical_line_start;
3485 void dcn10_calc_vupdate_position(
/*
 * Compute the [start_line, end_line) scanline window for a periodic
 * interrupt referenced to VUPDATE: take the stream's configured line offset
 * relative to VUPDATE, bias it one line toward VUPDATE, add the
 * VUPDATE-from-vsync offset, and wrap the result into [0, v_total).
 */
3487 struct pipe_ctx *pipe_ctx,
3488 uint32_t *start_line,
3491 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3492 int vline_int_offset_from_vupdate =
3493 pipe_ctx->stream->periodic_interrupt0.lines_offset;
3494 int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
/* Shrink the offset by one line toward VUPDATE (either sign). */
3497 if (vline_int_offset_from_vupdate > 0)
3498 vline_int_offset_from_vupdate--;
3499 else if (vline_int_offset_from_vupdate < 0)
3500 vline_int_offset_from_vupdate++;
3502 start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
/* Wrap a negative start position back into the frame. */
3504 if (start_position >= 0)
3505 *start_line = start_position;
3507 *start_line = dc_crtc_timing->v_total + start_position - 1;
/* Interrupt window is two lines wide; clamp it inside v_total. */
3509 *end_line = *start_line + 2;
3511 if (*end_line >= dc_crtc_timing->v_total)
3515 static void dcn10_cal_vline_position(
/*
 * Resolve the scanline window for the requested vline interrupt (VLINE0 or
 * VLINE1) based on the stream's configured reference point. Only the
 * VUPDATE reference needs a computed position; other reference points fall
 * through to the cases below.
 */
3517 struct pipe_ctx *pipe_ctx,
3518 enum vline_select vline,
3519 uint32_t *start_line,
3522 enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
/* Each vline interrupt carries its own reference-point configuration. */
3524 if (vline == VLINE0)
3525 ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3526 else if (vline == VLINE1)
3527 ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3529 switch (ref_point) {
3530 case START_V_UPDATE:
3531 dcn10_calc_vupdate_position(
3538 // Suppose to do nothing because vsync is 0;
3546 void dcn10_setup_periodic_interrupt(
/*
 * Program the timing generator's periodic vertical interrupt for this pipe.
 * VLINE0 uses a computed start/end scanline window; VLINE1 is programmed
 * directly from the stream's configured line offset.
 */
3548 struct pipe_ctx *pipe_ctx,
3549 enum vline_select vline)
3551 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3553 if (vline == VLINE0) {
3554 uint32_t start_line = 0;
3555 uint32_t end_line = 0;
/* Translate the stream's interrupt config into a scanline window. */
3557 dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3559 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3561 } else if (vline == VLINE1) {
3562 pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3564 pipe_ctx->stream->periodic_interrupt1.lines_offset);
3568 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
/*
 * Program vertical interrupt 2 to fire at the VUPDATE scanline, when the
 * timing generator supports it. A negative offset from vsync is handled
 * before programming (the branch body is not visible in this extract).
 */
3570 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3571 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3573 if (start_line < 0) {
/* Optional hook: older/other TG revisions may not implement interrupt 2. */
3578 if (tg->funcs->setup_vertical_interrupt2)
3579 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3582 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
/*
 * Unblank the stream encoder output for this pipe. For DP signals the
 * stream encoder's dp_unblank hook is invoked (with the pixel clock halved
 * for YCbCr 4:2:0); afterwards, an eDP local sink additionally has its
 * backlight turned on.
 */
3583 struct dc_link_settings *link_settings)
3585 struct encoder_unblank_param params = { { 0 } };
3586 struct dc_stream_state *stream = pipe_ctx->stream;
3587 struct dc_link *link = stream->link;
3588 struct dce_hwseq *hws = link->dc->hwseq;
3590 /* only 3 items below are used by unblank */
3591 params.timing = pipe_ctx->stream->timing;
3593 params.link_settings.link_rate = link_settings->link_rate;
3595 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
/* YCbCr 4:2:0 carries two pixels per clock, so halve the pixel clock. */
3596 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3597 params.timing.pix_clk_100hz /= 2;
3598 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
/* eDP panels also need their backlight switched on after unblank. */
3601 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3602 hws->funcs.edp_backlight_control(link, true);
3606 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
/*
 * Send a caller-supplied SDP (Secondary Data Packet) message immediately
 * over the stream encoder. Only meaningful for DP signals; other signal
 * types are ignored.
 */
3607 const uint8_t *custom_sdp_message,
3608 unsigned int sdp_message_size)
3610 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3611 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3612 pipe_ctx->stream_res.stream_enc,
3617 enum dc_status dcn10_set_clock(struct dc *dc,
/*
 * Validate and apply a clock frequency request of the given type.
 * The requested value is checked against the clock manager's reported
 * min/max and bandwidth-required limits, recorded in the current context's
 * clock state, and then pushed to HW via update_clocks.
 */
3618 enum dc_clock_type clock_type,
3622 struct dc_state *context = dc->current_state;
3623 struct dc_clock_config clock_cfg = {0};
3624 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
/* Query current limits for this clock type, when the hook exists. */
3626 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3627 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3628 context, clock_type, &clock_cfg);
/* NOTE(review): dereferences dc->clk_mgr without a NULL check, even though
 * the guard above implies clk_mgr may be NULL — would oops here; verify. */
3630 if (!dc->clk_mgr->funcs->get_clock)
3631 return DC_FAIL_UNSUPPORTED_1;
/* Reject requests outside the clock manager's reported envelope. */
3633 if (clk_khz > clock_cfg.max_clock_khz)
3634 return DC_FAIL_CLK_EXCEED_MAX;
3636 if (clk_khz < clock_cfg.min_clock_khz)
3637 return DC_FAIL_CLK_BELOW_MIN;
/* "bw_requirequired" is the field's actual (misspelled) name in the struct. */
3639 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3640 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3642 /*update internal request clock for update clock use*/
3643 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3644 current_clocks->dispclk_khz = clk_khz;
3645 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3646 current_clocks->dppclk_khz = clk_khz;
3648 return DC_ERROR_UNEXPECTED;
/* Commit the updated clock state to HW. */
3650 if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3651 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3657 void dcn10_get_clock(struct dc *dc,
3658 enum dc_clock_type clock_type,
3659 struct dc_clock_config *clock_cfg)
3661 struct dc_state *context = dc->current_state;
3663 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3664 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3668 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
/*
 * Report per-pipe DCC (Delta Color Compression) enablement: refresh each
 * HUBP's cached register state and store 1/0 into dcc_en_bits[i] for every
 * pipe in the resource pool. The caller must supply an array of at least
 * pool->pipe_count entries.
 */
3670 struct resource_pool *pool = dc->res_pool;
3673 for (i = 0; i < pool->pipe_count; i++) {
3674 struct hubp *hubp = pool->hubps[i];
3675 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
/* Refresh the cached HUBP register snapshot before reading dcc_en. */
3677 hubp->funcs->hubp_read_state(hubp);
3680 dcc_en_bits[i] = s->dcc_en ? 1 : 0;