/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>
#include "dm_services.h"
#include "core_types.h"
#include "custom_float.h"
#include "dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dce/dce_hwseq.h"
#include "dcn10_optc.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10/dcn10_mpc.h"
#include "timing_generator.h"
#include "reg_helper.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
#include "dc_link_dp.h"
52 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
56 #define DC_LOGGER_INIT(logger)
64 #define FN(reg_name, field_name) \
65 hws->shifts->field_name, hws->masks->field_name
/* The printed field is 17 characters wide; the first two characters are spaces. */
#define DTN_INFO_MICRO_SEC(ref_cycle) \
	print_microsec(dc_ctx, log_ctx, ref_cycle)
void print_microsec(struct dc_context *dc_ctx,
	struct dc_log_buffer_ctx *log_ctx,
	uint32_t ref_cycle)
{
	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	static const unsigned int frac = 1000;
	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO("  %11d.%03d",
			us_x10 / frac,
			us_x10 % frac);
}
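/* Worked example (assuming, purely for the arithmetic, a 600 MHz DCHUB
 * reference clock): for ref_cycle = 3000, us_x10 = (3000 * 1000) / 600 = 5000,
 * which prints as "5.000" microseconds. Despite its name, us_x10 holds
 * microseconds scaled by 1000 (the 'frac' divisor), not by 10.
 */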
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
			REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
			REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
			" sr_enter sr_exit dram_clk_change\n");

	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		DTN_INFO_MICRO_SEC(s->dram_clk_chanage); /* field name spelling follows the struct definition */
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}
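/* The four sets printed above correspond to the HUBBUB watermark sets
 * (A-D in the register spec, selected per clock state by the clock manager);
 * this is an assumption based on the wm_read_state() interface, since the log
 * itself only labels them by wm_set index.
 */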
126 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
128 struct dc_context *dc_ctx = dc->ctx;
129 struct resource_pool *pool = dc->res_pool;
132 DTN_INFO("HUBP: format addr_hi width height"
133 " rot mir sw_mode dcc_en blank_en ttu_dis underflow"
134 " min_ttu_vblank qos_low_wm qos_high_wm\n");
135 for (i = 0; i < pool->pipe_count; i++) {
136 struct hubp *hubp = pool->hubps[i];
137 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
139 hubp->funcs->hubp_read_state(hubp);
142 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
155 s->underflow_status);
156 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
157 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
158 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
163 DTN_INFO("\n=========RQ========\n");
164 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
165 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
166 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
167 for (i = 0; i < pool->pipe_count; i++) {
168 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
169 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
172 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
173 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
174 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
175 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
176 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
177 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
178 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
179 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
180 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
181 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
184 DTN_INFO("========DLG========\n");
185 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
186 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
187 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
188 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
189 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
190 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
191 " x_rp_dlay x_rr_sfl\n");
192 for (i = 0; i < pool->pipe_count; i++) {
193 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
194 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
197 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
198 "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
199 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
200 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
201 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
202 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
203 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
204 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
205 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
206 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
207 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
208 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
209 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
210 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
211 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
212 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
213 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
214 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
215 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
216 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
217 dlg_regs->xfc_reg_remote_surface_flip_latency);
220 DTN_INFO("========TTU========\n");
221 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
222 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
223 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
224 for (i = 0; i < pool->pipe_count; i++) {
225 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
226 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
229 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
230 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
231 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
232 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
233 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
234 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
235 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
236 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
241 void dcn10_log_hw_state(struct dc *dc,
242 struct dc_log_buffer_ctx *log_ctx)
244 struct dc_context *dc_ctx = dc->ctx;
245 struct resource_pool *pool = dc->res_pool;
250 dcn10_log_hubbub_state(dc, log_ctx);
252 dcn10_log_hubp_states(dc, log_ctx);
254 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
255 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
256 "C31 C32 C33 C34\n");
257 for (i = 0; i < pool->pipe_count; i++) {
258 struct dpp *dpp = pool->dpps[i];
259 struct dcn_dpp_state s = {0};
261 dpp->funcs->dpp_read_state(dpp, &s);
266 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
267 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
270 (s.igam_lut_mode == 0) ? "BypassFixed" :
271 ((s.igam_lut_mode == 1) ? "BypassFloat" :
272 ((s.igam_lut_mode == 2) ? "RAM" :
273 ((s.igam_lut_mode == 3) ? "RAM" :
275 (s.dgam_lut_mode == 0) ? "Bypass" :
276 ((s.dgam_lut_mode == 1) ? "sRGB" :
277 ((s.dgam_lut_mode == 2) ? "Ycc" :
278 ((s.dgam_lut_mode == 3) ? "RAM" :
279 ((s.dgam_lut_mode == 4) ? "RAM" :
281 (s.rgam_lut_mode == 0) ? "Bypass" :
282 ((s.rgam_lut_mode == 1) ? "sRGB" :
283 ((s.rgam_lut_mode == 2) ? "Ycc" :
284 ((s.rgam_lut_mode == 3) ? "RAM" :
285 ((s.rgam_lut_mode == 4) ? "RAM" :
288 s.gamut_remap_c11_c12,
289 s.gamut_remap_c13_c14,
290 s.gamut_remap_c21_c22,
291 s.gamut_remap_c23_c24,
292 s.gamut_remap_c31_c32,
293 s.gamut_remap_c33_c34);
298 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
299 for (i = 0; i < pool->pipe_count; i++) {
300 struct mpcc_state s = {0};
302 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
304 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
305 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
306 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
311 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel"
312 " h_bs h_be h_ss h_se hpol htot vtot underflow\n");
314 for (i = 0; i < pool->timing_generator_count; i++) {
315 struct timing_generator *tg = pool->timing_generators[i];
316 struct dcn_otg_state s = {0};
318 optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
320 //only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;
324 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d"
325 " %5d %5d %5d %5d %9d\n",
343 s.underflow_occurred_status);
345 // Clear underflow for debug purposes
346 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
347 // This function is called only from Windows or Diags test environment, hence it's safe to clear
348 // it from here without affecting the original intent.
349 tg->funcs->clear_optc_underflow(tg);
353 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
354 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
355 for (i = 0; i < pool->res_cap->num_dsc; i++) {
356 struct display_stream_compressor *dsc = pool->dscs[i];
357 struct dcn_dsc_state s = {0};
359 dsc->funcs->dsc_read_state(dsc, &s);
360 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
364 s.dsc_bytes_per_pixel);
369 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
370 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
371 for (i = 0; i < pool->stream_enc_count; i++) {
372 struct stream_encoder *enc = pool->stream_enc[i];
373 struct enc_state s = {0};
375 if (enc->funcs->enc_read_state) {
376 enc->funcs->enc_read_state(enc, &s);
377 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
380 s.sec_gsp_pps_line_num,
381 s.vbid6_line_reference,
383 s.sec_gsp_pps_enable,
384 s.sec_stream_enable);
390 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS\n");
391 for (i = 0; i < dc->link_count; i++) {
392 struct link_encoder *lenc = dc->links[i]->link_enc;
394 struct link_enc_state s = {0};
396 if (lenc->funcs->read_state) {
397 lenc->funcs->read_state(lenc, &s);
398 DTN_INFO("[%-3d]: %-12d %-22d %-22d\n",
401 s.dphy_fec_ready_shadow,
402 s.dphy_fec_active_status);
409 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
410 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
411 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
412 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
413 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
414 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
415 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
416 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
417 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
419 log_mpc_crc(dc, log_ctx);
bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	if (tg->funcs->is_optc_underflow_occurred(tg)) {
		tg->funcs->clear_optc_underflow(tg);
		return true;
	}

	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
		hubp->funcs->hubp_clear_underflow(hubp);
		return true;
	}

	return false;
}
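/* Both checks above clear the underflow status they report, so a second call
 * returns false until a new underflow is latched; callers that care about the
 * sticky value must act on the first return.
 */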
static void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	bool force_on = true; /* disable power gating */

	if (enable)
		force_on = false;

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
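/* A note on the PG domain layout used above and in the pg_control helpers
 * below, as inferred from this file: the even domains (DOMAIN0/2/4/6) gate
 * DCHUBP0-3 and the odd domains (DOMAIN1/3/5/7) gate DPP0-3. Setting
 * *_POWER_FORCEON keeps a domain powered regardless of gating requests, while
 * the PGFSM_PWR_STATUS fields read back 0 when powered on and 2 when gated
 * (hence pwr_status = power_on ? 0 : 2 in the helpers below).
 */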
463 static void dcn10_disable_vga(
464 struct dce_hwseq *hws)
466 unsigned int in_vga1_mode = 0;
467 unsigned int in_vga2_mode = 0;
468 unsigned int in_vga3_mode = 0;
469 unsigned int in_vga4_mode = 0;
471 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
472 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
473 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
474 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
476 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
477 in_vga3_mode == 0 && in_vga4_mode == 0)
480 REG_WRITE(D1VGA_CONTROL, 0);
481 REG_WRITE(D2VGA_CONTROL, 0);
482 REG_WRITE(D3VGA_CONTROL, 0);
483 REG_WRITE(D4VGA_CONTROL, 0);
485 /* HW Engineer's Notes:
486 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
487 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
489 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
490 * VGA_TEST_ENABLE, to leave it in the same state as before.
492 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
493 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
496 static void dcn10_dpp_pg_control(
497 struct dce_hwseq *hws,
498 unsigned int dpp_inst,
501 uint32_t power_gate = power_on ? 0 : 1;
502 uint32_t pwr_status = power_on ? 0 : 2;
504 if (hws->ctx->dc->debug.disable_dpp_power_gate)
506 if (REG(DOMAIN1_PG_CONFIG) == 0)
511 REG_UPDATE(DOMAIN1_PG_CONFIG,
512 DOMAIN1_POWER_GATE, power_gate);
514 REG_WAIT(DOMAIN1_PG_STATUS,
515 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
519 REG_UPDATE(DOMAIN3_PG_CONFIG,
520 DOMAIN3_POWER_GATE, power_gate);
522 REG_WAIT(DOMAIN3_PG_STATUS,
523 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
527 REG_UPDATE(DOMAIN5_PG_CONFIG,
528 DOMAIN5_POWER_GATE, power_gate);
530 REG_WAIT(DOMAIN5_PG_STATUS,
531 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
535 REG_UPDATE(DOMAIN7_PG_CONFIG,
536 DOMAIN7_POWER_GATE, power_gate);
538 REG_WAIT(DOMAIN7_PG_STATUS,
539 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
548 static void dcn10_hubp_pg_control(
549 struct dce_hwseq *hws,
550 unsigned int hubp_inst,
553 uint32_t power_gate = power_on ? 0 : 1;
554 uint32_t pwr_status = power_on ? 0 : 2;
556 if (hws->ctx->dc->debug.disable_hubp_power_gate)
558 if (REG(DOMAIN0_PG_CONFIG) == 0)
562 case 0: /* DCHUBP0 */
563 REG_UPDATE(DOMAIN0_PG_CONFIG,
564 DOMAIN0_POWER_GATE, power_gate);
566 REG_WAIT(DOMAIN0_PG_STATUS,
567 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
570 case 1: /* DCHUBP1 */
571 REG_UPDATE(DOMAIN2_PG_CONFIG,
572 DOMAIN2_POWER_GATE, power_gate);
574 REG_WAIT(DOMAIN2_PG_STATUS,
575 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
578 case 2: /* DCHUBP2 */
579 REG_UPDATE(DOMAIN4_PG_CONFIG,
580 DOMAIN4_POWER_GATE, power_gate);
582 REG_WAIT(DOMAIN4_PG_STATUS,
583 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
586 case 3: /* DCHUBP3 */
587 REG_UPDATE(DOMAIN6_PG_CONFIG,
588 DOMAIN6_POWER_GATE, power_gate);
590 REG_WAIT(DOMAIN6_PG_STATUS,
591 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
static void power_on_plane(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);
		hws->ctx->dc->hwss.dpp_pg_control(hws, plane_id, true);
		hws->ctx->dc->hwss.hubp_pg_control(hws, plane_id, true);
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	dc->hwss.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* All pipes are power gated; apply the workaround so stutter can be enabled. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	dc->hwss.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
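/* Sketch of the DEGVIDCN10-253 workaround as implemented above: once every
 * HUBP in the pool has been power gated, stutter/self-refresh can no longer be
 * entered, so the workaround un-gates HUBP0 through the DC_IP_REQUEST window
 * and leaves it unblanked-but-idle. undo_DEGVIDCN10_253_wa() reverses this
 * before a pipe is brought back into real use.
 */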
667 static void dcn10_bios_golden_init(struct dc *dc)
669 struct dc_bios *bp = dc->ctx->dc_bios;
671 bool allow_self_fresh_force_enable = true;
673 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
674 allow_self_fresh_force_enable =
675 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
678 /* WA for making DF sleep when idle after resume from S0i3.
679 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
680 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
681 * before calling command table and it changed to 1 after,
682 * it should be set back to 0.
685 /* initialize dcn global */
686 bp->funcs->enable_disp_power_gating(bp,
687 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
689 for (i = 0; i < dc->res_pool->pipe_count; i++) {
690 /* initialize dcn per pipe */
691 bp->funcs->enable_disp_power_gating(bp,
692 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
695 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
696 if (allow_self_fresh_force_enable == false &&
697 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
698 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
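/* Rationale, as read from the code above: blanking can latch a spurious OTC
 * underflow on this TG. The helper snapshots the current underflow state,
 * waits for the MPCC disconnects of every pipe on the stream, arms the
 * blank-data double buffer, and then clears the underflow only if it appeared
 * during that window (i.e. it was not already pending beforehand).
 */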
731 static enum dc_status dcn10_enable_stream_timing(
732 struct pipe_ctx *pipe_ctx,
733 struct dc_state *context,
736 struct dc_stream_state *stream = pipe_ctx->stream;
737 enum dc_color_space color_space;
738 struct tg_color black_color = {0};
	/* The caller loops over pipes, so pipe 0 (the parent pipe) is called
	 * first and sets up the back end; child pipes share pipe 0's back end,
	 * so no further programming is needed for them.
	 */
744 if (pipe_ctx->top_pipe != NULL)
747 /* TODO check if timing_changed, disable stream if timing changed */
	/* The HW programming guide assumes the display was already disabled by
	 * the unplug sequence and that the OTG is stopped.
	 */
752 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
754 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
755 pipe_ctx->clock_source,
756 &pipe_ctx->stream_res.pix_clk_params,
757 &pipe_ctx->pll_settings)) {
759 return DC_ERROR_UNEXPECTED;
762 pipe_ctx->stream_res.tg->funcs->program_timing(
763 pipe_ctx->stream_res.tg,
765 pipe_ctx->pipe_dlg_param.vready_offset,
766 pipe_ctx->pipe_dlg_param.vstartup_start,
767 pipe_ctx->pipe_dlg_param.vupdate_offset,
768 pipe_ctx->pipe_dlg_param.vupdate_width,
769 pipe_ctx->stream->signal,
772 #if 0 /* move to after enable_crtc */
773 /* TODO: OPP FMT, ABM. etc. should be done here. */
774 /* or FPGA now. instance 0 only. TODO: move to opp.c */
776 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
778 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
779 pipe_ctx->stream_res.opp,
780 &stream->bit_depth_params,
783 /* program otg blank color */
784 color_space = stream->output_color_space;
785 color_space_to_black_color(dc, color_space, &black_color);
787 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
788 pipe_ctx->stream_res.tg->funcs->set_blank_color(
789 pipe_ctx->stream_res.tg,
792 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
793 !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
794 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
795 hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
796 false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
799 /* VTG is within DCHUB command block. DCFCLK is always on */
800 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
802 return DC_ERROR_UNEXPECTED;
805 /* TODO program crtc source select for non-virtual signal*/
806 /* TODO program FMT */
807 /* TODO setup link_enc */
808 /* TODO set stream attributes */
809 /* TODO program audio */
810 /* TODO enable stream if timing changed */
811 /* TODO unblank stream if DP */
816 static void dcn10_reset_back_end_for_pipe(
818 struct pipe_ctx *pipe_ctx,
819 struct dc_state *context)
822 DC_LOGGER_INIT(dc->ctx->logger);
823 if (pipe_ctx->stream_res.stream_enc == NULL) {
824 pipe_ctx->stream = NULL;
828 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
829 /* DPMS may already disable */
830 if (!pipe_ctx->stream->dpms_off)
831 core_link_disable_stream(pipe_ctx);
832 else if (pipe_ctx->stream_res.audio)
833 dc->hwss.disable_audio_stream(pipe_ctx);
835 if (pipe_ctx->stream_res.audio) {
836 /*disable az_endpoint*/
837 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
840 if (dc->caps.dynamic_audio == true) {
841 /*we have to dynamic arbitrate the audio endpoints*/
842 /*we free the resource, need reset is_audio_acquired*/
843 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
844 pipe_ctx->stream_res.audio, false);
845 pipe_ctx->stream_res.audio = NULL;
	/* The caller loops over pipes, so the parent pipe (pipe 0) is reset
	 * last; the back end is shared by all pipes and is disabled only when
	 * the parent pipe is disabled.
	 */
855 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
857 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
858 if (pipe_ctx->stream_res.tg->funcs->set_drr)
859 pipe_ctx->stream_res.tg->funcs->set_drr(
860 pipe_ctx->stream_res.tg, NULL);
863 for (i = 0; i < dc->res_pool->pipe_count; i++)
864 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
867 if (i == dc->res_pool->pipe_count)
870 pipe_ctx->stream = NULL;
871 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
872 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
875 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
879 bool need_recover = true;
881 if (!dc->debug.recovery_enabled)
884 for (i = 0; i < dc->res_pool->pipe_count; i++) {
885 struct pipe_ctx *pipe_ctx =
886 &dc->current_state->res_ctx.pipe_ctx[i];
887 if (pipe_ctx != NULL) {
888 hubp = pipe_ctx->plane_res.hubp;
889 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
890 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
891 /* one pipe underflow, we will reset all the pipes*/
900 DCHUBP_CNTL:HUBP_BLANK_EN=1
901 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
902 DCHUBP_CNTL:HUBP_DISABLE=1
903 DCHUBP_CNTL:HUBP_DISABLE=0
904 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
905 DCSURF_PRIMARY_SURFACE_ADDRESS
906 DCHUBP_CNTL:HUBP_BLANK_EN=0
909 for (i = 0; i < dc->res_pool->pipe_count; i++) {
910 struct pipe_ctx *pipe_ctx =
911 &dc->current_state->res_ctx.pipe_ctx[i];
912 if (pipe_ctx != NULL) {
913 hubp = pipe_ctx->plane_res.hubp;
914 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
915 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
916 hubp->funcs->set_hubp_blank_en(hubp, true);
919 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
920 hubbub1_soft_reset(dc->res_pool->hubbub, true);
922 for (i = 0; i < dc->res_pool->pipe_count; i++) {
923 struct pipe_ctx *pipe_ctx =
924 &dc->current_state->res_ctx.pipe_ctx[i];
925 if (pipe_ctx != NULL) {
926 hubp = pipe_ctx->plane_res.hubp;
927 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
928 if (hubp != NULL && hubp->funcs->hubp_disable_control)
929 hubp->funcs->hubp_disable_control(hubp, true);
932 for (i = 0; i < dc->res_pool->pipe_count; i++) {
933 struct pipe_ctx *pipe_ctx =
934 &dc->current_state->res_ctx.pipe_ctx[i];
935 if (pipe_ctx != NULL) {
936 hubp = pipe_ctx->plane_res.hubp;
937 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
938 if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, false);
942 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
943 hubbub1_soft_reset(dc->res_pool->hubbub, false);
944 for (i = 0; i < dc->res_pool->pipe_count; i++) {
945 struct pipe_ctx *pipe_ctx =
946 &dc->current_state->res_ctx.pipe_ctx[i];
947 if (pipe_ctx != NULL) {
948 hubp = pipe_ctx->plane_res.hubp;
949 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
950 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, false);
959 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
961 static bool should_log_hw_state; /* prevent hw state log by default */
963 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
964 if (should_log_hw_state) {
965 dcn10_log_hw_state(dc, NULL);
968 if (dcn10_hw_wa_force_recovery(dc)) {
970 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
976 /* trigger HW to start disconnect plane from stream on the next vsync */
977 void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
979 struct hubp *hubp = pipe_ctx->plane_res.hubp;
980 int dpp_id = pipe_ctx->plane_res.dpp->inst;
981 struct mpc *mpc = dc->res_pool->mpc;
982 struct mpc_tree *mpc_tree_params;
983 struct mpcc *mpcc_to_remove = NULL;
984 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
986 mpc_tree_params = &(opp->mpc_tree_params);
987 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
990 if (mpcc_to_remove == NULL)
993 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
995 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
997 dc->optimized_required = true;
999 if (hubp->funcs->hubp_disconnect)
1000 hubp->funcs->hubp_disconnect(hubp);
1002 if (dc->debug.sanity_checks)
1003 dcn10_verify_allow_pstate_change_high(dc);
1006 static void dcn10_plane_atomic_power_down(struct dc *dc,
1010 struct dce_hwseq *hws = dc->hwseq;
1011 DC_LOGGER_INIT(dc->ctx->logger);
1013 if (REG(DC_IP_REQUEST_CNTL)) {
1014 REG_SET(DC_IP_REQUEST_CNTL, 0,
1016 dc->hwss.dpp_pg_control(hws, dpp->inst, false);
1017 dc->hwss.hubp_pg_control(hws, hubp->inst, false);
1018 dpp->funcs->dpp_reset(dpp);
1019 REG_SET(DC_IP_REQUEST_CNTL, 0,
1022 "Power gated front end %d\n", hubp->inst);
/* Disable HW used by the plane.
 * Note: this cannot be done until the disconnect is complete.
 */
1029 static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1031 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1032 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1033 int opp_id = hubp->opp_id;
1035 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1037 hubp->funcs->hubp_clk_cntl(hubp, false);
1039 dpp->funcs->dpp_dppclk_control(dpp, false, false);
1041 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1042 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1043 pipe_ctx->stream_res.opp,
1046 hubp->power_gated = true;
1047 dc->optimized_required = false; /* We're powering off, no need to optimize */
1049 dc->hwss.plane_atomic_power_down(dc,
1050 pipe_ctx->plane_res.dpp,
1051 pipe_ctx->plane_res.hubp);
1053 pipe_ctx->stream = NULL;
1054 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1055 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1056 pipe_ctx->top_pipe = NULL;
1057 pipe_ctx->bottom_pipe = NULL;
1058 pipe_ctx->plane_state = NULL;
1061 static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1063 DC_LOGGER_INIT(dc->ctx->logger);
1065 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1068 dc->hwss.plane_atomic_disable(dc, pipe_ctx);
1070 apply_DEGVIDCN10_253_wa(dc);
1072 DC_LOG_DC("Power down front end %d\n",
1073 pipe_ctx->pipe_idx);
1076 static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1079 bool can_apply_seamless_boot = false;
1081 for (i = 0; i < context->stream_count; i++) {
1082 if (context->streams[i]->apply_seamless_boot_optimization) {
1083 can_apply_seamless_boot = true;
1088 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1089 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1090 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		/* We assume pipe_ctx does not map irregularly to a non-preferred
		 * front end. If pipe_ctx->stream is not NULL, the pipe will be
		 * used, so do not disable it.
		 */
1096 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
		/* Blank the controller using driver code instead of the VBIOS
		 * command table.
		 */
1102 if (tg->funcs->is_tg_enabled(tg)) {
1103 if (dc->hwss.init_blank != NULL) {
1104 dc->hwss.init_blank(dc, tg);
1105 tg->funcs->lock(tg);
1107 tg->funcs->lock(tg);
1108 tg->funcs->set_blank(tg, true);
1109 hwss_wait_for_blank_complete(tg);
1114 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1115 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1117 /* Cannot reset the MPC mux if seamless boot */
1118 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1121 dc->res_pool->mpc->funcs->mpc_init_single_inst(
1122 dc->res_pool->mpc, i);
1125 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1126 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1127 struct hubp *hubp = dc->res_pool->hubps[i];
1128 struct dpp *dpp = dc->res_pool->dpps[i];
1129 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		/* We assume pipe_ctx does not map irregularly to a non-preferred
		 * front end. If pipe_ctx->stream is not NULL, the pipe will be
		 * used, so do not disable it.
		 */
1135 if (can_apply_seamless_boot &&
1136 pipe_ctx->stream != NULL &&
1137 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1138 pipe_ctx->stream_res.tg))
1141 /* Disable on the current state so the new one isn't cleared. */
1142 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1144 dpp->funcs->dpp_reset(dpp);
1146 pipe_ctx->stream_res.tg = tg;
1147 pipe_ctx->pipe_idx = i;
1149 pipe_ctx->plane_res.hubp = hubp;
1150 pipe_ctx->plane_res.dpp = dpp;
1151 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1152 hubp->mpcc_id = dpp->inst;
1153 hubp->opp_id = OPP_ID_INVALID;
1154 hubp->power_gated = false;
1156 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1157 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1158 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1159 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1161 dc->hwss.plane_atomic_disconnect(dc, pipe_ctx);
1163 if (tg->funcs->is_tg_enabled(tg))
1164 tg->funcs->unlock(tg);
1166 dc->hwss.disable_plane(dc, pipe_ctx);
1168 pipe_ctx->stream_res.tg = NULL;
1169 pipe_ctx->plane_res.hubp = NULL;
1171 tg->funcs->tg_init(tg);
1175 static void dcn10_init_hw(struct dc *dc)
1178 struct abm *abm = dc->res_pool->abm;
1179 struct dmcu *dmcu = dc->res_pool->dmcu;
1180 struct dce_hwseq *hws = dc->hwseq;
1181 struct dc_bios *dcb = dc->ctx->dc_bios;
1182 struct resource_pool *res_pool = dc->res_pool;
1184 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1185 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1187 // Initialize the dccg
1188 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1189 dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1191 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1193 REG_WRITE(REFCLK_CNTL, 0);
1194 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1195 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1197 if (!dc->debug.disable_clock_gate) {
1198 /* enable all DCN clock gating */
1199 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1201 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1203 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1206 //Enable ability to power gate / don't force power on permanently
1207 dc->hwss.enable_power_gating_plane(hws, true);
1212 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1213 dc->hwss.bios_golden_init(dc);
1214 if (dc->ctx->dc_bios->fw_info_valid) {
1215 res_pool->ref_clocks.xtalin_clock_inKhz =
1216 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1218 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1219 if (res_pool->dccg && res_pool->hubbub) {
1221 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1222 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1223 &res_pool->ref_clocks.dccg_ref_clock_inKhz);
1225 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1226 res_pool->ref_clocks.dccg_ref_clock_inKhz,
1227 &res_pool->ref_clocks.dchub_ref_clock_inKhz);
1229 // Not all ASICs have DCCG sw component
1230 res_pool->ref_clocks.dccg_ref_clock_inKhz =
1231 res_pool->ref_clocks.xtalin_clock_inKhz;
1232 res_pool->ref_clocks.dchub_ref_clock_inKhz =
1233 res_pool->ref_clocks.xtalin_clock_inKhz;
1237 ASSERT_CRITICAL(false);
1238 dc->hwss.disable_vga(dc->hwseq);
1241 for (i = 0; i < dc->link_count; i++) {
1242 /* Power up AND update implementation according to the
1243 * required signal (which may be different from the
1244 * default signal on connector).
1246 struct dc_link *link = dc->links[i];
1248 link->link_enc->funcs->hw_init(link->link_enc);
1250 /* Check for enabled DIG to identify enabled display */
1251 if (link->link_enc->funcs->is_dig_enabled &&
1252 link->link_enc->funcs->is_dig_enabled(link->link_enc))
1253 link->link_status.link_active = true;
1256 /* Power gate DSCs */
1257 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1258 for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1259 if (dc->hwss.dsc_pg_control != NULL)
			dc->hwss.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
#endif
	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
1269 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1270 dc->hwss.init_pipes(dc, dc->current_state);
1273 for (i = 0; i < res_pool->audio_count; i++) {
1274 struct audio *audio = res_pool->audios[i];
1276 audio->funcs->hw_init(audio);
1280 abm->funcs->init_backlight(abm);
1281 abm->funcs->abm_init(abm);
1285 dmcu->funcs->dmcu_init(dmcu);
1287 if (abm != NULL && dmcu != NULL)
1288 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	/* Power AFMT HDMI memory. TODO: may move to enable/disable output to save power. */
1291 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1293 if (!dc->debug.disable_clock_gate) {
1294 /* enable all DCN clock gating */
1295 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1297 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1299 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1302 dc->hwss.enable_power_gating_plane(dc->hwseq, true);
1305 static void dcn10_reset_hw_ctx_wrap(
1307 struct dc_state *context)
1312 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1313 struct pipe_ctx *pipe_ctx_old =
1314 &dc->current_state->res_ctx.pipe_ctx[i];
1315 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1317 if (!pipe_ctx_old->stream)
1320 if (pipe_ctx_old->top_pipe)
1323 if (!pipe_ctx->stream ||
1324 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1325 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1327 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1328 if (dc->hwss.enable_stream_gating)
1329 dc->hwss.enable_stream_gating(dc, pipe_ctx);
1331 old_clk->funcs->cs_power_down(old_clk);
1336 static bool patch_address_for_sbs_tb_stereo(
1337 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1339 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1340 bool sec_split = pipe_ctx->top_pipe &&
1341 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1342 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1343 (pipe_ctx->stream->timing.timing_3d_format ==
1344 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1345 pipe_ctx->stream->timing.timing_3d_format ==
1346 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1347 *addr = plane_state->address.grph_stereo.left_addr;
1348 plane_state->address.grph_stereo.left_addr =
1349 plane_state->address.grph_stereo.right_addr;
1352 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1353 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1354 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1355 plane_state->address.grph_stereo.right_addr =
1356 plane_state->address.grph_stereo.left_addr;
1364 static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1366 bool addr_patched = false;
1367 PHYSICAL_ADDRESS_LOC addr;
1368 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1370 if (plane_state == NULL)
1373 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1375 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1376 pipe_ctx->plane_res.hubp,
1377 &plane_state->address,
1378 plane_state->flip_immediate);
1380 plane_state->status.requested_address = plane_state->address;
1382 if (plane_state->flip_immediate)
1383 plane_state->status.current_address = plane_state->address;
	if (addr_patched)
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
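/* Why the swap in patch_address_for_sbs_tb_stereo(), as read from the code
 * above: for side-by-side and top-and-bottom stereo the secondary (split)
 * pipe must scan out the right-eye image, so its "left" address is
 * temporarily pointed at the right-eye address while the flip is programmed,
 * and then restored here afterwards.
 */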
1389 static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
1390 const struct dc_plane_state *plane_state)
1392 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1393 const struct dc_transfer_func *tf = NULL;
1396 if (dpp_base == NULL)
1399 if (plane_state->in_transfer_func)
1400 tf = plane_state->in_transfer_func;
1402 if (plane_state->gamma_correction &&
1403 !dpp_base->ctx->dc->debug.always_use_regamma
1404 && !plane_state->gamma_correction->is_identity
1405 && dce_use_lut(plane_state->format))
1406 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1409 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1410 else if (tf->type == TF_TYPE_PREDEFINED) {
1412 case TRANSFER_FUNCTION_SRGB:
1413 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1415 case TRANSFER_FUNCTION_BT709:
1416 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1418 case TRANSFER_FUNCTION_LINEAR:
1419 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1421 case TRANSFER_FUNCTION_PQ:
1426 } else if (tf->type == TF_TYPE_BYPASS) {
1427 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1429 cm_helper_translate_curve_to_degamma_hw_format(tf,
1430 &dpp_base->degamma_params);
1431 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1432 &dpp_base->degamma_params);
1439 #define MAX_NUM_HW_POINTS 0x200
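/* 0x200 = 512 entries: the full size of the hardware transfer-function LUT.
 * log_tf() below prints the first hw_points_num entries at the normal gamma
 * log level and the remaining entries only when all-points logging is enabled.
 */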
1441 static void log_tf(struct dc_context *ctx,
1442 struct dc_transfer_func *tf, uint32_t hw_points_num)
1444 // DC_LOG_GAMMA is default logging of all hw points
1445 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1446 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1449 DC_LOGGER_INIT(ctx->logger);
1450 DC_LOG_GAMMA("Gamma Correction TF");
1451 DC_LOG_ALL_GAMMA("Logging all tf points...");
1452 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1454 for (i = 0; i < hw_points_num; i++) {
1455 DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
1456 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
1457 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
1460 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1461 DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
1462 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
1463 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
1468 dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
1469 const struct dc_stream_state *stream)
1471 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1476 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1478 if (stream->out_transfer_func &&
1479 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1480 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1481 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
	/* dcn10_translate_regamma_to_hw_format takes 750 us; only do it when a
	 * full update is required.
	 */
1486 else if (cm_helper_translate_curve_to_hw_format(
1487 stream->out_transfer_func,
1488 &dpp->regamma_params, false)) {
1489 dpp->funcs->dpp_program_regamma_pwl(
1491 &dpp->regamma_params, OPP_REGAMMA_USER);
1493 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1495 if (stream != NULL && stream->ctx != NULL &&
1496 stream->out_transfer_func != NULL) {
1498 stream->out_transfer_func,
1499 dpp->regamma_params.hw_points_num);
1505 static void dcn10_pipe_control_lock(
1507 struct pipe_ctx *pipe,
1510 /* use TG master update lock to lock everything on the TG
1511 * therefore only top pipe need to lock
1516 if (dc->debug.sanity_checks)
1517 dcn10_verify_allow_pstate_change_high(dc);
1520 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1522 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1524 if (dc->debug.sanity_checks)
1525 dcn10_verify_allow_pstate_change_high(dc);
1528 static bool wait_for_reset_trigger_to_occur(
1529 struct dc_context *dc_ctx,
1530 struct timing_generator *tg)
1534 /* To avoid endless loop we wait at most
1535 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1536 const uint32_t frames_to_wait_on_triggered_reset = 10;
1539 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1541 if (!tg->funcs->is_counter_moving(tg)) {
1542 DC_ERROR("TG counter is not moving!\n");
1546 if (tg->funcs->did_triggered_reset_occur(tg)) {
1548 /* usually occurs at i=1 */
1549 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1554 /* Wait for one frame. */
1555 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1556 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1560 DC_ERROR("GSL: Timeout on reset trigger!\n");
1565 static void dcn10_enable_timing_synchronization(
1569 struct pipe_ctx *grouped_pipes[])
1571 struct dc_context *dc_ctx = dc->ctx;
1574 DC_SYNC_INFO("Setting up OTG reset trigger\n");
1576 for (i = 1; i < group_size; i++)
1577 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
1578 grouped_pipes[i]->stream_res.tg,
1579 grouped_pipes[0]->stream_res.tg->inst);
1581 DC_SYNC_INFO("Waiting for trigger\n");
	/* Only one pipe needs to be checked for the reset, since all the others
	 * are synchronized to it. Look at the last pipe programmed to reset.
	 */
1587 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
1588 for (i = 1; i < group_size; i++)
1589 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
1590 grouped_pipes[i]->stream_res.tg);
1592 DC_SYNC_INFO("Sync complete\n");
1595 static void dcn10_enable_per_frame_crtc_position_reset(
1598 struct pipe_ctx *grouped_pipes[])
1600 struct dc_context *dc_ctx = dc->ctx;
1603 DC_SYNC_INFO("Setting up\n");
1604 for (i = 0; i < group_size; i++)
1605 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1606 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1607 grouped_pipes[i]->stream_res.tg,
1609 &grouped_pipes[i]->stream->triggered_crtc_reset);
1611 DC_SYNC_INFO("Waiting for trigger\n");
1613 for (i = 0; i < group_size; i++)
1614 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1616 DC_SYNC_INFO("Multi-display sync is complete\n");
1619 /*static void print_rq_dlg_ttu(
1621 struct pipe_ctx *pipe_ctx)
1623 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1624 "\n============== DML TTU Output parameters [%d] ==============\n"
1625 "qos_level_low_wm: %d, \n"
1626 "qos_level_high_wm: %d, \n"
1627 "min_ttu_vblank: %d, \n"
1628 "qos_level_flip: %d, \n"
1629 "refcyc_per_req_delivery_l: %d, \n"
1630 "qos_level_fixed_l: %d, \n"
1631 "qos_ramp_disable_l: %d, \n"
1632 "refcyc_per_req_delivery_pre_l: %d, \n"
1633 "refcyc_per_req_delivery_c: %d, \n"
1634 "qos_level_fixed_c: %d, \n"
1635 "qos_ramp_disable_c: %d, \n"
1636 "refcyc_per_req_delivery_pre_c: %d\n"
1637 "=============================================================\n",
1639 pipe_ctx->ttu_regs.qos_level_low_wm,
1640 pipe_ctx->ttu_regs.qos_level_high_wm,
1641 pipe_ctx->ttu_regs.min_ttu_vblank,
1642 pipe_ctx->ttu_regs.qos_level_flip,
1643 pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
1644 pipe_ctx->ttu_regs.qos_level_fixed_l,
1645 pipe_ctx->ttu_regs.qos_ramp_disable_l,
1646 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
1647 pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
1648 pipe_ctx->ttu_regs.qos_level_fixed_c,
1649 pipe_ctx->ttu_regs.qos_ramp_disable_c,
1650 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1653 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1654 "\n============== DML DLG Output parameters [%d] ==============\n"
1655 "refcyc_h_blank_end: %d, \n"
1656 "dlg_vblank_end: %d, \n"
1657 "min_dst_y_next_start: %d, \n"
1658 "refcyc_per_htotal: %d, \n"
1659 "refcyc_x_after_scaler: %d, \n"
1660 "dst_y_after_scaler: %d, \n"
1661 "dst_y_prefetch: %d, \n"
1662 "dst_y_per_vm_vblank: %d, \n"
1663 "dst_y_per_row_vblank: %d, \n"
1664 "ref_freq_to_pix_freq: %d, \n"
1665 "vratio_prefetch: %d, \n"
1666 "refcyc_per_pte_group_vblank_l: %d, \n"
1667 "refcyc_per_meta_chunk_vblank_l: %d, \n"
1668 "dst_y_per_pte_row_nom_l: %d, \n"
1669 "refcyc_per_pte_group_nom_l: %d, \n",
1671 pipe_ctx->dlg_regs.refcyc_h_blank_end,
1672 pipe_ctx->dlg_regs.dlg_vblank_end,
1673 pipe_ctx->dlg_regs.min_dst_y_next_start,
1674 pipe_ctx->dlg_regs.refcyc_per_htotal,
1675 pipe_ctx->dlg_regs.refcyc_x_after_scaler,
1676 pipe_ctx->dlg_regs.dst_y_after_scaler,
1677 pipe_ctx->dlg_regs.dst_y_prefetch,
1678 pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
1679 pipe_ctx->dlg_regs.dst_y_per_row_vblank,
1680 pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
1681 pipe_ctx->dlg_regs.vratio_prefetch,
1682 pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
1683 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
1684 pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
1685 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1688 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1689 "\ndst_y_per_meta_row_nom_l: %d, \n"
1690 "refcyc_per_meta_chunk_nom_l: %d, \n"
1691 "refcyc_per_line_delivery_pre_l: %d, \n"
1692 "refcyc_per_line_delivery_l: %d, \n"
1693 "vratio_prefetch_c: %d, \n"
1694 "refcyc_per_pte_group_vblank_c: %d, \n"
1695 "refcyc_per_meta_chunk_vblank_c: %d, \n"
1696 "dst_y_per_pte_row_nom_c: %d, \n"
1697 "refcyc_per_pte_group_nom_c: %d, \n"
1698 "dst_y_per_meta_row_nom_c: %d, \n"
1699 "refcyc_per_meta_chunk_nom_c: %d, \n"
1700 "refcyc_per_line_delivery_pre_c: %d, \n"
1701 "refcyc_per_line_delivery_c: %d \n"
1702 "========================================================\n",
1703 pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
1704 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
1705 pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
1706 pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
1707 pipe_ctx->dlg_regs.vratio_prefetch_c,
1708 pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
1709 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
1710 pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
1711 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
1712 pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
1713 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
1714 pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
1715 pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
1718 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1719 "\n============== DML RQ Output parameters [%d] ==============\n"
1721 "min_chunk_size: %d \n"
1722 "meta_chunk_size: %d \n"
1723 "min_meta_chunk_size: %d \n"
1724 "dpte_group_size: %d \n"
1725 "mpte_group_size: %d \n"
1726 "swath_height: %d \n"
1727 "pte_row_height_linear: %d \n"
1728 "========================================================\n",
1730 pipe_ctx->rq_regs.rq_regs_l.chunk_size,
1731 pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
1732 pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
1733 pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
1734 pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
1735 pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
1736 pipe_ctx->rq_regs.rq_regs_l.swath_height,
1737 pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
1742 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
1743 struct vm_system_aperture_param *apt,
1744 struct dce_hwseq *hws)
1746 PHYSICAL_ADDRESS_LOC physical_page_number;
1747 uint32_t logical_addr_low;
1748 uint32_t logical_addr_high;
1750 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
1751 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
1752 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
1753 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
1755 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1756 LOGICAL_ADDR, &logical_addr_low);
1758 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1759 LOGICAL_ADDR, &logical_addr_high);
1761 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
1762 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}
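/* Note on the shifts above: PHYSICAL_PAGE_NUMBER is a 4 KiB page-frame
 * number, so << 12 converts it to a byte address, while the aperture
 * LOW/HIGH logical addresses are programmed in 256 KiB units, so << 18
 * converts them to bytes. Register granularity as understood for DCN1;
 * treat this as an explanatory note rather than a spec quote.
 */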
1766 /* Temporary read settings, future will get values from kmd directly */
1767 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
1768 struct vm_context0_param *vm0,
1769 struct dce_hwseq *hws)
1771 PHYSICAL_ADDRESS_LOC fb_base;
1772 PHYSICAL_ADDRESS_LOC fb_offset;
1773 uint32_t fb_base_value;
1774 uint32_t fb_offset_value;
1776 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
1777 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
1779 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
1780 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
1781 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
1782 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
1784 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
1785 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
1786 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
1787 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
1789 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
1790 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
1791 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
1792 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
1794 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
1795 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
1796 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
1797 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
	/*
	 * The value in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 *	- DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
1805 fb_base.quad_part = (uint64_t)fb_base_value << 24;
1806 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
1807 vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
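/* DCHUBBUB_SDPIF_FB_BASE and FB_OFFSET are programmed in 16 MiB units (hence
 * the << 24), so the adjustment above implements
 *   DCN page-table base = VM base + FB_BASE - FB_OFFSET
 * translating the UMA-space address into the address space DCN uses. The
 * granularity is stated to the best of my knowledge for DCN1.
 */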
1812 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
1814 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
1815 struct vm_system_aperture_param apt = { {{ 0 } } };
1816 struct vm_context0_param vm0 = { { { 0 } } };
1818 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
1819 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
1821 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
}
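/* dcn10_program_pte_vm() is called from dcn10_enable_plane() only when
 * dc->config.gpu_vm_support is set; it mirrors the MMHUB system aperture and
 * VM context 0 settings into the HUBP so DCN translates surface addresses the
 * same way the GPU VM does.
 */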
1825 static void dcn10_enable_plane(
1827 struct pipe_ctx *pipe_ctx,
1828 struct dc_state *context)
1830 struct dce_hwseq *hws = dc->hwseq;
1832 if (dc->debug.sanity_checks) {
1833 dcn10_verify_allow_pstate_change_high(dc);
1836 undo_DEGVIDCN10_253_wa(dc);
1838 power_on_plane(dc->hwseq,
1839 pipe_ctx->plane_res.hubp->inst);
1841 /* enable DCFCLK current DCHUB */
1842 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
1844 /* make sure OPP_PIPE_CLOCK_EN = 1 */
1845 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1846 pipe_ctx->stream_res.opp,
	/* TODO: enable/disable in DM as per update type. */
1851 DC_LOG_DC(dc->ctx->logger,
1852 "Pipe:%d 0x%x: addr hi:0x%x, "
1855 " %d; dst: %d, %d, %d, %d;\n",
1858 plane_state->address.grph.addr.high_part,
1859 plane_state->address.grph.addr.low_part,
1860 plane_state->src_rect.x,
1861 plane_state->src_rect.y,
1862 plane_state->src_rect.width,
1863 plane_state->src_rect.height,
1864 plane_state->dst_rect.x,
1865 plane_state->dst_rect.y,
1866 plane_state->dst_rect.width,
1867 plane_state->dst_rect.height);
1869 DC_LOG_DC(dc->ctx->logger,
1870 "Pipe %d: width, height, x, y format:%d\n"
1871 "viewport:%d, %d, %d, %d\n"
1872 "recout: %d, %d, %d, %d\n",
1874 plane_state->format,
1875 pipe_ctx->plane_res.scl_data.viewport.width,
1876 pipe_ctx->plane_res.scl_data.viewport.height,
1877 pipe_ctx->plane_res.scl_data.viewport.x,
1878 pipe_ctx->plane_res.scl_data.viewport.y,
1879 pipe_ctx->plane_res.scl_data.recout.width,
1880 pipe_ctx->plane_res.scl_data.recout.height,
1881 pipe_ctx->plane_res.scl_data.recout.x,
1882 pipe_ctx->plane_res.scl_data.recout.y);
1883 print_rq_dlg_ttu(dc, pipe_ctx);
1886 if (dc->config.gpu_vm_support)
1887 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
1889 if (dc->debug.sanity_checks) {
1890 dcn10_verify_allow_pstate_change_high(dc);
1894 static void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
1897 struct dpp_grph_csc_adjustment adjust;
1898 memset(&adjust, 0, sizeof(adjust));
1899 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
1902 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
1903 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
1904 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
1905 adjust.temperature_matrix[i] =
1906 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
1909 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
1913 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
1915 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
1916 if (pipe_ctx->top_pipe) {
1917 struct pipe_ctx *top = pipe_ctx->top_pipe;
1919 while (top->top_pipe)
1920 top = top->top_pipe; // Traverse to top pipe_ctx
1921 if (top->plane_state && top->plane_state->layer_index == 0)
1922 return true; // Front MPO plane not hidden
1928 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
1930 // Override rear plane RGB bias to fix MPO brightness
1931 uint16_t rgb_bias = matrix[3];
1936 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
1937 matrix[3] = rgb_bias;
1938 matrix[7] = rgb_bias;
1939 matrix[11] = rgb_bias;
1942 static void dcn10_program_output_csc(struct dc *dc,
1943 struct pipe_ctx *pipe_ctx,
1944 enum dc_color_space colorspace,
1948 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
1949 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
1951 /* MPO is broken with RGB colorspaces when the OCSC matrix
1952 * brightness offset is >= 0 on DCN1, because OCSC runs before MPC.
1953 * Blending then adds the offsets of the front and rear planes to the rear plane
1955 * Fix is to set the RGB bias to 0 on the rear plane; the top plane's
1956 * black-value pixels then add the offset instead of rear + front
1959 int16_t rgb_bias = matrix[3];
1960 // matrix[3/7/11] are all the same offset value
1962 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
1963 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
1965 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
1969 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
1970 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
1974 bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
1976 if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
1978 if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
1983 bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
1985 if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
1987 if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
1992 bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
1994 if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
1996 if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
1998 if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
2003 bool is_rgb_cspace(enum dc_color_space output_color_space)
2005 switch (output_color_space) {
2006 case COLOR_SPACE_SRGB:
2007 case COLOR_SPACE_SRGB_LIMITED:
2008 case COLOR_SPACE_2020_RGB_FULLRANGE:
2009 case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
2010 case COLOR_SPACE_ADOBERGB:
2012 case COLOR_SPACE_YCBCR601:
2013 case COLOR_SPACE_YCBCR709:
2014 case COLOR_SPACE_YCBCR601_LIMITED:
2015 case COLOR_SPACE_YCBCR709_LIMITED:
2016 case COLOR_SPACE_2020_YCBCR:
2019 /* Add a case to switch */
2020 BREAK_TO_DEBUGGER();
2025 void dcn10_get_surface_visual_confirm_color(
2026 const struct pipe_ctx *pipe_ctx,
2027 struct tg_color *color)
2029 uint32_t color_value = MAX_TG_COLOR_VALUE;
2031 switch (pipe_ctx->plane_res.scl_data.format) {
2032 case PIXEL_FORMAT_ARGB8888:
2033 /* set border color to red */
2034 color->color_r_cr = color_value;
2037 case PIXEL_FORMAT_ARGB2101010:
2038 /* set border color to blue */
2039 color->color_b_cb = color_value;
2041 case PIXEL_FORMAT_420BPP8:
2042 /* set border color to green */
2043 color->color_g_y = color_value;
2045 case PIXEL_FORMAT_420BPP10:
2046 /* set border color to yellow */
2047 color->color_g_y = color_value;
2048 color->color_r_cr = color_value;
2050 case PIXEL_FORMAT_FP16:
2051 /* set border color to white */
2052 color->color_r_cr = color_value;
2053 color->color_b_cb = color_value;
2054 color->color_g_y = color_value;
2061 void dcn10_get_hdr_visual_confirm_color(
2062 struct pipe_ctx *pipe_ctx,
2063 struct tg_color *color)
2065 uint32_t color_value = MAX_TG_COLOR_VALUE;
2067 // Determine the overscan color based on the top-most (desktop) plane's context
2068 struct pipe_ctx *top_pipe_ctx = pipe_ctx;
2070 while (top_pipe_ctx->top_pipe != NULL)
2071 top_pipe_ctx = top_pipe_ctx->top_pipe;
2073 switch (top_pipe_ctx->plane_res.scl_data.format) {
2074 case PIXEL_FORMAT_ARGB2101010:
2075 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2076 /* HDR10, ARGB2101010 - set border color to red */
2077 color->color_r_cr = color_value;
2080 case PIXEL_FORMAT_FP16:
2081 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2082 /* HDR10, FP16 - set border color to blue */
2083 color->color_b_cb = color_value;
2084 } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2085 /* FreeSync 2 HDR - set border color to green */
2086 color->color_g_y = color_value;
2090 /* SDR - set border color to gray */
2091 color->color_r_cr = color_value/2;
2092 color->color_b_cb = color_value/2;
2093 color->color_g_y = color_value/2;
2098 static uint16_t fixed_point_to_int_frac(
2099 struct fixed31_32 arg,
2100 uint8_t integer_bits,
2101 uint8_t fractional_bits)
2104 int32_t divisor = 1 << fractional_bits;
2108 uint16_t d = (uint16_t)dc_fixpt_floor(
2112 if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
2113 numerator = (uint16_t)dc_fixpt_floor(
2118 numerator = dc_fixpt_floor(
2121 1LL << integer_bits),
2128 result = (uint16_t)numerator;
2130 result = (uint16_t)(
2131 (1 << (integer_bits + fractional_bits + 1)) + numerator);
2133 if ((result != 0) && dc_fixpt_lt(
2134 arg, dc_fixpt_zero))
2135 result |= 1 << (integer_bits + fractional_bits);
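	/*
	 * Example (illustrative): with integer_bits = 2 and fractional_bits = 13,
	 * a positive fixed31_32 value of 1.0 converts to 1 << 13 = 0x2000 and
	 * 1.5 converts to 0x3000; the sign/overflow handling above only kicks in
	 * for negative arguments or values that do not fit the integer field.
	 */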
2140 void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
2141 const struct dc_plane_state *plane_state)
2143 if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
2144 && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID
2145 && plane_state->input_csc_color_matrix.enable_adjustment
2146 && plane_state->coeff_reduction_factor.value != 0) {
2147 bias_and_scale->scale_blue = fixed_point_to_int_frac(
2148 dc_fixpt_mul(plane_state->coeff_reduction_factor,
2149 dc_fixpt_from_fraction(256, 255)),
2152 bias_and_scale->scale_red = bias_and_scale->scale_blue;
2153 bias_and_scale->scale_green = bias_and_scale->scale_blue;
2155 bias_and_scale->scale_blue = 0x2000;
2156 bias_and_scale->scale_red = 0x2000;
2157 bias_and_scale->scale_green = 0x2000;
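		/*
		 * 0x2000 is unity (1.0) scale here, assuming the 2.13 fixed-point
		 * format produced by fixed_point_to_int_frac() above (13 fractional
		 * bits), so the default path leaves red/green/blue unscaled.
		 */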
2161 static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2163 struct dc_bias_and_scale bns_params = {0};
2165 // program the input csc
2166 dpp->funcs->dpp_setup(dpp,
2167 plane_state->format,
2168 EXPANSION_MODE_ZERO,
2169 plane_state->input_csc_color_matrix,
2170 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
2171 plane_state->color_space,
2174 plane_state->color_space);
2177 //set scale and bias registers
2178 dcn10_build_prescale_params(&bns_params, plane_state);
2179 if (dpp->funcs->dpp_program_bias_and_scale)
2180 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2183 static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2185 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2186 struct mpcc_blnd_cfg blnd_cfg = {{0}};
2187 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2189 struct mpcc *new_mpcc;
2190 struct mpc *mpc = dc->res_pool->mpc;
2191 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2193 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2194 dcn10_get_hdr_visual_confirm_color(
2195 pipe_ctx, &blnd_cfg.black_color);
2196 } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2197 dcn10_get_surface_visual_confirm_color(
2198 pipe_ctx, &blnd_cfg.black_color);
2200 color_space_to_black_color(
2201 dc, pipe_ctx->stream->output_color_space,
2202 &blnd_cfg.black_color);
2205 if (per_pixel_alpha)
2206 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2208 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2210 blnd_cfg.overlap_only = false;
2211 blnd_cfg.global_gain = 0xff;
2213 if (pipe_ctx->plane_state->global_alpha)
2214 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2216 blnd_cfg.global_alpha = 0xff;
2218 /* DCN1.0 has output CM before MPC which seems to screw with
2219 * pre-multiplied alpha.
2221 blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2222 pipe_ctx->stream->output_color_space)
2228 * Note: currently there is a bug in init_hw such that
2229 * on resume from hibernate, BIOS sets up MPCC0, and
2230 * we do mpcc_remove but the mpcc cannot go to idle
2231 * after remove. This causes us to pick mpcc1 here,
2232 * which causes a pstate hang for a yet-unknown reason.
2234 mpcc_id = hubp->inst;
2236 /* If there is no full update, we don't need to touch the MPC tree */
2237 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2238 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2242 /* check if this MPCC is already being used */
2243 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2244 /* remove MPCC if being used */
2245 if (new_mpcc != NULL)
2246 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2248 if (dc->debug.sanity_checks)
2249 mpc->funcs->assert_mpcc_idle_before_connect(
2250 dc->res_pool->mpc, mpcc_id);
2252 /* Call MPC to insert new plane */
2253 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2261 ASSERT(new_mpcc != NULL);
2263 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2264 hubp->mpcc_id = mpcc_id;
2267 static void update_scaler(struct pipe_ctx *pipe_ctx)
2269 bool per_pixel_alpha =
2270 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2272 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2273 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2274 /* scaler configuration */
2275 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2276 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2279 void update_dchubp_dpp(
2281 struct pipe_ctx *pipe_ctx,
2282 struct dc_state *context)
2284 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2285 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2286 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2287 struct plane_size size = plane_state->plane_size;
2288 unsigned int compat_level = 0;
2290 /* Depending on the DML calculation, the DPP clock value may change dynamically */
2291 /* If the requested max DPP clock is lower than the current dispclk, no need to
2294 if (plane_state->update_flags.bits.full_update) {
2295 bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2296 dc->clk_mgr->clks.dispclk_khz / 2;
2298 dpp->funcs->dpp_dppclk_control(
2300 should_divided_by_2,
2303 if (dc->res_pool->dccg)
2304 dc->res_pool->dccg->funcs->update_dpp_dto(
2307 pipe_ctx->plane_res.bw.dppclk_khz,
2310 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2311 dc->clk_mgr->clks.dispclk_khz / 2 :
2312 dc->clk_mgr->clks.dispclk_khz;
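		/*
		 * Illustrative numbers: with dispclk at 600000 kHz and a requested
		 * dppclk of 250000 kHz, the request is at or below dispclk / 2, so
		 * the divider is engaged and the DPPs run at 300000 kHz.
		 */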
2315 /* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
2316 * VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
2317 * VTG has a 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2319 if (plane_state->update_flags.bits.full_update) {
2320 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2322 hubp->funcs->hubp_setup(
2324 &pipe_ctx->dlg_regs,
2325 &pipe_ctx->ttu_regs,
2327 &pipe_ctx->pipe_dlg_param);
2328 hubp->funcs->hubp_setup_interdependent(
2330 &pipe_ctx->dlg_regs,
2331 &pipe_ctx->ttu_regs);
2334 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2336 if (plane_state->update_flags.bits.full_update ||
2337 plane_state->update_flags.bits.bpp_change)
2338 update_dpp(dpp, plane_state);
2340 if (plane_state->update_flags.bits.full_update ||
2341 plane_state->update_flags.bits.per_pixel_alpha_change ||
2342 plane_state->update_flags.bits.global_alpha_change)
2343 dc->hwss.update_mpcc(dc, pipe_ctx);
2345 if (plane_state->update_flags.bits.full_update ||
2346 plane_state->update_flags.bits.per_pixel_alpha_change ||
2347 plane_state->update_flags.bits.global_alpha_change ||
2348 plane_state->update_flags.bits.scaling_change ||
2349 plane_state->update_flags.bits.position_change) {
2350 update_scaler(pipe_ctx);
2353 if (plane_state->update_flags.bits.full_update ||
2354 plane_state->update_flags.bits.scaling_change ||
2355 plane_state->update_flags.bits.position_change) {
2356 hubp->funcs->mem_program_viewport(
2358 &pipe_ctx->plane_res.scl_data.viewport,
2359 &pipe_ctx->plane_res.scl_data.viewport_c);
2362 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2363 dc->hwss.set_cursor_position(pipe_ctx);
2364 dc->hwss.set_cursor_attribute(pipe_ctx);
2366 if (dc->hwss.set_cursor_sdr_white_level)
2367 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2370 if (plane_state->update_flags.bits.full_update) {
2372 dc->hwss.program_gamut_remap(pipe_ctx);
2374 dc->hwss.program_output_csc(dc,
2376 pipe_ctx->stream->output_color_space,
2377 pipe_ctx->stream->csc_color_matrix.matrix,
2378 pipe_ctx->stream_res.opp->inst);
2381 if (plane_state->update_flags.bits.full_update ||
2382 plane_state->update_flags.bits.pixel_format_change ||
2383 plane_state->update_flags.bits.horizontal_mirror_change ||
2384 plane_state->update_flags.bits.rotation_change ||
2385 plane_state->update_flags.bits.swizzle_change ||
2386 plane_state->update_flags.bits.dcc_change ||
2387 plane_state->update_flags.bits.bpp_change ||
2388 plane_state->update_flags.bits.scaling_change ||
2389 plane_state->update_flags.bits.plane_size_change) {
2390 hubp->funcs->hubp_program_surface_config(
2392 plane_state->format,
2393 &plane_state->tiling_info,
2395 plane_state->rotation,
2397 plane_state->horizontal_mirror,
2401 hubp->power_gated = false;
2403 dc->hwss.update_plane_addr(dc, pipe_ctx);
2405 if (is_pipe_tree_visible(pipe_ctx))
2406 hubp->funcs->set_blank(hubp, false);
2409 static void dcn10_blank_pixel_data(
2411 struct pipe_ctx *pipe_ctx,
2414 enum dc_color_space color_space;
2415 struct tg_color black_color = {0};
2416 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2417 struct dc_stream_state *stream = pipe_ctx->stream;
2419 /* program otg blank color */
2420 color_space = stream->output_color_space;
2421 color_space_to_black_color(dc, color_space, &black_color);
2424 * The way 420 is packed, 2 channels carry Y component, 1 channel
2425 * alternates between Cb and Cr, so both channels need the pixel
2428 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2429 black_color.color_r_cr = black_color.color_g_y;
2432 if (stream_res->tg->funcs->set_blank_color)
2433 stream_res->tg->funcs->set_blank_color(
2438 if (stream_res->tg->funcs->set_blank)
2439 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2440 if (stream_res->abm) {
2441 stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
2442 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2445 if (stream_res->abm)
2446 stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
2447 if (stream_res->tg->funcs->set_blank)
2448 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2452 void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2454 struct fixed31_32 multiplier = dc_fixpt_from_fraction(
2455 pipe_ctx->plane_state->sdr_white_level, 80);
2456 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2457 struct custom_float_format fmt;
2459 fmt.exponenta_bits = 6;
2460 fmt.mantissa_bits = 12;
2463 if (pipe_ctx->plane_state->sdr_white_level > 80)
2464 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2466 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2467 pipe_ctx->plane_res.dpp, hw_mult);
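	/*
	 * Example (assuming the usual sign/exponent/mantissa packing with a bias
	 * of 31 for the 6-bit exponent): the 0x1f000 default above encodes 1.0;
	 * an sdr_white_level of 160 nits gives a multiplier of 160/80 = 2.0,
	 * which encodes as 0x20000.
	 */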
2470 void dcn10_program_pipe(
2472 struct pipe_ctx *pipe_ctx,
2473 struct dc_state *context)
2475 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2476 dcn10_enable_plane(dc, pipe_ctx, context);
2478 update_dchubp_dpp(dc, pipe_ctx, context);
2480 set_hdr_multiplier(pipe_ctx);
2482 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2483 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2484 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2485 dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
2487 /* dcn10_translate_regamma_to_hw_format takes 750us to finish,
2488 * so only do gamma programming for a full update.
2489 * TODO: This can be further optimized/cleaned up
2490 * Always call this for now since it does memcmp inside before
2491 * doing heavy calculation and programming
2493 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2494 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
2497 static void program_all_pipe_in_tree(
2499 struct pipe_ctx *pipe_ctx,
2500 struct dc_state *context)
2502 if (pipe_ctx->top_pipe == NULL) {
2503 bool blank = !is_pipe_tree_visible(pipe_ctx);
2505 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2506 pipe_ctx->stream_res.tg,
2507 pipe_ctx->pipe_dlg_param.vready_offset,
2508 pipe_ctx->pipe_dlg_param.vstartup_start,
2509 pipe_ctx->pipe_dlg_param.vupdate_offset,
2510 pipe_ctx->pipe_dlg_param.vupdate_width);
2512 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2513 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
2515 dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
2519 if (pipe_ctx->plane_state != NULL)
2520 dcn10_program_pipe(dc, pipe_ctx, context);
2522 if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
2523 program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
2526 struct pipe_ctx *find_top_pipe_for_stream(
2528 struct dc_state *context,
2529 const struct dc_stream_state *stream)
2533 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2534 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2535 struct pipe_ctx *old_pipe_ctx =
2536 &dc->current_state->res_ctx.pipe_ctx[i];
2538 if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
2541 if (pipe_ctx->stream != stream)
2544 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
2550 static void dcn10_apply_ctx_for_surface(
2552 const struct dc_stream_state *stream,
2554 struct dc_state *context)
2557 struct timing_generator *tg;
2558 uint32_t underflow_check_delay_us;
2559 bool removed_pipe[4] = { false };
2560 bool interdependent_update = false;
2561 struct pipe_ctx *top_pipe_to_program =
2562 find_top_pipe_for_stream(dc, context, stream);
2563 DC_LOGGER_INIT(dc->ctx->logger);
2565 if (!top_pipe_to_program)
2568 tg = top_pipe_to_program->stream_res.tg;
2570 interdependent_update = top_pipe_to_program->plane_state &&
2571 top_pipe_to_program->plane_state->update_flags.bits.full_update;
2573 underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
2575 if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
2576 ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
2578 if (interdependent_update)
2579 lock_all_pipes(dc, context, true);
2581 dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
2583 if (underflow_check_delay_us != 0xFFFFFFFF)
2584 udelay(underflow_check_delay_us);
2586 if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
2587 ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
2589 if (num_planes == 0) {
2590 /* OTG blank before removing all front ends */
2591 dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
2594 /* Disconnect unused mpcc */
2595 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2596 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2597 struct pipe_ctx *old_pipe_ctx =
2598 &dc->current_state->res_ctx.pipe_ctx[i];
2600 * Powergate reused pipes that are not powergated.
2601 * Fairly hacky right now, using opp_id as the indicator.
2602 * TODO: After moving dc_post to dc_update, this will
2605 if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
2606 if (old_pipe_ctx->stream_res.tg == tg &&
2607 old_pipe_ctx->plane_res.hubp &&
2608 old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
2609 dc->hwss.disable_plane(dc, old_pipe_ctx);
2612 if ((!pipe_ctx->plane_state ||
2613 pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
2614 old_pipe_ctx->plane_state &&
2615 old_pipe_ctx->stream_res.tg == tg) {
2617 dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
2618 removed_pipe[i] = true;
2620 DC_LOG_DC("Reset mpcc for pipe %d\n",
2621 old_pipe_ctx->pipe_idx);
2626 program_all_pipe_in_tree(dc, top_pipe_to_program, context);
2628 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2629 /* Program secondary blending tree and writeback pipes */
2630 if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
2631 dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
2633 if (interdependent_update)
2634 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2635 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2636 /* Skip inactive pipes and ones already updated */
2637 if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
2638 !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
2641 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2642 pipe_ctx->plane_res.hubp,
2643 &pipe_ctx->dlg_regs,
2644 &pipe_ctx->ttu_regs);
2647 if (interdependent_update)
2648 lock_all_pipes(dc, context, false);
2650 dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
2652 if (num_planes == 0)
2653 false_optc_underflow_wa(dc, stream, tg);
2655 for (i = 0; i < dc->res_pool->pipe_count; i++)
2656 if (removed_pipe[i])
2657 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2659 for (i = 0; i < dc->res_pool->pipe_count; i++)
2660 if (removed_pipe[i]) {
2661 dc->hwss.optimize_bandwidth(dc, context);
2665 if (dc->hwseq->wa.DEGVIDCN10_254)
2666 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2669 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2673 for (i = 0; i < context->stream_count; i++) {
2674 if (context->streams[i]->timing.timing_3d_format
2675 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2679 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
2685 static void dcn10_prepare_bandwidth(
2687 struct dc_state *context)
2689 struct hubbub *hubbub = dc->res_pool->hubbub;
2691 if (dc->debug.sanity_checks)
2692 dcn10_verify_allow_pstate_change_high(dc);
2694 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2695 if (context->stream_count == 0)
2696 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2698 dc->clk_mgr->funcs->update_clocks(
2704 hubbub->funcs->program_watermarks(hubbub,
2705 &context->bw_ctx.bw.dcn.watermarks,
2706 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2708 dcn10_stereo_hw_frame_pack_wa(dc, context);
2710 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2711 dcn_bw_notify_pplib_of_wm_ranges(dc);
2713 if (dc->debug.sanity_checks)
2714 dcn10_verify_allow_pstate_change_high(dc);
2717 static void dcn10_optimize_bandwidth(
2719 struct dc_state *context)
2721 struct hubbub *hubbub = dc->res_pool->hubbub;
2723 if (dc->debug.sanity_checks)
2724 dcn10_verify_allow_pstate_change_high(dc);
2726 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2727 if (context->stream_count == 0)
2728 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2730 dc->clk_mgr->funcs->update_clocks(
2736 hubbub->funcs->program_watermarks(hubbub,
2737 &context->bw_ctx.bw.dcn.watermarks,
2738 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2740 dcn10_stereo_hw_frame_pack_wa(dc, context);
2742 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2743 dcn_bw_notify_pplib_of_wm_ranges(dc);
2745 if (dc->debug.sanity_checks)
2746 dcn10_verify_allow_pstate_change_high(dc);
2749 static void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
2750 int num_pipes, int vmin, int vmax)
2753 struct drr_params params = {0};
2754 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2755 unsigned int event_triggers = 0x800;
2757 params.vertical_total_max = vmax;
2758 params.vertical_total_min = vmin;
2760 /* TODO: If multiple pipes are to be supported, you need
2761 * some GSL stuff. Static screen triggers may be programmed differently
2764 for (i = 0; i < num_pipes; i++) {
2765 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2766 pipe_ctx[i]->stream_res.tg, &params);
2767 if (vmax != 0 && vmin != 0)
2768 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
2769 pipe_ctx[i]->stream_res.tg,
2774 static void dcn10_get_position(struct pipe_ctx **pipe_ctx,
2776 struct crtc_position *position)
2780 /* TODO: handle pipes > 1
2782 for (i = 0; i < num_pipes; i++)
2783 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
2786 static void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
2787 int num_pipes, const struct dc_static_screen_events *events)
2790 unsigned int value = 0;
2792 if (events->surface_update)
2794 if (events->cursor_update)
2796 if (events->force_trigger)
2799 for (i = 0; i < num_pipes; i++)
2800 pipe_ctx[i]->stream_res.tg->funcs->
2801 set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
2804 static void dcn10_config_stereo_parameters(
2805 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
2807 enum view_3d_format view_format = stream->view_format;
2808 enum dc_timing_3d_format timing_3d_format =\
2809 stream->timing.timing_3d_format;
2810 bool non_stereo_timing = false;
2812 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
2813 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
2814 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
2815 non_stereo_timing = true;
2817 if (non_stereo_timing == false &&
2818 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
2820 flags->PROGRAM_STEREO = 1;
2821 flags->PROGRAM_POLARITY = 1;
2822 if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
2823 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
2824 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2825 enum display_dongle_type dongle = \
2826 stream->link->ddc->dongle_type;
2827 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
2828 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
2829 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
2830 flags->DISABLE_STEREO_DP_SYNC = 1;
2832 flags->RIGHT_EYE_POLARITY =\
2833 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
2834 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
2835 flags->FRAME_PACKED = 1;
2841 static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
2843 struct crtc_stereo_flags flags = { 0 };
2844 struct dc_stream_state *stream = pipe_ctx->stream;
2846 dcn10_config_stereo_parameters(stream, &flags);
2848 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2849 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
2850 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
2852 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
2855 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
2856 pipe_ctx->stream_res.opp,
2857 flags.PROGRAM_STEREO == 1 ? true:false,
2860 pipe_ctx->stream_res.tg->funcs->program_stereo(
2861 pipe_ctx->stream_res.tg,
2868 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
2872 for (i = 0; i < res_pool->pipe_count; i++) {
2873 if (res_pool->hubps[i]->inst == mpcc_inst)
2874 return res_pool->hubps[i];
2880 static void dcn10_wait_for_mpcc_disconnect(
2882 struct resource_pool *res_pool,
2883 struct pipe_ctx *pipe_ctx)
2887 if (dc->debug.sanity_checks) {
2888 dcn10_verify_allow_pstate_change_high(dc);
2891 if (!pipe_ctx->stream_res.opp)
2894 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
2895 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
2896 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
2898 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
2899 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
2900 hubp->funcs->set_blank(hubp, true);
2904 if (dc->debug.sanity_checks) {
2905 dcn10_verify_allow_pstate_change_high(dc);
2910 static bool dcn10_dummy_display_power_gating(
2912 uint8_t controller_id,
2913 struct dc_bios *dcb,
2914 enum pipe_gating_control power_gating)
2919 static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2921 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2922 struct timing_generator *tg = pipe_ctx->stream_res.tg;
2925 if (plane_state == NULL)
2928 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
2929 pipe_ctx->plane_res.hubp);
2931 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
2934 plane_state->status.current_address = plane_state->status.requested_address;
2936 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
2937 tg->funcs->is_stereo_left_eye) {
2938 plane_state->status.is_right_eye =
2939 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
2943 static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
2945 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
2947 /* In DCN, this programming sequence is owned by the hubbub */
2948 hubbub->funcs->update_dchub(hubbub, dh_data);
2951 static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2953 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2954 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2955 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2956 struct dc_cursor_mi_param param = {
2957 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
2958 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
2959 .viewport = pipe_ctx->plane_res.scl_data.viewport,
2960 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
2961 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
2962 .rotation = pipe_ctx->plane_state->rotation,
2963 .mirror = pipe_ctx->plane_state->horizontal_mirror
2965 uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
2966 uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
2967 uint32_t x_offset = min(x_plane, pos_cpy.x);
2968 uint32_t y_offset = min(y_plane, pos_cpy.y);
2970 pos_cpy.x -= x_offset;
2971 pos_cpy.y -= y_offset;
2972 pos_cpy.x_hotspot += (x_plane - x_offset);
2973 pos_cpy.y_hotspot += (y_plane - y_offset);
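	/*
	 * Example (illustrative): with dst_rect.x = 100 and a requested cursor
	 * x = 60, x_offset = min(100, 60) = 60, so pos_cpy.x becomes 0 and
	 * x_hotspot grows by 100 - 60 = 40; positions left of / above the plane
	 * appear to be folded into the hotspot rather than programmed as
	 * negative coordinates.
	 */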
2975 if (pipe_ctx->plane_state->address.type
2976 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2977 pos_cpy.enable = false;
2979 // Swap axis and mirror horizontally
2980 if (param.rotation == ROTATION_ANGLE_90) {
2981 uint32_t temp_x = pos_cpy.x;
2982 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
2983 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
2986 // Swap axis and mirror vertically
2987 else if (param.rotation == ROTATION_ANGLE_270) {
2988 uint32_t temp_y = pos_cpy.y;
2989 if (pos_cpy.x > pipe_ctx->plane_res.scl_data.viewport.height) {
2990 pos_cpy.x = pos_cpy.x - pipe_ctx->plane_res.scl_data.viewport.height;
2991 pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x;
2993 pos_cpy.y = 2 * pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x;
2997 // Mirror horizontally and vertically
2998 else if (param.rotation == ROTATION_ANGLE_180) {
2999 if (pos_cpy.x >= pipe_ctx->plane_res.scl_data.viewport.width + pipe_ctx->plane_res.scl_data.viewport.x) {
3000 pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.width
3001 - pos_cpy.x + 2 * pipe_ctx->plane_res.scl_data.viewport.x;
3003 uint32_t temp_x = pos_cpy.x;
3004 pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.x - pos_cpy.x;
3005 if (temp_x >= pipe_ctx->plane_res.scl_data.viewport.x + (int)hubp->curs_attr.width
3006 || pos_cpy.x <= (int)hubp->curs_attr.width + pipe_ctx->plane_state->src_rect.x) {
3007 pos_cpy.x = temp_x + pipe_ctx->plane_res.scl_data.viewport.width;
3010 pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3013 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3014 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3017 static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3019 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3021 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3022 pipe_ctx->plane_res.hubp, attributes);
3023 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3024 pipe_ctx->plane_res.dpp, attributes);
3027 static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3029 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3030 struct fixed31_32 multiplier;
3031 struct dpp_cursor_attributes opt_attr = { 0 };
3032 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3033 struct custom_float_format fmt;
3035 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3038 fmt.exponenta_bits = 5;
3039 fmt.mantissa_bits = 10;
3042 if (sdr_white_level > 80) {
3043 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3044 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3047 opt_attr.scale = hw_scale;
3050 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3051 pipe_ctx->plane_res.dpp, &opt_attr);
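	/*
	 * With 5 exponent and 10 mantissa bits this is standard half-precision
	 * float: the 0x3c00 default above is 1.0, and e.g. sdr_white_level = 160
	 * yields a scale of 160/80 = 2.0, encoded as 0x4000 (illustrative value).
	 */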
3055 * apply_front_porch_workaround (TODO: still needed on FPGA?)
3057 * This is a workaround for a bug that has existed since R5xx and has not been
3058 * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for progressive.
3060 static void apply_front_porch_workaround(
3061 struct dc_crtc_timing *timing)
3063 if (timing->flags.INTERLACE == 1) {
3064 if (timing->v_front_porch < 2)
3065 timing->v_front_porch = 2;
3067 if (timing->v_front_porch < 1)
3068 timing->v_front_porch = 1;
3072 int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3074 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3075 struct dc_crtc_timing patched_crtc_timing;
3076 int vesa_sync_start;
3078 int interlace_factor;
3079 int vertical_line_start;
3081 patched_crtc_timing = *dc_crtc_timing;
3082 apply_front_porch_workaround(&patched_crtc_timing);
3084 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3086 vesa_sync_start = patched_crtc_timing.v_addressable +
3087 patched_crtc_timing.v_border_bottom +
3088 patched_crtc_timing.v_front_porch;
3090 asic_blank_end = (patched_crtc_timing.v_total -
3092 patched_crtc_timing.v_border_top)
3095 vertical_line_start = asic_blank_end -
3096 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
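	/*
	 * Illustrative example: if asic_blank_end works out to 41 lines and
	 * vstartup_start is 30, VUPDATE lands 41 - 30 + 1 = 12 lines after VSYNC.
	 * Note the result can be negative when vstartup_start exceeds the blank
	 * region; callers below check for that case.
	 */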
3098 return vertical_line_start;
3101 void lock_all_pipes(struct dc *dc,
3102 struct dc_state *context,
3105 struct pipe_ctx *pipe_ctx;
3106 struct timing_generator *tg;
3109 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3110 pipe_ctx = &context->res_ctx.pipe_ctx[i];
3111 tg = pipe_ctx->stream_res.tg;
3113 * Only lock the top pipe's tg to prevent redundant
3114 * (un)locking. Also skip if pipe is disabled.
3116 if (pipe_ctx->top_pipe ||
3117 !pipe_ctx->stream || !pipe_ctx->plane_state ||
3118 !tg->funcs->is_tg_enabled(tg))
3122 tg->funcs->lock(tg);
3124 tg->funcs->unlock(tg);
3128 static void calc_vupdate_position(
3129 struct pipe_ctx *pipe_ctx,
3130 uint32_t *start_line,
3133 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3134 int vline_int_offset_from_vupdate =
3135 pipe_ctx->stream->periodic_interrupt0.lines_offset;
3136 int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx);
3139 if (vline_int_offset_from_vupdate > 0)
3140 vline_int_offset_from_vupdate--;
3141 else if (vline_int_offset_from_vupdate < 0)
3142 vline_int_offset_from_vupdate++;
3144 start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3146 if (start_position >= 0)
3147 *start_line = start_position;
3149 *start_line = dc_crtc_timing->v_total + start_position - 1;
3151 *end_line = *start_line + 2;
3153 if (*end_line >= dc_crtc_timing->v_total)
3157 static void cal_vline_position(
3158 struct pipe_ctx *pipe_ctx,
3159 enum vline_select vline,
3160 uint32_t *start_line,
3163 enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3165 if (vline == VLINE0)
3166 ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3167 else if (vline == VLINE1)
3168 ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3170 switch (ref_point) {
3171 case START_V_UPDATE:
3172 calc_vupdate_position(
3178 // Supposed to do nothing because vsync is at position 0
3186 static void dcn10_setup_periodic_interrupt(
3187 struct pipe_ctx *pipe_ctx,
3188 enum vline_select vline)
3190 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3192 if (vline == VLINE0) {
3193 uint32_t start_line = 0;
3194 uint32_t end_line = 0;
3196 cal_vline_position(pipe_ctx, vline, &start_line, &end_line);
3198 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3200 } else if (vline == VLINE1) {
3201 pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3203 pipe_ctx->stream->periodic_interrupt1.lines_offset);
3207 static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
3209 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3210 int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
3212 if (start_line < 0) {
3217 if (tg->funcs->setup_vertical_interrupt2)
3218 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3221 static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3222 struct dc_link_settings *link_settings)
3224 struct encoder_unblank_param params = { { 0 } };
3225 struct dc_stream_state *stream = pipe_ctx->stream;
3226 struct dc_link *link = stream->link;
3228 /* only 3 items below are used by unblank */
3229 params.timing = pipe_ctx->stream->timing;
3231 params.link_settings.link_rate = link_settings->link_rate;
3233 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3234 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3235 params.timing.pix_clk_100hz /= 2;
3236 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3239 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3240 link->dc->hwss.edp_backlight_control(link, true);
3244 static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3245 const uint8_t *custom_sdp_message,
3246 unsigned int sdp_message_size)
3248 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3249 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3250 pipe_ctx->stream_res.stream_enc,
3255 static enum dc_status dcn10_set_clock(struct dc *dc,
3256 enum dc_clock_type clock_type,
3260 struct dc_state *context = dc->current_state;
3261 struct dc_clock_config clock_cfg = {0};
3262 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3264 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3265 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3266 context, clock_type, &clock_cfg);
3268 if (!dc->clk_mgr->funcs->get_clock)
3269 return DC_FAIL_UNSUPPORTED_1;
3271 if (clk_khz > clock_cfg.max_clock_khz)
3272 return DC_FAIL_CLK_EXCEED_MAX;
3274 if (clk_khz < clock_cfg.min_clock_khz)
3275 return DC_FAIL_CLK_BELOW_MIN;
3277 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3278 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3280 /* update the internally requested clock for later update_clocks use */
3281 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3282 current_clocks->dispclk_khz = clk_khz;
3283 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3284 current_clocks->dppclk_khz = clk_khz;
3286 return DC_ERROR_UNEXPECTED;
3288 if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3289 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3295 static void dcn10_get_clock(struct dc *dc,
3296 enum dc_clock_type clock_type,
3297 struct dc_clock_config *clock_cfg)
3299 struct dc_state *context = dc->current_state;
3301 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3302 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3306 static const struct hw_sequencer_funcs dcn10_funcs = {
3307 .program_gamut_remap = dcn10_program_gamut_remap,
3308 .init_hw = dcn10_init_hw,
3309 .init_pipes = dcn10_init_pipes,
3310 .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
3311 .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
3312 .update_plane_addr = dcn10_update_plane_addr,
3313 .plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
3314 .update_dchub = dcn10_update_dchub,
3315 .update_mpcc = dcn10_update_mpcc,
3316 .update_pending_status = dcn10_update_pending_status,
3317 .set_input_transfer_func = dcn10_set_input_transfer_func,
3318 .set_output_transfer_func = dcn10_set_output_transfer_func,
3319 .program_output_csc = dcn10_program_output_csc,
3320 .power_down = dce110_power_down,
3321 .enable_accelerated_mode = dce110_enable_accelerated_mode,
3322 .enable_timing_synchronization = dcn10_enable_timing_synchronization,
3323 .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
3324 .update_info_frame = dce110_update_info_frame,
3325 .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
3326 .enable_stream = dce110_enable_stream,
3327 .disable_stream = dce110_disable_stream,
3328 .unblank_stream = dcn10_unblank_stream,
3329 .blank_stream = dce110_blank_stream,
3330 .enable_audio_stream = dce110_enable_audio_stream,
3331 .disable_audio_stream = dce110_disable_audio_stream,
3332 .enable_display_power_gating = dcn10_dummy_display_power_gating,
3333 .disable_plane = dcn10_disable_plane,
3334 .blank_pixel_data = dcn10_blank_pixel_data,
3335 .pipe_control_lock = dcn10_pipe_control_lock,
3336 .prepare_bandwidth = dcn10_prepare_bandwidth,
3337 .optimize_bandwidth = dcn10_optimize_bandwidth,
3338 .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
3339 .enable_stream_timing = dcn10_enable_stream_timing,
3340 .set_drr = dcn10_set_drr,
3341 .get_position = dcn10_get_position,
3342 .set_static_screen_control = dcn10_set_static_screen_control,
3343 .setup_stereo = dcn10_setup_stereo,
3344 .set_avmute = dce110_set_avmute,
3345 .log_hw_state = dcn10_log_hw_state,
3346 .get_hw_state = dcn10_get_hw_state,
3347 .clear_status_bits = dcn10_clear_status_bits,
3348 .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
3349 .edp_backlight_control = dce110_edp_backlight_control,
3350 .edp_power_control = dce110_edp_power_control,
3351 .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
3352 .set_cursor_position = dcn10_set_cursor_position,
3353 .set_cursor_attribute = dcn10_set_cursor_attribute,
3354 .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
3355 .disable_stream_gating = NULL,
3356 .enable_stream_gating = NULL,
3357 .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
3358 .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
3359 .set_clock = dcn10_set_clock,
3360 .get_clock = dcn10_get_clock,
3361 .did_underflow_occur = dcn10_did_underflow_occur,
3363 .disable_vga = dcn10_disable_vga,
3364 .bios_golden_init = dcn10_bios_golden_init,
3365 .plane_atomic_disable = dcn10_plane_atomic_disable,
3366 .plane_atomic_power_down = dcn10_plane_atomic_power_down,
3367 .enable_power_gating_plane = dcn10_enable_power_gating_plane,
3368 .dpp_pg_control = dcn10_dpp_pg_control,
3369 .hubp_pg_control = dcn10_hubp_pg_control,
3370 .dsc_pg_control = NULL,
3374 void dcn10_hw_sequencer_construct(struct dc *dc)
3376 dc->hwss = dcn10_funcs;