2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "dm_services.h"
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
38 #include "bandwidth_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
45 #include "link_hwss.h"
46 #include "link_encoder.h"
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
52 /*******************************************************************************
54 ******************************************************************************/
/*
 * destroy_links() - destroy every non-NULL link in dc->links[].
 * NOTE(review): this view is sampled; the loop-index declaration and the
 * closing braces fall outside the visible lines.
 */
55 static void destroy_links(struct core_dc *dc)
59 for (i = 0; i < dc->link_count; i++) {
60 if (NULL != dc->links[i])
61 link_destroy(&dc->links[i]);
/*
 * create_links() - create one core_link per physical connector reported by
 * the VBIOS, then append num_virtual_links virtual links (virtual connector
 * id + virtual encoder) for sinks with no physical connector.
 * Returns true on success; error/return paths are not fully visible in this
 * sampled view.
 */
65 static bool create_links(
67 uint32_t num_virtual_links)
71 struct dc_bios *bios = dc->ctx->dc_bios;
/* Sanity-check the connector count against the enum-id address space. */
75 connectors_num = bios->funcs->get_connectors_number(bios);
77 if (connectors_num > ENUM_ID_COUNT) {
79 "DC: Number of connectors %d exceeds maximum of %d!\n",
85 if (connectors_num == 0 && num_virtual_links == 0) {
86 dm_error("DC: Number of connectors is zero!\n");
90 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
/* One link per physical connector, indexed by creation order. */
95 for (i = 0; i < connectors_num; i++) {
96 struct link_init_data link_init_params = {0};
97 struct core_link *link;
99 link_init_params.ctx = dc->ctx;
100 link_init_params.connector_index = i;
101 link_init_params.link_index = dc->link_count;
102 link_init_params.dc = dc;
103 link = link_create(&link_init_params);
106 dc->links[dc->link_count] = link;
110 dm_error("DC: failed to create link!\n");
/* Virtual links: hand-built rather than going through link_create(). */
114 for (i = 0; i < num_virtual_links; i++) {
115 struct core_link *link = dm_alloc(sizeof(*link));
116 struct encoder_init_data enc_init = {0};
125 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
126 link->link_id.type = OBJECT_TYPE_CONNECTOR;
127 link->link_id.id = CONNECTOR_ID_VIRTUAL;
128 link->link_id.enum_id = ENUM_ID_1;
/* NOTE(review): no NULL check of dm_alloc() visible here for link or
 * link_enc — confirm against the full source. */
129 link->link_enc = dm_alloc(sizeof(*link->link_enc));
131 enc_init.ctx = dc->ctx;
132 enc_init.channel = CHANNEL_ID_UNKNOWN;
133 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
134 enc_init.transmitter = TRANSMITTER_UNKNOWN;
135 enc_init.connector = link->link_id;
136 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
137 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
138 enc_init.encoder.enum_id = ENUM_ID_1;
139 virtual_link_encoder_construct(link->link_enc, &enc_init);
141 link->public.link_index = dc->link_count;
142 dc->links[dc->link_count] = link;
/*
 * stream_adjust_vmin_vmax() - program DRR (dynamic refresh) vmin/vmax on the
 * pipe driving stream[0] and rebuild its info frame. Only the first stream
 * is honored (see TODO below); return statement not visible in this view.
 */
152 static bool stream_adjust_vmin_vmax(struct dc *dc,
153 const struct dc_stream **stream, int num_streams,
156 /* TODO: Support multiple streams */
157 struct core_dc *core_dc = DC_TO_CORE(dc);
158 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
162 for (i = 0; i < MAX_PIPES; i++) {
163 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
/* Only act on the pipe that both carries this stream and has a
 * stream encoder attached. */
165 if (pipe->stream == core_stream && pipe->stream_enc) {
166 core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
168 /* build and update the info frame */
169 resource_build_info_frame(pipe);
170 core_dc->hwss.update_info_frame(pipe);
/*
 * set_gamut_remap() - reprogram plane config (which carries gamut remap
 * state) for every pipe carrying stream[0]. Only the first stream is used;
 * return path not visible in this sampled view.
 */
179 static bool set_gamut_remap(struct dc *dc,
180 const struct dc_stream **stream, int num_streams)
182 struct core_dc *core_dc = DC_TO_CORE(dc);
183 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
186 struct pipe_ctx *pipes;
188 for (i = 0; i < MAX_PIPES; i++) {
189 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
192 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
193 core_dc->hwss.set_plane_config(core_dc, pipes,
194 &core_dc->current_context->res_ctx);
202 /* This function is not expected to fail, proper implementation of
203 * validation will prevent this from ever being called for unsupported
/*
 * stream_update_scaling() - update a stream's src/dst rects and re-commit
 * its current surfaces so the new scaling takes effect.
 */
206 static void stream_update_scaling(
208 const struct dc_stream *dc_stream,
209 const struct rect *src,
210 const struct rect *dst)
212 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
213 struct core_dc *core_dc = DC_TO_CORE(dc);
214 struct validate_context *cur_ctx = core_dc->current_context;
/* Store the new scaling rects on the stream itself. */
218 stream->public.src = *src;
221 stream->public.dst = *dst;
/* Find this stream in the current context and re-commit its surfaces. */
223 for (i = 0; i < cur_ctx->stream_count; i++) {
224 struct core_stream *cur_stream = cur_ctx->streams[i];
226 if (stream == cur_stream) {
227 struct dc_stream_status *status = &cur_ctx->stream_status[i];
229 if (status->surface_count)
230 if (!dc_commit_surfaces_to_stream(
233 status->surface_count,
234 &cur_stream->public))
235 /* Need to debug validation */
/*
 * set_psr_enable() - toggle Panel Self Refresh on every link owned by this
 * DC instance. Return statement not visible in this sampled view.
 */
243 static bool set_psr_enable(struct dc *dc, bool enable)
245 struct core_dc *core_dc = DC_TO_CORE(dc);
248 for (i = 0; i < core_dc->link_count; i++)
249 dc_link_set_psr_enable(&core_dc->links[i]->public,
/*
 * setup_psr() - configure PSR on the link that owns the given stream's sink,
 * then program static-screen control on every (non-underlay) pipe carrying
 * that stream. Return path not visible in this sampled view.
 */
256 static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
258 struct core_dc *core_dc = DC_TO_CORE(dc);
259 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
260 struct pipe_ctx *pipes;
262 unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
264 for (i = 0; i < core_dc->link_count; i++) {
265 if (core_stream->sink->link == core_dc->links[i])
266 dc_link_setup_psr(&core_dc->links[i]->public,
/* Skip the underlay pipe: static-screen control applies to the
 * front-end pipes only. */
270 for (i = 0; i < MAX_PIPES; i++) {
271 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
272 == core_stream && i != underlay_idx) {
273 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
274 core_dc->hwss.set_static_screen_control(&pipes, 1,
/*
 * set_drive_settings() - apply DP lane drive settings (voltage swing /
 * pre-emphasis) to the core link matching the given public link.
 * Asserts if the link is not found before programming it.
 */
282 static void set_drive_settings(struct dc *dc,
283 struct link_training_settings *lt_settings,
284 const struct dc_link *link)
286 struct core_dc *core_dc = DC_TO_CORE(dc);
/* Linear search: match by public-link address. */
289 for (i = 0; i < core_dc->link_count; i++) {
290 if (&core_dc->links[i]->public == link)
294 if (i >= core_dc->link_count)
295 ASSERT_CRITICAL(false);
297 dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
/*
 * perform_link_training() - run DP link training with the given settings on
 * every link of this DC instance (debug/test hook).
 */
300 static void perform_link_training(struct dc *dc,
301 struct dc_link_settings *link_setting,
302 bool skip_video_pattern)
304 struct core_dc *core_dc = DC_TO_CORE(dc);
307 for (i = 0; i < core_dc->link_count; i++)
308 dc_link_dp_perform_link_training(
309 &core_dc->links[i]->public,
/*
 * set_preferred_link_settings() - force a link's verified capabilities to
 * the caller-supplied lane count / link rate, then retrain the link at those
 * settings (DP test/debug path).
 */
314 static void set_preferred_link_settings(struct dc *dc,
315 struct dc_link_settings *link_setting,
316 const struct dc_link *link)
318 struct core_link *core_link = DC_LINK_TO_CORE(link);
320 core_link->public.verified_link_cap.lane_count =
321 link_setting->lane_count;
322 core_link->public.verified_link_cap.link_rate =
323 link_setting->link_rate;
324 dp_retrain_link_dp_test(core_link, link_setting, false);
/* enable_hpd() - thin wrapper enabling hot-plug detect on a DP link. */
327 static void enable_hpd(const struct dc_link *link)
329 dc_link_dp_enable_hpd(link);
/* disable_hpd() - thin wrapper disabling hot-plug detect on a DP link. */
332 static void disable_hpd(const struct dc_link *link)
334 dc_link_dp_disable_hpd(link);
/*
 * set_test_pattern() - forward a DP compliance test-pattern request (with
 * optional custom pattern bytes) to the link layer. Call arguments are not
 * fully visible in this sampled view.
 */
338 static void set_test_pattern(
339 const struct dc_link *link,
340 enum dp_test_pattern test_pattern,
341 const struct link_training_settings *p_link_settings,
342 const unsigned char *p_custom_pattern,
343 unsigned int cust_pattern_size)
346 dc_link_dp_set_test_pattern(
/*
 * allocate_dc_stream_funcs() - wire the public stream/link function tables
 * to the static implementations above. adjust_vmin_vmax is installed only
 * when the hardware sequencer provides set_drr.
 */
354 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
356 core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
357 if (core_dc->hwss.set_drr != NULL) {
358 core_dc->public.stream_funcs.adjust_vmin_vmax =
359 stream_adjust_vmin_vmax;
362 core_dc->public.stream_funcs.set_gamut_remap =
365 core_dc->public.stream_funcs.set_psr_enable =
368 core_dc->public.stream_funcs.setup_psr =
371 core_dc->public.link_funcs.set_drive_settings =
374 core_dc->public.link_funcs.perform_link_training =
375 perform_link_training;
377 core_dc->public.link_funcs.set_preferred_link_settings =
378 set_preferred_link_settings;
380 core_dc->public.link_funcs.enable_hpd =
383 core_dc->public.link_funcs.disable_hpd =
386 core_dc->public.link_funcs.set_test_pattern =
/*
 * destruct() - tear down everything construct() built, in reverse order:
 * validate context contents, resource pool, GPIO service, i2caux, BIOS
 * parser (only if we created it), logger, then the three context
 * allocations. Freed pointers are NULLed to guard against reuse.
 */
390 static void destruct(struct core_dc *dc)
392 resource_validate_ctx_destruct(dc->current_context);
396 dc_destroy_resource_pool(dc);
398 if (dc->ctx->gpio_service)
399 dal_gpio_service_destroy(&dc->ctx->gpio_service);
402 dal_i2caux_destroy(&dc->ctx->i2caux);
/* Only destroy the BIOS parser if construct() created it (not when a
 * vbios_override was supplied by the caller). */
404 if (dc->ctx->created_bios)
405 dal_bios_parser_destroy(&dc->ctx->dc_bios);
408 dal_logger_destroy(&dc->ctx->logger);
410 dm_free(dc->current_context);
411 dc->current_context = NULL;
412 dm_free(dc->temp_flip_context);
413 dc->temp_flip_context = NULL;
414 dm_free(dc->scratch_val_ctx);
415 dc->scratch_val_ctx = NULL;
/*
 * construct() - build the core_dc: dc_context, the three validate contexts,
 * logger, BIOS parser (unless overridden), i2caux, GPIO service, resource
 * pool, and links. Uses goto-label cleanup on failure (labels visible at
 * the tail). Returns true on success.
 */
421 static bool construct(struct core_dc *dc,
422 const struct dc_init_data *init_params)
424 struct dal_logger *logger;
425 struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
426 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
429 dm_error("%s: failed to create ctx\n", __func__);
433 dc->current_context = dm_alloc(sizeof(*dc->current_context));
434 dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));
435 dc->scratch_val_ctx = dm_alloc(sizeof(*dc->scratch_val_ctx));
/* NOTE(review): scratch_val_ctx is allocated above but not included in
 * this NULL check — confirm against the full source. */
437 if (!dc->current_context || !dc->temp_flip_context) {
438 dm_error("%s: failed to create validate ctx\n", __func__);
442 dc_ctx->cgs_device = init_params->cgs_device;
443 dc_ctx->driver_context = init_params->driver;
444 dc_ctx->dc = &dc->public;
445 dc_ctx->asic_id = init_params->asic_id;
448 logger = dal_logger_create(dc_ctx);
451 /* can *not* call logger. call base driver 'print error' */
452 dm_error("%s: failed to create Logger!\n", __func__);
455 dc_ctx->logger = logger;
457 dc->ctx->dce_environment = init_params->dce_environment;
459 dc_version = resource_parse_asic_id(init_params->asic_id);
460 dc->ctx->dce_version = dc_version;
462 /* Resource should construct all asic specific resources.
463 * This should be the only place where we need to parse the asic id
465 if (init_params->vbios_override)
466 dc_ctx->dc_bios = init_params->vbios_override;
468 /* Create BIOS parser */
469 struct bp_init_data bp_init_data;
470 bp_init_data.ctx = dc_ctx;
471 bp_init_data.bios = init_params->asic_id.atombios_base_address;
473 dc_ctx->dc_bios = dal_bios_parser_create(
474 &bp_init_data, dc_version);
476 if (!dc_ctx->dc_bios) {
477 ASSERT_CRITICAL(false);
/* Remember we own the parser so destruct() knows to destroy it. */
481 dc_ctx->created_bios = true;
485 dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
487 if (!dc_ctx->i2caux) {
488 ASSERT_CRITICAL(false);
489 goto failed_to_create_i2caux;
492 /* Create GPIO service */
493 dc_ctx->gpio_service = dal_gpio_service_create(
495 dc_ctx->dce_environment,
498 if (!dc_ctx->gpio_service) {
499 ASSERT_CRITICAL(false);
503 dc->res_pool = dc_create_resource_pool(
505 init_params->num_virtual_links,
507 init_params->asic_id);
509 goto create_resource_fail;
511 if (!create_links(dc, init_params->num_virtual_links))
512 goto create_links_fail;
514 allocate_dc_stream_funcs(dc);
518 /**** error handling here ****/
520 create_resource_fail:
522 failed_to_create_i2caux:
/*
 * ProgramPixelDurationV() - program the pixel-duration arbitration fields of
 * both DPGV pipes from the pixel clock, plus fixed arbitration/repeater
 * values.
 * NOTE(review): this reads like reference pseudocode rather than compilable
 * C (e.g. `fixed31_32` arithmetic with `*`, `Fixed31_32(...)`, `round(...)`,
 * bare `ReadReg`/`WriteReg`); in the full source it may live inside a
 * comment block — confirm before treating it as live code.
 */
532 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
534 fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
535 unsigned int pixDurationInPico = round(pixel_duration);
537 DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
539 arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
540 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
541 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
543 arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
544 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
545 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
547 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
548 WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
550 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
551 WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
555 /*******************************************************************************
557 ******************************************************************************/
/*
 * dc_create() - public entry point: allocate and construct a core_dc,
 * initialize hardware, and populate the public capability fields
 * (max_streams, max_links, max_audios). Returns the embedded public
 * struct dc; failure paths are not fully visible in this sampled view.
 */
559 struct dc *dc_create(const struct dc_init_data *init_params)
561 struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
562 unsigned int full_pipe_count;
567 if (false == construct(core_dc, init_params))
570 /*TODO: separate HW and SW initialization*/
571 core_dc->hwss.init_hw(core_dc);
/* Underlay pipe (if present) does not count as a full pipe; max_streams
 * is further capped by the number of stream encoders. */
573 full_pipe_count = core_dc->res_pool->pipe_count;
574 if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
576 core_dc->public.caps.max_streams = min(
578 core_dc->res_pool->stream_enc_count);
580 core_dc->public.caps.max_links = core_dc->link_count;
581 core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
583 core_dc->public.config = init_params->flags;
585 dm_logger_write(core_dc->ctx->logger, LOG_DC,
586 "Display Core initialized\n");
589 /* TODO: missing feature to be enabled */
590 core_dc->public.debug.disable_dfs_bypass = true;
592 return &core_dc->public;
/*
 * dc_destroy() - public teardown entry point for a dc created by
 * dc_create(). Body (destruct/free/NULLing of *dc) is not visible in this
 * sampled view.
 */
601 void dc_destroy(struct dc **dc)
603 struct core_dc *core_dc = DC_TO_CORE(*dc);
/*
 * is_validation_required() - decide whether the requested validation set
 * differs from the current context (stream count, per-stream surface count,
 * stream timings, or surface state modulo clip/dst position). Used to skip
 * redundant re-validation.
 */
609 static bool is_validation_required(
610 const struct core_dc *dc,
611 const struct dc_validation_set set[],
614 const struct validate_context *context = dc->current_context;
617 if (context->stream_count != set_count)
620 for (i = 0; i < set_count; i++) {
622 if (set[i].surface_count != context->stream_status[i].surface_count)
624 if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
627 for (j = 0; j < set[i].surface_count; j++) {
628 struct dc_surface temp_surf = { 0 };
/* Copy the committed surface, then overwrite the fields allowed to
 * change without revalidation (clip rect, dst position) so memcmp
 * compares only the rest.
 * NOTE(review): memcmp over a struct also compares padding bytes
 * (CERT EXP42-C); the `= { 0 }` init plus struct copy mitigates but
 * does not fully guarantee padding equality — confirm intent. */
630 temp_surf = *context->stream_status[i].surfaces[j];
631 temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
632 temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
633 temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
635 if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
/*
 * dc_validate_resources() - check whether the given stream/surface sets can
 * be supported, using a throwaway validate_context. Skips work entirely if
 * is_validation_required() says nothing changed. Returns true iff the pool's
 * validate_with_context() reports DC_OK.
 */
643 bool dc_validate_resources(
645 const struct dc_validation_set set[],
648 struct core_dc *core_dc = DC_TO_CORE(dc);
649 enum dc_status result = DC_ERROR_UNEXPECTED;
650 struct validate_context *context;
652 if (!is_validation_required(core_dc, set, set_count))
655 context = dm_alloc(sizeof(struct validate_context));
657 goto context_alloc_fail;
659 result = core_dc->res_pool->funcs->validate_with_context(
660 core_dc, set, set_count, context);
/* The context was only needed for validation; tear it down again. */
662 resource_validate_ctx_destruct(context);
666 if (result != DC_OK) {
667 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
668 "%s:resource validation failed, dc_status:%d\n",
673 return (result == DC_OK);
/*
 * dc_validate_guaranteed() - check that a single stream is guaranteed to be
 * supportable, mirroring dc_validate_resources() but via the pool's
 * validate_guaranteed() hook. Returns true iff the result is DC_OK.
 */
677 bool dc_validate_guaranteed(
679 const struct dc_stream *stream)
681 struct core_dc *core_dc = DC_TO_CORE(dc);
682 enum dc_status result = DC_ERROR_UNEXPECTED;
683 struct validate_context *context;
685 context = dm_alloc(sizeof(struct validate_context));
687 goto context_alloc_fail;
689 result = core_dc->res_pool->funcs->validate_guaranteed(
690 core_dc, stream, context);
692 resource_validate_ctx_destruct(context);
696 if (result != DC_OK) {
697 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
698 "%s:guaranteed validation failed, dc_status:%d\n",
703 return (result == DC_OK);
/*
 * program_timing_sync() - group pipes whose stream timings are
 * synchronizable and enable timing synchronization per group. Within each
 * group the first unblanked pipe is promoted to master; other already-
 * unblanked pipes are dropped from the group (they are considered synced).
 */
706 static void program_timing_sync(
707 struct core_dc *core_dc,
708 struct validate_context *ctx)
712 int pipe_count = ctx->res_ctx.pool->pipe_count;
713 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
/* Collect candidate pipes: skip idle pipes and bottom pipes of an
 * MPO chain (top_pipe != NULL). */
715 for (i = 0; i < pipe_count; i++) {
716 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
719 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
722 for (i = 0; i < pipe_count; i++) {
724 struct pipe_ctx *pipe_set[MAX_PIPES];
726 if (!unsynced_pipes[i])
729 pipe_set[0] = unsynced_pipes[i];
730 unsynced_pipes[i] = NULL;
732 /* Add tg to the set, search rest of the tg's for ones with
733 * same timing, add all tgs with same timing to the group
735 for (j = i + 1; j < pipe_count; j++) {
736 if (!unsynced_pipes[j])
739 if (resource_are_streams_timing_synchronizable(
740 unsynced_pipes[j]->stream,
741 pipe_set[0]->stream)) {
742 pipe_set[group_size] = unsynced_pipes[j];
743 unsynced_pipes[j] = NULL;
748 /* set first unblanked pipe as master */
749 for (j = 0; j < group_size; j++) {
750 struct pipe_ctx *temp;
752 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
757 pipe_set[0] = pipe_set[j];
763 /* remove any other unblanked pipes as they have already been synced */
764 for (j = j + 1; j < group_size; j++) {
765 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
767 pipe_set[j] = pipe_set[group_size];
/* Only bother synchronizing when more than one pipe remains. */
772 if (group_size > 1) {
773 core_dc->hwss.enable_timing_synchronization(
774 core_dc, group_index, group_size, pipe_set);
/*
 * streams_changed() - return whether the requested stream list differs from
 * the currently committed context, by count or by stream identity (pointer
 * comparison against the public stream objects).
 */
780 static bool streams_changed(
782 const struct dc_stream *streams[],
783 uint8_t stream_count)
787 if (stream_count != dc->current_context->stream_count)
790 for (i = 0; i < dc->current_context->stream_count; i++) {
791 if (&dc->current_context->streams[i]->public != streams[i])
/*
 * fill_display_configs() - translate each committed stream into a
 * dm_pp_single_disp_config entry (signal, pipe index, source size, link
 * settings, symbol clock, rounded refresh rate) for the PowerPlay display
 * configuration.
 */
798 static void fill_display_configs(
799 const struct validate_context *context,
800 struct dm_pp_display_configuration *pp_display_cfg)
805 for (j = 0; j < context->stream_count; j++) {
808 const struct core_stream *stream = context->streams[j];
809 struct dm_pp_single_disp_config *cfg =
810 &pp_display_cfg->disp_configs[num_cfgs];
811 const struct pipe_ctx *pipe_ctx = NULL;
/* Find the pipe driving this stream. */
813 for (k = 0; k < MAX_PIPES; k++)
814 if (stream == context->res_ctx.pipe_ctx[k].stream) {
815 pipe_ctx = &context->res_ctx.pipe_ctx[k];
819 ASSERT(pipe_ctx != NULL);
822 cfg->signal = pipe_ctx->stream->signal;
823 cfg->pipe_idx = pipe_ctx->pipe_idx;
824 cfg->src_height = stream->public.src.height;
825 cfg->src_width = stream->public.src.width;
826 cfg->ddi_channel_mapping =
827 stream->sink->link->ddi_channel_mapping.raw;
829 stream->sink->link->link_enc->transmitter;
830 cfg->link_settings.lane_count =
831 stream->sink->link->public.cur_link_settings.lane_count;
832 cfg->link_settings.link_rate =
833 stream->sink->link->public.cur_link_settings.link_rate;
834 cfg->link_settings.link_spread =
835 stream->sink->link->public.cur_link_settings.link_spread;
836 cfg->sym_clock = stream->phy_pix_clk;
/* Integer refresh rate in Hz, rounded to nearest:
 * pix_clk[kHz] * 1000 / h_total / v_total, with +v_total/2 for rounding. */
838 cfg->v_refresh = stream->public.timing.pix_clk_khz * 1000;
839 cfg->v_refresh /= stream->public.timing.h_total;
840 cfg->v_refresh = (cfg->v_refresh + stream->public.timing.v_total / 2)
841 / stream->public.timing.v_total;
844 pp_display_cfg->display_count = num_cfgs;
/*
 * get_min_vblank_time_us() - return the smallest vertical-blank duration
 * (in microseconds) across all committed streams; used as the available
 * mclk-switch window for PowerPlay.
 */
847 static uint32_t get_min_vblank_time_us(const struct validate_context *context)
/* -1 wraps to UINT32_MAX: sentinel so any real value is smaller. */
850 uint32_t min_vertical_blank_time = -1;
852 for (j = 0; j < context->stream_count; j++) {
853 const struct dc_stream *stream = &context->streams[j]->public;
854 uint32_t vertical_blank_in_pixels = 0;
855 uint32_t vertical_blank_time = 0;
857 vertical_blank_in_pixels = stream->timing.h_total *
858 (stream->timing.v_total
859 - stream->timing.v_addressable);
/* pixels * 1000 / pix_clk[kHz] = microseconds. */
861 vertical_blank_time = vertical_blank_in_pixels
862 * 1000 / stream->timing.pix_clk_khz;
864 if (min_vertical_blank_time > vertical_blank_time)
865 min_vertical_blank_time = vertical_blank_time;
868 return min_vertical_blank_time;
/*
 * determine_sclk_from_bounding_box() - snap a required sclk to the lowest
 * bounding-box level that satisfies it. Falls back to the raw requirement
 * when no levels are reported, and to the maximum level when even it is
 * insufficient (unexpected; should be caught earlier).
 */
871 static int determine_sclk_from_bounding_box(
872 const struct core_dc *dc,
878 * Some asics do not give us sclk levels, so we just report the actual
881 if (dc->sclk_lvls.num_levels == 0)
882 return required_sclk;
884 for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
885 if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
886 return dc->sclk_lvls.clocks_in_khz[i];
889 * even maximum level could not satisfy requirement, this
890 * is unexpected at this stage, should have been caught at
894 return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
/*
 * pplib_apply_display_requirements() - translate bandwidth-calculation
 * results into a dm_pp_display_configuration and push it to PowerPlay, but
 * only when it differs from the previously applied configuration (cached in
 * dc->prev_display_config).
 */
897 void pplib_apply_display_requirements(
899 const struct validate_context *context,
900 struct dm_pp_display_configuration *pp_display_cfg)
902 pp_display_cfg->all_displays_in_sync =
903 context->bw_results.all_displays_in_sync;
/* bw_results uses "enable" polarity; pp_display_cfg uses "disable". */
904 pp_display_cfg->nb_pstate_switch_disable =
905 context->bw_results.nbp_state_change_enable == false;
906 pp_display_cfg->cpu_cc6_disable =
907 context->bw_results.cpuc_state_change_enable == false;
908 pp_display_cfg->cpu_pstate_disable =
909 context->bw_results.cpup_state_change_enable == false;
910 pp_display_cfg->cpu_pstate_separation_time =
911 context->bw_results.blackout_recovery_time_us;
913 pp_display_cfg->min_memory_clock_khz = context->bw_results.required_yclk
914 / MEMORY_TYPE_MULTIPLIER;
916 pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
918 context->bw_results.required_sclk);
920 pp_display_cfg->min_engine_clock_deep_sleep_khz
921 = context->bw_results.required_sclk_deep_sleep;
923 pp_display_cfg->avail_mclk_switch_time_us =
924 get_min_vblank_time_us(context);
926 pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
928 pp_display_cfg->disp_clk_khz = context->dispclk_khz;
930 fill_display_configs(context, pp_display_cfg);
932 /* TODO: is this still applicable?*/
933 if (pp_display_cfg->display_count == 1) {
934 const struct dc_crtc_timing *timing =
935 &context->streams[0]->public.timing;
937 pp_display_cfg->crtc_index =
938 pp_display_cfg->disp_configs[0].pipe_idx;
939 pp_display_cfg->line_time_in_us = timing->h_total * 1000
940 / timing->pix_clk_khz;
/* Only notify PowerPlay when the config actually changed.
 * NOTE(review): memcmp over the struct compares padding bytes too
 * (CERT EXP42-C) — a padding mismatch would cause a spurious apply,
 * which is harmless but worth confirming. */
943 if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
944 struct dm_pp_display_configuration)) != 0)
945 dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
947 dc->prev_display_config = *pp_display_cfg;
/*
 * dc_commit_streams() - validate and commit a new set of streams to
 * hardware: build a validation set from each stream's current surfaces,
 * validate a fresh context, apply it to HW, program timing sync, unblank
 * pipes with visible surfaces, push PowerPlay requirements, and swap the
 * new context in (recycling the old one as temp_flip_context scratch).
 * No-ops when streams_changed() reports nothing changed.
 */
951 bool dc_commit_streams(
953 const struct dc_stream *streams[],
954 uint8_t stream_count)
956 struct core_dc *core_dc = DC_TO_CORE(dc);
957 struct dc_bios *dcb = core_dc->ctx->dc_bios;
958 enum dc_status result = DC_ERROR_UNEXPECTED;
959 struct validate_context *context;
960 struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
963 if (false == streams_changed(core_dc, streams, stream_count))
966 dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
967 __func__, stream_count);
/* Build the validation set: each stream plus its currently attached
 * surfaces (if it already has a status). */
969 for (i = 0; i < stream_count; i++) {
970 const struct dc_stream *stream = streams[i];
971 const struct dc_stream_status *status = dc_stream_get_status(stream);
974 dc_stream_log(stream,
975 core_dc->ctx->logger,
978 set[i].stream = stream;
981 set[i].surface_count = status->surface_count;
982 for (j = 0; j < status->surface_count; j++)
983 set[i].surfaces[j] = status->surfaces[j];
988 context = dm_alloc(sizeof(struct validate_context));
990 goto context_alloc_fail;
992 result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, stream_count, context);
993 if (result != DC_OK){
994 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
995 "%s: Context validation failed! dc_status:%d\n",
999 resource_validate_ctx_destruct(context);
/* First commit must leave VBIOS/legacy mode before programming. */
1003 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1004 core_dc->hwss.enable_accelerated_mode(core_dc);
1007 if (result == DC_OK) {
1008 result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
1011 program_timing_sync(core_dc, context);
/* Unblank every pipe whose surface is committed and visible. */
1013 for (i = 0; i < context->stream_count; i++) {
1014 const struct core_sink *sink = context->streams[i]->sink;
1016 for (j = 0; j < context->stream_status[i].surface_count; j++) {
1017 const struct dc_surface *dc_surface =
1018 context->stream_status[i].surfaces[j];
1020 for (k = 0; k < context->res_ctx.pool->pipe_count; k++) {
1021 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[k];
1023 if (dc_surface != &pipe->surface->public
1024 || !dc_surface->visible)
1027 pipe->tg->funcs->set_blank(pipe->tg, false);
1031 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
1032 context->streams[i]->public.timing.h_addressable,
1033 context->streams[i]->public.timing.v_addressable,
1034 context->streams[i]->public.timing.h_total,
1035 context->streams[i]->public.timing.v_total,
1036 context->streams[i]->public.timing.pix_clk_khz);
1039 pplib_apply_display_requirements(core_dc,
1040 context, &context->pp_display_cfg);
/* Swap contexts: old current_context becomes scratch for flips. */
1042 resource_validate_ctx_destruct(core_dc->current_context);
1044 if (core_dc->temp_flip_context != core_dc->current_context) {
1045 dm_free(core_dc->temp_flip_context);
1046 core_dc->temp_flip_context = core_dc->current_context;
1048 core_dc->current_context = context;
1049 memset(core_dc->temp_flip_context, 0, sizeof(*core_dc->temp_flip_context));
1051 return (result == DC_OK);
1057 return (result == DC_OK);
/*
 * dc_pre_update_surfaces_to_stream() - prepare a copied validate_context
 * for a surface update on an already-committed stream: attach the new
 * surfaces, rebuild scaling, re-validate bandwidth, raise dispclk early if
 * the new context needs more (with a 15% margin), and prep the pipes.
 * Skips the heavy path when only clip/dst position changed on the same
 * surface set.
 */
1060 bool dc_pre_update_surfaces_to_stream(
1062 const struct dc_surface *const *new_surfaces,
1063 uint8_t new_surface_count,
1064 const struct dc_stream *dc_stream)
1067 struct core_dc *core_dc = DC_TO_CORE(dc);
1068 int prev_disp_clk = core_dc->current_context->dispclk_khz;
1069 struct dc_stream_status *stream_status = NULL;
1070 struct validate_context *context;
1073 pre_surface_trace(dc, new_surfaces, new_surface_count);
1075 if (core_dc->current_context->stream_count == 0)
1078 /* Cannot commit surface to a stream that is not commited */
1079 for (i = 0; i < core_dc->current_context->stream_count; i++)
1080 if (dc_stream == &core_dc->current_context->streams[i]->public)
1083 if (i == core_dc->current_context->stream_count)
1086 stream_status = &core_dc->current_context->stream_status[i];
/* Fast path: if the surface set is identical except clip rect and dst
 * position, the pre-update (re-validation) can be skipped. */
1088 if (new_surface_count == stream_status->surface_count) {
1089 bool skip_pre = true;
1091 for (i = 0; i < stream_status->surface_count; i++) {
1092 struct dc_surface temp_surf = { 0 };
1094 temp_surf = *stream_status->surfaces[i];
1095 temp_surf.clip_rect = new_surfaces[i]->clip_rect;
1096 temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x;
1097 temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y;
1099 if (memcmp(&temp_surf, new_surfaces[i], sizeof(temp_surf)) != 0) {
1109 context = dm_alloc(sizeof(struct validate_context));
1112 dm_error("%s: failed to create validate ctx\n", __func__);
1117 resource_validate_ctx_copy_construct(core_dc->current_context, context);
1119 dm_logger_write(core_dc->ctx->logger, LOG_DC,
1120 "%s: commit %d surfaces to stream 0x%x\n",
1125 if (!resource_attach_surfaces_to_context(
1126 new_surfaces, new_surface_count, dc_stream, context)) {
1127 BREAK_TO_DEBUGGER();
1129 goto unexpected_fail;
/* Rebuild scaling for every pipe now carrying a new surface. */
1132 for (i = 0; i < new_surface_count; i++)
1133 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1134 if (context->res_ctx.pipe_ctx[j].surface !=
1135 DC_SURFACE_TO_CORE(new_surfaces[i]))
1138 resource_build_scaling_params(
1139 new_surfaces[i], &context->res_ctx.pipe_ctx[j]);
/* Debug aid: shrink recout by 2px so scaled surfaces are visibly
 * distinguishable on screen. */
1141 if (dc->debug.surface_visual_confirm) {
1142 context->res_ctx.pipe_ctx[j].scl_data.recout.height -= 2;
1143 context->res_ctx.pipe_ctx[j].scl_data.recout.width -= 2;
1147 if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1148 BREAK_TO_DEBUGGER();
1150 goto unexpected_fail;
/* Raise dispclk BEFORE the update when more is needed; 115/100 adds a
 * 15% safety margin. (Lowering happens in the post-update path.) */
1153 if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)
1154 && prev_disp_clk < context->dispclk_khz) {
1155 pplib_apply_display_requirements(core_dc, context,
1156 &context->pp_display_cfg);
1157 context->res_ctx.pool->display_clock->funcs->set_clock(
1158 context->res_ctx.pool->display_clock,
1159 context->dispclk_khz * 115 / 100);
1160 core_dc->current_context->bw_results.dispclk_khz = context->dispclk_khz;
1161 core_dc->current_context->dispclk_khz = context->dispclk_khz;
1164 for (i = 0; i < new_surface_count; i++)
1165 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1166 if (context->res_ctx.pipe_ctx[j].surface !=
1167 DC_SURFACE_TO_CORE(new_surfaces[i]))
1170 core_dc->hwss.prepare_pipe_for_context(
1172 &context->res_ctx.pipe_ctx[j],
1177 resource_validate_ctx_destruct(context);
/*
 * dc_post_update_surfaces_to_stream() - finish a surface update: copy the
 * current context, power down front ends of streamless pipes, re-validate
 * bandwidth, lower/set bandwidth clocks, push PowerPlay requirements, and
 * install the copy as the new current context.
 */
1184 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1187 struct core_dc *core_dc = DC_TO_CORE(dc);
1188 struct validate_context *context = dm_alloc(sizeof(struct validate_context));
1191 dm_error("%s: failed to create validate ctx\n", __func__);
1194 resource_validate_ctx_copy_construct(core_dc->current_context, context);
1196 post_surface_trace(dc);
/* Pipes left with no stream are powered down. */
1198 for (i = 0; i < context->res_ctx.pool->pipe_count; i++)
1199 if (context->res_ctx.pipe_ctx[i].stream == NULL) {
1200 context->res_ctx.pipe_ctx[i].pipe_idx = i;
1201 core_dc->hwss.power_down_front_end(
1202 core_dc, &context->res_ctx.pipe_ctx[i]);
1204 if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1205 BREAK_TO_DEBUGGER();
1209 core_dc->hwss.set_bandwidth(core_dc);
1211 /*TODO: dce specific*/
1212 pplib_apply_display_requirements(core_dc, context, &context->pp_display_cfg);
1214 resource_validate_ctx_destruct(core_dc->current_context);
1215 core_dc->current_context = context;
/*
 * dc_commit_surfaces_to_stream() - convenience wrapper that commits a full
 * surface set to a stream as one "full" update: pre-update (validation,
 * clocks), build per-surface update descriptors (flip address, plane info,
 * scaling), apply via dc_update_surfaces_for_stream(), then post-update
 * (clock lowering, cleanup).
 */
1220 bool dc_commit_surfaces_to_stream(
1222 const struct dc_surface **new_surfaces,
1223 uint8_t new_surface_count,
1224 const struct dc_stream *dc_stream)
1226 struct dc_surface_update updates[MAX_SURFACES];
1227 struct dc_flip_addrs flip_addr[MAX_SURFACES];
1228 struct dc_plane_info plane_info[MAX_SURFACES];
1229 struct dc_scaling_info scaling_info[MAX_SURFACES];
1232 if (!dc_pre_update_surfaces_to_stream(
1233 dc, new_surfaces, new_surface_count, dc_stream))
1236 memset(updates, 0, sizeof(updates));
1237 memset(flip_addr, 0, sizeof(flip_addr));
1238 memset(plane_info, 0, sizeof(plane_info));
1239 memset(scaling_info, 0, sizeof(scaling_info));
/* Populate one complete update descriptor per surface. */
1241 for (i = 0; i < new_surface_count; i++) {
1242 updates[i].surface = new_surfaces[i];
1244 (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1245 flip_addr[i].address = new_surfaces[i]->address;
1246 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1247 plane_info[i].color_space = new_surfaces[i]->color_space;
1248 plane_info[i].format = new_surfaces[i]->format;
1249 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1250 plane_info[i].rotation = new_surfaces[i]->rotation;
1251 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1252 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1253 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1254 plane_info[i].visible = new_surfaces[i]->visible;
1255 plane_info[i].dcc = new_surfaces[i]->dcc;
1256 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1257 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1258 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1259 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1261 updates[i].flip_addr = &flip_addr[i];
1262 updates[i].plane_info = &plane_info[i];
1263 updates[i].scaling_info = &scaling_info[i];
1265 dc_update_surfaces_for_stream(dc, updates, new_surface_count, dc_stream);
1267 return dc_post_update_surfaces_to_stream(dc);
/*
 * is_surface_in_context() - return whether the given public surface is
 * attached to any pipe in the context (pointer comparison against each
 * pipe's surface).
 */
1270 static bool is_surface_in_context(
1271 const struct validate_context *context,
1272 const struct dc_surface *surface)
1276 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1277 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1279 if (surface == &pipe_ctx->surface->public) {
/* Classification of how invasive a surface update is; determines which
 * programming path dc_update_surfaces_for_stream() takes. Order matters:
 * higher values are more invasive (compared with < below). */
1287 enum surface_update_type {
1288 UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
1289 UPDATE_TYPE_MED, /* a lot of programming needed. may need to alloc */
1290 UPDATE_TYPE_FULL, /* may need to shuffle resources */
/*
 * det_surface_update() - classify a single surface update: scaling or
 * plane-info changes (or a surface not in the current context) force FULL;
 * transfer-function / HDR metadata changes are MED; anything else is FAST.
 */
1293 static enum surface_update_type det_surface_update(
1294 const struct core_dc *dc,
1295 const struct dc_surface_update *u)
1297 const struct validate_context *context = dc->current_context;
1299 if (u->scaling_info || u->plane_info)
1300 /* todo: not all scale and plane_info update need full update
1301 * ie. check if following is the same
1302 * scale ratio, view port, surface bpp etc
1304 return UPDATE_TYPE_FULL; /* may need bandwidth update */
1306 if (!is_surface_in_context(context, u->surface))
1307 return UPDATE_TYPE_FULL;
1309 if (u->in_transfer_func ||
1310 u->out_transfer_func ||
1311 u->hdr_static_metadata)
1312 return UPDATE_TYPE_MED;
1314 return UPDATE_TYPE_FAST;
/*
 * check_update_surfaces_for_stream() - fold the per-surface update types
 * into one overall type for the whole stream update.
 *
 * A surface-count mismatch with the stream's current status forces
 * UPDATE_TYPE_FULL; otherwise the maximum of the per-surface types wins.
 *
 * NOTE(review): some parameter lines (the dc and surface_count
 * parameters) and an apparent early exit after the UPDATE_TYPE_FULL
 * check are missing from this extract.
 */
1317 static enum surface_update_type check_update_surfaces_for_stream(
1319 struct dc_surface_update *updates,
1321 const struct dc_stream_status *stream_status)
1324 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1326 if (stream_status->surface_count != surface_count)
1327 return UPDATE_TYPE_FULL;
1329 for (i = 0 ; i < surface_count; i++) {
1330 enum surface_update_type type =
1331 det_surface_update(dc, &updates[i]);
1333 if (type == UPDATE_TYPE_FULL)
1336 if (overall_type < type)
1337 overall_type = type;
1340 return overall_type;
/* Updates at or above this level get traced; default traces FULL only. */
1343 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
/*
 * dc_update_surfaces_for_stream() - apply a set of surface updates to
 * the pipes driving @dc_stream.
 *
 * Flow as visible in this extract:
 *  1. Classify the updates (FAST/MED/FULL) via
 *     check_update_surfaces_for_stream(), optionally tracing them.
 *  2. For a FULL update, build a new validate_context in the scratch
 *     temp_flip_context and re-attach the surfaces; otherwise reuse the
 *     current context.
 *  3. Copy each update's flip / scaling / plane / gamma /
 *     transfer-function / HDR data into the target core_surface.
 *  4. For FULL updates, revalidate bandwidth.
 *  5. Program the hardware per pipe (locking non-blanked pipes for
 *     non-FAST updates), then release the locks walking the pipes in
 *     reverse order.
 *  6. On a FULL update, swap the new context in and recycle the old one
 *     as the next temp_flip_context.
 *
 * NOTE(review): a number of lines (closing braces, some call arguments,
 * continue/return statements) are missing from this extract; do not
 * treat the block below as compilable as-is.
 */
1345 void dc_update_surfaces_for_stream(struct dc *dc,
1346 struct dc_surface_update *updates, int surface_count,
1347 const struct dc_stream *dc_stream)
1349 struct core_dc *core_dc = DC_TO_CORE(dc);
1350 struct validate_context *context;
1353 enum surface_update_type update_type;
1354 const struct dc_stream_status *stream_status;
1356 stream_status = dc_stream_get_status(dc_stream);
1357 ASSERT(stream_status);
1359 return; /* Cannot commit surface to stream that is not committed */
1361 update_type = check_update_surfaces_for_stream(
1362 core_dc, updates, surface_count, stream_status);
1364 if (update_type >= update_surface_trace_level)
1365 update_surface_trace(dc, updates, surface_count);
/* FULL update: rebuild the context in scratch memory. */
1367 if (update_type >= UPDATE_TYPE_FULL) {
1368 const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };
1370 for (i = 0; i < surface_count; i++)
1371 new_surfaces[i] = updates[i].surface;
1373 /* initialize scratch memory for building context */
1374 context = core_dc->temp_flip_context;
1375 resource_validate_ctx_copy_construct(
1376 core_dc->current_context, context);
1378 /* add surface to context */
1379 if (!resource_attach_surfaces_to_context(
1380 new_surfaces, surface_count, dc_stream, context)) {
1381 BREAK_TO_DEBUGGER();
1385 context = core_dc->current_context;
/* Copy the update parameters into the affected surfaces. */
1387 for (i = 0; i < surface_count; i++) {
1388 /* save update param into surface */
1389 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1390 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1392 if (updates[i].flip_addr) {
1393 surface->public.address = updates[i].flip_addr->address;
1394 surface->public.flip_immediate =
1395 updates[i].flip_addr->flip_immediate;
1398 if (updates[i].scaling_info) {
1399 surface->public.scaling_quality =
1400 updates[i].scaling_info->scaling_quality;
1401 surface->public.dst_rect =
1402 updates[i].scaling_info->dst_rect;
1403 surface->public.src_rect =
1404 updates[i].scaling_info->src_rect;
1405 surface->public.clip_rect =
1406 updates[i].scaling_info->clip_rect;
1409 if (updates[i].plane_info) {
1410 surface->public.color_space =
1411 updates[i].plane_info->color_space;
1412 surface->public.format =
1413 updates[i].plane_info->format;
1414 surface->public.plane_size =
1415 updates[i].plane_info->plane_size;
1416 surface->public.rotation =
1417 updates[i].plane_info->rotation;
1418 surface->public.horizontal_mirror =
1419 updates[i].plane_info->horizontal_mirror;
1420 surface->public.stereo_format =
1421 updates[i].plane_info->stereo_format;
1422 surface->public.tiling_info =
1423 updates[i].plane_info->tiling_info;
1424 surface->public.visible =
1425 updates[i].plane_info->visible;
1426 surface->public.dcc =
1427 updates[i].plane_info->dcc;
1430 /* not sure if we still need this */
1431 if (update_type == UPDATE_TYPE_FULL) {
1432 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1433 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1435 if (pipe_ctx->surface != surface)
1438 resource_build_scaling_params(updates[i].surface, pipe_ctx);
/* Debug aid: shrink the recout by 2px so the update is visible. */
1439 if (dc->debug.surface_visual_confirm) {
1440 pipe_ctx->scl_data.recout.height -= 2;
1441 pipe_ctx->scl_data.recout.width -= 2;
/* Swap gamma / transfer functions with proper retain/release. */
1446 if (updates[i].gamma &&
1447 updates[i].gamma != surface->public.gamma_correction) {
1448 if (surface->public.gamma_correction != NULL)
1449 dc_gamma_release(&surface->public.
1452 dc_gamma_retain(updates[i].gamma);
1453 surface->public.gamma_correction =
1457 if (updates[i].in_transfer_func &&
1458 updates[i].in_transfer_func != surface->public.in_transfer_func) {
1459 if (surface->public.in_transfer_func != NULL)
1460 dc_transfer_func_release(
1464 dc_transfer_func_retain(
1465 updates[i].in_transfer_func);
1466 surface->public.in_transfer_func =
1467 updates[i].in_transfer_func;
1470 if (updates[i].out_transfer_func &&
1471 updates[i].out_transfer_func != dc_stream->out_transfer_func) {
1472 if (dc_stream->out_transfer_func != NULL)
1473 dc_transfer_func_release(dc_stream->out_transfer_func);
1474 dc_transfer_func_retain(updates[i].out_transfer_func);
1475 stream->public.out_transfer_func = updates[i].out_transfer_func;
1477 if (updates[i].hdr_static_metadata)
1478 surface->public.hdr_static_ctx =
1479 *(updates[i].hdr_static_metadata);
/* A FULL update must pass bandwidth validation before programming. */
1482 if (update_type == UPDATE_TYPE_FULL &&
1483 !core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1484 BREAK_TO_DEBUGGER();
1488 if (!surface_count) /* reset */
1489 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
/* Program the hardware pipe by pipe. */
1491 for (i = 0; i < surface_count; i++) {
1492 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1494 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1495 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1496 struct pipe_ctx *cur_pipe_ctx;
1497 bool is_new_pipe_surface = true;
1499 if (pipe_ctx->surface != surface)
/* Non-FAST updates lock any pipe that is actively scanning out. */
1502 if (update_type != UPDATE_TYPE_FAST &&
1503 !pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1504 core_dc->hwss.pipe_control_lock(
1507 PIPE_LOCK_CONTROL_GRAPHICS |
1508 PIPE_LOCK_CONTROL_SCL |
1509 PIPE_LOCK_CONTROL_BLENDER |
1510 PIPE_LOCK_CONTROL_MODE,
1514 if (update_type == UPDATE_TYPE_FULL) {
1515 /* only apply for top pipe */
1516 if (!pipe_ctx->top_pipe) {
1517 core_dc->hwss.apply_ctx_for_surface(core_dc,
1519 context_timing_trace(dc, &context->res_ctx);
1523 if (updates[i].flip_addr)
1524 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1526 if (update_type == UPDATE_TYPE_FAST)
/* Detect whether this pipe got a different surface than before. */
1529 cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1530 if (cur_pipe_ctx->surface == pipe_ctx->surface)
1531 is_new_pipe_surface = false;
1533 if (is_new_pipe_surface ||
1534 updates[i].in_transfer_func)
1535 core_dc->hwss.set_input_transfer_func(
1536 pipe_ctx, pipe_ctx->surface);
1538 if (is_new_pipe_surface ||
1539 updates[i].out_transfer_func)
1540 core_dc->hwss.set_output_transfer_func(
1545 if (updates[i].hdr_static_metadata) {
1546 resource_build_info_frame(pipe_ctx);
1547 core_dc->hwss.update_info_frame(pipe_ctx);
1552 if (update_type == UPDATE_TYPE_FAST)
/* Release the pipe locks, walking the pipes in reverse order. */
1555 for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
1556 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1558 for (j = 0; j < surface_count; j++) {
1559 if (updates[j].surface == &pipe_ctx->surface->public) {
1560 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1561 core_dc->hwss.pipe_control_lock(
1564 PIPE_LOCK_CONTROL_GRAPHICS |
1565 PIPE_LOCK_CONTROL_SCL |
1566 PIPE_LOCK_CONTROL_BLENDER,
/* Commit the new context; the old one becomes the next scratch buffer. */
1574 if (core_dc->current_context != context) {
1575 resource_validate_ctx_destruct(core_dc->current_context);
1576 core_dc->temp_flip_context = core_dc->current_context;
1578 core_dc->current_context = context;
/* Number of streams in the current (committed) context. */
1582 uint8_t dc_get_current_stream_count(const struct dc *dc)
1584 struct core_dc *core_dc = DC_TO_CORE(dc);
1585 return core_dc->current_context->stream_count;
/*
 * Return the public stream at index @i in the current context.
 * NOTE(review): the out-of-range return path (presumably NULL) is
 * missing from this extract.
 */
1588 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1590 struct core_dc *core_dc = DC_TO_CORE(dc);
1591 if (i < core_dc->current_context->stream_count)
1592 return &(core_dc->current_context->streams[i]->public);
/* Public view of the link at @link_index; no bounds check is performed. */
1596 const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1598 struct core_dc *core_dc = DC_TO_CORE(dc);
1599 return &core_dc->links[link_index]->public;
/* Graphics-object id of the link at @link_index (no bounds check). */
1602 const struct graphics_object_id dc_get_link_id_at_index(
1603 struct dc *dc, uint32_t link_index)
1605 struct core_dc *core_dc = DC_TO_CORE(dc);
1606 return core_dc->links[link_index]->link_id;
/* DDC service handle of the link at @link_index (no bounds check). */
1609 const struct ddc_service *dc_get_ddc_at_index(
1610 struct dc *dc, uint32_t link_index)
1612 struct core_dc *core_dc = DC_TO_CORE(dc);
1613 return core_dc->links[link_index]->ddc;
/* HPD interrupt source of the link at @link_index (no bounds check). */
1616 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1617 struct dc *dc, uint32_t link_index)
1619 struct core_dc *core_dc = DC_TO_CORE(dc);
1620 return core_dc->links[link_index]->public.irq_source_hpd;
/* Array of audio resources owned by the resource pool. */
1623 const struct audio **dc_get_audios(struct dc *dc)
1625 struct core_dc *core_dc = DC_TO_CORE(dc);
1626 return (const struct audio **)core_dc->res_pool->audios;
/*
 * dc_flip_surface_addrs() - program new surface addresses (page flip).
 *
 * For each of the @count surfaces, stores the new address and the
 * flip_immediate flag into the surface, then reprograms every pipe in
 * the current context that scans out from that surface.
 *
 * NOTE(review): the function's opening lines (dc parameter, count
 * parameter) and some loop braces are missing from this extract.
 */
1629 void dc_flip_surface_addrs(
1631 const struct dc_surface *const surfaces[],
1632 struct dc_flip_addrs flip_addrs[],
1635 struct core_dc *core_dc = DC_TO_CORE(dc);
1638 for (i = 0; i < count; i++) {
1639 struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
1641 surface->public.address = flip_addrs[i].address;
1642 surface->public.flip_immediate = flip_addrs[i].flip_immediate;
1644 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1645 struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1647 if (pipe_ctx->surface != surface)
1650 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
/*
 * Map a (src_id, ext_id) pair to a dc_irq_source via the irq service.
 * NOTE(review): the parameter lines are missing from this extract.
 */
1655 enum dc_irq_source dc_interrupt_to_irq_source(
1660 struct core_dc *core_dc = DC_TO_CORE(dc);
1661 return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
/* Enable or disable the given interrupt source. */
1664 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1666 struct core_dc *core_dc = DC_TO_CORE(dc);
1667 dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
/* Acknowledge the given interrupt source. */
1670 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1672 struct core_dc *core_dc = DC_TO_CORE(dc);
1673 dal_irq_service_ack(core_dc->res_pool->irqs, src);
/*
 * dc_set_power_state() - handle an ACPI power-state transition.
 *
 * Records the previous/current video power state, then:
 *  - D0: (re)initialize the hardware;
 *  - otherwise: release all streams (dc_commit_streams with NULL),
 *    power down, and zero the current context so resume starts from a
 *    clean state (hw-programming optimizations must not see stale
 *    state); the res_ctx.pool pointer is restored afterwards.
 *
 * NOTE(review): the dc parameter line, switch braces and default case
 * are missing from this extract.
 */
1676 void dc_set_power_state(
1678 enum dc_acpi_cm_power_state power_state,
1679 enum dc_video_power_state video_power_state)
1681 struct core_dc *core_dc = DC_TO_CORE(dc);
1683 core_dc->previous_power_state = core_dc->current_power_state;
1684 core_dc->current_power_state = video_power_state;
1686 switch (power_state) {
1687 case DC_ACPI_CM_POWER_STATE_D0:
1688 core_dc->hwss.init_hw(core_dc);
1691 /* NULL means "reset/release all DC streams" */
1692 dc_commit_streams(dc, NULL, 0);
1694 core_dc->hwss.power_down(core_dc);
1696 /* Zero out the current context so that on resume we start with
1697 * clean state, and dc hw programming optimizations will not
1698 * cause any trouble.
1700 memset(core_dc->current_context, 0,
1701 sizeof(*core_dc->current_context));
1703 core_dc->current_context->res_ctx.pool = core_dc->res_pool;
/* Resume every link after a power-state transition. */
1710 void dc_resume(const struct dc *dc)
1712 struct core_dc *core_dc = DC_TO_CORE(dc);
1716 for (i = 0; i < core_dc->link_count; i++)
1717 core_link_resume(core_dc->links[i]);
/*
 * Read DPCD registers over the link's DDC/AUX service; returns true on
 * DDC_RESULT_SUCESSFULL.
 * NOTE(review): the function's signature line and some call arguments
 * are missing from this extract -- presumably this is dc_read_dpcd().
 */
1722 uint32_t link_index,
1727 struct core_dc *core_dc = DC_TO_CORE(dc);
1729 struct core_link *link = core_dc->links[link_index];
1730 enum ddc_result r = dal_ddc_service_read_dpcd_data(
1735 return r == DDC_RESULT_SUCESSFULL;
/*
 * dc_query_ddc_data() - combined I2C-over-DDC write+read transaction on
 * the link at @link_index.
 * NOTE(review): several parameter lines, the call arguments and the
 * trailing "return result;" are missing from this extract.
 */
1738 bool dc_query_ddc_data(
1740 uint32_t link_index,
1743 uint32_t write_size,
1745 uint32_t read_size) {
1747 struct core_dc *core_dc = DC_TO_CORE(dc);
1749 struct core_link *link = core_dc->links[link_index];
1751 bool result = dal_ddc_service_query_ddc_data(
/*
 * Write DPCD registers over the link's DDC/AUX service; returns true on
 * DDC_RESULT_SUCESSFULL.
 * NOTE(review): the function's signature line and some call arguments
 * are missing from this extract -- presumably this is dc_write_dpcd().
 */
1765 uint32_t link_index,
1767 const uint8_t *data,
1770 struct core_dc *core_dc = DC_TO_CORE(dc);
1772 struct core_link *link = core_dc->links[link_index];
1774 enum ddc_result r = dal_ddc_service_write_dpcd_data(
1779 return r == DDC_RESULT_SUCESSFULL;
/*
 * Submit a raw i2c command through the link's DDC service / i2caux
 * layer.
 * NOTE(review): the function's signature line and the call arguments
 * are missing from this extract -- presumably this is dc_submit_i2c().
 */
1784 uint32_t link_index,
1785 struct i2c_command *cmd)
1787 struct core_dc *core_dc = DC_TO_CORE(dc);
1789 struct core_link *link = core_dc->links[link_index];
1790 struct ddc_service *ddc = link->ddc;
1792 return dal_i2caux_submit_i2c_command(
/*
 * link_add_remote_sink_helper() - append @sink to the link's
 * remote_sinks[] array, taking a reference on it.  Breaks to debugger
 * (and presumably fails) when MAX_SINKS_PER_LINK is already reached.
 * NOTE(review): the return statements are missing from this extract.
 */
1798 static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1800 struct dc_link *dc_link = &core_link->public;
1802 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1803 BREAK_TO_DEBUGGER();
1807 dc_sink_retain(sink);
1809 dc_link->remote_sinks[dc_link->sink_count] = sink;
1810 dc_link->sink_count++;
/*
 * dc_link_add_remote_sink() - create a sink from a raw EDID and attach
 * it to @link as a remote sink.
 *
 * Validates the EDID length against MAX_EDID_BUFFER_SIZE and the
 * init_data/link pointers, creates the sink, copies the EDID in,
 * registers the sink on the link and parses the EDID caps via
 * dm_helpers_parse_edid_caps().  On EDID failure the sink is removed
 * from the link and released again (error-unwind tail).
 *
 * NOTE(review): several error-path lines (NULL returns, goto labels or
 * equivalents) are missing from this extract.
 */
1815 struct dc_sink *dc_link_add_remote_sink(
1816 const struct dc_link *link,
1817 const uint8_t *edid,
1819 struct dc_sink_init_data *init_data)
1821 struct dc_sink *dc_sink;
1822 enum dc_edid_status edid_status;
1823 struct core_link *core_link = DC_LINK_TO_LINK(link);
1825 if (len > MAX_EDID_BUFFER_SIZE) {
1826 dm_error("Max EDID buffer size breached!\n");
1831 BREAK_TO_DEBUGGER();
1835 if (!init_data->link) {
1836 BREAK_TO_DEBUGGER();
1840 dc_sink = dc_sink_create(init_data);
1845 memmove(dc_sink->dc_edid.raw_edid, edid, len);
1846 dc_sink->dc_edid.length = len;
1848 if (!link_add_remote_sink_helper(
1853 edid_status = dm_helpers_parse_edid_caps(
1856 &dc_sink->edid_caps);
1858 if (edid_status != EDID_OK)
1863 dc_link_remove_remote_sink(link, dc_sink);
1865 dc_sink_release(dc_sink);
/*
 * dc_link_set_sink() - set or clear the link's local sink, updating the
 * connection type (dc_connection_none when @sink is NULL,
 * dc_connection_single otherwise).
 * NOTE(review): the if/else lines around the two type assignments are
 * missing from this extract.
 */
1869 void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1871 struct core_link *core_link = DC_LINK_TO_LINK(link);
1872 struct dc_link *dc_link = &core_link->public;
1874 dc_link->local_sink = sink;
1877 dc_link->type = dc_connection_none;
1879 dc_link->type = dc_connection_single;
1883 void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
1886 struct core_link *core_link = DC_LINK_TO_LINK(link);
1887 struct dc_link *dc_link = &core_link->public;
1889 if (!link->sink_count) {
1890 BREAK_TO_DEBUGGER();
1894 for (i = 0; i < dc_link->sink_count; i++) {
1895 if (dc_link->remote_sinks[i] == sink) {
1896 dc_sink_release(sink);
1897 dc_link->remote_sinks[i] = NULL;
1899 /* shrink array to remove empty place */
1900 while (i < dc_link->sink_count - 1) {
1901 dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
1905 dc_link->sink_count--;