2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "dm_services.h"
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
38 #include "bandwidth_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
45 #include "link_hwss.h"
46 #include "link_encoder.h"
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
52 /*******************************************************************************
54 ******************************************************************************/
55 static void destroy_links(struct core_dc *dc)
59 for (i = 0; i < dc->link_count; i++) {
60 if (NULL != dc->links[i])
61 link_destroy(&dc->links[i]);
/*
 * create_links() - populate dc->links[] with one core_link per physical
 * connector reported by the VBIOS, plus num_virtual_links virtual links
 * (each backed by a virtual link encoder).
 *
 * NOTE(review): lines appear to have been elided by extraction (braces,
 * declarations, error paths, return statements are missing). Comments
 * below describe only what the visible code shows.
 */
65 static bool create_links(
67 uint32_t num_virtual_links)
71 struct dc_bios *bios = dc->ctx->dc_bios;
/* Ask the BIOS parser how many physical connectors the board exposes. */
75 connectors_num = bios->funcs->get_connectors_number(bios);
/* More connectors than we can enumerate is a fatal configuration error. */
77 if (connectors_num > ENUM_ID_COUNT) {
79 "DC: Number of connectors %d exceeds maximum of %d!\n",
/* Zero physical AND zero virtual connectors means nothing to drive. */
85 if (connectors_num == 0 && num_virtual_links == 0) {
86 dm_error("DC: Number of connectors is zero!\n");
90 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
/* One core_link per physical connector. */
95 for (i = 0; i < connectors_num; i++) {
96 struct link_init_data link_init_params = {0};
97 struct core_link *link;
99 link_init_params.ctx = dc->ctx;
/* this next indexing step is connector-table position, not link index */
100 link_init_params.connector_index = i;
101 link_init_params.link_index = dc->link_count;
102 link_init_params.dc = dc;
103 link = link_create(&link_init_params);
106 dc->links[dc->link_count] = link;
/* NOTE(review): failure branch — creation error is logged but the
 * surrounding control flow (continue/return) was elided. */
110 dm_error("DC: failed to create link!\n");
/* Virtual links: allocated directly and given a virtual encoder so the
 * rest of DC can treat them like ordinary links. */
114 for (i = 0; i < num_virtual_links; i++) {
115 struct core_link *link = dm_alloc(sizeof(*link));
116 struct encoder_init_data enc_init = {0};
125 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
126 link->link_id.type = OBJECT_TYPE_CONNECTOR;
127 link->link_id.id = CONNECTOR_ID_VIRTUAL;
128 link->link_id.enum_id = ENUM_ID_1;
/* NOTE(review): dm_alloc() result is not visibly NULL-checked here —
 * confirm against the full source. */
129 link->link_enc = dm_alloc(sizeof(*link->link_enc));
131 enc_init.ctx = dc->ctx;
132 enc_init.channel = CHANNEL_ID_UNKNOWN;
133 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
134 enc_init.transmitter = TRANSMITTER_UNKNOWN;
135 enc_init.connector = link->link_id;
136 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
137 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
138 enc_init.encoder.enum_id = ENUM_ID_1;
139 virtual_link_encoder_construct(link->link_enc, &enc_init);
141 link->public.link_index = dc->link_count;
142 dc->links[dc->link_count] = link;
/*
 * stream_adjust_vmin_vmax() - program dynamic refresh (DRR) limits for the
 * pipes driving the given stream, then rebuild and resend the info frame
 * so the sink sees the updated timing.
 *
 * NOTE(review): extraction elided part of the signature (the vmin/vmax
 * parameters referenced below), the loop counter declaration, braces and
 * the return statement — confirm against the full source.
 */
152 static bool stream_adjust_vmin_vmax(struct dc *dc,
153 const struct dc_stream **stream, int num_streams,
/* Only stream[0] is honored today despite the array parameter. */
156 /* TODO: Support multiple streams */
157 struct core_dc *core_dc = DC_TO_CORE(dc);
158 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
/* Scan every pipe; a stream may be split across more than one. */
162 for (i = 0; i < MAX_PIPES; i++) {
163 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
/* Only pipes that drive this stream and have a stream encoder attached. */
165 if (pipe->stream == core_stream && pipe->stream_enc) {
166 core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
168 /* build and update the info frame */
169 resource_build_info_frame(pipe);
170 core_dc->hwss.update_info_frame(pipe);
/*
 * set_gamut_remap() - reprogram plane configuration (which carries the
 * gamut remap state) for the pipes driving stream[0].
 *
 * NOTE(review): the pipe-match condition at line 189 continues on an
 * elided line, and the loop counter declaration, braces and return are
 * missing from this extract — confirm against the full source.
 */
179 static bool set_gamut_remap(struct dc *dc,
180 const struct dc_stream **stream, int num_streams)
182 struct core_dc *core_dc = DC_TO_CORE(dc);
/* Only the first stream of the array is acted on. */
183 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
186 struct pipe_ctx *pipes;
188 for (i = 0; i < MAX_PIPES; i++) {
189 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
/* Re-run the full plane config program on the matching pipe; gamut
 * remap is applied as part of set_plane_config(). */
192 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
193 core_dc->hwss.set_plane_config(core_dc, pipes,
194 &core_dc->current_context->res_ctx);
202 /* This function is not expected to fail, proper implementation of
203 * validation will prevent this from ever being called for unsupported
/*
 * stream_update_scaling() - update a committed stream's src/dst rectangles
 * and re-commit its surfaces so the new scaling takes effect.
 *
 * NOTE(review): extraction elided the first parameter (a struct dc *dc —
 * it is dereferenced at line 213), NULL-guards around the rect copies,
 * braces and the loop counter — confirm against the full source.
 */
206 static void stream_update_scaling(
208 const struct dc_stream *dc_stream,
209 const struct rect *src,
210 const struct rect *dst)
212 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
213 struct core_dc *core_dc = DC_TO_CORE(dc);
214 struct validate_context *cur_ctx = core_dc->current_context;
/* Adopt the caller-supplied source and destination rectangles. */
218 stream->public.src = *src;
221 stream->public.dst = *dst;
/* Find this stream in the current context and re-commit its surfaces
 * so the hardware picks up the new scaling. */
223 for (i = 0; i < cur_ctx->stream_count; i++) {
224 struct core_stream *cur_stream = cur_ctx->streams[i];
226 if (stream == cur_stream) {
227 struct dc_stream_status *status = &cur_ctx->stream_status[i];
229 if (status->surface_count)
230 if (!dc_commit_surfaces_to_stream(
233 status->surface_count,
234 &cur_stream->public))
235 /* Need to debug validation */
243 static bool set_backlight(struct dc *dc, unsigned int backlight_level,
244 unsigned int frame_ramp, const struct dc_stream *stream)
246 struct core_dc *core_dc = DC_TO_CORE(dc);
249 if (stream->sink->sink_signal == SIGNAL_TYPE_EDP) {
250 for (i = 0; i < core_dc->link_count; i++)
251 dc_link_set_backlight_level(&core_dc->links[i]->public,
252 backlight_level, frame_ramp, stream);
259 static bool init_dmcu_backlight_settings(struct dc *dc)
261 struct core_dc *core_dc = DC_TO_CORE(dc);
264 for (i = 0; i < core_dc->link_count; i++)
265 dc_link_init_dmcu_backlight_settings
266 (&core_dc->links[i]->public);
272 static bool set_abm_level(struct dc *dc, unsigned int abm_level)
274 struct core_dc *core_dc = DC_TO_CORE(dc);
277 for (i = 0; i < core_dc->link_count; i++)
278 dc_link_set_abm_level(&core_dc->links[i]->public,
284 static bool set_psr_enable(struct dc *dc, bool enable)
286 struct core_dc *core_dc = DC_TO_CORE(dc);
289 for (i = 0; i < core_dc->link_count; i++)
290 dc_link_set_psr_enable(&core_dc->links[i]->public,
/*
 * setup_psr() - configure Panel Self Refresh for the link driving the
 * given stream, then program static-screen event triggers on every pipe
 * carrying that stream (skipping the underlay pipe).
 *
 * NOTE(review): extraction elided the loop counter declaration, the
 * trailing arguments of dc_link_setup_psr() and
 * set_static_screen_control(), braces and the return statement — confirm
 * against the full source.
 */
297 static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
299 struct core_dc *core_dc = DC_TO_CORE(dc);
300 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
301 struct pipe_ctx *pipes;
303 unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
/* Only the link that actually drives this stream's sink gets PSR setup. */
305 for (i = 0; i < core_dc->link_count; i++) {
306 if (core_stream->sink->link == core_dc->links[i])
307 dc_link_setup_psr(&core_dc->links[i]->public,
/* Program static-screen detection on each pipe of the stream; the
 * underlay pipe is excluded. */
311 for (i = 0; i < MAX_PIPES; i++) {
312 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
313 == core_stream && i != underlay_idx) {
314 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
315 core_dc->hwss.set_static_screen_control(&pipes, 1,
323 static void set_drive_settings(struct dc *dc,
324 struct link_training_settings *lt_settings,
325 const struct dc_link *link)
327 struct core_dc *core_dc = DC_TO_CORE(dc);
330 for (i = 0; i < core_dc->link_count; i++) {
331 if (&core_dc->links[i]->public == link)
335 if (i >= core_dc->link_count)
336 ASSERT_CRITICAL(false);
338 dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
341 static void perform_link_training(struct dc *dc,
342 struct dc_link_settings *link_setting,
343 bool skip_video_pattern)
345 struct core_dc *core_dc = DC_TO_CORE(dc);
348 for (i = 0; i < core_dc->link_count; i++)
349 dc_link_dp_perform_link_training(
350 &core_dc->links[i]->public,
355 static void set_preferred_link_settings(struct dc *dc,
356 struct dc_link_settings *link_setting,
357 const struct dc_link *link)
359 struct core_link *core_link = DC_LINK_TO_CORE(link);
361 core_link->public.verified_link_cap.lane_count =
362 link_setting->lane_count;
363 core_link->public.verified_link_cap.link_rate =
364 link_setting->link_rate;
365 dp_retrain_link_dp_test(core_link, link_setting, false);
/*
 * enable_hpd() - enable hot-plug detection on the given link.
 * Thin wrapper over dc_link_dp_enable_hpd() for the public function table.
 */
static void enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}
/*
 * disable_hpd() - disable hot-plug detection on the given link.
 * Thin wrapper over dc_link_dp_disable_hpd() for the public function table.
 */
static void disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}
379 static void set_test_pattern(
380 const struct dc_link *link,
381 enum dp_test_pattern test_pattern,
382 const struct link_training_settings *p_link_settings,
383 const unsigned char *p_custom_pattern,
384 unsigned int cust_pattern_size)
387 dc_link_dp_set_test_pattern(
395 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
397 core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
398 if (core_dc->hwss.set_drr != NULL) {
399 core_dc->public.stream_funcs.adjust_vmin_vmax =
400 stream_adjust_vmin_vmax;
403 core_dc->public.stream_funcs.set_gamut_remap =
406 core_dc->public.stream_funcs.set_backlight =
409 core_dc->public.stream_funcs.init_dmcu_backlight_settings =
410 init_dmcu_backlight_settings;
412 core_dc->public.stream_funcs.set_abm_level =
415 core_dc->public.stream_funcs.set_psr_enable =
418 core_dc->public.stream_funcs.setup_psr =
421 core_dc->public.link_funcs.set_drive_settings =
424 core_dc->public.link_funcs.perform_link_training =
425 perform_link_training;
427 core_dc->public.link_funcs.set_preferred_link_settings =
428 set_preferred_link_settings;
430 core_dc->public.link_funcs.enable_hpd =
433 core_dc->public.link_funcs.disable_hpd =
436 core_dc->public.link_funcs.set_test_pattern =
/*
 * destruct() - release everything acquired by construct(), in roughly
 * reverse order: validation contexts, links (elided here), resource pool,
 * GPIO service, I2C/AUX engine, BIOS parser (only if we created it),
 * logger, and finally the context allocations.
 *
 * NOTE(review): extraction elided braces, the destroy_links() call and
 * the final dm_free() of dc->ctx — confirm against the full source.
 */
440 static void destruct(struct core_dc *dc)
442 resource_validate_ctx_destruct(dc->current_context);
446 dc_destroy_resource_pool(dc);
448 if (dc->ctx->gpio_service)
449 dal_gpio_service_destroy(&dc->ctx->gpio_service);
452 dal_i2caux_destroy(&dc->ctx->i2caux);
/* Only destroy the BIOS parser if construct() created it (i.e. it was
 * not supplied via vbios_override). */
454 if (dc->ctx->created_bios)
455 dal_bios_parser_destroy(&dc->ctx->dc_bios);
458 dal_logger_destroy(&dc->ctx->logger);
/* NULL the pointers after freeing to guard against use-after-free. */
460 dm_free(dc->current_context);
461 dc->current_context = NULL;
462 dm_free(dc->temp_flip_context);
463 dc->temp_flip_context = NULL;
464 dm_free(dc->scratch_val_ctx);
465 dc->scratch_val_ctx = NULL;
/*
 * construct() - build the core_dc: context, validation contexts, logger,
 * BIOS parser (or override), I2C/AUX, GPIO service, resource pool, links
 * and the public function tables. Returns true on success.
 *
 * Uses goto-based unwind on failure; most labels and the unwind bodies
 * were elided by extraction.
 *
 * NOTE(review): braces, several NULL checks, goto targets and the return
 * statements are missing from this extract — confirm against the full
 * source before relying on the error paths described here.
 */
471 static bool construct(struct core_dc *dc,
472 const struct dc_init_data *init_params)
474 struct dal_logger *logger;
475 struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
476 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
/* ctx allocation failure: logger does not exist yet, use dm_error. */
479 dm_error("%s: failed to create ctx\n", __func__);
/* Three validation contexts: the committed one, a scratch context used
 * during flips, and a scratch context for clock-constraint validation. */
483 dc->current_context = dm_alloc(sizeof(*dc->current_context));
484 dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));
485 dc->scratch_val_ctx = dm_alloc(sizeof(*dc->scratch_val_ctx));
/* NOTE(review): scratch_val_ctx is allocated above but not checked in
 * this condition — possible missed NULL check; verify upstream. */
487 if (!dc->current_context || !dc->temp_flip_context) {
488 dm_error("%s: failed to create validate ctx\n", __func__);
492 dc_ctx->cgs_device = init_params->cgs_device;
493 dc_ctx->driver_context = init_params->driver;
494 dc_ctx->dc = &dc->public;
495 dc_ctx->asic_id = init_params->asic_id;
/* Create logger first so later failures can be logged properly. */
498 logger = dal_logger_create(dc_ctx);
501 /* can *not* call logger. call base driver 'print error' */
502 dm_error("%s: failed to create Logger!\n", __func__);
505 dc_ctx->logger = logger;
507 dc->ctx->dce_environment = init_params->dce_environment;
/* Determine the DCE hardware generation from the ASIC id. */
509 dc_version = resource_parse_asic_id(init_params->asic_id);
510 dc->ctx->dce_version = dc_version;
512 /* Resource should construct all asic specific resources.
513 * This should be the only place where we need to parse the asic id
/* A caller-supplied VBIOS (e.g. for emulation) takes precedence over
 * creating a real BIOS parser. */
515 if (init_params->vbios_override)
516 dc_ctx->dc_bios = init_params->vbios_override;
518 /* Create BIOS parser */
519 struct bp_init_data bp_init_data;
520 bp_init_data.ctx = dc_ctx;
521 bp_init_data.bios = init_params->asic_id.atombios_base_address;
523 dc_ctx->dc_bios = dal_bios_parser_create(
524 &bp_init_data, dc_version);
526 if (!dc_ctx->dc_bios) {
527 ASSERT_CRITICAL(false);
/* Remember we own the parser so destruct() knows to destroy it. */
531 dc_ctx->created_bios = true;
/* Create I2C AUX */
535 dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
537 if (!dc_ctx->i2caux) {
538 ASSERT_CRITICAL(false);
539 goto failed_to_create_i2caux;
542 /* Create GPIO service */
543 dc_ctx->gpio_service = dal_gpio_service_create(
545 dc_ctx->dce_environment,
548 if (!dc_ctx->gpio_service) {
549 ASSERT_CRITICAL(false);
/* ASIC-specific resource pool: pipes, encoders, clock sources, etc. */
553 dc->res_pool = dc_create_resource_pool(
555 init_params->num_virtual_links,
557 init_params->asic_id);
559 goto create_resource_fail;
561 if (!create_links(dc, init_params->num_virtual_links))
562 goto create_links_fail;
564 allocate_dc_stream_funcs(dc);
568 /**** error handling here ****/
570 create_resource_fail:
572 failed_to_create_i2caux:
/*
 * ProgramPixelDurationV() - reference sequence for programming the DPGV
 * (underlay display pipe) arbitration registers with the pixel duration
 * derived from the pixel clock.
 *
 * NOTE(review): this does not look like compilable driver C (camelCase
 * identifiers, `Fixed31_32(...)`/`round(...)` pseudo-syntax, bare
 * ReadReg/WriteReg). In the upstream file a sequence like this appears
 * inside a reference comment block, not as live code — confirm whether
 * it is fenced by a comment or #if 0 in the full source.
 */
582 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
/* Pixel duration in picoseconds: 10^8 / clock(kHz), scaled by 10. */
584 fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
585 unsigned int pixDurationInPico = round(pixel_duration);
587 DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
/* Program PIXEL_DURATION on both underlay pipes (DPGV0 and DPGV1). */
589 arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
590 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
591 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
593 arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
594 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
595 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
/* Fixed arbitration/repeater values; meaning of the magic constants is
 * hardware-defined and not documented here. */
597 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
598 WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
600 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
601 WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
605 /*******************************************************************************
607 ******************************************************************************/
/*
 * dc_create() - public entry point: allocate and construct the display
 * core, initialize hardware, and fill in the public capability fields.
 * Returns the public struct dc, or (in elided error paths) NULL.
 *
 * NOTE(review): extraction elided the NULL check on the core_dc
 * allocation, the construct-failure unwind, the underlay pipe-count
 * adjustment and the closing return path — confirm against the full
 * source.
 */
609 struct dc *dc_create(const struct dc_init_data *init_params)
611 struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
612 unsigned int full_pipe_count;
617 if (false == construct(core_dc, init_params))
620 /*TODO: separate HW and SW initialization*/
621 core_dc->hwss.init_hw(core_dc);
/* Max streams is bounded by usable pipes and stream encoders; an
 * underlay pipe (if present) does not count as a full pipe. */
623 full_pipe_count = core_dc->res_pool->pipe_count;
624 if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
626 core_dc->public.caps.max_streams = min(
628 core_dc->res_pool->stream_enc_count);
630 core_dc->public.caps.max_links = core_dc->link_count;
631 core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
633 core_dc->public.config = init_params->flags;
635 dm_logger_write(core_dc->ctx->logger, LOG_DC,
636 "Display Core initialized\n");
639 /* TODO: missing feature to be enabled */
640 core_dc->public.debug.disable_dfs_bypass = true;
/* Callers only ever see the embedded public struct. */
642 return &core_dc->public;
/*
 * dc_destroy() - public teardown counterpart of dc_create().
 *
 * NOTE(review): the body is almost entirely elided in this extract; only
 * the recovery of the core struct from the public pointer is visible.
 * Presumably it calls destruct(), frees core_dc and NULLs *dc — confirm
 * against the full source.
 */
651 void dc_destroy(struct dc **dc)
653 struct core_dc *core_dc = DC_TO_CORE(*dc);
/*
 * is_validation_required() - decide whether a full resource validation is
 * needed for the proposed set, by comparing it against the currently
 * committed context: stream count, per-stream surface count, stream
 * timing compatibility, and a field-by-field surface comparison that
 * deliberately ignores clip_rect and dst_rect position (those can be
 * updated without revalidation).
 *
 * NOTE(review): braces, the early `return true/false` statements and the
 * inner-loop counter declaration were elided by extraction.
 */
659 static bool is_validation_required(
660 const struct core_dc *dc,
661 const struct dc_validation_set set[],
664 const struct validate_context *context = dc->current_context;
667 if (context->stream_count != set_count)
670 for (i = 0; i < set_count; i++) {
672 if (set[i].surface_count != context->stream_status[i].surface_count)
674 if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
677 for (j = 0; j < set[i].surface_count; j++) {
678 struct dc_surface temp_surf = { 0 };
/* Copy the committed surface, then overwrite the fields that are
 * allowed to differ so memcmp only flags meaningful changes. */
680 temp_surf = *context->stream_status[i].surfaces[j];
681 temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
682 temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
683 temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
685 if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
/*
 * dc_validate_resources() - check whether the proposed validation set can
 * be supported by the hardware, without committing anything. Skips the
 * expensive check entirely when the set matches the committed context.
 * Returns true iff validation passed (or was unnecessary).
 *
 * NOTE(review): the dc parameter line, allocation NULL check, context
 * free and label bodies were elided by extraction.
 */
693 bool dc_validate_resources(
695 const struct dc_validation_set set[],
698 struct core_dc *core_dc = DC_TO_CORE(dc);
699 enum dc_status result = DC_ERROR_UNEXPECTED;
700 struct validate_context *context;
/* Fast path: identical to what is already committed. */
702 if (!is_validation_required(core_dc, set, set_count))
/* Validation works on a throwaway context so the committed state is
 * never touched. */
705 context = dm_alloc(sizeof(struct validate_context));
707 goto context_alloc_fail;
709 result = core_dc->res_pool->funcs->validate_with_context(
710 core_dc, set, set_count, context);
712 resource_validate_ctx_destruct(context);
716 if (result != DC_OK) {
717 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
718 "%s:resource validation failed, dc_status:%d\n",
723 return (result == DC_OK);
/*
 * dc_validate_guaranteed() - check that a single stream can be supported
 * in guaranteed mode (i.e. regardless of what else is displayed), again
 * via a throwaway validation context. Returns true iff DC_OK.
 *
 * NOTE(review): the dc parameter line, allocation NULL check, context
 * free and label bodies were elided by extraction.
 */
727 bool dc_validate_guaranteed(
729 const struct dc_stream *stream)
731 struct core_dc *core_dc = DC_TO_CORE(dc);
732 enum dc_status result = DC_ERROR_UNEXPECTED;
733 struct validate_context *context;
735 context = dm_alloc(sizeof(struct validate_context));
737 goto context_alloc_fail;
739 result = core_dc->res_pool->funcs->validate_guaranteed(
740 core_dc, stream, context);
742 resource_validate_ctx_destruct(context);
746 if (result != DC_OK) {
747 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
748 "%s:guaranteed validation failed, dc_status:%d\n",
753 return (result == DC_OK);
/*
 * program_timing_sync() - group pipes whose streams have synchronizable
 * timings and enable hardware timing synchronization per group, electing
 * an already-unblanked pipe as the group master when one exists.
 *
 * Algorithm: collect all top-level pipes with a stream into
 * unsynced_pipes[], then repeatedly pull one pipe out, gather every
 * remaining pipe with compatible timing into pipe_set[], pick a master,
 * drop pipes that are already unblanked (already synced), and hand the
 * group to the hardware sequencer.
 *
 * NOTE(review): several declarations (i, j, group_index, group_size and
 * their initializations), braces, `continue` statements and the
 * group_size/group_index bookkeeping were elided by extraction — the
 * comments describe the visible skeleton only.
 */
756 static void program_timing_sync(
757 struct core_dc *core_dc,
758 struct validate_context *ctx)
762 int pipe_count = ctx->res_ctx.pool->pipe_count;
763 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
/* Candidates: pipes with a stream that are not secondary (bottom) pipes
 * of a split — top_pipe != NULL means this pipe follows another. */
765 for (i = 0; i < pipe_count; i++) {
766 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
769 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
772 for (i = 0; i < pipe_count; i++) {
774 struct pipe_ctx *pipe_set[MAX_PIPES];
776 if (!unsynced_pipes[i])
779 pipe_set[0] = unsynced_pipes[i];
780 unsynced_pipes[i] = NULL;
782 /* Add tg to the set, search rest of the tg's for ones with
783 * same timing, add all tgs with same timing to the group
785 for (j = i + 1; j < pipe_count; j++) {
786 if (!unsynced_pipes[j])
789 if (resource_are_streams_timing_synchronizable(
790 unsynced_pipes[j]->stream,
791 pipe_set[0]->stream)) {
792 pipe_set[group_size] = unsynced_pipes[j];
793 unsynced_pipes[j] = NULL;
798 /* set first unblanked pipe as master */
799 for (j = 0; j < group_size; j++) {
800 struct pipe_ctx *temp;
802 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
/* NOTE(review): the swap via `temp` is partially elided; only the
 * move of pipe_set[j] into slot 0 is visible. */
807 pipe_set[0] = pipe_set[j];
813 /* remove any other unblanked pipes as they have already been synced */
814 for (j = j + 1; j < group_size; j++) {
815 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
/* Swap-with-last removal; group_size shrink is elided. */
817 pipe_set[j] = pipe_set[group_size];
/* Only groups of two or more pipes need hardware synchronization. */
822 if (group_size > 1) {
823 core_dc->hwss.enable_timing_synchronization(
824 core_dc, group_index, group_size, pipe_set);
830 static bool streams_changed(
832 const struct dc_stream *streams[],
833 uint8_t stream_count)
837 if (stream_count != dc->current_context->stream_count)
840 for (i = 0; i < dc->current_context->stream_count; i++) {
841 if (&dc->current_context->streams[i]->public != streams[i])
/*
 * fill_display_configs() - translate the validated context into the
 * powerplay display-configuration structure: one dm_pp_single_disp_config
 * per active stream, carrying signal type, pipe index, source dimensions,
 * DDI mapping, link settings, symbol clock and a rounded refresh rate.
 *
 * NOTE(review): declarations of j, k, num_cfgs and its increment, plus
 * some closing braces, were elided by extraction.
 */
848 static void fill_display_configs(
849 const struct validate_context *context,
850 struct dm_pp_display_configuration *pp_display_cfg)
855 for (j = 0; j < context->stream_count; j++) {
858 const struct core_stream *stream = context->streams[j];
859 struct dm_pp_single_disp_config *cfg =
860 &pp_display_cfg->disp_configs[num_cfgs];
861 const struct pipe_ctx *pipe_ctx = NULL;
/* Find the (first) pipe driving this stream. */
863 for (k = 0; k < MAX_PIPES; k++)
864 if (stream == context->res_ctx.pipe_ctx[k].stream) {
865 pipe_ctx = &context->res_ctx.pipe_ctx[k];
/* Every context stream must be backed by a pipe at this point. */
869 ASSERT(pipe_ctx != NULL);
872 cfg->signal = pipe_ctx->stream->signal;
873 cfg->pipe_idx = pipe_ctx->pipe_idx;
874 cfg->src_height = stream->public.src.height;
875 cfg->src_width = stream->public.src.width;
876 cfg->ddi_channel_mapping =
877 stream->sink->link->ddi_channel_mapping.raw;
879 stream->sink->link->link_enc->transmitter;
880 cfg->link_settings.lane_count =
881 stream->sink->link->public.cur_link_settings.lane_count;
882 cfg->link_settings.link_rate =
883 stream->sink->link->public.cur_link_settings.link_rate;
884 cfg->link_settings.link_spread =
885 stream->sink->link->public.cur_link_settings.link_spread;
886 cfg->sym_clock = stream->phy_pix_clk;
/* Refresh rate in Hz: pix_clk / (h_total * v_total), computed in
 * integer math with rounding via the +v_total/2 term. */
888 cfg->v_refresh = stream->public.timing.pix_clk_khz * 1000;
889 cfg->v_refresh /= stream->public.timing.h_total;
890 cfg->v_refresh = (cfg->v_refresh + stream->public.timing.v_total / 2)
891 / stream->public.timing.v_total;
894 pp_display_cfg->display_count = num_cfgs;
897 static uint32_t get_min_vblank_time_us(const struct validate_context *context)
900 uint32_t min_vertical_blank_time = -1;
902 for (j = 0; j < context->stream_count; j++) {
903 const struct dc_stream *stream = &context->streams[j]->public;
904 uint32_t vertical_blank_in_pixels = 0;
905 uint32_t vertical_blank_time = 0;
907 vertical_blank_in_pixels = stream->timing.h_total *
908 (stream->timing.v_total
909 - stream->timing.v_addressable);
911 vertical_blank_time = vertical_blank_in_pixels
912 * 1000 / stream->timing.pix_clk_khz;
914 if (min_vertical_blank_time > vertical_blank_time)
915 min_vertical_blank_time = vertical_blank_time;
918 return min_vertical_blank_time;
921 static int determine_sclk_from_bounding_box(
922 const struct core_dc *dc,
928 * Some asics do not give us sclk levels, so we just report the actual
931 if (dc->sclk_lvls.num_levels == 0)
932 return required_sclk;
934 for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
935 if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
936 return dc->sclk_lvls.clocks_in_khz[i];
939 * even maximum level could not satisfy requirement, this
940 * is unexpected at this stage, should have been caught at
944 return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
/*
 * pplib_apply_display_requirements() - translate bandwidth-calculation
 * results from the validated context into a powerplay display
 * configuration and hand it to the platform power code, but only when it
 * actually differs from the previously applied configuration.
 *
 * NOTE(review): the first parameter line (the core_dc, dereferenced as
 * `dc` below) and some braces were elided by extraction.
 */
947 void pplib_apply_display_requirements(
949 const struct validate_context *context,
950 struct dm_pp_display_configuration *pp_display_cfg)
952 pp_display_cfg->all_displays_in_sync =
953 context->bw_results.all_displays_in_sync;
/* powerplay wants "disable" flags, bw_calcs produces "enable" flags —
 * hence the == false inversions. */
954 pp_display_cfg->nb_pstate_switch_disable =
955 context->bw_results.nbp_state_change_enable == false;
956 pp_display_cfg->cpu_cc6_disable =
957 context->bw_results.cpuc_state_change_enable == false;
958 pp_display_cfg->cpu_pstate_disable =
959 context->bw_results.cpup_state_change_enable == false;
960 pp_display_cfg->cpu_pstate_separation_time =
961 context->bw_results.blackout_recovery_time_us;
/* yclk is the DRAM data rate; divide by the memory-type multiplier to
 * get the actual memory clock. */
963 pp_display_cfg->min_memory_clock_khz = context->bw_results.required_yclk
964 / MEMORY_TYPE_MULTIPLIER;
966 pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
968 context->bw_results.required_sclk);
970 pp_display_cfg->min_engine_clock_deep_sleep_khz
971 = context->bw_results.required_sclk_deep_sleep;
/* Memory-clock switches must fit in the shortest vblank of any stream. */
973 pp_display_cfg->avail_mclk_switch_time_us =
974 get_min_vblank_time_us(context);
976 pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
978 pp_display_cfg->disp_clk_khz = context->bw_results.dispclk_khz;
980 fill_display_configs(context, pp_display_cfg);
982 /* TODO: is this still applicable?*/
983 if (pp_display_cfg->display_count == 1) {
984 const struct dc_crtc_timing *timing =
985 &context->streams[0]->public.timing;
987 pp_display_cfg->crtc_index =
988 pp_display_cfg->disp_configs[0].pipe_idx;
989 pp_display_cfg->line_time_in_us = timing->h_total * 1000
990 / timing->pix_clk_khz;
/* Only bother the SMU/powerplay when something actually changed. */
993 if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
994 struct dm_pp_display_configuration)) != 0)
995 dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
997 dc->prev_display_config = *pp_display_cfg;
/*
 * dc_commit_streams() - validate and commit a new set of streams to
 * hardware: build a validation set from the current surface status of
 * each stream, validate into a fresh context, apply it to hardware,
 * program timing sync, unblank visible surfaces, apply power
 * requirements, and finally swap the new context in as current.
 *
 * Returns true iff the commit reached DC_OK.
 *
 * NOTE(review): loop counter declarations, several early-return paths,
 * NULL checks and closing braces were elided by extraction — the
 * comments below describe the visible flow only.
 */
1001 bool dc_commit_streams(
1003 const struct dc_stream *streams[],
1004 uint8_t stream_count)
1006 struct core_dc *core_dc = DC_TO_CORE(dc);
1007 struct dc_bios *dcb = core_dc->ctx->dc_bios;
1008 enum dc_status result = DC_ERROR_UNEXPECTED;
1009 struct validate_context *context;
1010 struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
/* No-op when the stream set is identical to what is committed. */
1013 if (false == streams_changed(core_dc, streams, stream_count))
1016 dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
1017 __func__, stream_count);
/* Build the validation set, carrying over each stream's currently
 * attached surfaces (if it has a status yet). */
1019 for (i = 0; i < stream_count; i++) {
1020 const struct dc_stream *stream = streams[i];
1021 const struct dc_stream_status *status = dc_stream_get_status(stream);
1024 dc_stream_log(stream,
1025 core_dc->ctx->logger,
1028 set[i].stream = stream;
1031 set[i].surface_count = status->surface_count;
1032 for (j = 0; j < status->surface_count; j++)
1033 set[i].surfaces[j] = status->surfaces[j];
1038 context = dm_alloc(sizeof(struct validate_context));
1039 if (context == NULL)
1040 goto context_alloc_fail;
1042 result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, stream_count, context);
1043 if (result != DC_OK){
1044 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
1045 "%s: Context validation failed! dc_status:%d\n",
1048 BREAK_TO_DEBUGGER();
1049 resource_validate_ctx_destruct(context);
/* First modeset: leave VBIOS/legacy mode before touching the HW. */
1053 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1054 core_dc->hwss.enable_accelerated_mode(core_dc);
1057 if (result == DC_OK) {
1058 result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
1061 program_timing_sync(core_dc, context);
/* Unblank every pipe whose surface is attached and visible, and log
 * the committed timing per stream. */
1063 for (i = 0; i < context->stream_count; i++) {
1064 const struct core_sink *sink = context->streams[i]->sink;
1066 for (j = 0; j < context->stream_status[i].surface_count; j++) {
1067 const struct dc_surface *dc_surface =
1068 context->stream_status[i].surfaces[j];
1070 for (k = 0; k < context->res_ctx.pool->pipe_count; k++) {
1071 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[k];
1073 if (dc_surface != &pipe->surface->public
1074 || !dc_surface->visible)
1077 pipe->tg->funcs->set_blank(pipe->tg, false);
1081 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
1082 context->streams[i]->public.timing.h_addressable,
1083 context->streams[i]->public.timing.v_addressable,
1084 context->streams[i]->public.timing.h_total,
1085 context->streams[i]->public.timing.v_total,
1086 context->streams[i]->public.timing.pix_clk_khz);
1089 pplib_apply_display_requirements(core_dc,
1090 context, &context->pp_display_cfg);
/* Swap in the new context; recycle the old one as the flip scratch
 * context (freeing the previous scratch if it was distinct). */
1092 resource_validate_ctx_destruct(core_dc->current_context);
1094 if (core_dc->temp_flip_context != core_dc->current_context) {
1095 dm_free(core_dc->temp_flip_context);
1096 core_dc->temp_flip_context = core_dc->current_context;
1098 core_dc->current_context = context;
1099 memset(core_dc->temp_flip_context, 0, sizeof(*core_dc->temp_flip_context));
1101 return (result == DC_OK);
1107 return (result == DC_OK);
/*
 * dc_pre_update_surfaces_to_stream() - prepare hardware/context state
 * before new surfaces are programmed onto a stream: copy the current
 * context, attach the new surfaces, rebuild scaling, revalidate
 * bandwidth, apply clock constraints, raise dispclk early if it must
 * increase (so it is stable before the flip), and prepare each affected
 * pipe.
 *
 * NOTE(review): loop counter declarations, early-return paths (e.g. the
 * skip_pre fast path's exit), the context allocation NULL check and
 * several closing braces were elided by extraction.
 */
1110 bool dc_pre_update_surfaces_to_stream(
1112 const struct dc_surface *const *new_surfaces,
1113 uint8_t new_surface_count,
1114 const struct dc_stream *dc_stream)
1117 struct core_dc *core_dc = DC_TO_CORE(dc);
1118 uint32_t prev_disp_clk = core_dc->current_context->bw_results.dispclk_khz;
1119 struct dc_stream_status *stream_status = NULL;
1120 struct validate_context *context;
1121 struct validate_context *temp_context;
1124 pre_surface_trace(dc, new_surfaces, new_surface_count);
1126 if (core_dc->current_context->stream_count == 0)
1129 /* Cannot commit surface to a stream that is not commited */
1130 for (i = 0; i < core_dc->current_context->stream_count; i++)
1131 if (dc_stream == &core_dc->current_context->streams[i]->public)
1134 if (i == core_dc->current_context->stream_count)
1137 stream_status = &core_dc->current_context->stream_status[i];
/* Fast path: if the new surfaces differ from the committed ones only
 * in clip_rect / dst_rect position, the expensive pre-work can be
 * skipped (same ignore-list as is_validation_required()). */
1139 if (new_surface_count == stream_status->surface_count) {
1140 bool skip_pre = true;
1142 for (i = 0; i < stream_status->surface_count; i++) {
1143 struct dc_surface temp_surf = { 0 };
1145 temp_surf = *stream_status->surfaces[i];
1146 temp_surf.clip_rect = new_surfaces[i]->clip_rect;
1147 temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x;
1148 temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y;
1150 if (memcmp(&temp_surf, new_surfaces[i], sizeof(temp_surf)) != 0) {
/* Work on a copy of the committed context; the original stays valid
 * until dc_commit / post-update swaps state. */
1160 context = dm_alloc(sizeof(struct validate_context));
1163 dm_error("%s: failed to create validate ctx\n", __func__);
1168 resource_validate_ctx_copy_construct(core_dc->current_context, context);
1170 dm_logger_write(core_dc->ctx->logger, LOG_DC,
1171 "%s: commit %d surfaces to stream 0x%x\n",
1176 if (!resource_attach_surfaces_to_context(
1177 new_surfaces, new_surface_count, dc_stream, context)) {
1178 BREAK_TO_DEBUGGER();
1180 goto unexpected_fail;
/* Rebuild scaling for every pipe that carries one of the new
 * surfaces; the visual-confirm debug bar shrinks the recout. */
1183 for (i = 0; i < new_surface_count; i++)
1184 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1185 if (context->res_ctx.pipe_ctx[j].surface !=
1186 DC_SURFACE_TO_CORE(new_surfaces[i]))
1189 resource_build_scaling_params(
1190 new_surfaces[i], &context->res_ctx.pipe_ctx[j]);
1192 if (dc->debug.surface_visual_confirm) {
1193 context->res_ctx.pipe_ctx[j].scl_data.recout.height -= 2;
1194 context->res_ctx.pipe_ctx[j].scl_data.recout.width -= 2;
1198 if (core_dc->res_pool->funcs->validate_bandwidth(core_dc, context) != DC_OK) {
1199 BREAK_TO_DEBUGGER();
1201 goto unexpected_fail;
/* Optional per-ASIC clock-constraint pass; on success the returned
 * context replaces ours and our context becomes the new scratch. */
1204 if (core_dc->res_pool->funcs->apply_clk_constraints) {
1205 temp_context = core_dc->res_pool->funcs->apply_clk_constraints(
1208 if (!temp_context) {
1209 dm_error("%s:failed apply clk constraints\n", __func__);
1211 goto unexpected_fail;
1213 resource_validate_ctx_destruct(context);
1214 ASSERT(core_dc->scratch_val_ctx == temp_context);
1215 core_dc->scratch_val_ctx = context;
1216 context = temp_context;
/* Raise dispclk ahead of the update (with 15% margin) so the clock is
 * settled before surfaces flip; lowering is deferred to post-update. */
1219 if (prev_disp_clk < context->bw_results.dispclk_khz) {
1220 pplib_apply_display_requirements(core_dc, context,
1221 &context->pp_display_cfg);
1222 context->res_ctx.pool->display_clock->funcs->set_clock(
1223 context->res_ctx.pool->display_clock,
1224 context->bw_results.dispclk_khz * 115 / 100);
1225 core_dc->current_context->bw_results.dispclk_khz =
1226 context->bw_results.dispclk_khz;
1229 for (i = 0; i < new_surface_count; i++)
1230 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1231 if (context->res_ctx.pipe_ctx[j].surface !=
1232 DC_SURFACE_TO_CORE(new_surfaces[i]))
1235 core_dc->hwss.prepare_pipe_for_context(
1237 &context->res_ctx.pipe_ctx[j],
/* Unwind label: destroy the working context on any failure above. */
1242 resource_validate_ctx_destruct(context);
/*
 * dc_post_update_surfaces_to_stream() - finish a surface update: power
 * down front ends of pipes that no longer carry a stream, revalidate
 * bandwidth on the committed context, drop clocks to the new requirement
 * and re-apply powerplay display requirements.
 *
 * NOTE(review): the loop counter declaration, the bandwidth-failure
 * return, final return and some braces were elided by extraction.
 */
1249 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1251 struct core_dc *core_dc = DC_TO_CORE(dc);
1254 post_surface_trace(dc);
/* Pipes left without a stream after the update get their front end
 * powered down. */
1256 for (i = 0; i < core_dc->current_context->res_ctx.pool->pipe_count; i++)
1257 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream == NULL) {
1258 core_dc->current_context->res_ctx.pipe_ctx[i].pipe_idx = i;
1259 core_dc->hwss.power_down_front_end(
1260 core_dc, &core_dc->current_context->res_ctx.pipe_ctx[i]);
1264 if (core_dc->res_pool->funcs->validate_bandwidth(core_dc, core_dc->current_context)
1266 BREAK_TO_DEBUGGER();
/* Lower clocks now that the (possibly reduced) requirement is known;
 * pre-update only ever raised them. */
1270 core_dc->hwss.set_bandwidth(core_dc);
1272 pplib_apply_display_requirements(
1273 core_dc, core_dc->current_context, &core_dc->current_context->pp_display_cfg);
/*
 * dc_commit_surfaces_to_stream() - convenience wrapper that performs a
 * full surface commit as pre-update + full-update + post-update: it
 * expands each new surface into flip/plane/scaling update structures and
 * funnels them through dc_update_surfaces_for_stream().
 *
 * NOTE(review): the dc parameter line, loop counter declaration, gamma
 * assignment target line and some braces were elided by extraction.
 */
1278 bool dc_commit_surfaces_to_stream(
1280 const struct dc_surface **new_surfaces,
1281 uint8_t new_surface_count,
1282 const struct dc_stream *dc_stream)
/* Stack-allocated update descriptors, one slot per possible surface. */
1284 struct dc_surface_update updates[MAX_SURFACES];
1285 struct dc_flip_addrs flip_addr[MAX_SURFACES];
1286 struct dc_plane_info plane_info[MAX_SURFACES];
1287 struct dc_scaling_info scaling_info[MAX_SURFACES];
1290 if (!dc_pre_update_surfaces_to_stream(
1291 dc, new_surfaces, new_surface_count, dc_stream))
1294 memset(updates, 0, sizeof(updates));
1295 memset(flip_addr, 0, sizeof(flip_addr));
1296 memset(plane_info, 0, sizeof(plane_info));
1297 memset(scaling_info, 0, sizeof(scaling_info));
/* Copy every relevant field of each surface into the corresponding
 * update structures, then link them into updates[i]. */
1299 for (i = 0; i < new_surface_count; i++) {
1300 updates[i].surface = new_surfaces[i];
1302 (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1303 flip_addr[i].address = new_surfaces[i]->address;
1304 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1305 plane_info[i].color_space = new_surfaces[i]->color_space;
1306 plane_info[i].format = new_surfaces[i]->format;
1307 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1308 plane_info[i].rotation = new_surfaces[i]->rotation;
1309 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1310 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1311 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1312 plane_info[i].visible = new_surfaces[i]->visible;
1313 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1314 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1315 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1316 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1318 updates[i].flip_addr = &flip_addr[i];
1319 updates[i].plane_info = &plane_info[i];
1320 updates[i].scaling_info = &scaling_info[i];
1322 dc_update_surfaces_for_stream(dc, updates, new_surface_count, dc_stream);
1324 return dc_post_update_surfaces_to_stream(dc);
1327 static bool is_surface_in_context(
1328 const struct validate_context *context,
1329 const struct dc_surface *surface)
1333 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1334 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1336 if (surface == &pipe_ctx->surface->public) {
/* Severity classes for a surface update, ordered from cheapest to most
 * expensive; comparisons like "type >= UPDATE_TYPE_FULL" rely on this order.
 */
enum surface_update_type {
	UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
	UPDATE_TYPE_MED,  /* a lot of programming needed. may need to alloc */
	UPDATE_TYPE_FULL, /* may need to shuffle resources */
};
1350 static enum surface_update_type det_surface_update(
1351 const struct core_dc *dc,
1352 const struct dc_surface_update *u)
1354 const struct validate_context *context = dc->current_context;
1356 if (u->scaling_info || u->plane_info)
1357 /* todo: not all scale and plane_info update need full update
1358 * ie. check if following is the same
1359 * scale ratio, view port, surface bpp etc
1361 return UPDATE_TYPE_FULL; /* may need bandwidth update */
1363 if (!is_surface_in_context(context, u->surface))
1364 return UPDATE_TYPE_FULL;
1366 if (u->in_transfer_func ||
1367 u->out_transfer_func ||
1368 u->hdr_static_metadata)
1369 return UPDATE_TYPE_MED;
1371 return UPDATE_TYPE_FAST;
1374 static enum surface_update_type check_update_surfaces_for_stream(
1376 struct dc_surface_update *updates,
1378 const struct dc_stream_status *stream_status)
1381 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1383 if (stream_status->surface_count != surface_count)
1384 return UPDATE_TYPE_FULL;
1386 for (i = 0 ; i < surface_count; i++) {
1387 enum surface_update_type type =
1388 det_surface_update(dc, &updates[i]);
1390 if (type == UPDATE_TYPE_FULL)
1393 if (overall_type < type)
1394 overall_type = type;
1397 return overall_type;
1400 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
/*
 * dc_update_surfaces_for_stream() - apply a batch of surface updates to a
 * committed stream.
 *
 * Flow (as visible in this extraction; several structural lines — braces,
 * continue/break/return statements and some argument lines — were elided by
 * the extraction and are NOT reconstructed here):
 *   1. Classify the batch (FAST/MED/FULL) via check_update_surfaces_for_stream.
 *   2. For FULL updates, build a new validate_context in scratch memory and
 *      attach the new surfaces; otherwise reuse the current context.
 *   3. Copy update parameters (flip address, scaling, plane info, gamma,
 *      transfer funcs, HDR metadata) into each core_surface's public state,
 *      with reference counting on gamma and transfer-function objects.
 *   4. Program hardware per pipe: lock pipes, apply context for top pipes on
 *      FULL updates, program flip addresses and transfer functions, update
 *      info frames; then unlock pipes in reverse pipe order.
 *   5. On FULL updates, swap current_context with the scratch context.
 *
 * NOTE(review): code below is kept byte-identical to the damaged source
 * (including fused line numbers); do not treat it as compilable as-is.
 */
1402 void dc_update_surfaces_for_stream(struct dc *dc,
1403 struct dc_surface_update *updates, int surface_count,
1404 const struct dc_stream *dc_stream)
1406 struct core_dc *core_dc = DC_TO_CORE(dc);
1407 struct validate_context *context;
1410 enum surface_update_type update_type;
1411 const struct dc_stream_status *stream_status;
1413 stream_status = dc_stream_get_status(dc_stream);
1414 ASSERT(stream_status);
1416 return; /* Cannot commit surface to stream that is not committed */
/* Classify the whole batch; severity decides how much reprogramming below. */
1418 update_type = check_update_surfaces_for_stream(
1419 core_dc, updates, surface_count, stream_status);
1421 if (update_type >= update_surface_trace_level)
1422 update_surface_trace(dc, updates, surface_count);
/* FULL update: build a fresh context in scratch memory with new surfaces. */
1424 if (update_type >= UPDATE_TYPE_FULL) {
1425 const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };
1427 for (i = 0; i < surface_count; i++)
1428 new_surfaces[i] = updates[i].surface;
1430 /* initialize scratch memory for building context */
1431 context = core_dc->temp_flip_context;
1432 resource_validate_ctx_copy_construct(
1433 core_dc->current_context, context);
1435 /* add surface to context */
1436 if (!resource_attach_surfaces_to_context(
1437 new_surfaces, surface_count, dc_stream, context)) {
1438 BREAK_TO_DEBUGGER();
/* Non-FULL updates reuse the live context. */
1442 context = core_dc->current_context;
/* Phase 1: copy update parameters into each surface's public state. */
1444 for (i = 0; i < surface_count; i++) {
1445 /* save update param into surface */
1446 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1447 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1449 if (updates[i].flip_addr) {
1450 surface->public.address = updates[i].flip_addr->address;
1451 surface->public.flip_immediate =
1452 updates[i].flip_addr->flip_immediate;
1455 if (updates[i].scaling_info) {
1456 surface->public.scaling_quality =
1457 updates[i].scaling_info->scaling_quality;
1458 surface->public.dst_rect =
1459 updates[i].scaling_info->dst_rect;
1460 surface->public.src_rect =
1461 updates[i].scaling_info->src_rect;
1462 surface->public.clip_rect =
1463 updates[i].scaling_info->clip_rect;
1466 if (updates[i].plane_info) {
1467 surface->public.color_space =
1468 updates[i].plane_info->color_space;
1469 surface->public.format =
1470 updates[i].plane_info->format;
1471 surface->public.plane_size =
1472 updates[i].plane_info->plane_size;
1473 surface->public.rotation =
1474 updates[i].plane_info->rotation;
1475 surface->public.horizontal_mirror =
1476 updates[i].plane_info->horizontal_mirror;
1477 surface->public.stereo_format =
1478 updates[i].plane_info->stereo_format;
1479 surface->public.tiling_info =
1480 updates[i].plane_info->tiling_info;
1481 surface->public.visible =
1482 updates[i].plane_info->visible;
1483 surface->public.dcc =
1484 updates[i].plane_info->dcc;
1487 /* not sure if we still need this */
1488 if (update_type == UPDATE_TYPE_FULL) {
/* Rebuild scaling params for every pipe driving this surface. */
1489 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1490 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1491 struct core_stream *stream = pipe_ctx->stream;
1493 if (pipe_ctx->surface != surface)
1496 resource_build_scaling_params(updates[i].surface, pipe_ctx);
/* Debug aid: shrink recout by 2px so a border reveals scaler output. */
1497 if (dc->debug.surface_visual_confirm) {
1498 pipe_ctx->scl_data.recout.height -= 2;
1499 pipe_ctx->scl_data.recout.width -= 2;
/* Gamma / transfer functions are refcounted: release old, retain new. */
1504 if (updates[i].gamma &&
1505 updates[i].gamma != surface->public.gamma_correction) {
1506 if (surface->public.gamma_correction != NULL)
1507 dc_gamma_release(&surface->public.
1510 dc_gamma_retain(updates[i].gamma);
1511 surface->public.gamma_correction =
1515 if (updates[i].in_transfer_func &&
1516 updates[i].in_transfer_func != surface->public.in_transfer_func) {
1517 if (surface->public.in_transfer_func != NULL)
1518 dc_transfer_func_release(
1522 dc_transfer_func_retain(
1523 updates[i].in_transfer_func);
1524 surface->public.in_transfer_func =
1525 updates[i].in_transfer_func;
1528 if (updates[i].out_transfer_func &&
1529 updates[i].out_transfer_func != dc_stream->out_transfer_func) {
1530 if (dc_stream->out_transfer_func != NULL)
1531 dc_transfer_func_release(dc_stream->out_transfer_func);
1532 dc_transfer_func_retain(updates[i].out_transfer_func);
1533 stream->public.out_transfer_func = updates[i].out_transfer_func;
1535 if (updates[i].hdr_static_metadata)
1536 surface->public.hdr_static_ctx =
1537 *(updates[i].hdr_static_metadata);
/* Empty batch means "reset": apply context with no surface. */
1541 if (!surface_count) /* reset */
1542 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
/* Phase 2: program hardware pipe-by-pipe under pipe locks. */
1544 for (i = 0; i < surface_count; i++) {
1545 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1547 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1548 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1549 struct pipe_ctx *cur_pipe_ctx;
1550 bool is_new_pipe_surface = true;
1552 if (pipe_ctx->surface != surface)
/* Lock the pipe before reprogramming unless it is blanked anyway. */
1555 if (update_type != UPDATE_TYPE_FAST &&
1556 !pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1557 core_dc->hwss.pipe_control_lock(
1560 PIPE_LOCK_CONTROL_GRAPHICS |
1561 PIPE_LOCK_CONTROL_SCL |
1562 PIPE_LOCK_CONTROL_BLENDER |
1563 PIPE_LOCK_CONTROL_MODE,
1567 if (update_type == UPDATE_TYPE_FULL) {
1568 /* only apply for top pipe */
1569 if (!pipe_ctx->top_pipe) {
1570 core_dc->hwss.apply_ctx_for_surface(core_dc,
1572 context_timing_trace(dc, &context->res_ctx);
1576 if (updates[i].flip_addr)
1577 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
/* FAST updates stop after the flip; no transfer-func reprogramming. */
1579 if (update_type == UPDATE_TYPE_FAST)
/* Surface newly assigned to this pipe? Then (re)program transfer funcs. */
1582 cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1583 if (cur_pipe_ctx->surface == pipe_ctx->surface)
1584 is_new_pipe_surface = false;
1586 if (is_new_pipe_surface ||
1587 updates[i].in_transfer_func)
1588 core_dc->hwss.set_input_transfer_func(
1589 pipe_ctx, pipe_ctx->surface);
1591 if (is_new_pipe_surface ||
1592 updates[i].out_transfer_func)
1593 core_dc->hwss.set_output_transfer_func(
1598 if (updates[i].hdr_static_metadata) {
1599 resource_build_info_frame(pipe_ctx);
1600 core_dc->hwss.update_info_frame(pipe_ctx);
1605 if (update_type == UPDATE_TYPE_FAST)
/* Phase 3: unlock pipes in reverse order (bottom pipes first). */
1608 for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
1609 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1611 for (j = 0; j < surface_count; j++) {
1612 if (updates[j].surface == &pipe_ctx->surface->public) {
1613 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1614 core_dc->hwss.pipe_control_lock(
1617 PIPE_LOCK_CONTROL_GRAPHICS |
1618 PIPE_LOCK_CONTROL_SCL |
1619 PIPE_LOCK_CONTROL_BLENDER,
/* FULL update built a new context: retire the old one into scratch. */
1627 if (core_dc->current_context != context) {
1628 resource_validate_ctx_destruct(core_dc->current_context);
1629 core_dc->temp_flip_context = core_dc->current_context;
1631 core_dc->current_context = context;
1635 uint8_t dc_get_current_stream_count(const struct dc *dc)
1637 struct core_dc *core_dc = DC_TO_CORE(dc);
1638 return core_dc->current_context->stream_count;
1641 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1643 struct core_dc *core_dc = DC_TO_CORE(dc);
1644 if (i < core_dc->current_context->stream_count)
1645 return &(core_dc->current_context->streams[i]->public);
1649 const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1651 struct core_dc *core_dc = DC_TO_CORE(dc);
1652 return &core_dc->links[link_index]->public;
1655 const struct graphics_object_id dc_get_link_id_at_index(
1656 struct dc *dc, uint32_t link_index)
1658 struct core_dc *core_dc = DC_TO_CORE(dc);
1659 return core_dc->links[link_index]->link_id;
1662 const struct ddc_service *dc_get_ddc_at_index(
1663 struct dc *dc, uint32_t link_index)
1665 struct core_dc *core_dc = DC_TO_CORE(dc);
1666 return core_dc->links[link_index]->ddc;
1669 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1670 struct dc *dc, uint32_t link_index)
1672 struct core_dc *core_dc = DC_TO_CORE(dc);
1673 return core_dc->links[link_index]->public.irq_source_hpd;
1676 const struct audio **dc_get_audios(struct dc *dc)
1678 struct core_dc *core_dc = DC_TO_CORE(dc);
1679 return (const struct audio **)core_dc->res_pool->audios;
1682 void dc_flip_surface_addrs(
1684 const struct dc_surface *const surfaces[],
1685 struct dc_flip_addrs flip_addrs[],
1688 struct core_dc *core_dc = DC_TO_CORE(dc);
1691 for (i = 0; i < count; i++) {
1692 struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
1694 surface->public.address = flip_addrs[i].address;
1695 surface->public.flip_immediate = flip_addrs[i].flip_immediate;
1697 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1698 struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1700 if (pipe_ctx->surface != surface)
1703 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1708 enum dc_irq_source dc_interrupt_to_irq_source(
1713 struct core_dc *core_dc = DC_TO_CORE(dc);
1714 return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1717 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1719 struct core_dc *core_dc = DC_TO_CORE(dc);
1720 dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1723 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1725 struct core_dc *core_dc = DC_TO_CORE(dc);
1726 dal_irq_service_ack(core_dc->res_pool->irqs, src);
1729 void dc_set_power_state(
1731 enum dc_acpi_cm_power_state power_state,
1732 enum dc_video_power_state video_power_state)
1734 struct core_dc *core_dc = DC_TO_CORE(dc);
1736 core_dc->previous_power_state = core_dc->current_power_state;
1737 core_dc->current_power_state = video_power_state;
1739 switch (power_state) {
1740 case DC_ACPI_CM_POWER_STATE_D0:
1741 core_dc->hwss.init_hw(core_dc);
1744 /* NULL means "reset/release all DC streams" */
1745 dc_commit_streams(dc, NULL, 0);
1747 core_dc->hwss.power_down(core_dc);
1749 /* Zero out the current context so that on resume we start with
1750 * clean state, and dc hw programming optimizations will not
1751 * cause any trouble.
1753 memset(core_dc->current_context, 0,
1754 sizeof(*core_dc->current_context));
1756 core_dc->current_context->res_ctx.pool = core_dc->res_pool;
1763 void dc_resume(const struct dc *dc)
1765 struct core_dc *core_dc = DC_TO_CORE(dc);
1769 for (i = 0; i < core_dc->link_count; i++)
1770 core_link_resume(core_dc->links[i]);
1775 uint32_t link_index,
1780 struct core_dc *core_dc = DC_TO_CORE(dc);
1782 struct core_link *link = core_dc->links[link_index];
1783 enum ddc_result r = dal_ddc_service_read_dpcd_data(
1788 return r == DDC_RESULT_SUCESSFULL;
1793 uint32_t link_index,
1795 const uint8_t *data,
1798 struct core_dc *core_dc = DC_TO_CORE(dc);
1800 struct core_link *link = core_dc->links[link_index];
1802 enum ddc_result r = dal_ddc_service_write_dpcd_data(
1807 return r == DDC_RESULT_SUCESSFULL;
1812 uint32_t link_index,
1813 struct i2c_command *cmd)
1815 struct core_dc *core_dc = DC_TO_CORE(dc);
1817 struct core_link *link = core_dc->links[link_index];
1818 struct ddc_service *ddc = link->ddc;
1820 return dal_i2caux_submit_i2c_command(
1826 static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1828 struct dc_link *dc_link = &core_link->public;
1830 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1831 BREAK_TO_DEBUGGER();
1835 dc_sink_retain(sink);
1837 dc_link->remote_sinks[dc_link->sink_count] = sink;
1838 dc_link->sink_count++;
1843 struct dc_sink *dc_link_add_remote_sink(
1844 const struct dc_link *link,
1845 const uint8_t *edid,
1847 struct dc_sink_init_data *init_data)
1849 struct dc_sink *dc_sink;
1850 enum dc_edid_status edid_status;
1851 struct core_link *core_link = DC_LINK_TO_LINK(link);
1853 if (len > MAX_EDID_BUFFER_SIZE) {
1854 dm_error("Max EDID buffer size breached!\n");
1859 BREAK_TO_DEBUGGER();
1863 if (!init_data->link) {
1864 BREAK_TO_DEBUGGER();
1868 dc_sink = dc_sink_create(init_data);
1873 memmove(dc_sink->dc_edid.raw_edid, edid, len);
1874 dc_sink->dc_edid.length = len;
1876 if (!link_add_remote_sink_helper(
1881 edid_status = dm_helpers_parse_edid_caps(
1884 &dc_sink->edid_caps);
1886 if (edid_status != EDID_OK)
1891 dc_link_remove_remote_sink(link, dc_sink);
1893 dc_sink_release(dc_sink);
1897 void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1899 struct core_link *core_link = DC_LINK_TO_LINK(link);
1900 struct dc_link *dc_link = &core_link->public;
1902 dc_link->local_sink = sink;
1905 dc_link->type = dc_connection_none;
1907 dc_link->type = dc_connection_single;
/*
 * dc_link_remove_remote_sink() - detach @sink from @link's remote sink
 * array, dropping the link's reference and compacting the array.
 *
 * NOTE(review): this function's closing lines run past the end of this
 * extraction and several structural lines (braces, the index increment in
 * the shrink loop) were elided; code below is kept byte-identical to the
 * damaged source (including fused line numbers) — do not treat it as
 * compilable as-is.
 */
1911 void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
1914 struct core_link *core_link = DC_LINK_TO_LINK(link);
1915 struct dc_link *dc_link = &core_link->public;
/* a remove on a link with no sinks indicates a caller bug */
1917 if (!link->sink_count) {
1918 BREAK_TO_DEBUGGER();
1922 for (i = 0; i < dc_link->sink_count; i++) {
1923 if (dc_link->remote_sinks[i] == sink) {
/* drop the reference taken when the sink was added */
1924 dc_sink_release(sink);
1925 dc_link->remote_sinks[i] = NULL;
1927 /* shrink array to remove empty place */
1928 while (i < dc_link->sink_count - 1) {
1929 dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
1933 dc_link->sink_count--;