2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "dm_services.h"
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
38 #include "dce_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
45 #include "link_hwss.h"
46 #include "link_encoder.h"
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
52 /*******************************************************************************
54 ******************************************************************************/
/* Destroy every link owned by the DC instance; NULL slots are skipped. */
55 static void destroy_links(struct core_dc *dc)
59 for (i = 0; i < dc->link_count; i++) {
60 if (NULL != dc->links[i])
/* link_destroy() takes the address of the slot so it can NULL it out. */
61 link_destroy(&dc->links[i]);
/*
 * Create one core_link per physical connector reported by the VBIOS,
 * then append num_virtual_links software-only (virtual) links.
 * Virtual links get a heap-allocated virtual link encoder instead of a
 * real one.  NOTE(review): error-path lines are not visible in this view.
 */
65 static bool create_links(
67 uint32_t num_virtual_links)
71 struct dc_bios *bios = dc->ctx->dc_bios;
75 connectors_num = bios->funcs->get_connectors_number(bios);
/* Refuse configurations that exceed the enum-ID table capacity. */
77 if (connectors_num > ENUM_ID_COUNT) {
79 "DC: Number of connectors %d exceeds maximum of %d!\n",
/* Zero physical AND zero virtual connectors is a hard error. */
85 if (connectors_num == 0 && num_virtual_links == 0) {
86 dm_error("DC: Number of connectors is zero!\n");
90 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
/* Physical links: one per BIOS connector object. */
95 for (i = 0; i < connectors_num; i++) {
96 struct link_init_data link_init_params = {0};
97 struct core_link *link;
99 link_init_params.ctx = dc->ctx;
100 /* next BIOS object table connector */
101 link_init_params.connector_index = i;
102 link_init_params.link_index = dc->link_count;
103 link_init_params.dc = dc;
104 link = link_create(&link_init_params);
107 dc->links[dc->link_count] = link;
/* Virtual links: fabricated connector + virtual encoder, no hardware. */
113 for (i = 0; i < num_virtual_links; i++) {
114 struct core_link *link = dm_alloc(sizeof(*link));
115 struct encoder_init_data enc_init = {0};
124 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
125 link->link_id.type = OBJECT_TYPE_CONNECTOR;
126 link->link_id.id = CONNECTOR_ID_VIRTUAL;
127 link->link_id.enum_id = ENUM_ID_1;
128 link->link_enc = dm_alloc(sizeof(*link->link_enc));
130 enc_init.ctx = dc->ctx;
131 enc_init.channel = CHANNEL_ID_UNKNOWN;
132 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
133 enc_init.transmitter = TRANSMITTER_UNKNOWN;
134 enc_init.connector = link->link_id;
135 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
136 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
137 enc_init.encoder.enum_id = ENUM_ID_1;
138 virtual_link_encoder_construct(link->link_enc, &enc_init);
140 link->public.link_index = dc->link_count;
141 dc->links[dc->link_count] = link;
/*
 * Adjust the variable-refresh (DRR) vmin/vmax range for a stream.
 * Only stream[0] is honored today (see TODO); the matching pipe is found
 * by scanning all pipes for one driving this stream with an active
 * stream encoder.
 */
151 static bool stream_adjust_vmin_vmax(struct dc *dc,
152 const struct dc_stream **stream, int num_streams,
155 /* TODO: Support multiple streams */
156 struct core_dc *core_dc = DC_TO_CORE(dc);
157 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
161 for (i = 0; i < MAX_PIPES; i++) {
162 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
164 if (pipe->stream == core_stream && pipe->stream_enc) {
/* set_drr takes an array of pipe pointers, hence &pipe with count 1. */
165 core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
167 /* build and update the info frame */
168 resource_build_info_frame(pipe);
169 core_dc->hwss.update_info_frame(pipe);
/*
 * Read the current and nominal CRTC vertical position for a stream.
 * Only stream[0] is honored today (see TODO).  Outputs go to *v_pos and
 * *nom_v_pos.
 */
177 static bool stream_get_crtc_position(struct dc *dc,
178 const struct dc_stream **stream, int num_streams,
179 unsigned int *v_pos, unsigned int *nom_v_pos)
181 /* TODO: Support multiple streams */
182 struct core_dc *core_dc = DC_TO_CORE(dc);
183 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
186 struct crtc_position position;
188 for (i = 0; i < MAX_PIPES; i++) {
189 struct pipe_ctx *pipe =
190 &core_dc->current_context->res_ctx.pipe_ctx[i];
192 if (pipe->stream == core_stream && pipe->stream_enc) {
/* get_position takes an array of pipe pointers, hence &pipe, count 1. */
193 core_dc->hwss.get_position(&pipe, 1, &position);
195 *v_pos = position.vertical_count;
196 *nom_v_pos = position.nominal_vcount;
/*
 * Re-program the gamut remap matrix on every pipe currently driving the
 * given stream.
 */
203 static bool set_gamut_remap(struct dc *dc, const struct dc_stream *stream)
205 struct core_dc *core_dc = DC_TO_CORE(dc);
206 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
209 struct pipe_ctx *pipes;
211 for (i = 0; i < MAX_PIPES; i++) {
212 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
215 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
216 core_dc->hwss.program_gamut_remap(pipes);
/*
 * Collect every pipe driving any of the given streams, then hand the
 * whole set to the hardware sequencer's static-screen control in one
 * call.
 */
224 static void set_static_screen_events(struct dc *dc,
225 const struct dc_stream **stream,
227 const struct dc_static_screen_events *events)
229 struct core_dc *core_dc = DC_TO_CORE(dc);
232 struct pipe_ctx *pipes_affected[MAX_PIPES];
233 int num_pipes_affected = 0;
235 for (i = 0; i < num_streams; i++) {
236 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[i]);
238 for (j = 0; j < MAX_PIPES; j++) {
239 if (core_dc->current_context->res_ctx.pipe_ctx[j].stream
241 pipes_affected[num_pipes_affected++] =
242 &core_dc->current_context->res_ctx.pipe_ctx[j];
247 core_dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
/*
 * Apply DP drive (voltage-swing/pre-emphasis) settings to a link.  The
 * public link pointer is matched back to its core_link index first;
 * failure to find it trips ASSERT_CRITICAL.
 */
250 static void set_drive_settings(struct dc *dc,
251 struct link_training_settings *lt_settings,
252 const struct dc_link *link)
254 struct core_dc *core_dc = DC_TO_CORE(dc);
257 for (i = 0; i < core_dc->link_count; i++) {
258 if (&core_dc->links[i]->public == link)
262 if (i >= core_dc->link_count)
263 ASSERT_CRITICAL(false);
265 dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
/*
 * Run DP link training on every link with the given settings.
 * NOTE(review): unlike set_drive_settings() above, this trains ALL
 * links rather than a specific one — confirm that is intentional.
 */
268 static void perform_link_training(struct dc *dc,
269 struct dc_link_settings *link_setting,
270 bool skip_video_pattern)
272 struct core_dc *core_dc = DC_TO_CORE(dc);
275 for (i = 0; i < core_dc->link_count; i++)
276 dc_link_dp_perform_link_training(
277 &core_dc->links[i]->public,
/*
 * Override a link's verified capability (lane count / link rate) and
 * retrain it in DP-test mode with the new settings.
 */
282 static void set_preferred_link_settings(struct dc *dc,
283 struct dc_link_settings *link_setting,
284 const struct dc_link *link)
286 struct core_link *core_link = DC_LINK_TO_CORE(link);
288 core_link->public.verified_link_cap.lane_count =
289 link_setting->lane_count;
290 core_link->public.verified_link_cap.link_rate =
291 link_setting->link_rate;
292 dp_retrain_link_dp_test(core_link, link_setting, false);
/* Thin wrapper: enable hot-plug detection on a DP link. */
295 static void enable_hpd(const struct dc_link *link)
297 dc_link_dp_enable_hpd(link);
/* Thin wrapper: disable hot-plug detection on a DP link. */
300 static void disable_hpd(const struct dc_link *link)
302 dc_link_dp_disable_hpd(link);
/*
 * Thin wrapper: program a DP compliance test pattern (optionally with a
 * caller-supplied custom pattern buffer) on the given link.
 */
306 static void set_test_pattern(
307 const struct dc_link *link,
308 enum dp_test_pattern test_pattern,
309 const struct link_training_settings *p_link_settings,
310 const unsigned char *p_custom_pattern,
311 unsigned int cust_pattern_size)
314 dc_link_dp_set_test_pattern(
/*
 * Select a dithering option for a stream and program the resulting
 * bit-depth-reduction parameters on the OPP of the stream's first pipe.
 * DITHER_OPTION_DEFAULT maps to a spatial dither matching the stream's
 * display color depth; out-of-range options are rejected.
 *
 * Fix: "&params" had been corrupted into the mis-decoded HTML entity
 * "¶ms" in the memset() and opp_program_bit_depth_reduction() calls.
 */
322 void set_dither_option(const struct dc_stream *dc_stream,
323 enum dc_dither_option option)
325 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
326 struct bit_depth_reduction_params params;
327 struct core_link *core_link = DC_LINK_TO_CORE(stream->status.link);
328 struct pipe_ctx *pipes =
329 core_link->dc->current_context->res_ctx.pipe_ctx;
331 memset(&params, 0, sizeof(params));
334 if (option > DITHER_OPTION_MAX)
/* Default: pick a spatial dither depth matching the timing's color depth. */
336 if (option == DITHER_OPTION_DEFAULT) {
337 switch (stream->public.timing.display_color_depth) {
338 case COLOR_DEPTH_666:
339 stream->public.dither_option = DITHER_OPTION_SPATIAL6;
341 case COLOR_DEPTH_888:
342 stream->public.dither_option = DITHER_OPTION_SPATIAL8;
344 case COLOR_DEPTH_101010:
345 stream->public.dither_option = DITHER_OPTION_SPATIAL10;
348 option = DITHER_OPTION_DISABLE;
351 stream->public.dither_option = option;
353 resource_build_bit_depth_reduction_params(stream,
355 stream->bit_depth_params = params;
357 opp_program_bit_depth_reduction(pipes->opp, &params);
/*
 * Populate the public stream/link function tables with the static
 * helpers defined above.  adjust_vmin_vmax is only wired up when the
 * hardware sequencer provides set_drr.
 */
360 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
362 if (core_dc->hwss.set_drr != NULL) {
363 core_dc->public.stream_funcs.adjust_vmin_vmax =
364 stream_adjust_vmin_vmax;
367 core_dc->public.stream_funcs.set_static_screen_events =
368 set_static_screen_events;
370 core_dc->public.stream_funcs.get_crtc_position =
371 stream_get_crtc_position;
373 core_dc->public.stream_funcs.set_gamut_remap =
376 core_dc->public.stream_funcs.set_dither_option =
379 core_dc->public.link_funcs.set_drive_settings =
382 core_dc->public.link_funcs.perform_link_training =
383 perform_link_training;
385 core_dc->public.link_funcs.set_preferred_link_settings =
386 set_preferred_link_settings;
388 core_dc->public.link_funcs.enable_hpd =
391 core_dc->public.link_funcs.disable_hpd =
394 core_dc->public.link_funcs.set_test_pattern =
/*
 * Tear down a core_dc: validation context, resource pool, GPIO service,
 * i2c/aux, BIOS parser (only if we created it), logger, and finally the
 * current_context allocation itself.
 */
398 static void destruct(struct core_dc *dc)
400 dc_resource_validate_ctx_destruct(dc->current_context);
404 dc_destroy_resource_pool(dc);
406 if (dc->ctx->gpio_service)
407 dal_gpio_service_destroy(&dc->ctx->gpio_service);
410 dal_i2caux_destroy(&dc->ctx->i2caux);
/* Only destroy the BIOS parser if construct() created it (no override). */
412 if (dc->ctx->created_bios)
413 dal_bios_parser_destroy(&dc->ctx->dc_bios);
416 dal_logger_destroy(&dc->ctx->logger);
418 dm_free(dc->current_context);
419 dc->current_context = NULL;
/*
 * Build a core_dc from init parameters: allocate the dc_context and the
 * initial validate_context, create the logger, resolve the ASIC/DCE
 * version, create (or adopt an overridden) BIOS parser, then the
 * i2c/aux service, GPIO service, resource pool, and links.  Unwinds via
 * the goto labels at the bottom on any failure.
 */
425 static bool construct(struct core_dc *dc,
426 const struct dc_init_data *init_params)
428 struct dal_logger *logger;
429 struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
430 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
433 dm_error("%s: failed to create ctx\n", __func__);
437 dc->current_context = dm_alloc(sizeof(*dc->current_context));
439 if (!dc->current_context) {
440 dm_error("%s: failed to create validate ctx\n", __func__);
444 dc_ctx->cgs_device = init_params->cgs_device;
445 dc_ctx->driver_context = init_params->driver;
446 dc_ctx->dc = &dc->public;
447 dc_ctx->asic_id = init_params->asic_id;
/* Create logger first; until it exists errors go through dm_error(). */
450 logger = dal_logger_create(dc_ctx);
453 /* can *not* call logger. call base driver 'print error' */
454 dm_error("%s: failed to create Logger!\n", __func__);
457 dc_ctx->logger = logger;
459 dc->ctx->dce_environment = init_params->dce_environment;
461 dc_version = resource_parse_asic_id(init_params->asic_id);
462 dc->ctx->dce_version = dc_version;
464 /* Resource should construct all asic specific resources.
465 * This should be the only place where we need to parse the asic id
467 if (init_params->vbios_override)
468 dc_ctx->dc_bios = init_params->vbios_override;
470 /* Create BIOS parser */
471 struct bp_init_data bp_init_data;
473 bp_init_data.ctx = dc_ctx;
474 bp_init_data.bios = init_params->asic_id.atombios_base_address;
476 dc_ctx->dc_bios = dal_bios_parser_create(
477 &bp_init_data, dc_version);
479 if (!dc_ctx->dc_bios) {
480 ASSERT_CRITICAL(false);
/* Remember we own the parser so destruct() knows to free it. */
484 dc_ctx->created_bios = true;
488 dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
490 if (!dc_ctx->i2caux) {
491 ASSERT_CRITICAL(false);
492 goto failed_to_create_i2caux;
495 /* Create GPIO service */
496 dc_ctx->gpio_service = dal_gpio_service_create(
498 dc_ctx->dce_environment,
501 if (!dc_ctx->gpio_service) {
502 ASSERT_CRITICAL(false);
506 dc->res_pool = dc_create_resource_pool(
508 init_params->num_virtual_links,
510 init_params->asic_id);
512 goto create_resource_fail;
514 if (!create_links(dc, init_params->num_virtual_links))
515 goto create_links_fail;
517 allocate_dc_stream_funcs(dc);
521 /**** error handling here ****/
523 create_resource_fail:
525 failed_to_create_i2caux:
/*
 * Program the pixel duration (in picoseconds, derived from the pixel
 * clock in kHz) into both DPGV pipe arbitration control registers, then
 * set fixed arbitration/repeater values.
 * NOTE(review): in upstream dc.c this routine appears inside a large
 * explanatory comment block (example code), not as compiled code —
 * confirm against the full file before treating it as live.
 */
535 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
537 fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
538 unsigned int pixDurationInPico = round(pixel_duration);
540 DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
542 arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
543 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
544 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
546 arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
547 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
548 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
550 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
551 WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
553 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
554 WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
558 /*******************************************************************************
560 ******************************************************************************/
/*
 * Public constructor: allocate and construct a core_dc, initialize the
 * hardware, and derive the public capability caps (max streams, links,
 * audios) from the resource pool.  Returns the embedded public struct dc.
 */
562 struct dc *dc_create(const struct dc_init_data *init_params)
564 struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
565 unsigned int full_pipe_count;
570 if (false == construct(core_dc, init_params))
573 /*TODO: separate HW and SW initialization*/
574 core_dc->hwss.init_hw(core_dc);
576 full_pipe_count = core_dc->res_pool->pipe_count;
/* Reserve the underlay pipe, if any, so it is not counted as a stream pipe. */
577 if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
579 core_dc->public.caps.max_streams = min(
581 core_dc->res_pool->stream_enc_count);
583 core_dc->public.caps.max_links = core_dc->link_count;
584 core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
586 core_dc->public.config = init_params->flags;
588 dm_logger_write(core_dc->ctx->logger, LOG_DC,
589 "Display Core initialized\n");
592 /* TODO: missing feature to be enabled */
593 core_dc->public.debug.disable_dfs_bypass = true;
595 return &core_dc->public;
/* Public destructor: recover the core_dc from the public handle and tear it down. */
604 void dc_destroy(struct dc **dc)
606 struct core_dc *core_dc = DC_TO_CORE(*dc);
/*
 * Decide whether a full re-validation is needed for a proposed stream
 * set, by comparing it against the current context: stream count, per-
 * stream surface counts, stream timing, and each surface's properties.
 * Surfaces are compared via memcmp after copying the fields that are
 * allowed to differ (clip_rect, dst_rect x/y) from the new set.
 */
612 static bool is_validation_required(
613 const struct core_dc *dc,
614 const struct dc_validation_set set[],
617 const struct validate_context *context = dc->current_context;
620 if (context->stream_count != set_count)
623 for (i = 0; i < set_count; i++) {
625 if (set[i].surface_count != context->stream_status[i].surface_count)
627 if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
630 for (j = 0; j < set[i].surface_count; j++) {
631 struct dc_surface temp_surf;
/* Zero first so padding bytes compare equal in the memcmp below. */
632 memset(&temp_surf, 0, sizeof(temp_surf));
634 temp_surf = *context->stream_status[i].surfaces[j];
635 temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
636 temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
637 temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
639 if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
/*
 * Return a validate_context for the given stream set: if nothing
 * changed, copy-construct from the current context; otherwise run full
 * validation.  On validation failure the context is destructed and a
 * warning is logged.
 */
647 struct validate_context *dc_get_validate_context(
649 const struct dc_validation_set set[],
652 struct core_dc *core_dc = DC_TO_CORE(dc);
653 enum dc_status result = DC_ERROR_UNEXPECTED;
654 struct validate_context *context;
656 context = dm_alloc(sizeof(struct validate_context));
658 goto context_alloc_fail;
/* Fast path: reuse the current context when the set is unchanged. */
660 if (!is_validation_required(core_dc, set, set_count)) {
661 dc_resource_validate_ctx_copy_construct(core_dc->current_context, context);
665 result = core_dc->res_pool->funcs->validate_with_context(
666 core_dc, set, set_count, context, core_dc->current_context);
669 if (result != DC_OK) {
670 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
671 "%s:resource validation failed, dc_status:%d\n",
675 dc_resource_validate_ctx_destruct(context);
/*
 * Validate a stream set against a fresh context (no old context passed,
 * unlike dc_get_validate_context).  Returns true iff validation
 * succeeds; the temporary context is always destructed.
 */
684 bool dc_validate_resources(
686 const struct dc_validation_set set[],
689 struct core_dc *core_dc = DC_TO_CORE(dc);
690 enum dc_status result = DC_ERROR_UNEXPECTED;
691 struct validate_context *context;
693 context = dm_alloc(sizeof(struct validate_context));
695 goto context_alloc_fail;
697 result = core_dc->res_pool->funcs->validate_with_context(
698 core_dc, set, set_count, context, NULL);
701 if (result != DC_OK) {
702 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
703 "%s:resource validation failed, dc_status:%d\n",
708 dc_resource_validate_ctx_destruct(context);
712 return result == DC_OK;
/*
 * Check whether a single stream can be guaranteed by the resource pool
 * (worst-case validation).  Returns true iff the pool reports DC_OK.
 */
715 bool dc_validate_guaranteed(
717 const struct dc_stream *stream)
719 struct core_dc *core_dc = DC_TO_CORE(dc);
720 enum dc_status result = DC_ERROR_UNEXPECTED;
721 struct validate_context *context;
723 context = dm_alloc(sizeof(struct validate_context));
725 goto context_alloc_fail;
727 result = core_dc->res_pool->funcs->validate_guaranteed(
728 core_dc, stream, context);
730 dc_resource_validate_ctx_destruct(context);
734 if (result != DC_OK) {
735 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
736 "%s:guaranteed validation failed, dc_status:%d\n",
741 return (result == DC_OK);
/*
 * Group pipes whose streams have synchronizable timings and enable
 * timing synchronization per group.  Algorithm:
 *  1. Collect all top-level pipes with a stream into unsynced_pipes[].
 *  2. For each remaining pipe, pull out every other pipe with
 *     synchronizable timing into pipe_set[].
 *  3. Promote the first unblanked pipe to master (slot 0), drop other
 *     unblanked pipes (already synced), then enable sync on groups > 1.
 */
744 static void program_timing_sync(
745 struct core_dc *core_dc,
746 struct validate_context *ctx)
750 int pipe_count = core_dc->res_pool->pipe_count;
751 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
753 for (i = 0; i < pipe_count; i++) {
/* Skip pipes without a stream and bottom pipes of an MPO chain. */
754 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
757 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
760 for (i = 0; i < pipe_count; i++) {
762 struct pipe_ctx *pipe_set[MAX_PIPES];
764 if (!unsynced_pipes[i])
767 pipe_set[0] = unsynced_pipes[i];
768 unsynced_pipes[i] = NULL;
770 /* Add tg to the set, search rest of the tg's for ones with
771 * same timing, add all tgs with same timing to the group
773 for (j = i + 1; j < pipe_count; j++) {
774 if (!unsynced_pipes[j])
777 if (resource_are_streams_timing_synchronizable(
778 unsynced_pipes[j]->stream,
779 pipe_set[0]->stream)) {
780 pipe_set[group_size] = unsynced_pipes[j];
781 unsynced_pipes[j] = NULL;
786 /* set first unblanked pipe as master */
787 for (j = 0; j < group_size; j++) {
788 struct pipe_ctx *temp;
790 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
795 pipe_set[0] = pipe_set[j];
801 /* remove any other unblanked pipes as they have already been synced */
802 for (j = j + 1; j < group_size; j++) {
803 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
805 pipe_set[j] = pipe_set[group_size];
810 if (group_size > 1) {
811 core_dc->hwss.enable_timing_synchronization(
812 core_dc, group_index, group_size, pipe_set);
/*
 * Return whether the proposed stream list differs from the current
 * context: compares count, then each public stream pointer positionally.
 */
818 static bool streams_changed(
820 const struct dc_stream *streams[],
821 uint8_t stream_count)
825 if (stream_count != dc->current_context->stream_count)
828 for (i = 0; i < dc->current_context->stream_count; i++) {
829 if (&dc->current_context->streams[i]->public != streams[i])
/*
 * For each pipe driving one of the given streams, invoke the hardware
 * sequencer's stereo setup (if provided).  Pipes come from the passed-in
 * context when non-NULL, otherwise from the current context.
 */
836 bool dc_enable_stereo(
838 struct validate_context *context,
839 const struct dc_stream *streams[],
840 uint8_t stream_count)
844 struct pipe_ctx *pipe;
845 struct core_dc *core_dc = DC_TO_CORE(dc);
846 for (i = 0; i < MAX_PIPES; i++) {
848 pipe = &context->res_ctx.pipe_ctx[i];
850 pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
851 for (j = 0 ; pipe && j < stream_count; j++) {
852 if (streams[j] && streams[j] == &pipe->stream->public &&
853 core_dc->hwss.setup_stereo)
854 core_dc->hwss.setup_stereo(pipe, core_dc);
/*
 * Commit a new set of streams to hardware:
 *  1. Skip entirely if nothing changed.
 *  2. Build a validation set carrying each stream's current surfaces.
 *  3. Validate into a new context (destructed on failure).
 *  4. Enable accelerated mode if needed, apply the context to hardware,
 *     program timing sync, apply per-surface context and stereo, log
 *     the committed modes.
 *  5. Swap the new context in as current_context, freeing the old one.
 */
860 bool dc_commit_streams(
862 const struct dc_stream *streams[],
863 uint8_t stream_count)
865 struct core_dc *core_dc = DC_TO_CORE(dc);
866 struct dc_bios *dcb = core_dc->ctx->dc_bios;
867 enum dc_status result = DC_ERROR_UNEXPECTED;
868 struct validate_context *context;
869 struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
872 if (false == streams_changed(core_dc, streams, stream_count))
875 dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
876 __func__, stream_count);
878 for (i = 0; i < stream_count; i++) {
879 const struct dc_stream *stream = streams[i];
880 const struct dc_stream_status *status = dc_stream_get_status(stream);
883 dc_stream_log(stream,
884 core_dc->ctx->logger,
887 set[i].stream = stream;
/* Carry over the surfaces already attached to this stream. */
890 set[i].surface_count = status->surface_count;
891 for (j = 0; j < status->surface_count; j++)
892 set[i].surfaces[j] = status->surfaces[j];
897 context = dm_alloc(sizeof(struct validate_context));
899 goto context_alloc_fail;
901 result = core_dc->res_pool->funcs->validate_with_context(
902 core_dc, set, stream_count, context, core_dc->current_context);
903 if (result != DC_OK){
904 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
905 "%s: Context validation failed! dc_status:%d\n",
909 dc_resource_validate_ctx_destruct(context);
913 if (!dcb->funcs->is_accelerated_mode(dcb)) {
914 core_dc->hwss.enable_accelerated_mode(core_dc);
917 if (result == DC_OK) {
918 result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
921 program_timing_sync(core_dc, context);
923 for (i = 0; i < context->stream_count; i++) {
924 const struct core_sink *sink = context->streams[i]->sink;
926 for (j = 0; j < context->stream_status[i].surface_count; j++) {
927 struct core_surface *surface =
928 DC_SURFACE_TO_CORE(context->stream_status[i].surfaces[j]);
930 core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
931 dc_enable_stereo(dc, context, streams, stream_count);
934 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
935 context->streams[i]->public.timing.h_addressable,
936 context->streams[i]->public.timing.v_addressable,
937 context->streams[i]->public.timing.h_total,
938 context->streams[i]->public.timing.v_total,
939 context->streams[i]->public.timing.pix_clk_khz);
/* Retire the old context and promote the freshly-applied one. */
942 dc_resource_validate_ctx_destruct(core_dc->current_context);
943 dm_free(core_dc->current_context);
945 core_dc->current_context = context;
947 return (result == DC_OK);
953 return (result == DC_OK);
/*
 * Post-flip cleanup: power down front ends for pipes that no longer
 * have a stream or surface, then re-program bandwidth for the current
 * context.  The DCN1.0 path passes a conditional third argument as a
 * temporary workaround (see inline comment).
 */
956 bool dc_post_update_surfaces_to_stream(struct dc *dc)
959 struct core_dc *core_dc = DC_TO_CORE(dc);
960 struct validate_context *context = core_dc->current_context;
962 post_surface_trace(dc);
964 for (i = 0; i < core_dc->res_pool->pipe_count; i++)
965 if (context->res_ctx.pipe_ctx[i].stream == NULL
966 || context->res_ctx.pipe_ctx[i].surface == NULL) {
967 context->res_ctx.pipe_ctx[i].pipe_idx = i;
968 core_dc->hwss.power_down_front_end(
969 core_dc, &context->res_ctx.pipe_ctx[i]);
972 /* 3rd param should be true, temp w/a for RV*/
973 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
974 core_dc->hwss.set_bandwidth(core_dc, context, core_dc->ctx->dce_version != DCN_VERSION_1_0);
976 core_dc->hwss.set_bandwidth(core_dc, context, true);
/*
 * Convenience wrapper: convert a flat list of surfaces into full
 * dc_surface_update records (flip address, plane info, scaling info)
 * plus a stream update carrying src/dst, then route everything through
 * dc_update_surfaces_and_stream() and finish with the post-update pass.
 * The stream_update struct is heap-allocated and freed before return.
 */
981 bool dc_commit_surfaces_to_stream(
983 const struct dc_surface **new_surfaces,
984 uint8_t new_surface_count,
985 const struct dc_stream *dc_stream)
987 struct dc_surface_update updates[MAX_SURFACES];
988 struct dc_flip_addrs flip_addr[MAX_SURFACES];
989 struct dc_plane_info plane_info[MAX_SURFACES];
990 struct dc_scaling_info scaling_info[MAX_SURFACES];
993 struct dc_stream_update *stream_update =
994 dm_alloc(sizeof(struct dc_stream_update));
996 if (!stream_update) {
1001 memset(updates, 0, sizeof(updates));
1002 memset(flip_addr, 0, sizeof(flip_addr));
1003 memset(plane_info, 0, sizeof(plane_info));
1004 memset(scaling_info, 0, sizeof(scaling_info));
1006 stream_update->src = dc_stream->src;
1007 stream_update->dst = dc_stream->dst;
/* Expand each surface into its three update sub-records. */
1009 for (i = 0; i < new_surface_count; i++) {
1010 updates[i].surface = new_surfaces[i];
1012 (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1013 flip_addr[i].address = new_surfaces[i]->address;
1014 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1015 plane_info[i].color_space = new_surfaces[i]->color_space;
1016 plane_info[i].format = new_surfaces[i]->format;
1017 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1018 plane_info[i].rotation = new_surfaces[i]->rotation;
1019 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1020 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1021 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1022 plane_info[i].visible = new_surfaces[i]->visible;
1023 plane_info[i].per_pixel_alpha = new_surfaces[i]->per_pixel_alpha;
1024 plane_info[i].dcc = new_surfaces[i]->dcc;
1025 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1026 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1027 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1028 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1030 updates[i].flip_addr = &flip_addr[i];
1031 updates[i].plane_info = &plane_info[i];
1032 updates[i].scaling_info = &scaling_info[i];
1035 dc_update_surfaces_and_stream(
1039 dc_stream, stream_update);
1041 ret = dc_post_update_surfaces_to_stream(dc);
1043 dm_free(stream_update);
/* Return whether any pipe in the context currently carries this surface. */
1047 static bool is_surface_in_context(
1048 const struct validate_context *context,
1049 const struct dc_surface *surface)
1053 for (j = 0; j < MAX_PIPES; j++) {
1054 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1056 if (surface == &pipe_ctx->surface->public) {
/*
 * Map a surface pixel format to its bits-per-pixel class.  Unknown
 * formats trip ASSERT_CRITICAL.  Used to detect bpp-changing flips,
 * which require a medium (not fast) update.
 */
1064 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
1067 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
1068 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
1070 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
1071 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
1072 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
1073 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
1075 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
1076 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
1077 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
1078 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
1080 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1081 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1082 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1085 ASSERT_CRITICAL(false);
/*
 * Classify a plane-info update as FAST (no plane_info), FULL (any
 * full-update field differs or the bpp class changes), or MED.  Works
 * by copying the full-update fields from the current surface into a
 * temp, then memcmp'ing against the requested plane_info — any
 * remaining difference must be in a full-update field.
 */
1090 static enum surface_update_type get_plane_info_update_type(
1091 const struct dc_surface_update *u,
1094 struct dc_plane_info temp_plane_info;
1095 memset(&temp_plane_info, 0, sizeof(temp_plane_info));
1098 return UPDATE_TYPE_FAST;
1100 temp_plane_info = *u->plane_info;
1102 /* Copy all parameters that will cause a full update
1103 * from current surface, the rest of the parameters
1104 * from provided plane configuration.
1105 * Perform memory compare and special validation
1106 * for those that can cause fast/medium updates
1109 /* Full update parameters */
1110 temp_plane_info.color_space = u->surface->color_space;
1111 temp_plane_info.dcc = u->surface->dcc;
1112 temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
1113 temp_plane_info.plane_size = u->surface->plane_size;
1114 temp_plane_info.rotation = u->surface->rotation;
1115 temp_plane_info.stereo_format = u->surface->stereo_format;
1116 temp_plane_info.tiling_info = u->surface->tiling_info;
/* Visibility of the primary plane (index 0) forces a full update. */
1118 if (surface_index == 0)
1119 temp_plane_info.visible = u->plane_info->visible;
1121 temp_plane_info.visible = u->surface->visible;
1123 if (memcmp(u->plane_info, &temp_plane_info,
1124 sizeof(struct dc_plane_info)) != 0)
1125 return UPDATE_TYPE_FULL;
1127 if (pixel_format_to_bpp(u->plane_info->format) !=
1128 pixel_format_to_bpp(u->surface->format)) {
1129 return UPDATE_TYPE_FULL;
1131 return UPDATE_TYPE_MED;
/*
 * Classify a scaling-info update: FAST when absent, FULL when any rect
 * width/height changes (re-validation needed), MED when only rect
 * positions move, FAST when identical.
 */
1135 static enum surface_update_type get_scaling_info_update_type(
1136 const struct dc_surface_update *u)
1138 if (!u->scaling_info)
1139 return UPDATE_TYPE_FAST;
1141 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
1142 || u->scaling_info->src_rect.height != u->surface->src_rect.height
1143 || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
1144 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
1145 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
1146 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
1147 return UPDATE_TYPE_FULL;
1149 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
1150 || u->scaling_info->src_rect.y != u->surface->src_rect.y
1151 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
1152 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
1153 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
1154 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
1155 return UPDATE_TYPE_MED;
1157 return UPDATE_TYPE_FAST;
/*
 * Determine the overall update type for one surface update: FULL if the
 * surface is not in the current context, otherwise the max of the
 * plane-info and scaling-info classifications, bumped to at least MED
 * when a transfer function or HDR static metadata update is present.
 */
1160 static enum surface_update_type det_surface_update(
1161 const struct core_dc *dc,
1162 const struct dc_surface_update *u,
1165 const struct validate_context *context = dc->current_context;
1166 enum surface_update_type type = UPDATE_TYPE_FAST;
1167 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1169 if (!is_surface_in_context(context, u->surface))
1170 return UPDATE_TYPE_FULL;
1172 type = get_plane_info_update_type(u, surface_index);
1173 if (overall_type < type)
1174 overall_type = type;
1176 type = get_scaling_info_update_type(u);
1177 if (overall_type < type)
1178 overall_type = type;
1180 if (u->in_transfer_func ||
1181 u->hdr_static_metadata) {
1182 if (overall_type < UPDATE_TYPE_MED)
1183 overall_type = UPDATE_TYPE_MED;
1186 return overall_type;
/*
 * Public classifier for a batch of surface updates on a stream: FULL
 * when the stream status is missing or the surface count changed,
 * otherwise the maximum of the per-surface classifications (early-exit
 * once FULL is reached).
 */
1189 enum surface_update_type dc_check_update_surfaces_for_stream(
1191 struct dc_surface_update *updates,
1193 struct dc_stream_update *stream_update,
1194 const struct dc_stream_status *stream_status)
1196 struct core_dc *core_dc = DC_TO_CORE(dc);
1198 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1200 if (stream_status == NULL || stream_status->surface_count != surface_count)
1201 return UPDATE_TYPE_FULL;
1204 return UPDATE_TYPE_FULL;
1206 for (i = 0 ; i < surface_count; i++) {
1207 enum surface_update_type type =
1208 det_surface_update(core_dc, &updates[i], i);
1210 if (type == UPDATE_TYPE_FULL)
1213 if (overall_type < type)
1214 overall_type = type;
1217 return overall_type;
/* Legacy entry point: forwards to dc_update_surfaces_and_stream() with no stream update. */
1220 void dc_update_surfaces_for_stream(struct dc *dc,
1221 struct dc_surface_update *surface_updates, int surface_count,
1222 const struct dc_stream *dc_stream)
1224 dc_update_surfaces_and_stream(dc, surface_updates, surface_count,
/* Minimum update type at which surface updates are traced (default: FULL only). */
1228 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1230 void dc_update_surfaces_and_stream(struct dc *dc,
1231 struct dc_surface_update *srf_updates, int surface_count,
1232 const struct dc_stream *dc_stream,
1233 struct dc_stream_update *stream_update)
1235 struct core_dc *core_dc = DC_TO_CORE(dc);
1236 struct validate_context *context;
1238 enum surface_update_type update_type;
1239 const struct dc_stream_status *stream_status;
1240 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1242 stream_status = dc_stream_get_status(dc_stream);
1243 ASSERT(stream_status);
1245 return; /* Cannot commit surface to stream that is not committed */
1247 context = core_dc->current_context;
1249 /* update current stream with the new updates */
1250 if (stream_update) {
1251 if ((stream_update->src.height != 0) &&
1252 (stream_update->src.width != 0))
1253 stream->public.src = stream_update->src;
1255 if ((stream_update->dst.height != 0) &&
1256 (stream_update->dst.width != 0))
1257 stream->public.dst = stream_update->dst;
1259 if (stream_update->out_transfer_func &&
1260 stream_update->out_transfer_func !=
1261 dc_stream->out_transfer_func) {
1262 if (dc_stream->out_transfer_func != NULL)
1263 dc_transfer_func_release(dc_stream->out_transfer_func);
1264 dc_transfer_func_retain(stream_update->out_transfer_func);
1265 stream->public.out_transfer_func =
1266 stream_update->out_transfer_func;
1270 /* do not perform surface update if surface has invalid dimensions
1271 * (all zero) and no scaling_info is provided
1273 if (surface_count > 0 &&
1274 srf_updates->surface->src_rect.width == 0 &&
1275 srf_updates->surface->src_rect.height == 0 &&
1276 srf_updates->surface->dst_rect.width == 0 &&
1277 srf_updates->surface->dst_rect.height == 0 &&
1278 !srf_updates->scaling_info) {
1283 update_type = dc_check_update_surfaces_for_stream(
1284 dc, srf_updates, surface_count, stream_update, stream_status);
1286 if (update_type >= update_surface_trace_level)
1287 update_surface_trace(dc, srf_updates, surface_count);
1289 if (update_type >= UPDATE_TYPE_FULL) {
1290 const struct dc_surface *new_surfaces[MAX_SURFACES] = {0};
1292 for (i = 0; i < surface_count; i++)
1293 new_surfaces[i] = srf_updates[i].surface;
1295 /* initialize scratch memory for building context */
1296 context = dm_alloc(sizeof(*context));
1297 dc_resource_validate_ctx_copy_construct(
1298 core_dc->current_context, context);
1300 /* add surface to context */
1301 if (!resource_attach_surfaces_to_context(
1302 new_surfaces, surface_count, dc_stream,
1303 context, core_dc->res_pool)) {
1304 BREAK_TO_DEBUGGER();
1309 /* save update parameters into surface */
1310 for (i = 0; i < surface_count; i++) {
1311 struct core_surface *surface =
1312 DC_SURFACE_TO_CORE(srf_updates[i].surface);
1314 if (srf_updates[i].flip_addr) {
1315 surface->public.address = srf_updates[i].flip_addr->address;
1316 surface->public.flip_immediate =
1317 srf_updates[i].flip_addr->flip_immediate;
1320 if (srf_updates[i].scaling_info) {
1321 surface->public.scaling_quality =
1322 srf_updates[i].scaling_info->scaling_quality;
1323 surface->public.dst_rect =
1324 srf_updates[i].scaling_info->dst_rect;
1325 surface->public.src_rect =
1326 srf_updates[i].scaling_info->src_rect;
1327 surface->public.clip_rect =
1328 srf_updates[i].scaling_info->clip_rect;
1331 if (srf_updates[i].plane_info) {
1332 surface->public.color_space =
1333 srf_updates[i].plane_info->color_space;
1334 surface->public.format =
1335 srf_updates[i].plane_info->format;
1336 surface->public.plane_size =
1337 srf_updates[i].plane_info->plane_size;
1338 surface->public.rotation =
1339 srf_updates[i].plane_info->rotation;
1340 surface->public.horizontal_mirror =
1341 srf_updates[i].plane_info->horizontal_mirror;
1342 surface->public.stereo_format =
1343 srf_updates[i].plane_info->stereo_format;
1344 surface->public.tiling_info =
1345 srf_updates[i].plane_info->tiling_info;
1346 surface->public.visible =
1347 srf_updates[i].plane_info->visible;
1348 surface->public.per_pixel_alpha =
1349 srf_updates[i].plane_info->per_pixel_alpha;
1350 surface->public.dcc =
1351 srf_updates[i].plane_info->dcc;
1354 if (update_type >= UPDATE_TYPE_MED) {
1355 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1356 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1358 if (pipe_ctx->surface != surface)
1361 resource_build_scaling_params(pipe_ctx);
1365 if (srf_updates[i].gamma &&
1366 srf_updates[i].gamma != surface->public.gamma_correction) {
1367 if (surface->public.gamma_correction != NULL)
1368 dc_gamma_release(&surface->public.
1371 dc_gamma_retain(srf_updates[i].gamma);
1372 surface->public.gamma_correction =
1373 srf_updates[i].gamma;
1376 if (srf_updates[i].in_transfer_func &&
1377 srf_updates[i].in_transfer_func != surface->public.in_transfer_func) {
1378 if (surface->public.in_transfer_func != NULL)
1379 dc_transfer_func_release(
1383 dc_transfer_func_retain(
1384 srf_updates[i].in_transfer_func);
1385 surface->public.in_transfer_func =
1386 srf_updates[i].in_transfer_func;
1389 if (srf_updates[i].hdr_static_metadata)
1390 surface->public.hdr_static_ctx =
1391 *(srf_updates[i].hdr_static_metadata);
1394 if (update_type == UPDATE_TYPE_FULL) {
1395 if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1396 BREAK_TO_DEBUGGER();
1399 core_dc->hwss.set_bandwidth(core_dc, context, false);
1400 context_clock_trace(dc, context);
1404 if (!surface_count) /* reset */
1405 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
1407 /* Lock pipes for provided surfaces */
1408 for (i = 0; i < surface_count; i++) {
1409 struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);
1411 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1412 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1414 if (pipe_ctx->surface != surface)
1416 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1417 core_dc->hwss.pipe_control_lock(
1425 /* Perform requested Updates */
1426 for (i = 0; i < surface_count; i++) {
1427 struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);
1429 if (update_type >= UPDATE_TYPE_MED) {
1430 core_dc->hwss.apply_ctx_for_surface(
1431 core_dc, surface, context);
1432 context_timing_trace(dc, &context->res_ctx);
1435 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1436 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1437 struct pipe_ctx *cur_pipe_ctx;
1438 bool is_new_pipe_surface = true;
1440 if (pipe_ctx->surface != surface)
1443 if (srf_updates[i].flip_addr)
1444 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1446 if (update_type == UPDATE_TYPE_FAST)
1449 cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1450 if (cur_pipe_ctx->surface == pipe_ctx->surface)
1451 is_new_pipe_surface = false;
1453 if (is_new_pipe_surface ||
1454 srf_updates[i].in_transfer_func)
1455 core_dc->hwss.set_input_transfer_func(
1456 pipe_ctx, pipe_ctx->surface);
1458 if (is_new_pipe_surface ||
1459 (stream_update != NULL &&
1460 stream_update->out_transfer_func !=
1462 core_dc->hwss.set_output_transfer_func(
1463 pipe_ctx, pipe_ctx->stream);
1466 if (srf_updates[i].hdr_static_metadata) {
1467 resource_build_info_frame(pipe_ctx);
1468 core_dc->hwss.update_info_frame(pipe_ctx);
1474 for (i = core_dc->res_pool->pipe_count - 1; i >= 0; i--) {
1475 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1477 for (j = 0; j < surface_count; j++) {
1478 if (srf_updates[j].surface == &pipe_ctx->surface->public) {
1479 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1480 core_dc->hwss.pipe_control_lock(
1490 if (core_dc->current_context != context) {
1491 dc_resource_validate_ctx_destruct(core_dc->current_context);
1492 dm_free(core_dc->current_context);
1494 core_dc->current_context = context;
1499 dc_resource_validate_ctx_destruct(context);
1503 uint8_t dc_get_current_stream_count(const struct dc *dc)
1505 struct core_dc *core_dc = DC_TO_CORE(dc);
1506 return core_dc->current_context->stream_count;
1509 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1511 struct core_dc *core_dc = DC_TO_CORE(dc);
1512 if (i < core_dc->current_context->stream_count)
1513 return &(core_dc->current_context->streams[i]->public);
1517 const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1519 struct core_dc *core_dc = DC_TO_CORE(dc);
1520 return &core_dc->links[link_index]->public;
1523 const struct graphics_object_id dc_get_link_id_at_index(
1524 struct dc *dc, uint32_t link_index)
1526 struct core_dc *core_dc = DC_TO_CORE(dc);
1527 return core_dc->links[link_index]->link_id;
1530 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1531 struct dc *dc, uint32_t link_index)
1533 struct core_dc *core_dc = DC_TO_CORE(dc);
1534 return core_dc->links[link_index]->public.irq_source_hpd;
1537 const struct audio **dc_get_audios(struct dc *dc)
1539 struct core_dc *core_dc = DC_TO_CORE(dc);
1540 return (const struct audio **)core_dc->res_pool->audios;
1543 enum dc_irq_source dc_interrupt_to_irq_source(
1548 struct core_dc *core_dc = DC_TO_CORE(dc);
1549 return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1552 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1554 struct core_dc *core_dc;
1558 core_dc = DC_TO_CORE(dc);
1560 dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1563 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1565 struct core_dc *core_dc = DC_TO_CORE(dc);
1566 dal_irq_service_ack(core_dc->res_pool->irqs, src);
1569 void dc_set_power_state(
1571 enum dc_acpi_cm_power_state power_state)
1573 struct core_dc *core_dc = DC_TO_CORE(dc);
1575 switch (power_state) {
1576 case DC_ACPI_CM_POWER_STATE_D0:
1577 core_dc->hwss.init_hw(core_dc);
1581 core_dc->hwss.power_down(core_dc);
1583 /* Zero out the current context so that on resume we start with
1584 * clean state, and dc hw programming optimizations will not
1585 * cause any trouble.
1587 memset(core_dc->current_context, 0,
1588 sizeof(*core_dc->current_context));
1595 void dc_resume(const struct dc *dc)
1597 struct core_dc *core_dc = DC_TO_CORE(dc);
1601 for (i = 0; i < core_dc->link_count; i++)
1602 core_link_resume(core_dc->links[i]);
/*
 * dc_read_aux_dpcd() - read DPCD data over the AUX channel of the link
 * at @link_index, delegated to dal_ddc_service_read_dpcd_data().
 * Returns true only for DDC_RESULT_SUCESSFULL (spelling matches the
 * ddc_result enum).
 *
 * NOTE(review): several lines were dropped from this excerpt (the
 * remaining parameters, braces, and the helper's argument list) —
 * restore them from version control; do not compile this fragment as-is.
 */
bool dc_read_aux_dpcd(
	uint32_t link_index,
	struct core_dc *core_dc = DC_TO_CORE(dc);
	/* resolve the core link object for this index */
	struct core_link *link = core_dc->links[link_index];
	enum ddc_result r = dal_ddc_service_read_dpcd_data(
	return r == DDC_RESULT_SUCESSFULL;
/*
 * dc_write_aux_dpcd() - write @data to DPCD over the AUX channel of the
 * link at @link_index, delegated to dal_ddc_service_write_dpcd_data().
 * Returns true only for DDC_RESULT_SUCESSFULL.
 *
 * NOTE(review): remaining parameters, braces, and the helper's argument
 * list were elided in this excerpt — restore from version control.
 */
bool dc_write_aux_dpcd(
	uint32_t link_index,
	const uint8_t *data,
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_link *link = core_dc->links[link_index];
	enum ddc_result r = dal_ddc_service_write_dpcd_data(
	return r == DDC_RESULT_SUCESSFULL;
/*
 * dc_read_aux_i2c() - I2C-over-AUX read on the link at @link_index;
 * @mot selects middle-of-transaction behavior. The visible code routes
 * this through the same dal_ddc_service_read_dpcd_data() helper used
 * for DPCD reads. Returns true only for DDC_RESULT_SUCESSFULL.
 *
 * NOTE(review): remaining parameters, braces, and the helper's argument
 * list were elided in this excerpt — restore from version control.
 */
bool dc_read_aux_i2c(
	uint32_t link_index,
	enum i2c_mot_mode mot,
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_link *link = core_dc->links[link_index];
	enum ddc_result r = dal_ddc_service_read_dpcd_data(
	return r == DDC_RESULT_SUCESSFULL;
/*
 * dc_write_aux_i2c() - I2C-over-AUX write of @data on the link at
 * @link_index; @mot selects middle-of-transaction behavior. Routed
 * through dal_ddc_service_write_dpcd_data(). Returns true only for
 * DDC_RESULT_SUCESSFULL.
 *
 * NOTE(review): remaining parameters, braces, and the helper's argument
 * list were elided in this excerpt — restore from version control.
 */
bool dc_write_aux_i2c(
	uint32_t link_index,
	enum i2c_mot_mode mot,
	const uint8_t *data,
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_link *link = core_dc->links[link_index];
	enum ddc_result r = dal_ddc_service_write_dpcd_data(
	return r == DDC_RESULT_SUCESSFULL;
/*
 * dc_query_ddc_data() - combined write-then-read transaction on the
 * DDC channel of the link at @link_index (typical for EDID/monitor
 * queries), delegated to dal_ddc_service_query_ddc_data().
 *
 * NOTE(review): the buffer parameters, the helper's argument list, and
 * the trailing `return result;`/closing brace were elided in this
 * excerpt — restore from version control.
 */
bool dc_query_ddc_data(
	uint32_t link_index,
	uint32_t write_size,
	uint32_t read_size) {
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_link *link = core_dc->links[link_index];
	bool result = dal_ddc_service_query_ddc_data(
/*
 * Fragment of an I2C-command submit helper: resolves the DDC service of
 * the link at @link_index and hands @cmd to
 * dal_i2caux_submit_i2c_command().
 *
 * NOTE(review): the function's name/return-type line is missing from
 * this excerpt (presumably dc_submit_i2c — confirm), as are the
 * remaining argument lines of the call and the closing brace.
 */
	uint32_t link_index,
	struct i2c_command *cmd)
	struct core_dc *core_dc = DC_TO_CORE(dc);
	struct core_link *link = core_dc->links[link_index];
	struct ddc_service *ddc = link->public.ddc;
	return dal_i2caux_submit_i2c_command(
1727 static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1729 struct dc_link *dc_link = &core_link->public;
1731 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1732 BREAK_TO_DEBUGGER();
1736 dc_sink_retain(sink);
1738 dc_link->remote_sinks[dc_link->sink_count] = sink;
1739 dc_link->sink_count++;
/*
 * dc_link_add_remote_sink() - create a sink from a raw EDID blob and
 * attach it to the link's remote-sink list.
 *
 * Flow visible in this excerpt: validate the EDID length against
 * MAX_EDID_BUFFER_SIZE, validate init_data->link, create the sink, copy
 * the EDID bytes into it, register it via link_add_remote_sink_helper(),
 * then parse the EDID caps with dm_helpers_parse_edid_caps(); when
 * parsing fails the sink is removed from the link and the reference is
 * released again.
 *
 * NOTE(review): error-path lines (returns/labels/closing braces) and
 * the `len` parameter line are elided in this excerpt — restore from
 * version control before compiling.
 */
struct dc_sink *dc_link_add_remote_sink(
	const struct dc_link *link,
	const uint8_t *edid,
	struct dc_sink_init_data *init_data)
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;
	struct core_link *core_link = DC_LINK_TO_LINK(link);

	/* reject over-long EDID blobs up front */
	if (len > MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
	BREAK_TO_DEBUGGER();
	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
	dc_sink = dc_sink_create(init_data);
	/* keep a private copy of the caller's EDID bytes */
	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;
	if (!link_add_remote_sink_helper(
	edid_status = dm_helpers_parse_edid_caps(
		&dc_sink->edid_caps);
	if (edid_status != EDID_OK)
	/* failure path: undo registration and drop our reference */
	dc_link_remove_remote_sink(link, dc_sink);
	dc_sink_release(dc_sink);
1798 void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1800 struct core_link *core_link = DC_LINK_TO_LINK(link);
1801 struct dc_link *dc_link = &core_link->public;
1803 dc_link->local_sink = sink;
1806 dc_link->type = dc_connection_none;
1808 dc_link->type = dc_connection_single;
/*
 * dc_link_remove_remote_sink() - find @sink in the link's remote-sink
 * array, release the reference taken when it was added, and compact
 * the array so no hole remains.
 *
 * NOTE(review): some lines (the loop-variable declaration, the shift
 * loop's increment, early returns, and closing braces) are elided in
 * this excerpt — restore from version control before compiling.
 */
void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
	struct core_link *core_link = DC_LINK_TO_LINK(link);
	struct dc_link *dc_link = &core_link->public;

	/* nothing to remove from an empty sink list */
	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();

	for (i = 0; i < dc_link->sink_count; i++) {
		if (dc_link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			dc_link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < dc_link->sink_count - 1) {
				dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];

			dc_link->remote_sinks[i] = NULL;
			dc_link->sink_count--;
1840 bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
1843 struct core_dc *core_dc = DC_TO_CORE(dc);
1844 struct mem_input *mi = NULL;
1846 for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
1847 if (core_dc->res_pool->mis[i] != NULL) {
1848 mi = core_dc->res_pool->mis[i];
1853 dm_error("no mem_input!\n");
1857 if (mi->funcs->mem_input_update_dchub)
1858 mi->funcs->mem_input_update_dchub(mi, dh_data);
1860 ASSERT(mi->funcs->mem_input_update_dchub);