2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "dm_services.h"
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
38 #include "dce_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
45 #include "link_hwss.h"
46 #include "link_encoder.h"
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
52 /*******************************************************************************
54 ******************************************************************************/
55 static void destroy_links(struct core_dc *dc)
/* Tear down every link in dc->links[] that create_links() populated;
 * NULL slots are skipped. */
59 for (i = 0; i < dc->link_count; i++) {
60 if (NULL != dc->links[i])
61 link_destroy(&dc->links[i]);
65 static bool create_links(
67 uint32_t num_virtual_links)
/*
 * Create one core_link per physical connector reported by VBIOS, then
 * append num_virtual_links virtual links (virtual connector + virtual
 * encoder).  Returns false on a fatal setup error.
 */
71 struct dc_bios *bios = dc->ctx->dc_bios;
75 connectors_num = bios->funcs->get_connectors_number(bios);
/* Sanity-check the VBIOS connector count against the enum-id limit. */
77 if (connectors_num > ENUM_ID_COUNT) {
79 "DC: Number of connectors %d exceeds maximum of %d!\n",
/* No physical and no virtual links means nothing to drive — error out. */
85 if (connectors_num == 0 && num_virtual_links == 0) {
86 dm_error("DC: Number of connectors is zero!\n");
90 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
/* Physical links: one per connector index. */
95 for (i = 0; i < connectors_num; i++) {
96 struct link_init_data link_init_params = {0};
97 struct core_link *link;
99 link_init_params.ctx = dc->ctx;
100 link_init_params.connector_index = i;
101 link_init_params.link_index = dc->link_count;
102 link_init_params.dc = dc;
103 link = link_create(&link_init_params);
106 dc->links[dc->link_count] = link;
110 dm_error("DC: failed to create link!\n");
/* Virtual links: hand-built core_link with a virtual link encoder. */
114 for (i = 0; i < num_virtual_links; i++) {
115 struct core_link *link = dm_alloc(sizeof(*link));
116 struct encoder_init_data enc_init = {0};
125 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
126 link->link_id.type = OBJECT_TYPE_CONNECTOR;
127 link->link_id.id = CONNECTOR_ID_VIRTUAL;
128 link->link_id.enum_id = ENUM_ID_1;
129 link->link_enc = dm_alloc(sizeof(*link->link_enc));
131 enc_init.ctx = dc->ctx;
132 enc_init.channel = CHANNEL_ID_UNKNOWN;
133 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
134 enc_init.transmitter = TRANSMITTER_UNKNOWN;
135 enc_init.connector = link->link_id;
136 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
137 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
138 enc_init.encoder.enum_id = ENUM_ID_1;
139 virtual_link_encoder_construct(link->link_enc, &enc_init);
141 link->public.link_index = dc->link_count;
142 dc->links[dc->link_count] = link;
152 static bool stream_adjust_vmin_vmax(struct dc *dc,
153 const struct dc_stream **stream, int num_streams,
/*
 * Apply DRR (variable refresh) vmin/vmax to the pipe driving stream[0]
 * and rebuild/re-send its info frame.  Only the first stream is honored
 * (see TODO below); num_streams beyond 1 is ignored here.
 */
156 /* TODO: Support multiple streams */
157 struct core_dc *core_dc = DC_TO_CORE(dc);
158 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
162 for (i = 0; i < MAX_PIPES; i++) {
163 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
/* Match on stream AND an active stream encoder (pipe actually driving). */
165 if (pipe->stream == core_stream && pipe->stream_enc) {
/* set_drr() takes an array of pipe pointers; &pipe is a 1-element array. */
166 core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
168 /* build and update the info frame */
169 resource_build_info_frame(pipe);
170 core_dc->hwss.update_info_frame(pipe);
178 static bool stream_get_crtc_position(struct dc *dc,
179 const struct dc_stream **stream, int num_streams,
180 unsigned int *v_pos, unsigned int *nom_v_pos)
/*
 * Query the current CRTC vertical position for the pipe driving
 * stream[0].  Writes the live vertical count to *v_pos and the nominal
 * vcount to *nom_v_pos.  Only the first stream is handled (TODO below).
 */
182 /* TODO: Support multiple streams */
183 struct core_dc *core_dc = DC_TO_CORE(dc);
184 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
187 struct crtc_position position;
189 for (i = 0; i < MAX_PIPES; i++) {
190 struct pipe_ctx *pipe =
191 &core_dc->current_context->res_ctx.pipe_ctx[i];
193 if (pipe->stream == core_stream && pipe->stream_enc) {
/* get_position() takes an array of pipe pointers; &pipe is length 1. */
194 core_dc->hwss.get_position(&pipe, 1, &position);
196 *v_pos = position.vertical_count;
197 *nom_v_pos = position.nominal_vcount;
204 static bool set_gamut_remap(struct dc *dc, const struct dc_stream *stream)
/*
 * Re-run set_plane_config() on every pipe in the current context that
 * drives `stream`, so the stream's (presumably updated) gamut remap is
 * re-programmed — exact hw effect lives in the hwss implementation.
 */
206 struct core_dc *core_dc = DC_TO_CORE(dc);
207 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
210 struct pipe_ctx *pipes;
212 for (i = 0; i < MAX_PIPES; i++) {
213 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
216 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
217 core_dc->hwss.set_plane_config(core_dc, pipes,
218 &core_dc->current_context->res_ctx);
226 static void set_static_screen_events(struct dc *dc,
227 const struct dc_stream **stream,
229 const struct dc_static_screen_events *events)
/*
 * Collect every pipe in the current context that drives one of the given
 * streams, then program the static-screen event mask on all of them in a
 * single hwss call.
 */
231 struct core_dc *core_dc = DC_TO_CORE(dc);
234 struct pipe_ctx *pipes_affected[MAX_PIPES];
235 int num_pipes_affected = 0;
237 for (i = 0; i < num_streams; i++) {
238 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[i]);
240 for (j = 0; j < MAX_PIPES; j++) {
241 if (core_dc->current_context->res_ctx.pipe_ctx[j].stream
243 pipes_affected[num_pipes_affected++] =
244 &core_dc->current_context->res_ctx.pipe_ctx[j];
249 core_dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
252 /* This function is not expected to fail, proper implementation of
253 * validation will prevent this from ever being called for unsupported
256 static void stream_update_scaling(
258 const struct dc_stream *dc_stream,
259 const struct rect *src,
260 const struct rect *dst)
/*
 * Update the stream's src/dst scaling rectangles, then recommit the
 * stream's existing surfaces so the new scaling takes effect.  Per the
 * comment above this function, validation is expected to prevent
 * unsupported combinations from reaching this point.
 */
262 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
263 struct core_dc *core_dc = DC_TO_CORE(dc);
264 struct validate_context *cur_ctx = core_dc->current_context;
268 stream->public.src = *src;
271 stream->public.dst = *dst;
/* Find this stream in the current context and recommit its surfaces. */
273 for (i = 0; i < cur_ctx->stream_count; i++) {
274 struct core_stream *cur_stream = cur_ctx->streams[i];
276 if (stream == cur_stream) {
277 struct dc_stream_status *status = &cur_ctx->stream_status[i];
279 if (status->surface_count)
280 if (!dc_commit_surfaces_to_stream(
283 status->surface_count,
284 &cur_stream->public))
285 /* Need to debug validation */
293 static void set_drive_settings(struct dc *dc,
294 struct link_training_settings *lt_settings,
295 const struct dc_link *link)
/*
 * Map the public dc_link back to its core_link index, then apply the DP
 * drive settings to that link.
 */
297 struct core_dc *core_dc = DC_TO_CORE(dc);
300 for (i = 0; i < core_dc->link_count; i++) {
301 if (&core_dc->links[i]->public == link)
305 if (i >= core_dc->link_count)
306 ASSERT_CRITICAL(false);
/* NOTE(review): if the link was not found, execution continues past the
 * assert and links[i] below indexes one past the array — consider an
 * early return on the not-found path. */
308 dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
311 static void perform_link_training(struct dc *dc,
312 struct dc_link_settings *link_setting,
313 bool skip_video_pattern)
/*
 * Run DP link training with the given settings on every link owned by
 * this DC instance.
 */
315 struct core_dc *core_dc = DC_TO_CORE(dc);
318 for (i = 0; i < core_dc->link_count; i++)
319 dc_link_dp_perform_link_training(
320 &core_dc->links[i]->public,
325 static void set_preferred_link_settings(struct dc *dc,
326 struct dc_link_settings *link_setting,
327 const struct dc_link *link)
/*
 * Overwrite the link's verified capability (lane count / link rate) with
 * the caller-preferred settings and retrain the link (test path — the
 * `false` argument is skip_video_pattern; confirm against callee).
 */
329 struct core_link *core_link = DC_LINK_TO_CORE(link);
331 core_link->public.verified_link_cap.lane_count =
332 link_setting->lane_count;
333 core_link->public.verified_link_cap.link_rate =
334 link_setting->link_rate;
335 dp_retrain_link_dp_test(core_link, link_setting, false);
/* Thin wrapper: enable HPD (hot-plug detect) on the given link. */
338 static void enable_hpd(const struct dc_link *link)
340 dc_link_dp_enable_hpd(link);
/* Thin wrapper: disable HPD (hot-plug detect) on the given link. */
343 static void disable_hpd(const struct dc_link *link)
345 dc_link_dp_disable_hpd(link);
349 static void set_test_pattern(
350 const struct dc_link *link,
351 enum dp_test_pattern test_pattern,
352 const struct link_training_settings *p_link_settings,
353 const unsigned char *p_custom_pattern,
354 unsigned int cust_pattern_size)
/* Thin wrapper forwarding a DP test-pattern request (optionally with a
 * custom pattern buffer) to the DP link layer. */
357 dc_link_dp_set_test_pattern(
365 void set_dither_option(const struct dc_stream *dc_stream,
366 enum dc_dither_option option)
368 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
369 struct bit_depth_reduction_params params;
370 struct core_link *core_link = DC_LINK_TO_CORE(stream->status.link);
371 struct pipe_ctx *pipes =
372 core_link->dc->current_context->res_ctx.pipe_ctx;
374 memset(¶ms, 0, sizeof(params));
377 if (option > DITHER_OPTION_MAX)
379 if (option == DITHER_OPTION_DEFAULT) {
380 switch (stream->public.timing.display_color_depth) {
381 case COLOR_DEPTH_666:
382 stream->public.dither_option = DITHER_OPTION_SPATIAL6;
384 case COLOR_DEPTH_888:
385 stream->public.dither_option = DITHER_OPTION_SPATIAL8;
387 case COLOR_DEPTH_101010:
388 stream->public.dither_option = DITHER_OPTION_SPATIAL10;
391 option = DITHER_OPTION_DISABLE;
394 stream->public.dither_option = option;
396 resource_build_bit_depth_reduction_params(stream,
398 stream->bit_depth_params = params;
400 opp_program_bit_depth_reduction(pipes->opp, ¶ms);
403 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
/*
 * Wire the public stream_funcs / link_funcs function-pointer tables to
 * the static implementations above.  adjust_vmin_vmax is only hooked
 * when the hw sequencer provides set_drr.
 */
405 core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
406 if (core_dc->hwss.set_drr != NULL) {
407 core_dc->public.stream_funcs.adjust_vmin_vmax =
408 stream_adjust_vmin_vmax;
411 core_dc->public.stream_funcs.set_static_screen_events =
412 set_static_screen_events;
414 core_dc->public.stream_funcs.get_crtc_position =
415 stream_get_crtc_position;
417 core_dc->public.stream_funcs.set_gamut_remap =
420 core_dc->public.stream_funcs.set_dither_option =
423 core_dc->public.link_funcs.set_drive_settings =
426 core_dc->public.link_funcs.perform_link_training =
427 perform_link_training;
429 core_dc->public.link_funcs.set_preferred_link_settings =
430 set_preferred_link_settings;
432 core_dc->public.link_funcs.enable_hpd =
435 core_dc->public.link_funcs.disable_hpd =
438 core_dc->public.link_funcs.set_test_pattern =
442 static void destruct(struct core_dc *dc)
/*
 * Inverse of construct(): destroy the validate context, resource pool,
 * gpio service, i2caux, (optionally owned) BIOS parser and logger, then
 * free the current context.  BIOS is only destroyed when this module
 * created it (created_bios), not when a vbios_override was supplied.
 */
444 dc_resource_validate_ctx_destruct(dc->current_context);
448 dc_destroy_resource_pool(dc);
450 if (dc->ctx->gpio_service)
451 dal_gpio_service_destroy(&dc->ctx->gpio_service);
454 dal_i2caux_destroy(&dc->ctx->i2caux);
456 if (dc->ctx->created_bios)
457 dal_bios_parser_destroy(&dc->ctx->dc_bios);
460 dal_logger_destroy(&dc->ctx->logger);
462 dm_free(dc->current_context);
463 dc->current_context = NULL;
469 static bool construct(struct core_dc *dc,
470 const struct dc_init_data *init_params)
/*
 * Build a core_dc: allocate the dc_context and scratch validate context,
 * create the logger, BIOS parser (unless a vbios_override is supplied),
 * i2caux, GPIO service, resource pool and links — in that order, with
 * goto-style unwinding labels at the bottom for partial failure.
 * Returns false on any creation failure.
 */
472 struct dal_logger *logger;
473 struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
474 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
477 dm_error("%s: failed to create ctx\n", __func__);
481 dc->current_context = dm_alloc(sizeof(*dc->current_context));
483 if (!dc->current_context) {
484 dm_error("%s: failed to create validate ctx\n", __func__);
/* Copy platform handles from the caller into the context. */
488 dc_ctx->cgs_device = init_params->cgs_device;
489 dc_ctx->driver_context = init_params->driver;
490 dc_ctx->dc = &dc->public;
491 dc_ctx->asic_id = init_params->asic_id;
494 logger = dal_logger_create(dc_ctx);
497 /* can *not* call logger. call base driver 'print error' */
498 dm_error("%s: failed to create Logger!\n", __func__);
501 dc_ctx->logger = logger;
503 dc->ctx->dce_environment = init_params->dce_environment;
/* Derive the DCE hardware version from the ASIC id once, here. */
505 dc_version = resource_parse_asic_id(init_params->asic_id);
506 dc->ctx->dce_version = dc_version;
508 /* Resource should construct all asic specific resources.
509 * This should be the only place where we need to parse the asic id
511 if (init_params->vbios_override)
512 dc_ctx->dc_bios = init_params->vbios_override;
514 /* Create BIOS parser */
515 struct bp_init_data bp_init_data;
517 bp_init_data.ctx = dc_ctx;
518 bp_init_data.bios = init_params->asic_id.atombios_base_address;
520 dc_ctx->dc_bios = dal_bios_parser_create(
521 &bp_init_data, dc_version);
523 if (!dc_ctx->dc_bios) {
524 ASSERT_CRITICAL(false);
/* Remember we own the parser so destruct() knows to free it. */
528 dc_ctx->created_bios = true;
532 dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
534 if (!dc_ctx->i2caux) {
535 ASSERT_CRITICAL(false);
536 goto failed_to_create_i2caux;
539 /* Create GPIO service */
540 dc_ctx->gpio_service = dal_gpio_service_create(
542 dc_ctx->dce_environment,
545 if (!dc_ctx->gpio_service) {
546 ASSERT_CRITICAL(false);
550 dc->res_pool = dc_create_resource_pool(
552 init_params->num_virtual_links,
554 init_params->asic_id);
556 goto create_resource_fail;
558 if (!create_links(dc, init_params->num_virtual_links))
559 goto create_links_fail;
561 allocate_dc_stream_funcs(dc);
565 /**** error handling here ****/
567 create_resource_fail:
569 failed_to_create_i2caux:
579 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
/*
 * Program DPGV pipe-arbitration registers (both DPGV0 and DPGV1) with
 * the pixel duration in picoseconds derived from the pixel clock, plus
 * fixed arbitration/repeater values.
 * NOTE(review): CamelCase naming and raw ReadReg/WriteReg style differ
 * from the rest of this file — presumably bring-up/diagnostic code;
 * confirm whether it belongs here.
 */
581 fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
582 unsigned int pixDurationInPico = round(pixel_duration);
584 DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
586 arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
587 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
588 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
590 arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
591 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
592 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
/* Magic constants below are raw register values — meaning not visible here. */
594 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
595 WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
597 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
598 WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
602 /*******************************************************************************
604 ******************************************************************************/
606 struct dc *dc_create(const struct dc_init_data *init_params)
/*
 * Public entry point: allocate and construct a core_dc, initialize the
 * hardware, and derive the public capability limits (max streams, links,
 * audios).  Returns the embedded public struct dc, or presumably NULL on
 * failure (error path elided from this view).
 */
608 struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
609 unsigned int full_pipe_count;
614 if (false == construct(core_dc, init_params))
617 /*TODO: separate HW and SW initialization*/
618 core_dc->hwss.init_hw(core_dc);
620 full_pipe_count = core_dc->res_pool->pipe_count;
/* An underlay pipe, if present, does not count as a full pipe. */
621 if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
623 core_dc->public.caps.max_streams = min(
625 core_dc->res_pool->stream_enc_count);
627 core_dc->public.caps.max_links = core_dc->link_count;
628 core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
630 core_dc->public.config = init_params->flags;
632 dm_logger_write(core_dc->ctx->logger, LOG_DC,
633 "Display Core initialized\n");
636 /* TODO: missing feature to be enabled */
637 core_dc->public.debug.disable_dfs_bypass = true;
639 return &core_dc->public;
/* Public teardown: recover the core_dc from the public handle and
 * destroy it (body largely elided from this view). */
648 void dc_destroy(struct dc **dc)
650 struct core_dc *core_dc = DC_TO_CORE(*dc);
656 static bool is_validation_required(
657 const struct core_dc *dc,
658 const struct dc_validation_set set[],
/*
 * Decide whether the requested stream/surface set differs enough from
 * the current context to require full revalidation.  Stream count,
 * per-stream surface count, stream timing, and surface contents are
 * compared; clip_rect and dst_rect x/y are copied from the new surface
 * before the memcmp so pure panning/clipping changes alone do not force
 * revalidation.
 */
661 const struct validate_context *context = dc->current_context;
664 if (context->stream_count != set_count)
667 for (i = 0; i < set_count; i++) {
669 if (set[i].surface_count != context->stream_status[i].surface_count)
671 if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
674 for (j = 0; j < set[i].surface_count; j++) {
675 struct dc_surface temp_surf = { 0 };
677 temp_surf = *context->stream_status[i].surfaces[j];
678 temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
679 temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
680 temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
682 if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
690 struct validate_context *dc_get_validate_context(
692 const struct dc_validation_set set[],
/*
 * Produce a validate_context for the given set: if nothing relevant
 * changed, copy-construct from the current context; otherwise run full
 * resource validation.  On validation failure the context is destructed
 * (and presumably NULL is returned — tail elided from this view).
 */
695 struct core_dc *core_dc = DC_TO_CORE(dc);
696 enum dc_status result = DC_ERROR_UNEXPECTED;
697 struct validate_context *context;
699 context = dm_alloc(sizeof(struct validate_context));
701 goto context_alloc_fail;
/* Fast path: reuse the current context when revalidation is unnecessary. */
703 if (!is_validation_required(core_dc, set, set_count)) {
704 dc_resource_validate_ctx_copy_construct(core_dc->current_context, context);
708 result = core_dc->res_pool->funcs->validate_with_context(
709 core_dc, set, set_count, context);
712 if (result != DC_OK) {
713 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
714 "%s:resource validation failed, dc_status:%d\n",
718 dc_resource_validate_ctx_destruct(context);
727 bool dc_validate_resources(
729 const struct dc_validation_set set[],
/* Validate the set by building a throwaway context via
 * dc_get_validate_context() and immediately destructing it; only the
 * success/failure result is kept. */
732 struct validate_context *ctx;
734 ctx = dc_get_validate_context(dc, set, set_count);
736 dc_resource_validate_ctx_destruct(ctx);
744 bool dc_validate_guaranteed(
746 const struct dc_stream *stream)
/*
 * Check whether the pool can guarantee resources for a single stream,
 * using a temporary validate_context that is destructed before return.
 * Returns true iff validation reports DC_OK.
 */
748 struct core_dc *core_dc = DC_TO_CORE(dc);
749 enum dc_status result = DC_ERROR_UNEXPECTED;
750 struct validate_context *context;
752 context = dm_alloc(sizeof(struct validate_context));
754 goto context_alloc_fail;
756 result = core_dc->res_pool->funcs->validate_guaranteed(
757 core_dc, stream, context);
759 dc_resource_validate_ctx_destruct(context);
763 if (result != DC_OK) {
764 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
765 "%s:guaranteed validation failed, dc_status:%d\n",
770 return (result == DC_OK);
773 static void program_timing_sync(
774 struct core_dc *core_dc,
775 struct validate_context *ctx)
/*
 * Group pipes whose stream timings are synchronizable and enable timing
 * synchronization per group.  Top-most pipes only (bottom pipes of a
 * blend chain are skipped).  Within each group the first unblanked pipe
 * is promoted to master (index 0), and other already-unblanked pipes are
 * dropped from the group since they are presumably already in sync.
 */
779 int pipe_count = core_dc->res_pool->pipe_count;
780 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
/* Collect candidate pipes: must drive a stream and not be a bottom pipe. */
782 for (i = 0; i < pipe_count; i++) {
783 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
786 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
789 for (i = 0; i < pipe_count; i++) {
791 struct pipe_ctx *pipe_set[MAX_PIPES];
793 if (!unsynced_pipes[i])
796 pipe_set[0] = unsynced_pipes[i];
797 unsynced_pipes[i] = NULL;
799 /* Add tg to the set, search rest of the tg's for ones with
800 * same timing, add all tgs with same timing to the group
802 for (j = i + 1; j < pipe_count; j++) {
803 if (!unsynced_pipes[j])
806 if (resource_are_streams_timing_synchronizable(
807 unsynced_pipes[j]->stream,
808 pipe_set[0]->stream)) {
809 pipe_set[group_size] = unsynced_pipes[j];
810 unsynced_pipes[j] = NULL;
815 /* set first unblanked pipe as master */
816 for (j = 0; j < group_size; j++) {
817 struct pipe_ctx *temp;
819 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
824 pipe_set[0] = pipe_set[j];
830 /* remove any other unblanked pipes as they have already been synced */
831 for (j = j + 1; j < group_size; j++) {
832 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
/* Swap-remove: replace with last element of the shrinking group. */
834 pipe_set[j] = pipe_set[group_size];
/* Only groups of two or more pipes need hardware synchronization. */
839 if (group_size > 1) {
840 core_dc->hwss.enable_timing_synchronization(
841 core_dc, group_index, group_size, pipe_set);
847 static bool streams_changed(
849 const struct dc_stream *streams[],
850 uint8_t stream_count)
/* Return whether the requested stream list differs from the currently
 * committed one — compared by count and by public-pointer identity in
 * order, so a reordered but identical set counts as changed. */
854 if (stream_count != dc->current_context->stream_count)
857 for (i = 0; i < dc->current_context->stream_count; i++) {
858 if (&dc->current_context->streams[i]->public != streams[i])
865 bool dc_commit_streams(
867 const struct dc_stream *streams[],
868 uint8_t stream_count)
/*
 * Commit a new set of streams to hardware: build a validation set from
 * each stream plus its currently attached surfaces, validate into a new
 * context, apply that context to hardware, program timing sync, apply
 * per-surface context, then swap the new context in as current.  Early
 * no-op when the stream list is unchanged.  Returns true iff everything
 * validated and applied with DC_OK.
 */
870 struct core_dc *core_dc = DC_TO_CORE(dc);
871 struct dc_bios *dcb = core_dc->ctx->dc_bios;
872 enum dc_status result = DC_ERROR_UNEXPECTED;
873 struct validate_context *context;
874 struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
877 if (false == streams_changed(core_dc, streams, stream_count))
880 dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
881 __func__, stream_count);
/* Build the validation set: each stream with its current surfaces. */
883 for (i = 0; i < stream_count; i++) {
884 const struct dc_stream *stream = streams[i];
885 const struct dc_stream_status *status = dc_stream_get_status(stream);
888 dc_stream_log(stream,
889 core_dc->ctx->logger,
892 set[i].stream = stream;
895 set[i].surface_count = status->surface_count;
896 for (j = 0; j < status->surface_count; j++)
897 set[i].surfaces[j] = status->surfaces[j];
902 context = dm_alloc(sizeof(struct validate_context));
904 goto context_alloc_fail;
906 result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, stream_count, context);
907 if (result != DC_OK){
908 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
909 "%s: Context validation failed! dc_status:%d\n",
913 dc_resource_validate_ctx_destruct(context);
/* Leave VBIOS (non-accelerated) mode before touching the hardware. */
917 if (!dcb->funcs->is_accelerated_mode(dcb)) {
918 core_dc->hwss.enable_accelerated_mode(core_dc);
921 if (result == DC_OK) {
922 result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
925 program_timing_sync(core_dc, context);
927 for (i = 0; i < context->stream_count; i++) {
928 const struct core_sink *sink = context->streams[i]->sink;
930 for (j = 0; j < context->stream_status[i].surface_count; j++) {
931 struct core_surface *surface =
932 DC_SURFACE_TO_CORE(context->stream_status[i].surfaces[j]);
934 core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
/* Log the committed mode per sink for debugging. */
937 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
938 context->streams[i]->public.timing.h_addressable,
939 context->streams[i]->public.timing.v_addressable,
940 context->streams[i]->public.timing.h_total,
941 context->streams[i]->public.timing.v_total,
942 context->streams[i]->public.timing.pix_clk_khz);
/* Retire the old context and promote the new one. */
945 dc_resource_validate_ctx_destruct(core_dc->current_context);
946 dm_free(core_dc->current_context);
948 core_dc->current_context = context;
950 return (result == DC_OK);
956 return (result == DC_OK);
959 bool dc_pre_update_surfaces_to_stream(
961 const struct dc_surface *const *new_surfaces,
962 uint8_t new_surface_count,
/* Pre-flip hook paired with dc_post_update_surfaces_to_stream(); body is
 * not visible in this view — appears to be (near) trivial here. */
963 const struct dc_stream *dc_stream)
968 bool dc_post_update_surfaces_to_stream(struct dc *dc)
/*
 * Post-update cleanup: power down front ends of pipes that no longer
 * drive a stream, then let the hw sequencer settle bandwidth/clocks for
 * the (possibly reduced) current context.
 */
971 struct core_dc *core_dc = DC_TO_CORE(dc);
972 struct validate_context *context = core_dc->current_context;
974 post_surface_trace(dc);
976 for (i = 0; i < core_dc->res_pool->pipe_count; i++)
977 if (context->res_ctx.pipe_ctx[i].stream == NULL) {
978 context->res_ctx.pipe_ctx[i].pipe_idx = i;
979 core_dc->hwss.power_down_front_end(
980 core_dc, &context->res_ctx.pipe_ctx[i]);
/* `true` presumably requests a decrease-allowed bandwidth update — confirm. */
983 core_dc->hwss.set_bandwidth(core_dc, context, true);
988 bool dc_commit_surfaces_to_stream(
990 const struct dc_surface **new_surfaces,
991 uint8_t new_surface_count,
992 const struct dc_stream *dc_stream)
/*
 * Convenience wrapper: convert an array of full dc_surface objects into
 * per-surface dc_surface_update records (flip address, plane info,
 * scaling info, gamma) and funnel them through
 * dc_update_surfaces_for_stream(), then run post-update cleanup.
 */
994 struct dc_surface_update updates[MAX_SURFACES];
995 struct dc_flip_addrs flip_addr[MAX_SURFACES];
996 struct dc_plane_info plane_info[MAX_SURFACES];
997 struct dc_scaling_info scaling_info[MAX_SURFACES];
1000 memset(updates, 0, sizeof(updates));
1001 memset(flip_addr, 0, sizeof(flip_addr));
1002 memset(plane_info, 0, sizeof(plane_info));
1003 memset(scaling_info, 0, sizeof(scaling_info));
1005 for (i = 0; i < new_surface_count; i++) {
1006 updates[i].surface = new_surfaces[i];
/* Cast drops const: the update struct wants a mutable gamma pointer. */
1008 (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1009 flip_addr[i].address = new_surfaces[i]->address;
1010 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1011 plane_info[i].color_space = new_surfaces[i]->color_space;
1012 plane_info[i].format = new_surfaces[i]->format;
1013 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1014 plane_info[i].rotation = new_surfaces[i]->rotation;
1015 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1016 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1017 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1018 plane_info[i].visible = new_surfaces[i]->visible;
1019 plane_info[i].dcc = new_surfaces[i]->dcc;
1020 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1021 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1022 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1023 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1025 updates[i].flip_addr = &flip_addr[i];
1026 updates[i].plane_info = &plane_info[i];
1027 updates[i].scaling_info = &scaling_info[i];
1029 dc_update_surfaces_for_stream(dc, updates, new_surface_count, dc_stream);
1031 return dc_post_update_surfaces_to_stream(dc);
1034 static bool is_surface_in_context(
1035 const struct validate_context *context,
1036 const struct dc_surface *surface)
/* Return whether any pipe in the context currently references the given
 * public surface (compared by pointer identity). */
1040 for (j = 0; j < MAX_PIPES; j++) {
1041 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1043 if (surface == &pipe_ctx->surface->public) {
/* Map a surface pixel format to its bits-per-pixel; the per-group return
 * statements (16/32/64) are elided from this view.  Unknown formats hit
 * ASSERT_CRITICAL. */
1051 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
1054 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
1055 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
1057 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
1058 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
1059 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
1060 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
1062 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1063 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1064 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1067 ASSERT_CRITICAL(false);
1072 static enum surface_update_type get_plane_info_update_type(
1073 const struct dc_surface_update *u,
/*
 * Classify a plane_info update as FAST / MED / FULL.  A temp plane_info
 * is populated with the current surface's full-update fields and the
 * update's special-validation fields; any difference from the incoming
 * plane_info then implies FULL.  A format change that keeps the same bpp
 * is MED; a bpp change is FULL.  No plane_info in the update means FAST.
 */
1076 struct dc_plane_info temp_plane_info = { { { { 0 } } } };
1079 return UPDATE_TYPE_FAST;
1081 /* Copy all parameters that will cause a full update
1082 * from current surface, the rest of the parameters
1083 * from provided plane configuration.
1084 * Perform memory compare and special validation
1085 * for those that can cause fast/medium updates
1088 /* Full update parameters */
1089 temp_plane_info.color_space = u->surface->color_space;
1090 temp_plane_info.dcc = u->surface->dcc;
1091 temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
1092 temp_plane_info.plane_size = u->surface->plane_size;
1093 temp_plane_info.rotation = u->surface->rotation;
1094 temp_plane_info.stereo_format = u->surface->stereo_format;
1095 temp_plane_info.tiling_info = u->surface->tiling_info;
1097 /* Special Validation parameters */
1098 temp_plane_info.format = u->plane_info->format;
/* Visibility toggles only matter on the bottom-most surface (index 0). */
1100 if (surface_index == 0)
1101 temp_plane_info.visible = u->plane_info->visible;
1103 temp_plane_info.visible = u->surface->visible;
1105 if (memcmp(u->plane_info, &temp_plane_info,
1106 sizeof(struct dc_plane_info)) != 0)
1107 return UPDATE_TYPE_FULL;
1109 if (pixel_format_to_bpp(u->plane_info->format) !=
1110 pixel_format_to_bpp(u->surface->format)) {
1111 return UPDATE_TYPE_FULL;
1113 return UPDATE_TYPE_MED;
1117 static enum surface_update_type get_scaling_info_update_type(
1118 const struct dc_surface_update *u)
/*
 * Classify a scaling_info update as FAST / MED / FULL.  src/dst rect or
 * scaling-quality changes imply FULL.  A clip_rect change that preserves
 * width and height (pure offset move) is MED; a dimension change is
 * FULL.  No scaling_info in the update means FAST.
 */
1120 struct dc_scaling_info temp_scaling_info = { { 0 } };
1122 if (!u->scaling_info)
1123 return UPDATE_TYPE_FAST;
1125 /* Copy all parameters that will cause a full update
1126 * from current surface, the rest of the parameters
1127 * from provided plane configuration.
1128 * Perform memory compare and special validation
1129 * for those that can cause fast/medium updates
1132 /* Full Update Parameters */
1133 temp_scaling_info.dst_rect = u->surface->dst_rect;
1134 temp_scaling_info.src_rect = u->surface->src_rect;
1135 temp_scaling_info.scaling_quality = u->surface->scaling_quality;
1137 /* Special validation required */
1138 temp_scaling_info.clip_rect = u->scaling_info->clip_rect;
1140 if (memcmp(u->scaling_info, &temp_scaling_info,
1141 sizeof(struct dc_scaling_info)) != 0)
1142 return UPDATE_TYPE_FULL;
1144 /* Check Clip rectangles if not equal
1145 * difference is in offsets == > UPDATE_TYPE_MED
1146 * difference is in dimensions == > UPDATE_TYPE_FULL
1148 if (memcmp(&u->scaling_info->clip_rect,
1149 &u->surface->clip_rect, sizeof(struct rect)) != 0) {
1150 if ((u->scaling_info->clip_rect.height ==
1151 u->surface->clip_rect.height) &&
1152 (u->scaling_info->clip_rect.width ==
1153 u->surface->clip_rect.width)) {
1154 return UPDATE_TYPE_MED;
1156 return UPDATE_TYPE_FULL;
1160 return UPDATE_TYPE_FAST;
1163 static enum surface_update_type det_surface_update(
1164 const struct core_dc *dc,
1165 const struct dc_surface_update *u,
/*
 * Determine the overall update type for one surface: FULL if the surface
 * is not yet in the current context; otherwise the max of the plane-info
 * and scaling-info classifications, raised to at least MED when an input
 * transfer function or HDR static metadata update is present.
 */
1168 const struct validate_context *context = dc->current_context;
1169 enum surface_update_type type = UPDATE_TYPE_FAST;
1170 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1172 if (!is_surface_in_context(context, u->surface))
1173 return UPDATE_TYPE_FULL;
1175 type = get_plane_info_update_type(u, surface_index);
1176 if (overall_type < type)
1177 overall_type = type;
1179 type = get_scaling_info_update_type(u);
1180 if (overall_type < type)
1181 overall_type = type;
1183 if (u->in_transfer_func ||
1184 u->hdr_static_metadata) {
1185 if (overall_type < UPDATE_TYPE_MED)
1186 overall_type = UPDATE_TYPE_MED;
1189 return overall_type;
1192 enum surface_update_type dc_check_update_surfaces_for_stream(
1194 struct dc_surface_update *updates,
1196 struct dc_stream_update *stream_update,
1197 const struct dc_stream_status *stream_status)
/*
 * Public classifier: reduce per-surface update types to one overall
 * type.  A missing/stale stream_status (or a surface-count mismatch)
 * forces FULL, as does any individual FULL (early exit, line elided).
 */
1199 struct core_dc *core_dc = DC_TO_CORE(dc);
1201 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1203 if (stream_status == NULL || stream_status->surface_count != surface_count)
1204 return UPDATE_TYPE_FULL;
1207 return UPDATE_TYPE_FULL;
1209 for (i = 0 ; i < surface_count; i++) {
1210 enum surface_update_type type =
1211 det_surface_update(core_dc, &updates[i], i);
/* FULL dominates — no point classifying the remaining surfaces. */
1213 if (type == UPDATE_TYPE_FULL)
1216 if (overall_type < type)
1217 overall_type = type;
1220 return overall_type;
/* Legacy entry point: forwards to dc_update_surfaces_and_stream() with
 * no stream-level update (argument elided from this view). */
1223 void dc_update_surfaces_for_stream(struct dc *dc,
1224 struct dc_surface_update *surface_updates, int surface_count,
1225 const struct dc_stream *dc_stream)
1227 dc_update_surfaces_and_stream(dc, surface_updates, surface_count,
/* Minimum update type at which update_surface_trace() logging fires;
 * default FULL means only full updates are traced. */
1231 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1233 void dc_update_surfaces_and_stream(struct dc *dc,
1234 struct dc_surface_update *srf_updates, int surface_count,
1235 const struct dc_stream *dc_stream,
1236 struct dc_stream_update *stream_update)
1238 struct core_dc *core_dc = DC_TO_CORE(dc);
1239 struct validate_context *context;
1241 enum surface_update_type update_type;
1242 const struct dc_stream_status *stream_status;
1243 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1245 stream_status = dc_stream_get_status(dc_stream);
1246 ASSERT(stream_status);
1248 return; /* Cannot commit surface to stream that is not committed */
1250 update_type = dc_check_update_surfaces_for_stream(
1251 dc, srf_updates, surface_count, stream_update, stream_status);
1253 if (update_type >= update_surface_trace_level)
1254 update_surface_trace(dc, srf_updates, surface_count);
1256 if (update_type >= UPDATE_TYPE_FULL) {
1257 const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };
1259 for (i = 0; i < surface_count; i++)
1260 new_surfaces[i] = srf_updates[i].surface;
1262 /* initialize scratch memory for building context */
1263 context = dm_alloc(sizeof(*context));
1264 dc_resource_validate_ctx_copy_construct(
1265 core_dc->current_context, context);
1267 /* add surface to context */
1268 if (!resource_attach_surfaces_to_context(
1269 new_surfaces, surface_count, dc_stream,
1270 context, core_dc->res_pool)) {
1271 BREAK_TO_DEBUGGER();
1275 context = core_dc->current_context;
1278 /* update current stream with the new updates */
1279 if (stream_update) {
1280 if ((stream_update->src.height != 0) &&
1281 (stream_update->src.width != 0))
1282 stream->public.src = stream_update->src;
1284 if ((stream_update->dst.height != 0) &&
1285 (stream_update->dst.width != 0))
1286 stream->public.dst = stream_update->dst;
1288 if (stream_update->out_transfer_func &&
1289 stream_update->out_transfer_func !=
1290 dc_stream->out_transfer_func) {
1291 if (stream_update->out_transfer_func->type !=
1293 if (dc_stream->out_transfer_func != NULL)
1294 dc_transfer_func_release
1295 (dc_stream->out_transfer_func);
1296 dc_transfer_func_retain(stream_update->
1298 stream->public.out_transfer_func =
1299 stream_update->out_transfer_func;
1304 /* save update parameters into surface */
1305 for (i = 0; i < surface_count; i++) {
1306 struct core_surface *surface =
1307 DC_SURFACE_TO_CORE(srf_updates[i].surface);
1309 if (srf_updates[i].flip_addr) {
1310 surface->public.address = srf_updates[i].flip_addr->address;
1311 surface->public.flip_immediate =
1312 srf_updates[i].flip_addr->flip_immediate;
1315 if (srf_updates[i].scaling_info) {
1316 surface->public.scaling_quality =
1317 srf_updates[i].scaling_info->scaling_quality;
1318 surface->public.dst_rect =
1319 srf_updates[i].scaling_info->dst_rect;
1320 surface->public.src_rect =
1321 srf_updates[i].scaling_info->src_rect;
1322 surface->public.clip_rect =
1323 srf_updates[i].scaling_info->clip_rect;
1326 if (srf_updates[i].plane_info) {
1327 surface->public.color_space =
1328 srf_updates[i].plane_info->color_space;
1329 surface->public.format =
1330 srf_updates[i].plane_info->format;
1331 surface->public.plane_size =
1332 srf_updates[i].plane_info->plane_size;
1333 surface->public.rotation =
1334 srf_updates[i].plane_info->rotation;
1335 surface->public.horizontal_mirror =
1336 srf_updates[i].plane_info->horizontal_mirror;
1337 surface->public.stereo_format =
1338 srf_updates[i].plane_info->stereo_format;
1339 surface->public.tiling_info =
1340 srf_updates[i].plane_info->tiling_info;
1341 surface->public.visible =
1342 srf_updates[i].plane_info->visible;
1343 surface->public.dcc =
1344 srf_updates[i].plane_info->dcc;
1347 if (update_type >= UPDATE_TYPE_MED) {
1348 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1349 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1351 if (pipe_ctx->surface != surface)
1354 resource_build_scaling_params(pipe_ctx);
1358 if (srf_updates[i].gamma &&
1359 srf_updates[i].gamma != surface->public.gamma_correction) {
1360 if (surface->public.gamma_correction != NULL)
1361 dc_gamma_release(&surface->public.
1364 dc_gamma_retain(srf_updates[i].gamma);
1365 surface->public.gamma_correction =
1366 srf_updates[i].gamma;
1369 if (srf_updates[i].in_transfer_func &&
1370 srf_updates[i].in_transfer_func != surface->public.in_transfer_func) {
1371 if (surface->public.in_transfer_func != NULL)
1372 dc_transfer_func_release(
1376 dc_transfer_func_retain(
1377 srf_updates[i].in_transfer_func);
1378 surface->public.in_transfer_func =
1379 srf_updates[i].in_transfer_func;
1382 if (srf_updates[i].hdr_static_metadata)
1383 surface->public.hdr_static_ctx =
1384 *(srf_updates[i].hdr_static_metadata);
1387 if (update_type == UPDATE_TYPE_FULL) {
1388 if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1389 BREAK_TO_DEBUGGER();
1392 core_dc->hwss.set_bandwidth(core_dc, context, false);
1395 if (!surface_count) /* reset */
1396 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
1398 /* Lock pipes for provided surfaces */
1399 for (i = 0; i < surface_count; i++) {
1400 struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);
1402 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1403 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1405 if (pipe_ctx->surface != surface)
1407 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1408 core_dc->hwss.pipe_control_lock(
1416 /* Perform requested Updates */
1417 for (i = 0; i < surface_count; i++) {
1418 struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);
1420 if (update_type >= UPDATE_TYPE_MED) {
1421 core_dc->hwss.apply_ctx_for_surface(
1422 core_dc, surface, context);
1423 context_timing_trace(dc, &context->res_ctx);
1426 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1427 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1428 struct pipe_ctx *cur_pipe_ctx;
1429 bool is_new_pipe_surface = true;
1431 if (pipe_ctx->surface != surface)
1434 if (srf_updates[i].flip_addr)
1435 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1437 if (update_type == UPDATE_TYPE_FAST)
1440 cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1441 if (cur_pipe_ctx->surface == pipe_ctx->surface)
1442 is_new_pipe_surface = false;
1444 if (is_new_pipe_surface ||
1445 srf_updates[i].in_transfer_func)
1446 core_dc->hwss.set_input_transfer_func(
1447 pipe_ctx, pipe_ctx->surface);
1449 if (is_new_pipe_surface ||
1450 (stream_update != NULL &&
1451 stream_update->out_transfer_func !=
1453 core_dc->hwss.set_output_transfer_func(
1454 pipe_ctx, pipe_ctx->stream);
1457 if (srf_updates[i].hdr_static_metadata) {
1458 resource_build_info_frame(pipe_ctx);
1459 core_dc->hwss.update_info_frame(pipe_ctx);
1465 for (i = core_dc->res_pool->pipe_count - 1; i >= 0; i--) {
1466 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1468 for (j = 0; j < surface_count; j++) {
1469 if (srf_updates[j].surface == &pipe_ctx->surface->public) {
1470 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1471 core_dc->hwss.pipe_control_lock(
1481 if (core_dc->current_context != context) {
1482 dc_resource_validate_ctx_destruct(core_dc->current_context);
1483 dm_free(core_dc->current_context);
1485 core_dc->current_context = context;
1490 dc_resource_validate_ctx_destruct(context);
1494 uint8_t dc_get_current_stream_count(const struct dc *dc)
1496 struct core_dc *core_dc = DC_TO_CORE(dc);
1497 return core_dc->current_context->stream_count;
1500 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1502 struct core_dc *core_dc = DC_TO_CORE(dc);
1503 if (i < core_dc->current_context->stream_count)
1504 return &(core_dc->current_context->streams[i]->public);
1508 const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1510 struct core_dc *core_dc = DC_TO_CORE(dc);
1511 return &core_dc->links[link_index]->public;
1514 const struct graphics_object_id dc_get_link_id_at_index(
1515 struct dc *dc, uint32_t link_index)
1517 struct core_dc *core_dc = DC_TO_CORE(dc);
1518 return core_dc->links[link_index]->link_id;
1521 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1522 struct dc *dc, uint32_t link_index)
1524 struct core_dc *core_dc = DC_TO_CORE(dc);
1525 return core_dc->links[link_index]->public.irq_source_hpd;
1528 const struct audio **dc_get_audios(struct dc *dc)
1530 struct core_dc *core_dc = DC_TO_CORE(dc);
1531 return (const struct audio **)core_dc->res_pool->audios;
1534 void dc_flip_surface_addrs(
1536 const struct dc_surface *const surfaces[],
1537 struct dc_flip_addrs flip_addrs[],
1540 struct core_dc *core_dc = DC_TO_CORE(dc);
1543 for (i = 0; i < count; i++) {
1544 struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
1546 surface->public.address = flip_addrs[i].address;
1547 surface->public.flip_immediate = flip_addrs[i].flip_immediate;
1549 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1550 struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1552 if (pipe_ctx->surface != surface)
1555 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1560 enum dc_irq_source dc_interrupt_to_irq_source(
1565 struct core_dc *core_dc = DC_TO_CORE(dc);
1566 return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1569 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1571 struct core_dc *core_dc = DC_TO_CORE(dc);
1572 dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1575 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1577 struct core_dc *core_dc = DC_TO_CORE(dc);
1578 dal_irq_service_ack(core_dc->res_pool->irqs, src);
1581 void dc_set_power_state(
1583 enum dc_acpi_cm_power_state power_state)
1585 struct core_dc *core_dc = DC_TO_CORE(dc);
1587 switch (power_state) {
1588 case DC_ACPI_CM_POWER_STATE_D0:
1589 core_dc->hwss.init_hw(core_dc);
1593 core_dc->hwss.power_down(core_dc);
1595 /* Zero out the current context so that on resume we start with
1596 * clean state, and dc hw programming optimizations will not
1597 * cause any trouble.
1599 memset(core_dc->current_context, 0,
1600 sizeof(*core_dc->current_context));
1607 void dc_resume(const struct dc *dc)
1609 struct core_dc *core_dc = DC_TO_CORE(dc);
1613 for (i = 0; i < core_dc->link_count; i++)
1614 core_link_resume(core_dc->links[i]);
1617 bool dc_read_aux_dpcd(
1619 uint32_t link_index,
1624 struct core_dc *core_dc = DC_TO_CORE(dc);
1626 struct core_link *link = core_dc->links[link_index];
1627 enum ddc_result r = dal_ddc_service_read_dpcd_data(
1634 return r == DDC_RESULT_SUCESSFULL;
1637 bool dc_write_aux_dpcd(
1639 uint32_t link_index,
1641 const uint8_t *data,
1644 struct core_dc *core_dc = DC_TO_CORE(dc);
1645 struct core_link *link = core_dc->links[link_index];
1647 enum ddc_result r = dal_ddc_service_write_dpcd_data(
1654 return r == DDC_RESULT_SUCESSFULL;
1657 bool dc_read_aux_i2c(
1659 uint32_t link_index,
1660 enum i2c_mot_mode mot,
1665 struct core_dc *core_dc = DC_TO_CORE(dc);
1667 struct core_link *link = core_dc->links[link_index];
1668 enum ddc_result r = dal_ddc_service_read_dpcd_data(
1675 return r == DDC_RESULT_SUCESSFULL;
1678 bool dc_write_aux_i2c(
1680 uint32_t link_index,
1681 enum i2c_mot_mode mot,
1683 const uint8_t *data,
1686 struct core_dc *core_dc = DC_TO_CORE(dc);
1687 struct core_link *link = core_dc->links[link_index];
1689 enum ddc_result r = dal_ddc_service_write_dpcd_data(
1696 return r == DDC_RESULT_SUCESSFULL;
1699 bool dc_query_ddc_data(
1701 uint32_t link_index,
1704 uint32_t write_size,
1706 uint32_t read_size) {
1708 struct core_dc *core_dc = DC_TO_CORE(dc);
1710 struct core_link *link = core_dc->links[link_index];
1712 bool result = dal_ddc_service_query_ddc_data(
1725 uint32_t link_index,
1726 struct i2c_command *cmd)
1728 struct core_dc *core_dc = DC_TO_CORE(dc);
1730 struct core_link *link = core_dc->links[link_index];
1731 struct ddc_service *ddc = link->public.ddc;
1733 return dal_i2caux_submit_i2c_command(
1739 static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1741 struct dc_link *dc_link = &core_link->public;
1743 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1744 BREAK_TO_DEBUGGER();
1748 dc_sink_retain(sink);
1750 dc_link->remote_sinks[dc_link->sink_count] = sink;
1751 dc_link->sink_count++;
1756 struct dc_sink *dc_link_add_remote_sink(
1757 const struct dc_link *link,
1758 const uint8_t *edid,
1760 struct dc_sink_init_data *init_data)
1762 struct dc_sink *dc_sink;
1763 enum dc_edid_status edid_status;
1764 struct core_link *core_link = DC_LINK_TO_LINK(link);
1766 if (len > MAX_EDID_BUFFER_SIZE) {
1767 dm_error("Max EDID buffer size breached!\n");
1772 BREAK_TO_DEBUGGER();
1776 if (!init_data->link) {
1777 BREAK_TO_DEBUGGER();
1781 dc_sink = dc_sink_create(init_data);
1786 memmove(dc_sink->dc_edid.raw_edid, edid, len);
1787 dc_sink->dc_edid.length = len;
1789 if (!link_add_remote_sink_helper(
1794 edid_status = dm_helpers_parse_edid_caps(
1797 &dc_sink->edid_caps);
1799 if (edid_status != EDID_OK)
1804 dc_link_remove_remote_sink(link, dc_sink);
1806 dc_sink_release(dc_sink);
1810 void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1812 struct core_link *core_link = DC_LINK_TO_LINK(link);
1813 struct dc_link *dc_link = &core_link->public;
1815 dc_link->local_sink = sink;
1818 dc_link->type = dc_connection_none;
1820 dc_link->type = dc_connection_single;
1824 void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
1827 struct core_link *core_link = DC_LINK_TO_LINK(link);
1828 struct dc_link *dc_link = &core_link->public;
1830 if (!link->sink_count) {
1831 BREAK_TO_DEBUGGER();
1835 for (i = 0; i < dc_link->sink_count; i++) {
1836 if (dc_link->remote_sinks[i] == sink) {
1837 dc_sink_release(sink);
1838 dc_link->remote_sinks[i] = NULL;
1840 /* shrink array to remove empty place */
1841 while (i < dc_link->sink_count - 1) {
1842 dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
1845 dc_link->remote_sinks[i] = NULL;
1846 dc_link->sink_count--;
1852 bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
1855 struct core_dc *core_dc = DC_TO_CORE(dc);
1856 struct mem_input *mi = NULL;
1858 for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
1859 if (core_dc->res_pool->mis[i] != NULL) {
1860 mi = core_dc->res_pool->mis[i];
1865 dm_error("no mem_input!\n");
1869 if (mi->funcs->mem_input_update_dchub)
1870 mi->funcs->mem_input_update_dchub(mi, dh_data);
1872 ASSERT(mi->funcs->mem_input_update_dchub);