2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "dm_services.h"
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
38 #include "dce_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
45 #include "link_hwss.h"
46 #include "link_encoder.h"
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
52 /*******************************************************************************
54 ******************************************************************************/
/* Tear down every core_link this DC instance created.
 * NOTE(review): this extract elides some lines (loop-variable decl,
 * closing braces) present in the full source. */
55 static void destroy_links(struct core_dc *dc)
59 for (i = 0; i < dc->link_count; i++) {
/* Guard: slots past the created count may be NULL. */
60 if (NULL != dc->links[i])
61 link_destroy(&dc->links[i]);
/* Create one core_link per physical connector reported by the VBIOS,
 * plus num_virtual_links virtual links backed by a virtual encoder.
 * NOTE(review): extract elides several lines (error paths, braces). */
65 static bool create_links(
67 uint32_t num_virtual_links)
71 struct dc_bios *bios = dc->ctx->dc_bios;
75 connectors_num = bios->funcs->get_connectors_number(bios);
/* Sanity-check the VBIOS-reported connector count. */
77 if (connectors_num > ENUM_ID_COUNT) {
79 "DC: Number of connectors %d exceeds maximum of %d!\n",
85 if (connectors_num == 0 && num_virtual_links == 0) {
86 dm_error("DC: Number of connectors is zero!\n");
90 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
/* Physical links: one per BIOS object-table connector. */
95 for (i = 0; i < connectors_num; i++) {
96 struct link_init_data link_init_params = {0};
97 struct core_link *link;
99 link_init_params.ctx = dc->ctx;
100 /* next BIOS object table connector */
101 link_init_params.connector_index = i;
102 link_init_params.link_index = dc->link_count;
103 link_init_params.dc = dc;
104 link = link_create(&link_init_params);
107 dc->links[dc->link_count] = link;
/* Virtual links: hand-built link + encoder objects (no BIOS entry). */
113 for (i = 0; i < num_virtual_links; i++) {
114 struct core_link *link = dm_alloc(sizeof(*link));
115 struct encoder_init_data enc_init = {0};
124 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
125 link->link_id.type = OBJECT_TYPE_CONNECTOR;
126 link->link_id.id = CONNECTOR_ID_VIRTUAL;
127 link->link_id.enum_id = ENUM_ID_1;
/* NOTE(review): dm_alloc result is used without a visible NULL check
 * in this extract — confirm the full source handles allocation failure. */
128 link->link_enc = dm_alloc(sizeof(*link->link_enc));
130 enc_init.ctx = dc->ctx;
131 enc_init.channel = CHANNEL_ID_UNKNOWN;
132 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
133 enc_init.transmitter = TRANSMITTER_UNKNOWN;
134 enc_init.connector = link->link_id;
135 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
136 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
137 enc_init.encoder.enum_id = ENUM_ID_1;
138 virtual_link_encoder_construct(link->link_enc, &enc_init);
140 link->public.link_index = dc->link_count;
141 dc->links[dc->link_count] = link;
/* Program DRR (dynamic refresh rate) vmin/vmax on the pipe driving
 * stream[0], then rebuild and resend its info frame.
 * NOTE(review): extract elides some lines (return, braces). */
151 static bool stream_adjust_vmin_vmax(struct dc *dc,
152 const struct dc_stream **stream, int num_streams,
155 /* TODO: Support multiple streams */
156 struct core_dc *core_dc = DC_TO_CORE(dc);
157 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
161 for (i = 0; i < MAX_PIPES; i++) {
162 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
/* Only the pipe with an active stream encoder for this stream. */
164 if (pipe->stream == core_stream && pipe->stream_enc) {
/* set_drr takes an array of pipe pointers; &pipe is a 1-element array. */
165 core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
167 /* build and update the info frame */
168 resource_build_info_frame(pipe);
169 core_dc->hwss.update_info_frame(pipe);
/* Read the current and nominal CRTC vertical position for stream[0].
 * NOTE(review): extract elides some lines (return, braces). */
177 static bool stream_get_crtc_position(struct dc *dc,
178 const struct dc_stream **stream, int num_streams,
179 unsigned int *v_pos, unsigned int *nom_v_pos)
181 /* TODO: Support multiple streams */
182 struct core_dc *core_dc = DC_TO_CORE(dc);
183 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
186 struct crtc_position position;
188 for (i = 0; i < MAX_PIPES; i++) {
189 struct pipe_ctx *pipe =
190 &core_dc->current_context->res_ctx.pipe_ctx[i];
192 if (pipe->stream == core_stream && pipe->stream_enc) {
/* get_position takes an array of pipe pointers; &pipe is length 1. */
193 core_dc->hwss.get_position(&pipe, 1, &position);
195 *v_pos = position.vertical_count;
196 *nom_v_pos = position.nominal_vcount;
/* Re-program the gamut remap matrix on every pipe driving 'stream'.
 * NOTE(review): extract elides the stream-match condition and braces. */
203 static bool set_gamut_remap(struct dc *dc, const struct dc_stream *stream)
205 struct core_dc *core_dc = DC_TO_CORE(dc);
206 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
209 struct pipe_ctx *pipes;
211 for (i = 0; i < MAX_PIPES; i++) {
212 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
215 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
216 core_dc->hwss.program_gamut_remap(pipes);
/* Re-program the output CSC matrix (color space + user matrix) on every
 * pipe driving 'stream'.
 * NOTE(review): extract elides the stream-match condition and braces. */
224 static bool program_csc_matrix(struct dc *dc, const struct dc_stream *stream)
226 struct core_dc *core_dc = DC_TO_CORE(dc);
227 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
230 struct pipe_ctx *pipes;
232 for (i = 0; i < MAX_PIPES; i++) {
233 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
236 pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
237 core_dc->hwss.program_csc_matrix(pipes,
238 core_stream->public.output_color_space,
239 core_stream->public.csc_color_matrix.matrix);
/* Collect all pipes that drive any of the given streams, then program
 * the static-screen event mask on them in one hwss call.
 * NOTE(review): extract elides the stream-match condition and braces. */
247 static void set_static_screen_events(struct dc *dc,
248 const struct dc_stream **stream,
250 const struct dc_static_screen_events *events)
252 struct core_dc *core_dc = DC_TO_CORE(dc);
255 struct pipe_ctx *pipes_affected[MAX_PIPES];
256 int num_pipes_affected = 0;
258 for (i = 0; i < num_streams; i++) {
259 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[i]);
261 for (j = 0; j < MAX_PIPES; j++) {
262 if (core_dc->current_context->res_ctx.pipe_ctx[j].stream
264 pipes_affected[num_pipes_affected++] =
265 &core_dc->current_context->res_ctx.pipe_ctx[j];
270 core_dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
/* Apply DP drive (voltage-swing/pre-emphasis) settings to the core link
 * matching the public 'link' pointer. */
273 static void set_drive_settings(struct dc *dc,
274 struct link_training_settings *lt_settings,
275 const struct dc_link *link)
277 struct core_dc *core_dc = DC_TO_CORE(dc);
280 for (i = 0; i < core_dc->link_count; i++) {
281 if (&core_dc->links[i]->public == link)
/* NOTE(review): if no link matches, ASSERT_CRITICAL fires but execution
 * appears to continue and index links[i] with i == link_count — possible
 * out-of-bounds read on builds where the assert is a no-op. Confirm the
 * full source returns here. */
285 if (i >= core_dc->link_count)
286 ASSERT_CRITICAL(false);
288 dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
/* Run DP link training with the given settings on every link.
 * NOTE(review): extract elides the trailing call arguments. */
291 static void perform_link_training(struct dc *dc,
292 struct dc_link_settings *link_setting,
293 bool skip_video_pattern)
295 struct core_dc *core_dc = DC_TO_CORE(dc);
298 for (i = 0; i < core_dc->link_count; i++)
299 dc_link_dp_perform_link_training(
300 &core_dc->links[i]->public,
/* Record preferred DP link settings on the link, then retrain it
 * (test path: dp_retrain_link_dp_test with skip_video_pattern=false). */
305 static void set_preferred_link_settings(struct dc *dc,
306 struct dc_link_settings *link_setting,
307 const struct dc_link *link)
309 struct core_link *core_link = DC_LINK_TO_CORE(link);
311 core_link->public.preferred_link_setting =
313 dp_retrain_link_dp_test(core_link, link_setting, false);
/* Thin wrapper: enable HPD (hot-plug detect) on a DP link. */
316 static void enable_hpd(const struct dc_link *link)
318 dc_link_dp_enable_hpd(link);
/* Thin wrapper: disable HPD (hot-plug detect) on a DP link. */
321 static void disable_hpd(const struct dc_link *link)
323 dc_link_dp_disable_hpd(link);
/* Forward a DP test-pattern request (optionally with custom pattern
 * bytes) to the link layer.
 * NOTE(review): extract elides the forwarded argument list. */
327 static void set_test_pattern(
328 const struct dc_link *link,
329 enum dp_test_pattern test_pattern,
330 const struct link_training_settings *p_link_settings,
331 const unsigned char *p_custom_pattern,
332 unsigned int cust_pattern_size)
335 dc_link_dp_set_test_pattern(
343 void set_dither_option(const struct dc_stream *dc_stream,
344 enum dc_dither_option option)
346 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
347 struct bit_depth_reduction_params params;
348 struct core_link *core_link = DC_LINK_TO_CORE(stream->status.link);
349 struct pipe_ctx *pipes =
350 core_link->dc->current_context->res_ctx.pipe_ctx;
352 memset(¶ms, 0, sizeof(params));
355 if (option > DITHER_OPTION_MAX)
357 if (option == DITHER_OPTION_DEFAULT) {
358 switch (stream->public.timing.display_color_depth) {
359 case COLOR_DEPTH_666:
360 stream->public.dither_option = DITHER_OPTION_SPATIAL6;
362 case COLOR_DEPTH_888:
363 stream->public.dither_option = DITHER_OPTION_SPATIAL8;
365 case COLOR_DEPTH_101010:
366 stream->public.dither_option = DITHER_OPTION_SPATIAL10;
369 option = DITHER_OPTION_DISABLE;
372 stream->public.dither_option = option;
374 resource_build_bit_depth_reduction_params(stream,
376 stream->bit_depth_params = params;
378 opp_program_bit_depth_reduction(pipes->opp, ¶ms);
/* Populate the public stream/link function tables with the static
 * wrappers defined above.  adjust_vmin_vmax is only exported when the
 * hw sequencer provides set_drr.
 * NOTE(review): extract elides some assignment right-hand sides/braces. */
381 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
383 if (core_dc->hwss.set_drr != NULL) {
384 core_dc->public.stream_funcs.adjust_vmin_vmax =
385 stream_adjust_vmin_vmax;
388 core_dc->public.stream_funcs.set_static_screen_events =
389 set_static_screen_events;
391 core_dc->public.stream_funcs.get_crtc_position =
392 stream_get_crtc_position;
394 core_dc->public.stream_funcs.set_gamut_remap =
397 core_dc->public.stream_funcs.program_csc_matrix =
400 core_dc->public.stream_funcs.set_dither_option =
403 core_dc->public.link_funcs.set_drive_settings =
406 core_dc->public.link_funcs.perform_link_training =
407 perform_link_training;
409 core_dc->public.link_funcs.set_preferred_link_settings =
410 set_preferred_link_settings;
412 core_dc->public.link_funcs.enable_hpd =
415 core_dc->public.link_funcs.disable_hpd =
418 core_dc->public.link_funcs.set_test_pattern =
/* Release everything construct() created, in reverse order: current
 * validate context, resource pool, GPIO service, i2caux, BIOS parser
 * (only if we created it), and the logger. */
422 static void destruct(struct core_dc *dc)
424 dc_release_validate_context(dc->current_context);
425 dc->current_context = NULL;
429 dc_destroy_resource_pool(dc);
431 if (dc->ctx->gpio_service)
432 dal_gpio_service_destroy(&dc->ctx->gpio_service);
435 dal_i2caux_destroy(&dc->ctx->i2caux);
/* Only destroy the BIOS parser if it was created here (not overridden). */
437 if (dc->ctx->created_bios)
438 dal_bios_parser_destroy(&dc->ctx->dc_bios);
441 dal_logger_destroy(&dc->ctx->logger);
/* Build a core_dc: context, initial validate context, logger, BIOS
 * parser (unless a vbios override is supplied), i2caux, GPIO service,
 * resource pool, links, and finally the public function tables.
 * Unwinds via the goto labels at the bottom on any failure.
 * NOTE(review): extract elides many lines (NULL checks, goto targets,
 * braces) — the error-unwind structure is only partially visible here. */
447 static bool construct(struct core_dc *dc,
448 const struct dc_init_data *init_params)
450 struct dal_logger *logger;
451 struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
452 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
455 dm_error("%s: failed to create ctx\n", __func__);
459 dc->current_context = dm_alloc(sizeof(*dc->current_context));
461 if (!dc->current_context) {
462 dm_error("%s: failed to create validate ctx\n", __func__);
/* The freshly created context starts with one reference held by dc. */
466 dc->current_context->ref_count++;
468 dc_ctx->cgs_device = init_params->cgs_device;
469 dc_ctx->driver_context = init_params->driver;
470 dc_ctx->dc = &dc->public;
471 dc_ctx->asic_id = init_params->asic_id;
474 logger = dal_logger_create(dc_ctx);
477 /* can *not* call logger. call base driver 'print error' */
478 dm_error("%s: failed to create Logger!\n", __func__);
481 dc_ctx->logger = logger;
483 dc->ctx->dce_environment = init_params->dce_environment;
485 dc_version = resource_parse_asic_id(init_params->asic_id);
486 dc->ctx->dce_version = dc_version;
488 /* Resource should construct all asic specific resources.
489 * This should be the only place where we need to parse the asic id
/* A caller-supplied VBIOS override takes precedence over creating one. */
491 if (init_params->vbios_override)
492 dc_ctx->dc_bios = init_params->vbios_override;
494 /* Create BIOS parser */
495 struct bp_init_data bp_init_data;
497 bp_init_data.ctx = dc_ctx;
498 bp_init_data.bios = init_params->asic_id.atombios_base_address;
500 dc_ctx->dc_bios = dal_bios_parser_create(
501 &bp_init_data, dc_version);
503 if (!dc_ctx->dc_bios) {
504 ASSERT_CRITICAL(false);
/* Remember we own the parser so destruct() knows to free it. */
508 dc_ctx->created_bios = true;
512 dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
514 if (!dc_ctx->i2caux) {
515 ASSERT_CRITICAL(false);
516 goto failed_to_create_i2caux;
519 /* Create GPIO service */
520 dc_ctx->gpio_service = dal_gpio_service_create(
522 dc_ctx->dce_environment,
525 if (!dc_ctx->gpio_service) {
526 ASSERT_CRITICAL(false);
530 dc->res_pool = dc_create_resource_pool(
532 init_params->num_virtual_links,
534 init_params->asic_id);
536 goto create_resource_fail;
538 if (!create_links(dc, init_params->num_virtual_links))
539 goto create_links_fail;
541 allocate_dc_stream_funcs(dc);
545 /**** error handling here ****/
547 create_resource_fail:
549 failed_to_create_i2caux:
/* Program per-pipe pixel duration (in picoseconds) into the DPGV0/DPGV1
 * arbitration registers, plus fixed arbitration/repeater values.
 * NOTE(review): this block uses non-kernel constructs (Fixed31_32(),
 * round(), ReadReg/WriteReg) and CamelCase naming — it reads like
 * reference pseudocode rather than buildable driver code; confirm
 * whether it belongs in a comment in the full source. */
559 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
/* pixel duration = 1e8 / pix_clk_khz * 10 → picoseconds per pixel. */
561 fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
562 unsigned int pixDurationInPico = round(pixel_duration);
564 DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
566 arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
567 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
568 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
570 arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
571 arb_control.bits.PIXEL_DURATION = pixDurationInPico;
572 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
/* Magic constants below are programmed as-is; meaning not visible here. */
574 WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
575 WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
577 WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
578 WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
582 /*******************************************************************************
584 ******************************************************************************/
/* Public entry: allocate and construct a DC instance, init HW, and
 * derive capability caps (max streams/links/audios) from the resource
 * pool.  Returns the public struct dc embedded in core_dc.
 * NOTE(review): extract elides allocation-failure paths and braces. */
586 struct dc *dc_create(const struct dc_init_data *init_params)
588 struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
589 unsigned int full_pipe_count;
594 if (false == construct(core_dc, init_params))
597 /*TODO: separate HW and SW initialization*/
598 core_dc->hwss.init_hw(core_dc);
600 full_pipe_count = core_dc->res_pool->pipe_count;
/* An underlay pipe does not count toward full-pipe streams. */
601 if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
603 core_dc->public.caps.max_streams = min(
605 core_dc->res_pool->stream_enc_count);
607 core_dc->public.caps.max_links = core_dc->link_count;
608 core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
610 core_dc->public.config = init_params->flags;
612 dm_logger_write(core_dc->ctx->logger, LOG_DC,
613 "Display Core initialized\n");
616 /* TODO: missing feature to be enabled */
617 core_dc->public.debug.disable_dfs_bypass = true;
619 return &core_dc->public;
/* Public entry: destroy a DC instance created by dc_create().
 * NOTE(review): extract elides the destruct/free/NULL-out lines. */
628 void dc_destroy(struct dc **dc)
630 struct core_dc *core_dc = DC_TO_CORE(*dc);
/* Decide whether a full re-validation is needed for the proposed
 * validation set, by comparing it against the current context: stream
 * count, per-stream surface count, stream equality, and surface
 * contents (ignoring clip_rect and dst_rect x/y, which are copied over
 * before the memcmp so pure pan/clip moves don't force revalidation).
 * NOTE(review): extract elides the return statements. */
636 static bool is_validation_required(
637 const struct core_dc *dc,
638 const struct dc_validation_set set[],
641 const struct validate_context *context = dc->current_context;
644 if (context->stream_count != set_count)
647 for (i = 0; i < set_count; i++) {
649 if (set[i].surface_count != context->stream_status[i].surface_count)
651 if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
654 for (j = 0; j < set[i].surface_count; j++) {
655 struct dc_surface temp_surf;
656 memset(&temp_surf, 0, sizeof(temp_surf));
/* Neutralize fields that may differ without requiring validation. */
658 temp_surf = *context->stream_status[i].surfaces[j];
659 temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
660 temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
661 temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
663 if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
/* Produce a validate_context for the given set: either a copy of the
 * current context (when nothing relevant changed) or a freshly
 * validated one.  Returned context carries one reference; released on
 * validation failure.
 * NOTE(review): extract elides the NULL check after dm_alloc and the
 * return statements. */
671 struct validate_context *dc_get_validate_context(
673 const struct dc_validation_set set[],
676 struct core_dc *core_dc = DC_TO_CORE(dc);
677 enum dc_status result = DC_ERROR_UNEXPECTED;
678 struct validate_context *context;
680 context = dm_alloc(sizeof(struct validate_context));
682 goto context_alloc_fail;
684 ++context->ref_count;
/* Fast path: reuse (copy) the current context when nothing changed. */
686 if (!is_validation_required(core_dc, set, set_count)) {
687 dc_resource_validate_ctx_copy_construct(core_dc->current_context, context);
691 result = core_dc->res_pool->funcs->validate_with_context(
692 core_dc, set, set_count, context, core_dc->current_context);
695 if (result != DC_OK) {
696 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
697 "%s:resource validation failed, dc_status:%d\n",
701 dc_release_validate_context(context);
/* Validate a proposed set against pool resources without committing;
 * the temporary context is always released before returning.
 * NOTE(review): extract elides the NULL check after dm_alloc. */
709 bool dc_validate_resources(
711 const struct dc_validation_set set[],
714 struct core_dc *core_dc = DC_TO_CORE(dc);
715 enum dc_status result = DC_ERROR_UNEXPECTED;
716 struct validate_context *context;
718 context = dm_alloc(sizeof(struct validate_context));
720 goto context_alloc_fail;
722 ++context->ref_count;
/* Old context is NULL here: validate from scratch, not incrementally. */
724 result = core_dc->res_pool->funcs->validate_with_context(
725 core_dc, set, set_count, context, NULL);
728 if (result != DC_OK) {
729 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
730 "%s:resource validation failed, dc_status:%d\n",
735 dc_release_validate_context(context);
738 return result == DC_OK;
/* Check whether a single stream can be guaranteed (mode-support query);
 * temporary context is released before returning the verdict.
 * NOTE(review): extract elides the NULL check after dm_alloc. */
741 bool dc_validate_guaranteed(
743 const struct dc_stream *stream)
745 struct core_dc *core_dc = DC_TO_CORE(dc);
746 enum dc_status result = DC_ERROR_UNEXPECTED;
747 struct validate_context *context;
749 context = dm_alloc(sizeof(struct validate_context));
751 goto context_alloc_fail;
753 ++context->ref_count;
755 result = core_dc->res_pool->funcs->validate_guaranteed(
756 core_dc, stream, context);
758 dc_release_validate_context(context);
761 if (result != DC_OK) {
762 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
763 "%s:guaranteed validation failed, dc_status:%d\n",
768 return (result == DC_OK);
/* Group pipes whose streams have synchronizable timings and enable
 * timing synchronization per group, using the first unblanked pipe as
 * the group master.
 * NOTE(review): extract elides group_size/group_index bookkeeping lines
 * and several braces — the grouping arithmetic is only partially
 * visible here. */
771 static void program_timing_sync(
772 struct core_dc *core_dc,
773 struct validate_context *ctx)
777 int pipe_count = core_dc->res_pool->pipe_count;
778 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
/* Collect candidate pipes: skip empty pipes and bottom pipes of blends. */
780 for (i = 0; i < pipe_count; i++) {
781 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
784 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
787 for (i = 0; i < pipe_count; i++) {
789 struct pipe_ctx *pipe_set[MAX_PIPES];
791 if (!unsynced_pipes[i])
794 pipe_set[0] = unsynced_pipes[i];
795 unsynced_pipes[i] = NULL;
797 /* Add tg to the set, search rest of the tg's for ones with
798 * same timing, add all tgs with same timing to the group
800 for (j = i + 1; j < pipe_count; j++) {
801 if (!unsynced_pipes[j])
804 if (resource_are_streams_timing_synchronizable(
805 unsynced_pipes[j]->stream,
806 pipe_set[0]->stream)) {
807 pipe_set[group_size] = unsynced_pipes[j];
808 unsynced_pipes[j] = NULL;
813 /* set first unblanked pipe as master */
814 for (j = 0; j < group_size; j++) {
815 struct pipe_ctx *temp;
817 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
822 pipe_set[0] = pipe_set[j];
828 /* remove any other unblanked pipes as they have already been synced */
829 for (j = j + 1; j < group_size; j++) {
830 if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
832 pipe_set[j] = pipe_set[group_size];
/* Only groups of two or more pipes need hardware synchronization. */
837 if (group_size > 1) {
838 core_dc->hwss.enable_timing_synchronization(
839 core_dc, group_index, group_size, pipe_set);
/* True when 'context' differs from dc->current_context by stream count
 * or by stream identity (pointer comparison of the public structs).
 * NOTE(review): extract elides the return statements. */
845 static bool context_changed(
847 struct validate_context *context)
851 if (context->stream_count != dc->current_context->stream_count)
854 for (i = 0; i < dc->current_context->stream_count; i++) {
855 if (&dc->current_context->streams[i]->public != &context->streams[i]->public)
/* True when the given public stream array differs from the streams in
 * dc->current_context (count or identity).
 * NOTE(review): extract elides the return statements. */
862 static bool streams_changed(
864 const struct dc_stream *streams[],
865 uint8_t stream_count)
869 if (stream_count != dc->current_context->stream_count)
872 for (i = 0; i < dc->current_context->stream_count; i++) {
873 if (&dc->current_context->streams[i]->public != streams[i])
/* Set up stereo output on every pipe whose stream appears in 'streams',
 * using 'context' when supplied, else the current context.  Also
 * disables FBC if the compressor reports it enabled in hardware
 * (stereo and FBC appear mutually exclusive here — confirm in full
 * source).
 * NOTE(review): extract elides the context-NULL branch condition and
 * several braces. */
880 bool dc_enable_stereo(
882 struct validate_context *context,
883 const struct dc_stream *streams[],
884 uint8_t stream_count)
888 struct pipe_ctx *pipe;
889 struct core_dc *core_dc = DC_TO_CORE(dc);
892 struct compressor *fbc_compressor = core_dc->fbc_compressor;
895 for (i = 0; i < MAX_PIPES; i++) {
897 pipe = &context->res_ctx.pipe_ctx[i];
899 pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
900 for (j = 0 ; pipe && j < stream_count; j++) {
901 if (streams[j] && streams[j] == &pipe->stream->public &&
902 core_dc->hwss.setup_stereo)
903 core_dc->hwss.setup_stereo(pipe, core_dc);
908 if (fbc_compressor != NULL &&
909 fbc_compressor->funcs->is_fbc_enabled_in_hw(core_dc->fbc_compressor,
911 fbc_compressor->funcs->disable_fbc(fbc_compressor);
919 * Applies given context to HW and copy it into current context.
920 * It's up to the user to release the src context afterwards.
922 static bool dc_commit_context_no_check(struct dc *dc, struct validate_context *context)
924 struct core_dc *core_dc = DC_TO_CORE(dc);
925 struct dc_bios *dcb = core_dc->ctx->dc_bios;
926 enum dc_status result = DC_ERROR_UNEXPECTED;
927 struct pipe_ctx *pipe;
929 const struct dc_stream *dc_streams[MAX_STREAMS] = {0};
931 for (i = 0; i < context->stream_count; i++)
932 dc_streams[i] = &context->streams[i]->public;
/* Take the ASIC out of VGA/legacy mode before applying the context. */
934 if (!dcb->funcs->is_accelerated_mode(dcb))
935 core_dc->hwss.enable_accelerated_mode(core_dc);
/* Wait for each pipe's MPCC disconnect before touching hardware. */
937 for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
938 pipe = &context->res_ctx.pipe_ctx[i];
939 core_dc->hwss.wait_for_mpcc_disconnect(core_dc->res_pool, pipe);
941 result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
943 program_timing_sync(core_dc, context);
945 for (i = 0; i < context->stream_count; i++) {
946 const struct core_sink *sink = context->streams[i]->sink;
948 for (j = 0; j < context->stream_status[i].surface_count; j++) {
949 const struct dc_surface *surface =
950 context->stream_status[i].surfaces[j];
952 core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
956 * TODO rework dc_enable_stereo call to work with validation sets?
958 for (k = 0; k < MAX_PIPES; k++) {
959 pipe = &context->res_ctx.pipe_ctx[k];
961 for (l = 0 ; pipe && l < context->stream_count; l++) {
962 if (context->streams[l] &&
963 context->streams[l] == pipe->stream &&
964 core_dc->hwss.setup_stereo)
965 core_dc->hwss.setup_stereo(pipe, core_dc);
970 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
971 context->streams[i]->public.timing.h_addressable,
972 context->streams[i]->public.timing.v_addressable,
973 context->streams[i]->public.timing.h_total,
974 context->streams[i]->public.timing.v_total,
975 context->streams[i]->public.timing.pix_clk_khz);
978 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
/* Swap contexts: drop our ref on the old one, retain the new one. */
980 dc_release_validate_context(core_dc->current_context);
982 core_dc->current_context = context;
984 dc_retain_validate_context(core_dc->current_context);
986 return (result == DC_OK);
/* Public commit entry for a pre-validated context: no-op (success)
 * when nothing changed, otherwise log each stream and commit.
 * NOTE(review): extract elides the early-return and braces. */
989 bool dc_commit_context(struct dc *dc, struct validate_context *context)
991 enum dc_status result = DC_ERROR_UNEXPECTED;
992 struct core_dc *core_dc = DC_TO_CORE(dc);
995 if (false == context_changed(core_dc, context))
998 dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
999 __func__, context->stream_count);
1001 for (i = 0; i < context->stream_count; i++) {
1002 const struct dc_stream *stream = &context->streams[i]->public;
1004 dc_stream_log(stream,
1005 core_dc->ctx->logger,
1009 result = dc_commit_context_no_check(dc, context);
1011 return (result == DC_OK);
/* Public commit entry for a raw stream list: build a validation set
 * (carrying over each stream's current surfaces), validate against the
 * current context, then commit.  Temporary context released at end.
 * NOTE(review): extract elides several braces and the status-NULL
 * handling around dc_stream_get_status. */
1015 bool dc_commit_streams(
1017 const struct dc_stream *streams[],
1018 uint8_t stream_count)
1020 struct core_dc *core_dc = DC_TO_CORE(dc);
1021 enum dc_status result = DC_ERROR_UNEXPECTED;
1022 struct validate_context *context;
1023 struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
/* No-op (success) when the stream list matches the current context. */
1026 if (false == streams_changed(core_dc, streams, stream_count))
1029 dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
1030 __func__, stream_count);
1032 for (i = 0; i < stream_count; i++) {
1033 const struct dc_stream *stream = streams[i];
1034 const struct dc_stream_status *status = dc_stream_get_status(stream);
1037 dc_stream_log(stream,
1038 core_dc->ctx->logger,
1041 set[i].stream = stream;
/* Carry existing surfaces forward so the commit keeps current planes. */
1044 set[i].surface_count = status->surface_count;
1045 for (j = 0; j < status->surface_count; j++)
1046 set[i].surfaces[j] = status->surfaces[j];
1051 context = dm_alloc(sizeof(struct validate_context));
1052 if (context == NULL)
1053 goto context_alloc_fail;
1055 ++context->ref_count;
1057 result = core_dc->res_pool->funcs->validate_with_context(
1058 core_dc, set, stream_count, context, core_dc->current_context);
1059 if (result != DC_OK){
1060 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
1061 "%s: Context validation failed! dc_status:%d\n",
1064 BREAK_TO_DEBUGGER();
1068 result = dc_commit_context_no_check(dc, context);
1071 dc_release_validate_context(context);
1074 return (result == DC_OK);
/* Post-flip cleanup: power down front ends of pipes with no stream or
 * surface, then re-program bandwidth for the current context. */
1077 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1080 struct core_dc *core_dc = DC_TO_CORE(dc);
1081 struct validate_context *context = core_dc->current_context;
1083 post_surface_trace(dc);
1085 for (i = 0; i < core_dc->res_pool->pipe_count; i++)
1086 if (context->res_ctx.pipe_ctx[i].stream == NULL
1087 || context->res_ctx.pipe_ctx[i].surface == NULL)
1088 core_dc->hwss.power_down_front_end(core_dc, i);
1090 /* 3rd param should be true, temp w/a for RV*/
1091 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* DCN1.0 workaround: skip bandwidth decrease on Raven (see comment). */
1092 core_dc->hwss.set_bandwidth(core_dc, context, core_dc->ctx->dce_version != DCN_VERSION_1_0)
1094 core_dc->hwss.set_bandwidth(core_dc, context, true);
/* Convenience wrapper: convert a plain surface array into full
 * dc_surface_update / flip / plane-info / scaling-info structures and
 * route through dc_update_surfaces_and_stream, then run post-update.
 * The temporary stream_update is heap-allocated and freed at the end.
 * NOTE(review): extract elides some lines (loop var decls, return). */
1099 bool dc_commit_surfaces_to_stream(
1101 struct dc_surface **new_surfaces,
1102 uint8_t new_surface_count,
1103 const struct dc_stream *dc_stream)
1105 struct dc_surface_update updates[MAX_SURFACES];
1106 struct dc_flip_addrs flip_addr[MAX_SURFACES];
1107 struct dc_plane_info plane_info[MAX_SURFACES];
1108 struct dc_scaling_info scaling_info[MAX_SURFACES];
1110 struct dc_stream_update *stream_update =
1111 dm_alloc(sizeof(struct dc_stream_update));
1113 if (!stream_update) {
1114 BREAK_TO_DEBUGGER();
1118 memset(updates, 0, sizeof(updates));
1119 memset(flip_addr, 0, sizeof(flip_addr));
1120 memset(plane_info, 0, sizeof(plane_info));
1121 memset(scaling_info, 0, sizeof(scaling_info));
1123 stream_update->src = dc_stream->src;
1124 stream_update->dst = dc_stream->dst;
1125 stream_update->out_transfer_func = dc_stream->out_transfer_func;
/* Fan each surface's fields out into the three per-plane structures. */
1127 for (i = 0; i < new_surface_count; i++) {
1128 updates[i].surface = new_surfaces[i];
1130 (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1131 updates[i].in_transfer_func = new_surfaces[i]->in_transfer_func;
1132 flip_addr[i].address = new_surfaces[i]->address;
1133 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1134 plane_info[i].color_space = new_surfaces[i]->color_space;
1135 plane_info[i].format = new_surfaces[i]->format;
1136 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1137 plane_info[i].rotation = new_surfaces[i]->rotation;
1138 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1139 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1140 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1141 plane_info[i].visible = new_surfaces[i]->visible;
1142 plane_info[i].per_pixel_alpha = new_surfaces[i]->per_pixel_alpha;
1143 plane_info[i].dcc = new_surfaces[i]->dcc;
1144 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1145 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1146 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1147 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1149 updates[i].flip_addr = &flip_addr[i];
1150 updates[i].plane_info = &plane_info[i];
1151 updates[i].scaling_info = &scaling_info[i];
1154 dc_update_surfaces_and_stream(
1158 dc_stream, stream_update);
1160 dc_post_update_surfaces_to_stream(dc);
1162 dm_free(stream_update);
/* Take an additional reference on a validate_context.  Caller must
 * already hold a reference (asserted). */
1166 void dc_retain_validate_context(struct validate_context *context)
1168 ASSERT(context->ref_count > 0);
1169 ++context->ref_count;
/* Drop one reference; destruct (and, per the visible call, free) the
 * context when the count reaches zero.
 * NOTE(review): extract elides the dm_free after destruct — confirm. */
1172 void dc_release_validate_context(struct validate_context *context)
1174 ASSERT(context->ref_count > 0);
1175 --context->ref_count;
1177 if (context->ref_count == 0) {
1178 dc_resource_validate_ctx_destruct(context);
/* True when 'surface' is attached to any pipe in 'context' (pointer
 * comparison).
 * NOTE(review): extract elides the return statements. */
1183 static bool is_surface_in_context(
1184 const struct validate_context *context,
1185 const struct dc_surface *surface)
1189 for (j = 0; j < MAX_PIPES; j++) {
1190 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1192 if (surface == pipe_ctx->surface) {
/* Map a surface pixel format to its bits-per-pixel; asserts on an
 * unknown format.
 * NOTE(review): the extract elides the return statements between case
 * groups, so the exact bpp per group is not visible here — presumably
 * 12/16/32/64 by group, but confirm against the full source. */
1200 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
1203 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
1204 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
1206 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
1207 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
1208 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
1209 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
1211 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
1212 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
1213 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
1214 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
1216 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1217 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1218 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1221 ASSERT_CRITICAL(false);
/* Classify a plane-info update as FAST / MED / FULL.  Builds a copy of
 * the incoming plane_info with all "full-update" fields replaced by the
 * current surface's values; any remaining memcmp difference means only
 * fast/medium-capable fields changed.  A bpp change is always FULL. */
1226 static enum surface_update_type get_plane_info_update_type(
1227 const struct dc_surface_update *u,
1230 struct dc_plane_info temp_plane_info;
1231 memset(&temp_plane_info, 0, sizeof(temp_plane_info));
/* No plane_info in the update: nothing to reprogram. */
1234 return UPDATE_TYPE_FAST;
1236 temp_plane_info = *u->plane_info;
1238 /* Copy all parameters that will cause a full update
1239 * from current surface, the rest of the parameters
1240 * from provided plane configuration.
1241 * Perform memory compare and special validation
1242 * for those that can cause fast/medium updates
1245 /* Full update parameters */
1246 temp_plane_info.color_space = u->surface->color_space;
1247 temp_plane_info.dcc = u->surface->dcc;
1248 temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
1249 temp_plane_info.plane_size = u->surface->plane_size;
1250 temp_plane_info.rotation = u->surface->rotation;
1251 temp_plane_info.stereo_format = u->surface->stereo_format;
1252 temp_plane_info.tiling_info = u->surface->tiling_info;
/* Visibility of the top plane (index 0) is treated as a fast change. */
1254 if (surface_index == 0)
1255 temp_plane_info.visible = u->plane_info->visible;
1257 temp_plane_info.visible = u->surface->visible;
1259 if (memcmp(u->plane_info, &temp_plane_info,
1260 sizeof(struct dc_plane_info)) != 0)
1261 return UPDATE_TYPE_FULL;
1263 if (pixel_format_to_bpp(u->plane_info->format) !=
1264 pixel_format_to_bpp(u->surface->format)) {
1265 return UPDATE_TYPE_FULL;
1267 return UPDATE_TYPE_MED;
/* Classify a scaling-info update: size (width/height) changes are FULL,
 * position-only (x/y) changes are MED, no change is FAST. */
1271 static enum surface_update_type get_scaling_info_update_type(
1272 const struct dc_surface_update *u)
1274 if (!u->scaling_info)
1275 return UPDATE_TYPE_FAST;
1277 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
1278 || u->scaling_info->src_rect.height != u->surface->src_rect.height
1279 || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
1280 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
1281 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
1282 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
1283 return UPDATE_TYPE_FULL;
1285 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
1286 || u->scaling_info->src_rect.y != u->surface->src_rect.y
1287 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
1288 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
1289 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
1290 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
1291 return UPDATE_TYPE_MED;
1293 return UPDATE_TYPE_FAST;
/* Determine the overall update type for one surface update: FULL when
 * the surface is not in the current context, otherwise the max of the
 * plane-info and scaling classifications, bumped to at least MED when a
 * transfer function or HDR static metadata changes. */
1296 static enum surface_update_type det_surface_update(
1297 const struct core_dc *dc,
1298 const struct dc_surface_update *u,
1301 const struct validate_context *context = dc->current_context;
1302 enum surface_update_type type = UPDATE_TYPE_FAST;
1303 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1305 if (!is_surface_in_context(context, u->surface))
1306 return UPDATE_TYPE_FULL;
1308 type = get_plane_info_update_type(u, surface_index);
1309 if (overall_type < type)
1310 overall_type = type;
1312 type = get_scaling_info_update_type(u);
1313 if (overall_type < type)
1314 overall_type = type;
1316 if (u->in_transfer_func ||
1317 u->hdr_static_metadata) {
1318 if (overall_type < UPDATE_TYPE_MED)
1319 overall_type = UPDATE_TYPE_MED;
1322 return overall_type;
/*
 * dc_check_update_surfaces_for_stream() - classify a batch of surface
 * updates for one stream as FAST, MED or FULL.
 *
 * A NULL stream status or a surface-count mismatch forces a full
 * update; otherwise the result is the maximum of the per-surface
 * classifications from det_surface_update().
 *
 * NOTE(review): this extract drops original lines (the dc and
 * surface_count parameters, the condition guarding the second early
 * "return UPDATE_TYPE_FULL", the early return when a per-surface type
 * is FULL, and the declaration of i) -- verify against the complete
 * file.
 */
1325 enum surface_update_type dc_check_update_surfaces_for_stream(
1327 struct dc_surface_update *updates,
1329 struct dc_stream_update *stream_update,
1330 const struct dc_stream_status *stream_status)
1332 struct core_dc *core_dc = DC_TO_CORE(dc);
1334 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1336 if (stream_status == NULL || stream_status->surface_count != surface_count)
1337 return UPDATE_TYPE_FULL;
1340 return UPDATE_TYPE_FULL;
/* Take the worst-case classification across all surfaces. */
1342 for (i = 0 ; i < surface_count; i++) {
1343 enum surface_update_type type =
1344 det_surface_update(core_dc, &updates[i], i);
1346 if (type == UPDATE_TYPE_FULL)
1349 if (overall_type < type)
1350 overall_type = type;
1353 return overall_type;
/* Minimum update type at which update_surface_trace() logging kicks in. */
1356 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
/*
 * dc_update_surfaces_and_stream() - apply a batch of surface updates and
 * an optional stream update to the hardware.
 *
 * Flow as visible in this extract:
 *   1. classify the batch (FAST/MED/FULL) via
 *      dc_check_update_surfaces_for_stream();
 *   2. for FULL updates, allocate a new validate_context, copy-construct
 *      it from the current one and attach the new surface set;
 *   3. copy the update parameters (flip address, scaling, plane info,
 *      gamma, transfer functions, HDR metadata) into the surfaces,
 *      rebuilding scaling params for MED-or-worse updates;
 *   4. for FULL updates, validate bandwidth and program clocks;
 *   5. lock the affected pipes, program the context per surface, perform
 *      the requested per-surface updates, then unlock pipes in reverse
 *      order;
 *   6. swap the new context in and release the old one.
 *
 * NOTE(review): many original lines are missing from this extract
 * (braces, "continue"/"return" statements, some assignment left-hand
 * sides and call arguments) -- do not treat this listing as compilable;
 * verify all details against the complete file.
 */
1358 void dc_update_surfaces_and_stream(struct dc *dc,
1359 struct dc_surface_update *srf_updates, int surface_count,
1360 const struct dc_stream *dc_stream,
1361 struct dc_stream_update *stream_update)
1363 struct core_dc *core_dc = DC_TO_CORE(dc);
1364 struct validate_context *context;
1366 enum surface_update_type update_type;
1367 const struct dc_stream_status *stream_status;
1368 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1369 struct dc_context *dc_ctx = core_dc->ctx;
1371 stream_status = dc_stream_get_status(dc_stream);
1372 ASSERT(stream_status);
1374 return; /* Cannot commit surface to stream that is not committed */
/* Guard against a flip to an unprogrammed (zero) address; the
 * handling lines for this case are not visible in this extract. */
1377 if (srf_updates->flip_addr) {
1378 if (srf_updates->flip_addr->address.grph.addr.low_part == 0)
1382 context = core_dc->current_context;
1384 /* update current stream with the new updates */
1385 if (stream_update) {
1386 if ((stream_update->src.height != 0) &&
1387 (stream_update->src.width != 0))
1388 stream->public.src = stream_update->src;
1390 if ((stream_update->dst.height != 0) &&
1391 (stream_update->dst.width != 0))
1392 stream->public.dst = stream_update->dst;
/* Swap the output transfer function, keeping refcounts balanced. */
1394 if (stream_update->out_transfer_func &&
1395 stream_update->out_transfer_func !=
1396 dc_stream->out_transfer_func) {
1397 if (dc_stream->out_transfer_func != NULL)
1398 dc_transfer_func_release(dc_stream->out_transfer_func);
1399 dc_transfer_func_retain(stream_update->out_transfer_func);
1400 stream->public.out_transfer_func =
1401 stream_update->out_transfer_func;
1405 /* do not perform surface update if surface has invalid dimensions
1406 * (all zero) and no scaling_info is provided
1408 if (surface_count > 0 &&
1409 srf_updates->surface->src_rect.width == 0 &&
1410 srf_updates->surface->src_rect.height == 0 &&
1411 srf_updates->surface->dst_rect.width == 0 &&
1412 srf_updates->surface->dst_rect.height == 0 &&
1413 !srf_updates->scaling_info) {
/* Classify the batch; this decides the programming path below. */
1418 update_type = dc_check_update_surfaces_for_stream(
1419 dc, srf_updates, surface_count, stream_update, stream_status);
1421 if (update_type >= update_surface_trace_level)
1422 update_surface_trace(dc, srf_updates, surface_count);
/* Full update: build and validate a brand-new context. */
1424 if (update_type >= UPDATE_TYPE_FULL) {
1425 struct dc_surface *new_surfaces[MAX_SURFACES] = {0};
1427 for (i = 0; i < surface_count; i++)
1428 new_surfaces[i] = srf_updates[i].surface;
1430 /* initialize scratch memory for building context */
1431 context = dm_alloc(sizeof(*context));
1432 if (context == NULL)
1433 goto context_alloc_fail;
1435 ++context->ref_count;
1437 dc_resource_validate_ctx_copy_construct(
1438 core_dc->current_context, context);
1440 /* add surface to context */
1441 if (!resource_attach_surfaces_to_context(
1442 new_surfaces, surface_count, dc_stream,
1443 context, core_dc->res_pool)) {
1444 BREAK_TO_DEBUGGER();
1449 /* save update parameters into surface */
1450 for (i = 0; i < surface_count; i++) {
1451 struct dc_surface *surface = srf_updates[i].surface;
1453 if (srf_updates[i].flip_addr) {
1454 surface->address = srf_updates[i].flip_addr->address;
1455 surface->flip_immediate =
1456 srf_updates[i].flip_addr->flip_immediate;
1459 if (srf_updates[i].scaling_info) {
1460 surface->scaling_quality =
1461 srf_updates[i].scaling_info->scaling_quality;
1463 srf_updates[i].scaling_info->dst_rect;
1465 srf_updates[i].scaling_info->src_rect;
1466 surface->clip_rect =
1467 srf_updates[i].scaling_info->clip_rect;
1470 if (srf_updates[i].plane_info) {
1471 surface->color_space =
1472 srf_updates[i].plane_info->color_space;
1474 srf_updates[i].plane_info->format;
1475 surface->plane_size =
1476 srf_updates[i].plane_info->plane_size;
1478 srf_updates[i].plane_info->rotation;
1479 surface->horizontal_mirror =
1480 srf_updates[i].plane_info->horizontal_mirror;
1481 surface->stereo_format =
1482 srf_updates[i].plane_info->stereo_format;
1483 surface->tiling_info =
1484 srf_updates[i].plane_info->tiling_info;
1486 srf_updates[i].plane_info->visible;
1487 surface->per_pixel_alpha =
1488 srf_updates[i].plane_info->per_pixel_alpha;
1490 srf_updates[i].plane_info->dcc;
/* MED or worse: re-derive scaler params for pipes on this surface. */
1493 if (update_type >= UPDATE_TYPE_MED) {
1494 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1495 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1497 if (pipe_ctx->surface != surface)
1500 resource_build_scaling_params(pipe_ctx);
/* Swap the gamma object, keeping refcounts balanced. */
1504 if (srf_updates[i].gamma &&
1505 srf_updates[i].gamma != surface->gamma_correction) {
1506 if (surface->gamma_correction != NULL)
1507 dc_gamma_release(&surface->
1510 dc_gamma_retain(srf_updates[i].gamma);
1511 surface->gamma_correction =
1512 srf_updates[i].gamma;
/* Same retain/release dance for the input transfer function. */
1515 if (srf_updates[i].in_transfer_func &&
1516 srf_updates[i].in_transfer_func != surface->in_transfer_func) {
1517 if (surface->in_transfer_func != NULL)
1518 dc_transfer_func_release(
1522 dc_transfer_func_retain(
1523 srf_updates[i].in_transfer_func);
1524 surface->in_transfer_func =
1525 srf_updates[i].in_transfer_func;
1528 if (srf_updates[i].hdr_static_metadata)
1529 surface->hdr_static_ctx =
1530 *(srf_updates[i].hdr_static_metadata);
/* Full update: the new context must pass bandwidth validation
 * before clocks are reprogrammed. */
1533 if (update_type == UPDATE_TYPE_FULL) {
1534 if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1535 BREAK_TO_DEBUGGER();
1538 core_dc->hwss.set_bandwidth(core_dc, context, false);
1539 context_clock_trace(dc, context);
/* Wait for pending MPCC disconnects on the affected pipes before
 * reprogramming them. */
1543 if (update_type > UPDATE_TYPE_FAST) {
1544 for (i = 0; i < surface_count; i++) {
1545 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1546 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1548 if (pipe_ctx->surface != srf_updates[i].surface)
1551 core_dc->hwss.wait_for_mpcc_disconnect(core_dc->res_pool, pipe_ctx);
/* No surfaces supplied: apply the context for the stream alone. */
1556 if (surface_count == 0)
1557 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
1559 /* Lock pipes for provided surfaces, or all active if full update*/
1560 for (i = 0; i < surface_count; i++) {
1561 struct dc_surface *surface = srf_updates[i].surface;
1563 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1564 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1566 if (update_type != UPDATE_TYPE_FULL && pipe_ctx->surface != surface)
1568 if (!pipe_ctx->surface || pipe_ctx->top_pipe)
/* Only lock pipes that are actively scanning out. */
1571 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1572 core_dc->hwss.pipe_control_lock(
/* Full update: program every pipe from the new context. */
1578 if (update_type == UPDATE_TYPE_FULL)
1583 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1584 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1585 struct pipe_ctx *cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1586 bool is_new_pipe_surface = cur_pipe_ctx->surface != pipe_ctx->surface;
1587 struct dc_cursor_position position = { 0 };
1589 if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->surface)
1592 if (!pipe_ctx->top_pipe)
1593 core_dc->hwss.apply_ctx_for_surface(
1594 core_dc, pipe_ctx->surface, context);
1596 /* TODO: this is a hack w/a for switching from mpo to pipe split */
1597 dc_stream_set_cursor_position(&pipe_ctx->stream->public, &position);
/* A pipe that picked up a new surface needs its address and
 * transfer functions (re)programmed. */
1599 if (is_new_pipe_surface) {
1600 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1601 core_dc->hwss.set_input_transfer_func(
1602 pipe_ctx, pipe_ctx->surface);
1603 core_dc->hwss.set_output_transfer_func(
1604 pipe_ctx, pipe_ctx->stream);
1608 if (update_type > UPDATE_TYPE_FAST)
1609 context_timing_trace(dc, &context->res_ctx);
1611 /* Perform requested Updates */
1612 for (i = 0; i < surface_count; i++) {
1613 struct dc_surface *surface = srf_updates[i].surface;
1615 if (update_type == UPDATE_TYPE_MED)
1616 core_dc->hwss.apply_ctx_for_surface(
1617 core_dc, surface, context);
1619 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1620 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1622 if (pipe_ctx->surface != surface)
1625 if (srf_updates[i].flip_addr)
1626 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
/* Fast updates stop after the flip; the rest is MED/FULL work. */
1628 if (update_type == UPDATE_TYPE_FAST)
1631 if (srf_updates[i].in_transfer_func)
1632 core_dc->hwss.set_input_transfer_func(
1633 pipe_ctx, pipe_ctx->surface);
1635 if (stream_update != NULL &&
1636 stream_update->out_transfer_func != NULL) {
1637 core_dc->hwss.set_output_transfer_func(
1638 pipe_ctx, pipe_ctx->stream);
1641 if (srf_updates[i].hdr_static_metadata) {
1642 resource_build_info_frame(pipe_ctx);
1643 core_dc->hwss.update_info_frame(pipe_ctx);
/* Unlock pipes in reverse order, mirroring the lock loop above. */
1649 for (i = core_dc->res_pool->pipe_count - 1; i >= 0; i--) {
1650 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1652 for (j = 0; j < surface_count; j++) {
1653 if (update_type != UPDATE_TYPE_FULL &&
1654 srf_updates[j].surface != pipe_ctx->surface)
1656 if (!pipe_ctx->surface || pipe_ctx->top_pipe)
1659 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1660 core_dc->hwss.pipe_control_lock(
/* Swap in the new context and drop the reference to the old one. */
1669 if (core_dc->current_context != context) {
1670 dc_release_validate_context(core_dc->current_context);
1671 core_dc->current_context = context;
/* Error path: release the partially built context. */
1676 dc_release_validate_context(context);
1679 DC_ERROR("Failed to allocate new validate context!\n");
1682 uint8_t dc_get_current_stream_count(const struct dc *dc)
1684 struct core_dc *core_dc = DC_TO_CORE(dc);
1685 return core_dc->current_context->stream_count;
/*
 * Return the public dc_stream at index @i in the current context.
 *
 * NOTE(review): the fall-through path for an out-of-range index (lines
 * after the bounds-checked return) is missing from this extract --
 * presumably "return NULL;"; verify against the complete file.
 */
1688 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1690 struct core_dc *core_dc = DC_TO_CORE(dc);
1691 if (i < core_dc->current_context->stream_count)
1692 return &(core_dc->current_context->streams[i]->public);
1696 const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1698 struct core_dc *core_dc = DC_TO_CORE(dc);
1699 return &core_dc->links[link_index]->public;
1702 const struct graphics_object_id dc_get_link_id_at_index(
1703 struct dc *dc, uint32_t link_index)
1705 struct core_dc *core_dc = DC_TO_CORE(dc);
1706 return core_dc->links[link_index]->link_id;
1709 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1710 struct dc *dc, uint32_t link_index)
1712 struct core_dc *core_dc = DC_TO_CORE(dc);
1713 return core_dc->links[link_index]->public.irq_source_hpd;
1716 const struct audio **dc_get_audios(struct dc *dc)
1718 struct core_dc *core_dc = DC_TO_CORE(dc);
1719 return (const struct audio **)core_dc->res_pool->audios;
/*
 * Translate a raw interrupt (src_id/ext_id) into a dc_irq_source via the
 * resource pool's IRQ service.
 *
 * NOTE(review): the parameter declaration lines (dc, src_id, ext_id)
 * are missing from this extract -- verify against the complete file.
 */
1722 enum dc_irq_source dc_interrupt_to_irq_source(
1727 struct core_dc *core_dc = DC_TO_CORE(dc);
1728 return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
/*
 * Enable or disable the interrupt source @src via the IRQ service.
 *
 * NOTE(review): lines between the declaration of core_dc and its
 * assignment are missing from this extract (possibly a guard) --
 * verify against the complete file.
 */
1731 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1733 struct core_dc *core_dc;
1737 core_dc = DC_TO_CORE(dc);
1739 dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1742 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1744 struct core_dc *core_dc = DC_TO_CORE(dc);
1745 dal_irq_service_ack(core_dc->res_pool->irqs, src);
/*
 * dc_set_power_state() - handle ACPI power-state transitions.
 *
 * D0 re-initializes the hardware; the other path powers down and wipes
 * the current context (preserving its refcount) so that resume starts
 * from a clean state.
 *
 * NOTE(review): this extract is missing original lines (the dc
 * parameter, the ref_count declaration, case labels/breaks for the
 * power-down path, and closing braces) -- verify against the complete
 * file.
 */
1748 void dc_set_power_state(
1750 enum dc_acpi_cm_power_state power_state)
1752 struct core_dc *core_dc = DC_TO_CORE(dc);
1755 switch (power_state) {
1756 case DC_ACPI_CM_POWER_STATE_D0:
1757 core_dc->hwss.init_hw(core_dc);
1761 core_dc->hwss.power_down(core_dc);
1763 /* Zero out the current context so that on resume we start with
1764 * clean state, and dc hw programming optimizations will not
1765 * cause any trouble.
1768 /* Preserve refcount */
1769 ref_count = core_dc->current_context->ref_count;
1770 dc_resource_validate_ctx_destruct(core_dc->current_context);
1771 memset(core_dc->current_context, 0,
1772 sizeof(*core_dc->current_context));
1773 core_dc->current_context->ref_count = ref_count;
/*
 * Resume every link after a power-state transition.
 *
 * NOTE(review): the declaration of loop counter i is missing from this
 * extract -- verify against the complete file.
 */
1780 void dc_resume(const struct dc *dc)
1782 struct core_dc *core_dc = DC_TO_CORE(dc);
1786 for (i = 0; i < core_dc->link_count; i++)
1787 core_link_resume(core_dc->links[i]);
/*
 * Read DPCD registers over the AUX channel of the link at @link_index.
 * Returns true on DDC_RESULT_SUCESSFULL [sic -- existing enum spelling].
 *
 * NOTE(review): parameter lines (address/data/size) and the argument
 * list of dal_ddc_service_read_dpcd_data() are missing from this
 * extract -- verify against the complete file.
 */
1790 bool dc_read_aux_dpcd(
1792 uint32_t link_index,
1797 struct core_dc *core_dc = DC_TO_CORE(dc);
1799 struct core_link *link = core_dc->links[link_index];
1800 enum ddc_result r = dal_ddc_service_read_dpcd_data(
1807 return r == DDC_RESULT_SUCESSFULL;
/*
 * Write DPCD registers over the AUX channel of the link at @link_index.
 * Returns true on DDC_RESULT_SUCESSFULL.
 *
 * NOTE(review): some parameter lines and the argument list of
 * dal_ddc_service_write_dpcd_data() are missing from this extract --
 * verify against the complete file.
 */
1810 bool dc_write_aux_dpcd(
1812 uint32_t link_index,
1814 const uint8_t *data,
1817 struct core_dc *core_dc = DC_TO_CORE(dc);
1818 struct core_link *link = core_dc->links[link_index];
1820 enum ddc_result r = dal_ddc_service_write_dpcd_data(
1827 return r == DDC_RESULT_SUCESSFULL;
/*
 * I2C-over-AUX read on the link at @link_index, honoring the MOT
 * (middle-of-transaction) mode. Returns true on DDC_RESULT_SUCESSFULL.
 *
 * NOTE(review): some parameter lines and the argument list of the
 * underlying DDC service call are missing from this extract -- verify
 * against the complete file.
 */
1830 bool dc_read_aux_i2c(
1832 uint32_t link_index,
1833 enum i2c_mot_mode mot,
1838 struct core_dc *core_dc = DC_TO_CORE(dc);
1840 struct core_link *link = core_dc->links[link_index];
1841 enum ddc_result r = dal_ddc_service_read_dpcd_data(
1848 return r == DDC_RESULT_SUCESSFULL;
/*
 * I2C-over-AUX write on the link at @link_index, honoring the MOT
 * (middle-of-transaction) mode. Returns true on DDC_RESULT_SUCESSFULL.
 *
 * NOTE(review): some parameter lines and the argument list of the
 * underlying DDC service call are missing from this extract -- verify
 * against the complete file.
 */
1851 bool dc_write_aux_i2c(
1853 uint32_t link_index,
1854 enum i2c_mot_mode mot,
1856 const uint8_t *data,
1859 struct core_dc *core_dc = DC_TO_CORE(dc);
1860 struct core_link *link = core_dc->links[link_index];
1862 enum ddc_result r = dal_ddc_service_write_dpcd_data(
1869 return r == DDC_RESULT_SUCESSFULL;
/*
 * Combined DDC write-then-read transaction on the link at @link_index.
 *
 * NOTE(review): the dc/address/write_buf/read_buf parameter lines, the
 * argument list of dal_ddc_service_query_ddc_data(), and the return
 * statement are missing from this extract -- verify against the
 * complete file.
 */
1872 bool dc_query_ddc_data(
1874 uint32_t link_index,
1877 uint32_t write_size,
1879 uint32_t read_size) {
1881 struct core_dc *core_dc = DC_TO_CORE(dc);
1883 struct core_link *link = core_dc->links[link_index];
1885 bool result = dal_ddc_service_query_ddc_data(
/*
 * Submit a raw i2c_command through the link's DDC service / i2caux
 * layer.
 *
 * NOTE(review): the opening line of this definition (its name and first
 * parameter -- presumably "bool dc_submit_i2c(struct dc *dc,") and the
 * argument list of dal_i2caux_submit_i2c_command() are missing from
 * this extract -- verify against the complete file.
 */
1898 uint32_t link_index,
1899 struct i2c_command *cmd)
1901 struct core_dc *core_dc = DC_TO_CORE(dc);
1903 struct core_link *link = core_dc->links[link_index];
1904 struct ddc_service *ddc = link->public.ddc;
1906 return dal_i2caux_submit_i2c_command(
/*
 * Append @sink to the link's remote_sinks array, taking a reference.
 * Fails (with a debugger break) when the array is already full.
 *
 * NOTE(review): the "return false;" after the full-array check and the
 * final "return true;" are missing from this extract -- verify against
 * the complete file.
 */
1912 static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1914 struct dc_link *dc_link = &core_link->public;
1916 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1917 BREAK_TO_DEBUGGER();
/* Hold a reference for the array slot. */
1921 dc_sink_retain(sink);
1923 dc_link->remote_sinks[dc_link->sink_count] = sink;
1924 dc_link->sink_count++;
/*
 * dc_link_add_remote_sink() - create a sink from a raw EDID and register
 * it as a remote sink on @link (used e.g. for MST-style topologies).
 *
 * Validates the EDID length and init_data, creates the sink, copies the
 * EDID in, registers it via link_add_remote_sink_helper(), then parses
 * the EDID caps. On any failure the sink is unwound and released.
 *
 * NOTE(review): this extract is missing original lines (the len
 * parameter, several guard conditions and early returns, the argument
 * lists of link_add_remote_sink_helper() and
 * dm_helpers_parse_edid_caps(), goto labels, and the success return) --
 * verify against the complete file.
 */
1929 struct dc_sink *dc_link_add_remote_sink(
1930 const struct dc_link *link,
1931 const uint8_t *edid,
1933 struct dc_sink_init_data *init_data)
1935 struct dc_sink *dc_sink;
1936 enum dc_edid_status edid_status;
1937 struct core_link *core_link = DC_LINK_TO_LINK(link);
/* Reject EDIDs larger than the fixed internal buffer. */
1939 if (len > MAX_EDID_BUFFER_SIZE) {
1940 dm_error("Max EDID buffer size breached!\n");
1945 BREAK_TO_DEBUGGER();
1949 if (!init_data->link) {
1950 BREAK_TO_DEBUGGER();
1954 dc_sink = dc_sink_create(init_data);
/* Copy the raw EDID into the freshly created sink. */
1959 memmove(dc_sink->dc_edid.raw_edid, edid, len);
1960 dc_sink->dc_edid.length = len;
1962 if (!link_add_remote_sink_helper(
1967 edid_status = dm_helpers_parse_edid_caps(
1970 &dc_sink->edid_caps);
1972 if (edid_status != EDID_OK)
/* Failure unwind: deregister and drop the sink reference. */
1977 dc_link_remove_remote_sink(link, dc_sink);
1979 dc_sink_release(dc_sink);
/*
 * Set the link's local sink and update the connection type accordingly
 * (none vs. single).
 *
 * NOTE(review): the conditional lines selecting between the two type
 * assignments (presumably a NULL check on @sink) are missing from this
 * extract -- verify against the complete file.
 */
1983 void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1985 struct core_link *core_link = DC_LINK_TO_LINK(link);
1986 struct dc_link *dc_link = &core_link->public;
1988 dc_link->local_sink = sink;
1991 dc_link->type = dc_connection_none;
1993 dc_link->type = dc_connection_single;
/*
 * Remove @sink from the link's remote_sinks array: release the
 * reference taken at registration, then compact the array so there is
 * no hole and decrement sink_count.
 *
 * NOTE(review): this extract is missing original lines (the declaration
 * of i, an early return after the empty-array debugger break, the i++
 * inside the shift loop, and loop-exit statements) -- verify against
 * the complete file.
 */
1997 void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
2000 struct core_link *core_link = DC_LINK_TO_LINK(link);
2001 struct dc_link *dc_link = &core_link->public;
2003 if (!link->sink_count) {
2004 BREAK_TO_DEBUGGER();
2008 for (i = 0; i < dc_link->sink_count; i++) {
2009 if (dc_link->remote_sinks[i] == sink) {
/* Drop the reference held by the array slot. */
2010 dc_sink_release(sink);
2011 dc_link->remote_sinks[i] = NULL;
2013 /* shrink array to remove empty place */
2014 while (i < dc_link->sink_count - 1) {
2015 dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
2018 dc_link->remote_sinks[i] = NULL;
2019 dc_link->sink_count--;
/*
 * dc_init_dchub() - program the display hub via the hwseq hook.
 *
 * Finds the first non-NULL mem_input in the resource pool (used as an
 * existence check), then calls hwss.update_dchub() if the hook is
 * implemented.
 *
 * NOTE(review): this extract is missing original lines (the declaration
 * of i, the loop break after finding a mem_input, the "return false"
 * after the "no mem_input!" error, the else branch around the ASSERT,
 * and the final return) -- verify against the complete file.
 */
2025 bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
2028 struct core_dc *core_dc = DC_TO_CORE(dc);
2029 struct mem_input *mi = NULL;
/* Probe for any mem_input instance in the pool. */
2031 for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
2032 if (core_dc->res_pool->mis[i] != NULL) {
2033 mi = core_dc->res_pool->mis[i];
2038 dm_error("no mem_input!\n");
2042 if (core_dc->hwss.update_dchub)
2043 core_dc->hwss.update_dchub(core_dc->hwseq, dh_data);
2045 ASSERT(core_dc->hwss.update_dchub);
2052 void dc_log_hw_state(struct dc *dc)
2054 struct core_dc *core_dc = DC_TO_CORE(dc);
2056 if (core_dc->hwss.log_hw_state)
2057 core_dc->hwss.log_hw_state(core_dc);