/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "timing_generator.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
/*******************************************************************************
 * Private functions
 ******************************************************************************/
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}
static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.set_drr(&pipe, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipe);
			dc->hwss.update_info_frame(pipe);

			ret = true;
		}
	}
	return ret;
}
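
/*
 * Illustrative sketch only, not part of the driver: how a DM-level caller
 * might drive the DRR entry point above for a single stream. The function
 * name and the v_total_min/v_total_max parameters are hypothetical names for
 * the line counts a variable-refresh policy would compute.
 */
static bool __maybe_unused example_set_variable_refresh(struct dc *dc,
		struct dc_stream_state *stream,
		int v_total_min, int v_total_max)
{
	struct dc_stream_state *streams[1] = { stream };

	return dc_stream_adjust_vmin_vmax(dc, streams, 1,
			v_total_min, v_total_max);
}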
bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}
/**
 * dc_stream_configure_crc: Configure CRC capture for the given stream.
 * @dc: DC instance.
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
/**
 * dc_stream_get_crc: Get CRC values for the given stream.
 * @dc: DC instance.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
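
/*
 * Illustrative sketch only, not part of the driver: the intended call
 * sequence for the two CRC entry points above, e.g. for a debugfs-style CRC
 * consumer in the DM layer. In practice the caller would wait for at least
 * one frame between enabling capture and reading. The function name is
 * hypothetical.
 */
static bool __maybe_unused example_read_stream_crc(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	/* Enable continuous capture of CRC0 over the full frame. */
	if (!dc_stream_configure_crc(dc, stream, true, true))
		return false;

	/* Read back the most recently latched per-channel CRC values. */
	return dc_stream_get_crc(dc, stream, r_cr, g_y, b_cb);
}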
void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->status.link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}
void dc_stream_set_static_screen_events(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_events *events)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}
static void destruct(struct dc *dc)
{
	dc_release_state(dc->current_state);
	dc->current_state = NULL;

	destroy_links(dc);

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->i2caux)
		dal_i2caux_destroy(&dc->ctx->i2caux);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	if (dc->ctx->logger)
		dal_logger_destroy(&dc->ctx->logger);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;
#endif
}
static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dal_logger *logger;
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}
	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}
	dc->bw_vbios = dc_vbios;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}
	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}
	dc->dcn_ip = dcn_ip;
#endif

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->asic_id = init_params->asic_id;
	dc->ctx = dc_ctx;

	dc->current_state = dc_create_state();

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	/* Create logger */
	logger = dal_logger_create(dc_ctx, init_params->log_mask);

	if (!logger) {
		/* can *not* call logger. call base driver 'print error' */
		dm_error("%s: failed to create Logger!\n", __func__);
		goto fail;
	}
	dc_ctx->logger = logger;
	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:
	destruct(dc);
	return false;
}
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state();
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
/*******************************************************************************
 * Public functions
 ******************************************************************************/
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	/*TODO: separate HW and SW initialization*/
	dc->hwss.init_hw(dc);

	full_pipe_count = dc->res_pool->pipe_count;
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;
	dc->caps.linear_pitch_alignment = 64;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	if (dc->res_pool->dmcu != NULL)
		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

	dc->config = init_params->flags;

	DC_LOG_DC("Display Core initialized\n");

	/* TODO: missing feature to be enabled */
	dc->debug.disable_dfs_bypass = true;

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}
void dc_destroy(struct dc **dc)
{
	destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
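
/*
 * Illustrative sketch only, not part of the driver: the create/destroy
 * pairing as a DM-style caller would use it. dc_destroy() NULLs the caller's
 * pointer, so no further cleanup is needed on the caller's side. The
 * function name is hypothetical.
 */
static void __maybe_unused example_dc_lifetime(const struct dc_init_data *init_params)
{
	struct dc *dc = dc_create(init_params);

	if (!dc)
		return;

	/* ... commit states, service interrupts, etc. ... */

	dc_destroy(&dc); /* dc is NULL from here on */
}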
static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}
static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}
bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}
/*
 * Applies given context to HW and copies it into current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	dc->hwss.set_bandwidth(dc, context, false);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context); /* use new pipe config in new context */
	}

	/* Program hardware */
	dc->hwss.ready_shared_resources(dc, context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context*/
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_sink *sink = context->streams[i]->sink;

		if (!context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_khz);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	/* pplib is notified if disp_num changed */
	dc->hwss.set_bandwidth(dc, context, true);

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	dc->hwss.optimize_shared_resources(dc);

	return result;
}
bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (false == context_changed(dc, context))
		return DC_OK;

	DC_LOG_DC("%s: %d streams\n",
			__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(stream,
				dc->ctx->logger,
				LOG_DC);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	post_surface_trace(dc);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->optimized_required = false;

	/* 3rd param should be true, temp w/a for RV*/
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
#else
	dc->hwss.set_bandwidth(dc, context, true);
#endif
	return true;
}
struct dc_state *dc_create_state(void)
{
	struct dc_state *context = kzalloc(sizeof(struct dc_state),
					   GFP_KERNEL);

	if (!context)
		return NULL;

	kref_init(&context->refcount);
	return context;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
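
/*
 * Illustrative sketch only, not part of the driver: the dc_state refcount
 * contract implemented above. dc_commit_state() takes its own reference to
 * the context it keeps (see dc_commit_state_no_check()), so the creator must
 * still drop the initial reference from dc_create_state(). The function name
 * is hypothetical.
 */
static void __maybe_unused example_state_lifecycle(struct dc *dc)
{
	struct dc_state *context = dc_create_state(); /* refcount == 1 */

	if (!context)
		return;

	dc_resource_state_copy_construct(dc->current_state, context);

	/* ... add/remove streams and planes on the copy ... */

	dc_commit_state(dc, context); /* DC retains what it keeps */
	dc_release_state(context);    /* drop the creator's reference */
}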
static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}
static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		return 12;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
		return 16;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		return 32;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		return 64;
	default:
		ASSERT_CRITICAL(false);
		return 0;
	}
}
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space)
		update_flags->bits.color_space_change = 1;

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
		update_flags->bits.horizontal_mirror_change = 1;

	if (u->plane_info->rotation != u->surface->rotation)
		update_flags->bits.rotation_change = 1;

	if (u->plane_info->format != u->surface->format)
		update_flags->bits.pixel_format_change = 1;

	if (u->plane_info->stereo_format != u->surface->stereo_format)
		update_flags->bits.stereo_format_change = 1;

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
		update_flags->bits.per_pixel_alpha_change = 1;

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
			|| u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
		update_flags->bits.dcc_change = 1;

	if (pixel_format_to_bpp(u->plane_info->format) !=
			pixel_format_to_bpp(u->surface->format))
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;

	if (u->gamma && dce_use_lut(u->plane_info->format))
		update_flags->bits.gamma_change = 1;

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		/* todo: below are HW dependent, we should add a hook to
		 * DCE/N resource and validated there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
	}

	if (update_flags->bits.rotation_change
			|| update_flags->bits.stereo_format_change
			|| update_flags->bits.pixel_format_change
			|| update_flags->bits.gamma_change
			|| update_flags->bits.bpp_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.output_tf_change)
		return UPDATE_TYPE_FULL;

	return UPDATE_TYPE_MED;
}
static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
				|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
			|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {
		update_flags->bits.scaling_change = 1;

		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.scaling_change
			|| update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}
static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	update_flags->raw = 0; // Reset all flags

	if (!is_surface_in_context(context, u->surface)) {
		update_flags->bits.new_plane = 1;
		return UPDATE_TYPE_FULL;
	}

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return UPDATE_TYPE_FULL;

	if (stream_update)
		return UPDATE_TYPE_FULL;

	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		if (type == UPDATE_TYPE_FULL)
			return type;

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL)
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;

	return type;
}
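
/*
 * Illustrative sketch only, not part of the driver: classifying an
 * address-only page flip with the checker above. A bare flip_addr update
 * carries no plane_info/scaling_info and no stream update, so it is expected
 * to classify as UPDATE_TYPE_FAST and take the locked fast path in
 * commit_planes_for_stream(). The function name is hypothetical.
 */
static enum surface_update_type __maybe_unused example_classify_flip(struct dc *dc,
		struct dc_plane_state *plane,
		struct dc_flip_addrs *flip_addr,
		const struct dc_stream_status *stream_status)
{
	struct dc_surface_update update = { 0 };

	update.surface = plane;
	update.flip_addr = flip_addr; /* address change only */

	return dc_check_update_surfaces_for_stream(dc, &update, 1,
			NULL, stream_status);
}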
static struct dc_stream_status *stream_get_status(
	struct dc_state *ctx,
	struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

	if (update_type == UPDATE_TYPE_FULL) {
		dc->hwss.set_bandwidth(dc, context, false);
		context_clock_trace(dc, context);
	}

	if (surface_count == 0) {
		/*
		 * In case of turning off screen, no need to program front end a second time.
		 * just return after program front end.
		 */
		dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
		return;
	}

	/* Full frontend update */
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			top_pipe_to_program = pipe_ctx;

			if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
				continue;

			stream_status =
					stream_get_status(context, pipe_ctx->stream);

			dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);

			if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
				if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
					// if otg funcs defined check if blanked before programming
					if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
								pipe_ctx->stream_res.abm, stream->abm_level);
				} else
					pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
			}

			if (stream_update && stream_update->periodic_fn_vsync_delta &&
					pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
				pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
						pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
						pipe_ctx->stream->periodic_fn_vsync_delta);
		}
	}

	if (update_type == UPDATE_TYPE_FULL)
		context_timing_trace(dc, &context->res_ctx);

	/* Lock the top pipe while updating plane addrs, since freesync requires
	 * plane addr update event triggers to be synchronized.
	 * top_pipe_to_program is expected to never be NULL
	 */
	if (update_type == UPDATE_TYPE_FAST) {
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;

				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}

		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	if (stream && stream_update && update_type > UPDATE_TYPE_FAST)
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->stream != stream)
				continue;

			if (stream_update->hdr_static_metadata) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}
		}
}
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_plane_state **plane_states,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {
		/* initialize scratch memory for building context */
		context = dc_create_state();
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		/* TODO: On flip we don't build the state, so it still has the
		 * old address. Which is why we are updating the address here
		 */
		if (srf_updates[i].flip_addr) {
			surface->address = srf_updates[i].flip_addr->address;
			surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
		}

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);

	/* update current_state */
	if (dc->current_state != context) {
		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);
	}

	/* let's use current_state to update watermark etc. */
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);
}
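
/*
 * Illustrative sketch only, not part of the driver: a minimal page flip as
 * the DM layer might issue it through the entry point above, assuming
 * `plane` already belongs to `stream` in the current context. The function
 * name is hypothetical.
 */
static void __maybe_unused example_commit_flip(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_plane_state *plane,
		struct dc_flip_addrs *flip_addr)
{
	struct dc_surface_update update = { 0 };

	update.surface = plane;
	update.flip_addr = flip_addr;

	dc_commit_updates_for_stream(dc, &update, 1, stream,
			NULL, &plane, dc->current_state);
}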
uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}
enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}
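
/*
 * Illustrative sketch only, not part of the driver: a typical IRQ
 * translate/ack flow using the helpers above, where src_id/ext_id would come
 * from the base driver's interrupt handler. The function name is
 * hypothetical.
 */
static void __maybe_unused example_handle_display_irq(struct dc *dc,
		uint32_t src_id, uint32_t ext_id)
{
	enum dc_irq_source src = dc_interrupt_to_irq_source(dc, src_id, ext_id);

	dc_interrupt_ack(dc, src);
	/* ... dispatch to whatever handler the DM registered for src ... */
}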
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);
		break;
	default:
		dc->hwss.power_down(dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;

		break;
	}
}
void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}
bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dal_i2caux_submit_i2c_command(
		ddc->ctx->i2caux,
		ddc->ddc_pin,
		cmd);
}
static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat device as no EDID device if EDID
	 * parsing fails
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status%d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}
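
/*
 * Illustrative sketch only, not part of the driver: registering a remote
 * (e.g. DP MST) sink from a raw EDID blob using the helper above. The
 * function name is hypothetical.
 */
static struct dc_sink *__maybe_unused example_add_mst_sink(struct dc_link *link,
		const uint8_t *edid, int len)
{
	struct dc_sink_init_data init_data = { 0 };

	init_data.link = link;
	init_data.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST;

	return dc_link_add_remote_sink(link, edid, len, &init_data);
}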
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i + 1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}