2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <linux/slab.h>
28 #include "dm_services.h"
32 #include "core_status.h"
33 #include "core_types.h"
34 #include "hw_sequencer.h"
35 #include "dce/dce_hwseq.h"
40 #include "clock_source.h"
41 #include "dc_bios_types.h"
43 #include "bios_parser_interface.h"
44 #include "include/irq_service_interface.h"
45 #include "transform.h"
48 #include "timing_generator.h"
50 #include "virtual/virtual_link_encoder.h"
52 #include "link_hwss.h"
53 #include "link_encoder.h"
55 #include "dc_link_ddc.h"
56 #include "dm_helpers.h"
57 #include "mem_input.h"
60 #include "dc_link_dp.h"
61 #include "dc_dmub_srv.h"
63 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
67 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
68 #include "vm_helper.h"
71 #include "dce/dce_i2c.h"
76 static const char DC_BUILD_ID[] = "production-build";
81 * DC is the OS-agnostic component of the amdgpu DC driver.
83 * DC maintains and validates a set of structs representing the state of the
84 * driver and writes that state to AMD hardware
88 * struct dc - The central struct. One per driver. Created on driver load,
89 * destroyed on driver unload.
91 * struct dc_context - One per driver.
92 * Used as a backpointer by most other structs in dc.
94 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
95 * plugpoint). Created on driver load, destroyed on driver unload.
97 * struct dc_sink - One per display. Created on boot or hotplug.
98 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
99 * (the display directly attached). It may also have one or more remote
100 * sinks (in the Multi-Stream Transport case)
102 * struct resource_pool - One per driver. Represents the hw blocks not in the
103 * main pipeline. Not directly accessible by dm.
105 * Main dc state structs:
107 * These structs can be created and destroyed as needed. There is a full set of
108 * these structs in dc->current_state representing the currently programmed state.
110 * struct dc_state - The global DC state to track global state information,
111 * such as bandwidth values.
113 * struct dc_stream_state - Represents the hw configuration for the pipeline from
114 * a framebuffer to a display. Maps one-to-one with dc_sink.
116 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
117 * and may have more in the Multi-Plane Overlay case.
119 * struct resource_context - Represents the programmable state of everything in
120 * the resource_pool. Not directly accessible by dm.
122 * struct pipe_ctx - A member of struct resource_context. Represents the
123 * internal hardware pipeline components. Each dc_plane_state has either
124 * one or two (in the pipe-split case).
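 *
 * A rough usage sketch of this API from the DM side (local variable names are
 * illustrative; error handling and intermediate steps omitted, so this is not
 * the complete call sequence):
 *
 *   struct dc *dc = dc_create(&init_data);
 *   struct dc_state *state = dc_create_state(dc);
 *   ...populate state with streams and planes...
 *   dc_commit_state(dc, state);
 *   dc_release_state(state);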
127 /*******************************************************************************
129 ******************************************************************************/
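/* Keep the more severe of the two update types in *original; used below to
 * accumulate an overall surface_update_type across a set of changes.
 */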
131 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
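/* Destroy every dc_link that was created for this DC instance. */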
137 static void destroy_links(struct dc *dc)
141 for (i = 0; i < dc->link_count; i++) {
142 if (NULL != dc->links[i])
143 link_destroy(&dc->links[i]);
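/*
 * Create one dc_link per connector reported by the VBIOS, plus the requested
 * number of virtual links backed by virtual link encoders. eDP links may be
 * skipped when configured as not connected or detected as absent.
 */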
147 static bool create_links(
149 uint32_t num_virtual_links)
153 struct dc_bios *bios = dc->ctx->dc_bios;
157 connectors_num = bios->funcs->get_connectors_number(bios);
159 if (connectors_num > ENUM_ID_COUNT) {
161 "DC: Number of connectors %d exceeds maximum of %d!\n",
167 dm_output_to_console(
168 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
173 for (i = 0; i < connectors_num; i++) {
174 struct link_init_data link_init_params = {0};
175 struct dc_link *link;
177 link_init_params.ctx = dc->ctx;
178 /* next BIOS object table connector */
179 link_init_params.connector_index = i;
180 link_init_params.link_index = dc->link_count;
181 link_init_params.dc = dc;
182 link = link_create(&link_init_params);
185 bool should_destroy_link = false;
187 if (link->connector_signal == SIGNAL_TYPE_EDP) {
188 if (dc->config.edp_not_connected)
189 should_destroy_link = true;
190 else if (dc->debug.remove_disconnect_edp) {
191 enum dc_connection_type type;
192 dc_link_detect_sink(link, &type);
193 if (type == dc_connection_none)
194 should_destroy_link = true;
198 if (dc->config.force_enum_edp || !should_destroy_link) {
199 dc->links[dc->link_count] = link;
208 for (i = 0; i < num_virtual_links; i++) {
209 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
210 struct encoder_init_data enc_init = {0};
217 link->link_index = dc->link_count;
218 dc->links[dc->link_count] = link;
223 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
224 link->link_id.type = OBJECT_TYPE_CONNECTOR;
225 link->link_id.id = CONNECTOR_ID_VIRTUAL;
226 link->link_id.enum_id = ENUM_ID_1;
227 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
229 if (!link->link_enc) {
234 link->link_status.dpcd_caps = &link->dpcd_caps;
236 enc_init.ctx = dc->ctx;
237 enc_init.channel = CHANNEL_ID_UNKNOWN;
238 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
239 enc_init.transmitter = TRANSMITTER_UNKNOWN;
240 enc_init.connector = link->link_id;
241 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
242 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
243 enc_init.encoder.enum_id = ENUM_ID_1;
244 virtual_link_encoder_construct(link->link_enc, &enc_init);
253 static struct dc_perf_trace *dc_perf_trace_create(void)
255 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
258 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
265 *****************************************************************************
266 * Function: dc_stream_adjust_vmin_vmax
269 * Looks up the pipe context of dc_stream_state and updates the
270 * vertical_total_min and vertical_total_max of the DRR (Dynamic Refresh
271 * Rate), a power-saving feature that reduces the panel refresh rate
272 * while the screen content is static.
274 * @param [in] dc: dc reference
275 * @param [in] stream: Initial dc stream state
276 * @param [in] adjust: Updated parameters for vertical_total_min and vertical_total_max
278 *****************************************************************************
280 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
281 struct dc_stream_state *stream,
282 struct dc_crtc_timing_adjust *adjust)
287 for (i = 0; i < MAX_PIPES; i++) {
288 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
290 if (pipe->stream == stream && pipe->stream_res.tg) {
291 pipe->stream->adjust = *adjust;
292 dc->hwss.set_drr(&pipe,
297 adjust->v_total_mid_frame_num);
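/*
 * Read the current and nominal vertical position of the CRTC driving the
 * first stream in the list (see the TODO below about multiple streams).
 */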
305 bool dc_stream_get_crtc_position(struct dc *dc,
306 struct dc_stream_state **streams, int num_streams,
307 unsigned int *v_pos, unsigned int *nom_v_pos)
309 /* TODO: Support multiple streams */
310 const struct dc_stream_state *stream = streams[0];
313 struct crtc_position position;
315 for (i = 0; i < MAX_PIPES; i++) {
316 struct pipe_ctx *pipe =
317 &dc->current_state->res_ctx.pipe_ctx[i];
319 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
320 dc->hwss.get_position(&pipe, 1, &position);
322 *v_pos = position.vertical_count;
323 *nom_v_pos = position.nominal_vcount;
331 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
333 * @stream: The stream to configure CRC on.
334 * @enable: Enable CRC if true, disable otherwise.
335 * @continuous: Capture CRC on every frame if true. Otherwise, only capture once.
338 * By default, only CRC0 is configured, and the entire frame is used to calculate the CRC.
341 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
342 bool enable, bool continuous)
345 struct pipe_ctx *pipe;
346 struct crc_params param;
347 struct timing_generator *tg;
349 for (i = 0; i < MAX_PIPES; i++) {
350 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
351 if (pipe->stream == stream)
354 /* Stream not found */
358 /* Always capture the full frame */
359 param.windowa_x_start = 0;
360 param.windowa_y_start = 0;
361 param.windowa_x_end = pipe->stream->timing.h_addressable;
362 param.windowa_y_end = pipe->stream->timing.v_addressable;
363 param.windowb_x_start = 0;
364 param.windowb_y_start = 0;
365 param.windowb_x_end = pipe->stream->timing.h_addressable;
366 param.windowb_y_end = pipe->stream->timing.v_addressable;
368 /* Default to the union of both windows */
369 param.selection = UNION_WINDOW_A_B;
370 param.continuous_mode = continuous;
371 param.enable = enable;
373 tg = pipe->stream_res.tg;
375 /* Only call if supported */
376 if (tg->funcs->configure_crc)
377 return tg->funcs->configure_crc(tg, &param);
378 DC_LOG_WARNING("CRC capture not supported.");
383 * dc_stream_get_crc() - Get CRC values for the given stream.
385 * @stream: The DC stream state of the stream to get CRCs from.
386 * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
388 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
389 * Return false if stream is not found, or if CRCs are not enabled.
391 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
392 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
395 struct pipe_ctx *pipe;
396 struct timing_generator *tg;
398 for (i = 0; i < MAX_PIPES; i++) {
399 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
400 if (pipe->stream == stream)
403 /* Stream not found */
407 tg = pipe->stream_res.tg;
409 if (tg->funcs->get_crc)
410 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
411 DC_LOG_WARNING("CRC capture not supported.");
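/*
 * Program the OPP output formatter dynamic expansion option on every pipe
 * that is driving the given stream.
 */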
415 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
416 enum dc_dynamic_expansion option)
418 /* OPP FMT dyn expansion updates */
420 struct pipe_ctx *pipe_ctx;
422 for (i = 0; i < MAX_PIPES; i++) {
423 if (dc->current_state->res_ctx.pipe_ctx[i].stream
425 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
426 pipe_ctx->stream_res.opp->dyn_expansion = option;
427 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
428 pipe_ctx->stream_res.opp,
429 COLOR_SPACE_YCBCR601,
430 stream->timing.display_color_depth,
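/*
 * Apply a new dither option to the stream: rebuild the bit depth reduction
 * parameters and reprogram the transform pixel storage depth and the OPP
 * bit depth reduction block on the stream's pipe.
 */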
436 void dc_stream_set_dither_option(struct dc_stream_state *stream,
437 enum dc_dither_option option)
439 struct bit_depth_reduction_params params;
440 struct dc_link *link = stream->link;
441 struct pipe_ctx *pipes = NULL;
444 for (i = 0; i < MAX_PIPES; i++) {
445 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
447 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
454 if (option > DITHER_OPTION_MAX)
457 stream->dither_option = option;
459 memset(&params, 0, sizeof(params));
460 resource_build_bit_depth_reduction_params(stream, &params);
461 stream->bit_depth_params = params;
463 if (pipes->plane_res.xfm &&
464 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
465 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
466 pipes->plane_res.xfm,
467 pipes->plane_res.scl_data.lb_params.depth,
468 &stream->bit_depth_params);
471 pipes->stream_res.opp->funcs->
472 opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
475 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
479 struct pipe_ctx *pipes;
481 for (i = 0; i < MAX_PIPES; i++) {
482 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
483 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
484 dc->hwss.program_gamut_remap(pipes);
492 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
496 struct pipe_ctx *pipes;
498 for (i = 0; i < MAX_PIPES; i++) {
499 if (dc->current_state->res_ctx.pipe_ctx[i].stream
502 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
503 dc->hwss.program_output_csc(dc,
505 stream->output_color_space,
506 stream->csc_color_matrix.matrix,
507 pipes->stream_res.opp->inst);
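/*
 * Collect all pipes driving the given streams and program the static screen
 * event triggers on them in a single call.
 */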
515 void dc_stream_set_static_screen_events(struct dc *dc,
516 struct dc_stream_state **streams,
518 const struct dc_static_screen_events *events)
522 struct pipe_ctx *pipes_affected[MAX_PIPES];
523 int num_pipes_affected = 0;
525 for (i = 0; i < num_streams; i++) {
526 struct dc_stream_state *stream = streams[i];
528 for (j = 0; j < MAX_PIPES; j++) {
529 if (dc->current_state->res_ctx.pipe_ctx[j].stream
531 pipes_affected[num_pipes_affected++] =
532 &dc->current_state->res_ctx.pipe_ctx[j];
537 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
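/*
 * Tear down everything allocated by construct(): the current state, clock
 * manager, resource pool, GPIO service, BIOS parser and perf trace.
 */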
540 static void destruct(struct dc *dc)
542 if (dc->current_state) {
543 dc_release_state(dc->current_state);
544 dc->current_state = NULL;
550 dc_destroy_clk_mgr(dc->clk_mgr);
554 dc_destroy_resource_pool(dc);
556 if (dc->ctx->gpio_service)
557 dal_gpio_service_destroy(&dc->ctx->gpio_service);
559 if (dc->ctx->created_bios)
560 dal_bios_parser_destroy(&dc->ctx->dc_bios);
562 dc_perf_trace_destroy(&dc->ctx->perf_trace);
573 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
581 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
582 kfree(dc->vm_helper);
583 dc->vm_helper = NULL;
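/*
 * Build the dc_context and core services (BIOS parser, GPIO service,
 * resource pool, clock manager), then create the initial dc state and the
 * links, all based on init_params.
 */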
588 static bool construct(struct dc *dc,
589 const struct dc_init_data *init_params)
591 struct dc_context *dc_ctx;
592 struct bw_calcs_dceip *dc_dceip;
593 struct bw_calcs_vbios *dc_vbios;
594 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
595 struct dcn_soc_bounding_box *dcn_soc;
596 struct dcn_ip_params *dcn_ip;
599 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
600 dc->config = init_params->flags;
602 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
603 // Allocate memory for the vm_helper
604 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
605 if (!dc->vm_helper) {
606 dm_error("%s: failed to create dc->vm_helper\n", __func__);
611 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
613 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
615 dm_error("%s: failed to create dceip\n", __func__);
619 dc->bw_dceip = dc_dceip;
621 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
623 dm_error("%s: failed to create vbios\n", __func__);
627 dc->bw_vbios = dc_vbios;
628 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
629 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
631 dm_error("%s: failed to create dcn_soc\n", __func__);
635 dc->dcn_soc = dcn_soc;
637 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
639 dm_error("%s: failed to create dcn_ip\n", __func__);
644 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
645 dc->soc_bounding_box = init_params->soc_bounding_box;
649 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
651 dm_error("%s: failed to create ctx\n", __func__);
655 dc_ctx->cgs_device = init_params->cgs_device;
656 dc_ctx->driver_context = init_params->driver;
658 dc_ctx->asic_id = init_params->asic_id;
659 dc_ctx->dc_sink_id_count = 0;
660 dc_ctx->dc_stream_id_count = 0;
665 dc_ctx->dce_environment = init_params->dce_environment;
667 dc_version = resource_parse_asic_id(init_params->asic_id);
668 dc_ctx->dce_version = dc_version;
670 /* Resource should construct all asic specific resources.
671 * This should be the only place where we need to parse the asic id
673 if (init_params->vbios_override)
674 dc_ctx->dc_bios = init_params->vbios_override;
676 /* Create BIOS parser */
677 struct bp_init_data bp_init_data;
679 bp_init_data.ctx = dc_ctx;
680 bp_init_data.bios = init_params->asic_id.atombios_base_address;
682 dc_ctx->dc_bios = dal_bios_parser_create(
683 &bp_init_data, dc_version);
685 if (!dc_ctx->dc_bios) {
686 ASSERT_CRITICAL(false);
690 dc_ctx->created_bios = true;
693 dc_ctx->perf_trace = dc_perf_trace_create();
694 if (!dc_ctx->perf_trace) {
695 ASSERT_CRITICAL(false);
699 /* Create GPIO service */
700 dc_ctx->gpio_service = dal_gpio_service_create(
702 dc_ctx->dce_environment,
705 if (!dc_ctx->gpio_service) {
706 ASSERT_CRITICAL(false);
710 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
714 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
718 #ifdef CONFIG_DRM_AMD_DC_DCN2_1
719 if (dc->res_pool->funcs->update_bw_bounding_box)
720 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
723 /* Creation of current_state must occur after dc->dml
724 * is initialized in dc_create_resource_pool because
725 * on creation it copies the contents of dc->dml
728 dc->current_state = dc_create_state(dc);
730 if (!dc->current_state) {
731 dm_error("%s: failed to create validate ctx\n", __func__);
735 dc_resource_state_construct(dc, dc->current_state);
737 if (!create_links(dc, init_params->num_virtual_links))
748 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
749 static bool disable_all_writeback_pipes_for_stream(
751 struct dc_stream_state *stream,
752 struct dc_state *context)
756 for (i = 0; i < stream->num_wb_info; i++)
757 stream->writeback_info[i].wb_enabled = false;
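/*
 * Disable planes (and writeback pipes) for streams that exist in the current
 * state but are not part of the new context, by committing a temporary
 * "dangling" copy of the current state with those planes removed.
 */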
763 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
766 struct dc_state *dangling_context = dc_create_state(dc);
767 struct dc_state *current_ctx;
769 if (dangling_context == NULL)
772 dc_resource_state_copy_construct(dc->current_state, dangling_context);
774 for (i = 0; i < dc->res_pool->pipe_count; i++) {
775 struct dc_stream_state *old_stream =
776 dc->current_state->res_ctx.pipe_ctx[i].stream;
777 bool should_disable = true;
779 for (j = 0; j < context->stream_count; j++) {
780 if (old_stream == context->streams[j]) {
781 should_disable = false;
785 if (should_disable && old_stream) {
786 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
787 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
788 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
790 if (dc->hwss.apply_ctx_for_surface)
791 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
793 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
794 if (dc->hwss.program_front_end_for_ctx)
795 dc->hwss.program_front_end_for_ctx(dc, dangling_context);
799 current_ctx = dc->current_state;
800 dc->current_state = dangling_context;
801 dc_release_state(current_ctx);
804 /*******************************************************************************
806 ******************************************************************************/
808 struct dc *dc_create(const struct dc_init_data *init_params)
810 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
811 unsigned int full_pipe_count;
816 if (false == construct(dc, init_params))
819 full_pipe_count = dc->res_pool->pipe_count;
820 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
822 dc->caps.max_streams = min(
824 dc->res_pool->stream_enc_count);
826 dc->caps.max_links = dc->link_count;
827 dc->caps.max_audios = dc->res_pool->audio_count;
828 dc->caps.linear_pitch_alignment = 64;
830 /* Populate versioning information */
831 dc->versions.dc_ver = DC_VER;
833 if (dc->res_pool->dmcu != NULL)
834 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
836 dc->build_id = DC_BUILD_ID;
838 DC_LOG_DC("Display Core initialized\n");
851 void dc_hardware_init(struct dc *dc)
853 dc->hwss.init_hw(dc);
856 void dc_init_callbacks(struct dc *dc,
857 const struct dc_callback_init *init_params)
859 #ifdef CONFIG_DRM_AMD_DC_HDCP
860 dc->ctx->cp_psp = init_params->cp_psp;
864 void dc_deinit_callbacks(struct dc *dc)
866 #ifdef CONFIG_DRM_AMD_DC_HDCP
867 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
871 void dc_destroy(struct dc **dc)
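/*
 * Gather the pipes whose streams have a triggered CRTC reset configured and
 * enable per-frame CRTC position reset on them.
 */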
878 static void enable_timing_multisync(
880 struct dc_state *ctx)
882 int i = 0, multisync_count = 0;
883 int pipe_count = dc->res_pool->pipe_count;
884 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
886 for (i = 0; i < pipe_count; i++) {
887 if (!ctx->res_ctx.pipe_ctx[i].stream ||
888 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
890 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
892 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
896 if (multisync_count > 0) {
897 dc->hwss.enable_per_frame_crtc_position_reset(
898 dc, multisync_count, multisync_pipes);
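/*
 * Group pipes whose stream timings can be synchronized, make a pipe with a
 * plane the master of each group, and enable timing synchronization for
 * every group with more than one member.
 */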
902 static void program_timing_sync(
904 struct dc_state *ctx)
909 int pipe_count = dc->res_pool->pipe_count;
910 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
912 for (i = 0; i < pipe_count; i++) {
913 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
916 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
919 for (i = 0; i < pipe_count; i++) {
921 struct pipe_ctx *pipe_set[MAX_PIPES];
923 if (!unsynced_pipes[i])
926 pipe_set[0] = unsynced_pipes[i];
927 unsynced_pipes[i] = NULL;
929 /* Add tg to the set, search rest of the tg's for ones with
930 * same timing, add all tgs with same timing to the group
932 for (j = i + 1; j < pipe_count; j++) {
933 if (!unsynced_pipes[j])
936 if (resource_are_streams_timing_synchronizable(
937 unsynced_pipes[j]->stream,
938 pipe_set[0]->stream)) {
939 pipe_set[group_size] = unsynced_pipes[j];
940 unsynced_pipes[j] = NULL;
945 /* set first pipe with plane as master */
946 for (j = 0; j < group_size; j++) {
947 if (pipe_set[j]->plane_state) {
951 swap(pipe_set[0], pipe_set[j]);
957 for (k = 0; k < group_size; k++) {
958 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
960 status->timing_sync_info.group_id = num_group;
961 status->timing_sync_info.group_size = group_size;
963 status->timing_sync_info.master = true;
965 status->timing_sync_info.master = false;
968 /* remove any other pipes with plane as they have already been synced */
969 for (j = j + 1; j < group_size; j++) {
970 if (pipe_set[j]->plane_state) {
972 pipe_set[j] = pipe_set[group_size];
977 if (group_size > 1) {
978 dc->hwss.enable_timing_synchronization(
979 dc, group_index, group_size, pipe_set);
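/* Return true if the set of streams differs between the current state and
 * the new context.
 */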
986 static bool context_changed(
988 struct dc_state *context)
992 if (context->stream_count != dc->current_state->stream_count)
995 for (i = 0; i < dc->current_state->stream_count; i++) {
996 if (dc->current_state->streams[i] != context->streams[i])
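/*
 * Check whether the timing currently programmed in hardware (typically by
 * VBIOS/GOP at boot) on the enabled DIG front end and its OTG matches
 * crtc_timing, so the boot display can be taken over without a full modeset.
 */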
1003 bool dc_validate_seamless_boot_timing(const struct dc *dc,
1004 const struct dc_sink *sink,
1005 struct dc_crtc_timing *crtc_timing)
1007 struct timing_generator *tg;
1008 struct stream_encoder *se = NULL;
1010 struct dc_crtc_timing hw_crtc_timing = {0};
1012 struct dc_link *link = sink->link;
1013 unsigned int i, enc_inst, tg_inst = 0;
1015 // Seamless boot only supports single DP and eDP so far
1016 if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
1017 sink->sink_signal != SIGNAL_TYPE_EDP)
1020 /* Check for enabled DIG to identify enabled display */
1021 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1024 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1026 if (enc_inst == ENGINE_ID_UNKNOWN)
1029 for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1030 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1032 se = dc->res_pool->stream_enc[i];
1034 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1035 dc->res_pool->stream_enc[i]);
1040 // tg_inst not found
1041 if (i == dc->res_pool->stream_enc_count)
1044 if (tg_inst >= dc->res_pool->timing_generator_count)
1047 tg = dc->res_pool->timing_generators[tg_inst];
1049 if (!tg->funcs->get_hw_timing)
1052 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1055 if (crtc_timing->h_total != hw_crtc_timing.h_total)
1058 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1061 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1064 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1067 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1070 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1073 if (crtc_timing->v_total != hw_crtc_timing.v_total)
1076 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1079 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1082 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1085 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1088 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1091 if (dc_is_dp_signal(link->connector_signal)) {
1092 unsigned int pix_clk_100hz;
1094 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1095 dc->res_pool->dp_clock_source,
1096 tg_inst, &pix_clk_100hz);
1098 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1101 if (!se->funcs->dp_get_pixel_format)
1104 if (!se->funcs->dp_get_pixel_format(
1106 &hw_crtc_timing.pixel_encoding,
1107 &hw_crtc_timing.display_color_depth))
1110 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1113 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1120 bool dc_enable_stereo(
1122 struct dc_state *context,
1123 struct dc_stream_state *streams[],
1124 uint8_t stream_count)
1128 struct pipe_ctx *pipe;
1130 for (i = 0; i < MAX_PIPES; i++) {
1131 if (context != NULL)
1132 pipe = &context->res_ctx.pipe_ctx[i];
1134 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1135 for (j = 0 ; pipe && j < stream_count; j++) {
1136 if (streams[j] && streams[j] == pipe->stream &&
1137 dc->hwss.setup_stereo)
1138 dc->hwss.setup_stereo(pipe, dc);
1146 * Applies the given context to HW and copies it into the current context.
1147 * It's up to the user to release the src context afterwards.
1149 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1151 struct dc_bios *dcb = dc->ctx->dc_bios;
1152 enum dc_status result = DC_ERROR_UNEXPECTED;
1153 struct pipe_ctx *pipe;
1155 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1157 disable_dangling_plane(dc, context);
1159 for (i = 0; i < context->stream_count; i++)
1160 dc_streams[i] = context->streams[i];
1162 if (!dcb->funcs->is_accelerated_mode(dcb))
1163 dc->hwss.enable_accelerated_mode(dc, context);
1165 for (i = 0; i < context->stream_count; i++) {
1166 if (context->streams[i]->apply_seamless_boot_optimization)
1167 dc->optimize_seamless_boot = true;
1170 if (!dc->optimize_seamless_boot)
1171 dc->hwss.prepare_bandwidth(dc, context);
1173 /* re-program planes for existing stream, in case we need to
1174 * free up plane resource for later use
1176 if (dc->hwss.apply_ctx_for_surface)
1177 for (i = 0; i < context->stream_count; i++) {
1178 if (context->streams[i]->mode_changed)
1181 dc->hwss.apply_ctx_for_surface(
1182 dc, context->streams[i],
1183 context->stream_status[i].plane_count,
1184 context); /* use new pipe config in new context */
1186 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1187 if (dc->hwss.program_front_end_for_ctx)
1188 dc->hwss.program_front_end_for_ctx(dc, context);
1191 /* Program hardware */
1192 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1193 pipe = &context->res_ctx.pipe_ctx[i];
1194 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1197 result = dc->hwss.apply_ctx_to_hw(dc, context);
1199 if (result != DC_OK)
1202 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1203 enable_timing_multisync(dc, context);
1204 program_timing_sync(dc, context);
1207 /* Program all planes within new context*/
1208 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1209 if (dc->hwss.program_front_end_for_ctx)
1210 dc->hwss.program_front_end_for_ctx(dc, context);
1212 for (i = 0; i < context->stream_count; i++) {
1213 const struct dc_link *link = context->streams[i]->link;
1215 if (!context->streams[i]->mode_changed)
1218 if (dc->hwss.apply_ctx_for_surface)
1219 dc->hwss.apply_ctx_for_surface(
1220 dc, context->streams[i],
1221 context->stream_status[i].plane_count,
1226 * TODO rework dc_enable_stereo call to work with validation sets?
1228 for (k = 0; k < MAX_PIPES; k++) {
1229 pipe = &context->res_ctx.pipe_ctx[k];
1231 for (l = 0 ; pipe && l < context->stream_count; l++) {
1232 if (context->streams[l] &&
1233 context->streams[l] == pipe->stream &&
1234 dc->hwss.setup_stereo)
1235 dc->hwss.setup_stereo(pipe, dc);
1239 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1240 context->streams[i]->timing.h_addressable,
1241 context->streams[i]->timing.v_addressable,
1242 context->streams[i]->timing.h_total,
1243 context->streams[i]->timing.v_total,
1244 context->streams[i]->timing.pix_clk_100hz / 10);
1247 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1249 for (i = 0; i < context->stream_count; i++)
1250 context->streams[i]->mode_changed = false;
1252 dc_release_state(dc->current_state);
1254 dc->current_state = context;
1256 dc_retain_state(dc->current_state);
1261 bool dc_commit_state(struct dc *dc, struct dc_state *context)
1263 enum dc_status result = DC_ERROR_UNEXPECTED;
1266 if (false == context_changed(dc, context))
1269 DC_LOG_DC("%s: %d streams\n",
1270 __func__, context->stream_count);
1272 for (i = 0; i < context->stream_count; i++) {
1273 struct dc_stream_state *stream = context->streams[i];
1275 dc_stream_log(dc, stream);
1278 result = dc_commit_state_no_check(dc, context);
1280 return (result == DC_OK);
1283 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1286 struct dc_state *context = dc->current_state;
1288 if (!dc->optimized_required || dc->optimize_seamless_boot)
1291 post_surface_trace(dc);
1293 for (i = 0; i < dc->res_pool->pipe_count; i++)
1294 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1295 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1296 context->res_ctx.pipe_ctx[i].pipe_idx = i;
1297 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1300 dc->optimized_required = false;
1302 dc->hwss.optimize_bandwidth(dc, context);
1306 struct dc_state *dc_create_state(struct dc *dc)
1308 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1313 /* Each context must have its own instance of VBA, and in order to
1314 * initialize and obtain IP and SOC, the base DML instance from DC is
1315 * initially copied into every context.
1317 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
1318 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
1321 kref_init(&context->refcount);
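/*
 * Copy a dc_state, re-pointing the per-pipe top/bottom/ODM links into the new
 * context and taking an extra reference on every stream and plane state it
 * contains.
 */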
1326 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1329 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1333 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
1335 for (i = 0; i < MAX_PIPES; i++) {
1336 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
1338 if (cur_pipe->top_pipe)
1339 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
1341 if (cur_pipe->bottom_pipe)
1342 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
1344 if (cur_pipe->prev_odm_pipe)
1345 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
1347 if (cur_pipe->next_odm_pipe)
1348 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
1352 for (i = 0; i < new_ctx->stream_count; i++) {
1353 dc_stream_retain(new_ctx->streams[i]);
1354 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
1355 dc_plane_state_retain(
1356 new_ctx->stream_status[i].plane_states[j]);
1359 kref_init(&new_ctx->refcount);
1364 void dc_retain_state(struct dc_state *context)
1366 kref_get(&context->refcount);
1369 static void dc_state_free(struct kref *kref)
1371 struct dc_state *context = container_of(kref, struct dc_state, refcount);
1372 dc_resource_state_destruct(context);
1376 void dc_release_state(struct dc_state *context)
1378 kref_put(&context->refcount, dc_state_free);
1381 bool dc_set_generic_gpio_for_stereo(bool enable,
1382 struct gpio_service *gpio_service)
1384 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
1385 struct gpio_pin_info pin_info;
1386 struct gpio *generic;
1387 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
1392 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
1394 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
1398 generic = dal_gpio_service_create_generic_mux(
1409 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
1411 config->enable_output_from_mux = enable;
1412 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
1414 if (gpio_result == GPIO_RESULT_OK)
1415 gpio_result = dal_mux_setup_config(generic, config);
1417 if (gpio_result == GPIO_RESULT_OK) {
1418 dal_gpio_close(generic);
1419 dal_gpio_destroy_generic_mux(&generic);
1423 dal_gpio_close(generic);
1424 dal_gpio_destroy_generic_mux(&generic);
1430 static bool is_surface_in_context(
1431 const struct dc_state *context,
1432 const struct dc_plane_state *plane_state)
1436 for (j = 0; j < MAX_PIPES; j++) {
1437 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1439 if (plane_state == pipe_ctx->plane_state) {
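/*
 * Derive the minimum update type required by the plane_info portion of a
 * surface update, setting the matching bits in the surface's update_flags.
 */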
1447 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
1449 union surface_update_flags *update_flags = &u->surface->update_flags;
1450 enum surface_update_type update_type = UPDATE_TYPE_FAST;
1453 return UPDATE_TYPE_FAST;
1455 if (u->plane_info->color_space != u->surface->color_space) {
1456 update_flags->bits.color_space_change = 1;
1457 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1460 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
1461 update_flags->bits.horizontal_mirror_change = 1;
1462 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1465 if (u->plane_info->rotation != u->surface->rotation) {
1466 update_flags->bits.rotation_change = 1;
1467 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1470 if (u->plane_info->format != u->surface->format) {
1471 update_flags->bits.pixel_format_change = 1;
1472 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1475 if (u->plane_info->stereo_format != u->surface->stereo_format) {
1476 update_flags->bits.stereo_format_change = 1;
1477 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1480 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
1481 update_flags->bits.per_pixel_alpha_change = 1;
1482 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1485 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
1486 update_flags->bits.global_alpha_change = 1;
1487 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1490 if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
1491 update_flags->bits.sdr_white_level = 1;
1492 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1495 if (u->plane_info->dcc.enable != u->surface->dcc.enable
1496 || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
1497 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
1498 update_flags->bits.dcc_change = 1;
1499 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1502 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
1503 resource_pixel_format_to_bpp(u->surface->format)) {
1504 /* different bytes per element will require full bandwidth
1505 * and DML calculation
1507 update_flags->bits.bpp_change = 1;
1508 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1511 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
1513 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
1514 update_flags->bits.plane_size_change = 1;
1515 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1519 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
1520 sizeof(union dc_tiling_info)) != 0) {
1521 update_flags->bits.swizzle_change = 1;
1522 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1524 /* todo: the checks below are HW dependent, we should add a hook to
1525 * DCE/N resource and validate them there.
1527 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
1528 /* swizzled mode requires RQ to be setup properly,
1529 * thus need to run DML to calculate RQ settings
1531 update_flags->bits.bandwidth_change = 1;
1532 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1536 /* This should be UPDATE_TYPE_FAST if nothing has changed. */
1540 static enum surface_update_type get_scaling_info_update_type(
1541 const struct dc_surface_update *u)
1543 union surface_update_flags *update_flags = &u->surface->update_flags;
1545 if (!u->scaling_info)
1546 return UPDATE_TYPE_FAST;
1548 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
1549 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
1550 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
1551 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
1552 update_flags->bits.scaling_change = 1;
1554 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
1555 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
1556 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
1557 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
1558 /* Making dst rect smaller requires a bandwidth change */
1559 update_flags->bits.bandwidth_change = 1;
1562 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
1563 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
1565 update_flags->bits.scaling_change = 1;
1566 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
1567 && u->scaling_info->src_rect.height > u->surface->src_rect.height)
1568 /* Making src rect bigger requires a bandwidth change */
1569 update_flags->bits.clock_change = 1;
1572 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
1573 || u->scaling_info->src_rect.y != u->surface->src_rect.y
1574 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
1575 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
1576 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
1577 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
1578 update_flags->bits.position_change = 1;
1580 if (update_flags->bits.clock_change
1581 || update_flags->bits.bandwidth_change)
1582 return UPDATE_TYPE_FULL;
1584 if (update_flags->bits.scaling_change
1585 || update_flags->bits.position_change)
1586 return UPDATE_TYPE_MED;
1588 return UPDATE_TYPE_FAST;
1591 static enum surface_update_type det_surface_update(const struct dc *dc,
1592 const struct dc_surface_update *u)
1594 const struct dc_state *context = dc->current_state;
1595 enum surface_update_type type;
1596 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1597 union surface_update_flags *update_flags = &u->surface->update_flags;
1600 update_flags->bits.addr_update = 1;
1602 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
1603 update_flags->raw = 0xFFFFFFFF;
1604 return UPDATE_TYPE_FULL;
1607 update_flags->raw = 0; // Reset all flags
1609 type = get_plane_info_update_type(u);
1610 elevate_update_type(&overall_type, type);
1612 type = get_scaling_info_update_type(u);
1613 elevate_update_type(&overall_type, type);
1616 update_flags->bits.addr_update = 1;
1618 if (u->in_transfer_func)
1619 update_flags->bits.in_transfer_func_change = 1;
1621 if (u->input_csc_color_matrix)
1622 update_flags->bits.input_csc_change = 1;
1624 if (u->coeff_reduction_factor)
1625 update_flags->bits.coeff_reduction_change = 1;
1628 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
1631 format = u->plane_info->format;
1632 else if (u->surface)
1633 format = u->surface->format;
1635 if (dce_use_lut(format))
1636 update_flags->bits.gamma_change = 1;
1639 if (update_flags->bits.in_transfer_func_change) {
1640 type = UPDATE_TYPE_MED;
1641 elevate_update_type(&overall_type, type);
1644 if (update_flags->bits.input_csc_change
1645 || update_flags->bits.coeff_reduction_change
1646 || update_flags->bits.gamma_change) {
1647 type = UPDATE_TYPE_FULL;
1648 elevate_update_type(&overall_type, type);
1651 return overall_type;
1654 static enum surface_update_type check_update_surfaces_for_stream(
1656 struct dc_surface_update *updates,
1658 struct dc_stream_update *stream_update,
1659 const struct dc_stream_status *stream_status)
1662 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1664 if (stream_status == NULL || stream_status->plane_count != surface_count)
1665 overall_type = UPDATE_TYPE_FULL;
1667 /* some stream updates require passive update */
1668 if (stream_update) {
1669 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
1671 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
1672 (stream_update->dst.height != 0 && stream_update->dst.width != 0))
1673 su_flags->bits.scaling = 1;
1675 if (stream_update->out_transfer_func)
1676 su_flags->bits.out_tf = 1;
1678 if (stream_update->abm_level)
1679 su_flags->bits.abm_level = 1;
1681 if (stream_update->dpms_off)
1682 su_flags->bits.dpms_off = 1;
1684 if (stream_update->gamut_remap)
1685 su_flags->bits.gamut_remap = 1;
1687 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1688 if (stream_update->wb_update)
1689 su_flags->bits.wb_update = 1;
1691 if (su_flags->raw != 0)
1692 overall_type = UPDATE_TYPE_FULL;
1694 if (stream_update->output_csc_transform || stream_update->output_color_space)
1695 su_flags->bits.out_csc = 1;
1698 for (i = 0 ; i < surface_count; i++) {
1699 enum surface_update_type type =
1700 det_surface_update(dc, &updates[i]);
1702 elevate_update_type(&overall_type, type);
1705 return overall_type;
1709 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
1711 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
1713 enum surface_update_type dc_check_update_surfaces_for_stream(
1715 struct dc_surface_update *updates,
1717 struct dc_stream_update *stream_update,
1718 const struct dc_stream_status *stream_status)
1721 enum surface_update_type type;
1724 stream_update->stream->update_flags.raw = 0;
1725 for (i = 0; i < surface_count; i++)
1726 updates[i].surface->update_flags.raw = 0;
1728 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
1729 if (type == UPDATE_TYPE_FULL) {
1731 stream_update->stream->update_flags.raw = 0xFFFFFFFF;
1732 for (i = 0; i < surface_count; i++)
1733 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
1736 if (type == UPDATE_TYPE_FAST) {
1737 // If there's an available clock comparator, we use that.
1738 if (dc->clk_mgr->funcs->are_clock_states_equal) {
1739 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
1740 dc->optimized_required = true;
1741 // Else we fall back to mem compare.
1742 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
1743 dc->optimized_required = true;
1750 static struct dc_stream_status *stream_get_status(
1751 struct dc_state *ctx,
1752 struct dc_stream_state *stream)
1756 for (i = 0; i < ctx->stream_count; i++) {
1757 if (stream == ctx->streams[i]) {
1758 return &ctx->stream_status[i];
1765 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
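/*
 * Copy every field that is populated in a dc_surface_update onto the plane
 * state, so the plane reflects the requested update before it is programmed.
 */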
1767 static void copy_surface_update_to_plane(
1768 struct dc_plane_state *surface,
1769 struct dc_surface_update *srf_update)
1771 if (srf_update->flip_addr) {
1772 surface->address = srf_update->flip_addr->address;
1773 surface->flip_immediate =
1774 srf_update->flip_addr->flip_immediate;
1775 surface->time.time_elapsed_in_us[surface->time.index] =
1776 srf_update->flip_addr->flip_timestamp_in_us -
1777 surface->time.prev_update_time_in_us;
1778 surface->time.prev_update_time_in_us =
1779 srf_update->flip_addr->flip_timestamp_in_us;
1780 surface->time.index++;
1781 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
1782 surface->time.index = 0;
1785 if (srf_update->scaling_info) {
1786 surface->scaling_quality =
1787 srf_update->scaling_info->scaling_quality;
1789 srf_update->scaling_info->dst_rect;
1791 srf_update->scaling_info->src_rect;
1792 surface->clip_rect =
1793 srf_update->scaling_info->clip_rect;
1796 if (srf_update->plane_info) {
1797 surface->color_space =
1798 srf_update->plane_info->color_space;
1800 srf_update->plane_info->format;
1801 surface->plane_size =
1802 srf_update->plane_info->plane_size;
1804 srf_update->plane_info->rotation;
1805 surface->horizontal_mirror =
1806 srf_update->plane_info->horizontal_mirror;
1807 surface->stereo_format =
1808 srf_update->plane_info->stereo_format;
1809 surface->tiling_info =
1810 srf_update->plane_info->tiling_info;
1812 srf_update->plane_info->visible;
1813 surface->per_pixel_alpha =
1814 srf_update->plane_info->per_pixel_alpha;
1815 surface->global_alpha =
1816 srf_update->plane_info->global_alpha;
1817 surface->global_alpha_value =
1818 srf_update->plane_info->global_alpha_value;
1820 srf_update->plane_info->dcc;
1821 surface->sdr_white_level =
1822 srf_update->plane_info->sdr_white_level;
1823 surface->layer_index =
1824 srf_update->plane_info->layer_index;
1827 if (srf_update->gamma &&
1828 (surface->gamma_correction !=
1829 srf_update->gamma)) {
1830 memcpy(&surface->gamma_correction->entries,
1831 &srf_update->gamma->entries,
1832 sizeof(struct dc_gamma_entries));
1833 surface->gamma_correction->is_identity =
1834 srf_update->gamma->is_identity;
1835 surface->gamma_correction->num_entries =
1836 srf_update->gamma->num_entries;
1837 surface->gamma_correction->type =
1838 srf_update->gamma->type;
1841 if (srf_update->in_transfer_func &&
1842 (surface->in_transfer_func !=
1843 srf_update->in_transfer_func)) {
1844 surface->in_transfer_func->sdr_ref_white_level =
1845 srf_update->in_transfer_func->sdr_ref_white_level;
1846 surface->in_transfer_func->tf =
1847 srf_update->in_transfer_func->tf;
1848 surface->in_transfer_func->type =
1849 srf_update->in_transfer_func->type;
1850 memcpy(&surface->in_transfer_func->tf_pts,
1851 &srf_update->in_transfer_func->tf_pts,
1852 sizeof(struct dc_transfer_func_distributed_points));
1855 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1856 if (srf_update->func_shaper &&
1857 (surface->in_shaper_func !=
1858 srf_update->func_shaper))
1859 memcpy(surface->in_shaper_func, srf_update->func_shaper,
1860 sizeof(*surface->in_shaper_func));
1862 if (srf_update->lut3d_func &&
1863 (surface->lut3d_func !=
1864 srf_update->lut3d_func))
1865 memcpy(surface->lut3d_func, srf_update->lut3d_func,
1866 sizeof(*surface->lut3d_func));
1868 if (srf_update->blend_tf &&
1869 (surface->blend_tf !=
1870 srf_update->blend_tf))
1871 memcpy(surface->blend_tf, srf_update->blend_tf,
1872 sizeof(*surface->blend_tf));
1875 if (srf_update->input_csc_color_matrix)
1876 surface->input_csc_color_matrix =
1877 *srf_update->input_csc_color_matrix;
1879 if (srf_update->coeff_reduction_factor)
1880 surface->coeff_reduction_factor =
1881 *srf_update->coeff_reduction_factor;
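/*
 * Copy every field that is populated in a dc_stream_update onto the stream.
 * A DSC config change is validated against the given context and reverted if
 * bandwidth validation fails.
 */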
1884 static void copy_stream_update_to_stream(struct dc *dc,
1885 struct dc_state *context,
1886 struct dc_stream_state *stream,
1887 const struct dc_stream_update *update)
1889 if (update == NULL || stream == NULL)
1892 if (update->src.height && update->src.width)
1893 stream->src = update->src;
1895 if (update->dst.height && update->dst.width)
1896 stream->dst = update->dst;
1898 if (update->out_transfer_func &&
1899 stream->out_transfer_func != update->out_transfer_func) {
1900 stream->out_transfer_func->sdr_ref_white_level =
1901 update->out_transfer_func->sdr_ref_white_level;
1902 stream->out_transfer_func->tf = update->out_transfer_func->tf;
1903 stream->out_transfer_func->type =
1904 update->out_transfer_func->type;
1905 memcpy(&stream->out_transfer_func->tf_pts,
1906 &update->out_transfer_func->tf_pts,
1907 sizeof(struct dc_transfer_func_distributed_points));
1910 if (update->hdr_static_metadata)
1911 stream->hdr_static_metadata = *update->hdr_static_metadata;
1913 if (update->abm_level)
1914 stream->abm_level = *update->abm_level;
1916 if (update->periodic_interrupt0)
1917 stream->periodic_interrupt0 = *update->periodic_interrupt0;
1919 if (update->periodic_interrupt1)
1920 stream->periodic_interrupt1 = *update->periodic_interrupt1;
1922 if (update->gamut_remap)
1923 stream->gamut_remap_matrix = *update->gamut_remap;
1925 /* Note: this being updated after mode set is currently not a use case
1926 * however if it arises OCSC would need to be reprogrammed at the
1929 if (update->output_color_space)
1930 stream->output_color_space = *update->output_color_space;
1932 if (update->output_csc_transform)
1933 stream->csc_color_matrix = *update->output_csc_transform;
1935 if (update->vrr_infopacket)
1936 stream->vrr_infopacket = *update->vrr_infopacket;
1938 if (update->dpms_off)
1939 stream->dpms_off = *update->dpms_off;
1941 if (update->vsc_infopacket)
1942 stream->vsc_infopacket = *update->vsc_infopacket;
1944 if (update->vsp_infopacket)
1945 stream->vsp_infopacket = *update->vsp_infopacket;
1947 if (update->dither_option)
1948 stream->dither_option = *update->dither_option;
1949 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1950 /* update current stream with writeback info */
1951 if (update->wb_update) {
1954 stream->num_wb_info = update->wb_update->num_wb_info;
1955 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
1956 for (i = 0; i < stream->num_wb_info; i++)
1957 stream->writeback_info[i] =
1958 update->wb_update->writeback_info[i];
1961 #if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
1962 if (update->dsc_config) {
1963 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
1964 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
1965 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
1966 update->dsc_config->num_slices_v != 0);
1968 stream->timing.dsc_cfg = *update->dsc_config;
1969 stream->timing.flags.DSC = enable_dsc;
1970 if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
1972 stream->timing.dsc_cfg = old_dsc_cfg;
1973 stream->timing.flags.DSC = old_dsc_enabled;
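/*
 * Perform the stream-level portion of a commit on the top pipe of the given
 * stream: periodic interrupts, info frames, gamut remap, output CSC, dither,
 * DSC config and, for full updates, dpms and ABM changes.
 */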
1979 static void commit_planes_do_stream_update(struct dc *dc,
1980 struct dc_stream_state *stream,
1981 struct dc_stream_update *stream_update,
1982 enum surface_update_type update_type,
1983 struct dc_state *context)
1986 bool should_program_abm;
1989 for (j = 0; j < dc->res_pool->pipe_count; j++) {
1990 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1992 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
1994 if (stream_update->periodic_interrupt0 &&
1995 dc->hwss.setup_periodic_interrupt)
1996 dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
1998 if (stream_update->periodic_interrupt1 &&
1999 dc->hwss.setup_periodic_interrupt)
2000 dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);
2002 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2003 stream_update->vrr_infopacket ||
2004 stream_update->vsc_infopacket ||
2005 stream_update->vsp_infopacket) {
2006 resource_build_info_frame(pipe_ctx);
2007 dc->hwss.update_info_frame(pipe_ctx);
2010 if (stream_update->gamut_remap)
2011 dc_stream_set_gamut_remap(dc, stream);
2013 if (stream_update->output_csc_transform)
2014 dc_stream_program_csc_matrix(dc, stream);
2016 if (stream_update->dither_option) {
2017 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2018 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
2020 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
2021 &pipe_ctx->stream->bit_depth_params);
2022 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
2023 &stream->bit_depth_params,
2025 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2027 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
2028 &stream->bit_depth_params,
2030 odm_pipe = odm_pipe->next_odm_pipe;
2035 #if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
2036 if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
2037 dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
2038 dp_update_dsc_config(pipe_ctx);
2039 dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
2043 if (update_type == UPDATE_TYPE_FAST)
2046 if (stream_update->dpms_off) {
2047 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
2049 if (*stream_update->dpms_off) {
2050 core_link_disable_stream(pipe_ctx);
2051 /* for dpms, keep acquired resources*/
2052 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
2053 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
2055 dc->hwss.optimize_bandwidth(dc, dc->current_state);
2057 if (!dc->optimize_seamless_boot)
2058 dc->hwss.prepare_bandwidth(dc, dc->current_state);
2060 core_link_enable_stream(dc->current_state, pipe_ctx);
2063 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
2066 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
2067 should_program_abm = true;
2069 // if otg funcs defined check if blanked before programming
2070 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
2071 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
2072 should_program_abm = false;
2074 if (should_program_abm) {
2075 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
2076 pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
2078 pipe_ctx->stream_res.abm->funcs->set_abm_level(
2079 pipe_ctx->stream_res.abm, stream->abm_level);
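/*
 * Program all planes for one stream: do the stream update first, then either
 * fully re-apply the context or, for fast updates, flip the plane addresses
 * under pipe lock.
 */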
2087 static void commit_planes_for_stream(struct dc *dc,
2088 struct dc_surface_update *srf_updates,
2090 struct dc_stream_state *stream,
2091 struct dc_stream_update *stream_update,
2092 enum surface_update_type update_type,
2093 struct dc_state *context)
2096 struct pipe_ctx *top_pipe_to_program = NULL;
2098 if (dc->optimize_seamless_boot && surface_count > 0) {
2099 * The optimize_seamless_boot flag keeps clocks and watermarks high until
2100 * the first flip. After the first flip, optimization is required to lower
2101 * bandwidth. Note that UEFI is expected to light up only a single display
2102 * on POST, therefore we only expect one stream with the seamless boot
2103 * flag set.
2105 if (stream->apply_seamless_boot_optimization) {
2106 stream->apply_seamless_boot_optimization = false;
2107 dc->optimize_seamless_boot = false;
2108 dc->optimized_required = true;
2112 if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
2113 dc->hwss.prepare_bandwidth(dc, context);
2114 context_clock_trace(dc, context);
2119 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2121 if (surface_count == 0) {
2123 * In case of turning off the screen, there is no need to program the front end a second time;
2124 * just return after programming blank.
2126 if (dc->hwss.apply_ctx_for_surface)
2127 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2128 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2129 if (dc->hwss.program_front_end_for_ctx)
2130 dc->hwss.program_front_end_for_ctx(dc, context);
2136 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2137 if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2138 for (i = 0; i < surface_count; i++) {
2139 struct dc_plane_state *plane_state = srf_updates[i].surface;
2140 /*set logical flag for lock/unlock use*/
2141 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2142 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2143 if (!pipe_ctx->plane_state)
2145 if (pipe_ctx->plane_state != plane_state)
2147 plane_state->triplebuffer_flips = false;
2148 if (update_type == UPDATE_TYPE_FAST &&
2149 dc->hwss.program_triplebuffer != NULL &&
2150 !plane_state->flip_immediate &&
2151 !dc->debug.disable_tri_buf) {
2152 /*triple buffer for VUpdate only*/
2153 plane_state->triplebuffer_flips = true;
2160 // Update Type FULL, Surface updates
2161 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2162 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2164 if (!pipe_ctx->top_pipe &&
2165 !pipe_ctx->prev_odm_pipe &&
2167 pipe_ctx->stream == stream) {
2168 struct dc_stream_status *stream_status = NULL;
2170 top_pipe_to_program = pipe_ctx;
2172 if (!pipe_ctx->plane_state)
2176 if (update_type == UPDATE_TYPE_FAST)
2179 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2180 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2182 if (dc->hwss.program_triplebuffer != NULL &&
2183 !dc->debug.disable_tri_buf) {
2184 /*turn off triple buffer for full update*/
2185 dc->hwss.program_triplebuffer(
2186 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2190 stream_get_status(context, pipe_ctx->stream);
2192 if (dc->hwss.apply_ctx_for_surface)
2193 dc->hwss.apply_ctx_for_surface(
2194 dc, pipe_ctx->stream, stream_status->plane_count, context);

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST)
		dc->hwss.program_front_end_for_ctx(dc, context);
#endif

	// Update Type FAST, Surface updates
	if (update_type == UPDATE_TYPE_FAST) {
		/* Lock the top pipe while updating plane addrs, since freesync requires
		 * plane addr update event triggers to be synchronized.
		 * top_pipe_to_program is expected to never be NULL
		 */
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		if (dc->hwss.set_flip_control_gsl)
			for (i = 0; i < surface_count; i++) {
				struct dc_plane_state *plane_state = srf_updates[i].surface;

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

					if (pipe_ctx->stream != stream)
						continue;

					if (pipe_ctx->plane_state != plane_state)
						continue;

					// GSL has to be used for flip immediate
					dc->hwss.set_flip_control_gsl(pipe_ctx,
							plane_state->flip_immediate);
				}
			}
#endif
		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
				/* program triple buffer after lock based on flip type */
				if (dc->hwss.program_triplebuffer != NULL &&
					!dc->debug.disable_tri_buf) {
					/* only enable triplebuffer for fast_update */
					dc->hwss.program_triplebuffer(
						dc, pipe_ctx, plane_state->triplebuffer_flips);
				}
#endif
				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}

		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	// Fire manual trigger only when bottom plane is flipped
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (pipe_ctx->bottom_pipe ||
				!pipe_ctx->stream ||
				pipe_ctx->stream != stream ||
				!pipe_ctx->plane_state->update_flags.bits.addr_update)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}
}
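
/*
 * dc_commit_updates_for_stream() - Apply a set of surface and stream updates
 * to one stream.
 *
 * Fast updates are programmed against dc->current_state. Full updates build a
 * fresh validate context from @state, program it, then promote it to
 * dc->current_state before requesting post-update bandwidth optimization.
 */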
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);

	/* update current_state */
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

	/* let's use current_state to update watermark etc */
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);
}
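
/* Simple accessors for the streams tracked in dc->current_state. */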
uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}
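
/*
 * dc_interrupt_to_irq_source() - Translate a hw interrupt src_id/ext_id pair
 * into the dc_irq_source understood by the irq service.
 */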
enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

/**
 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
 */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}
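
/*
 * dc_set_power_state() - Handle ACPI power state transitions for DC.
 *
 * Entering D0 reconstructs the resource state and re-initializes hw (waiting
 * for DMUB PHY init when a DMUB service is present). Any other state clears
 * dc->current_state so resume starts from a clean state, preserving only the
 * refcount and the display_mode_lib copy.
 */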
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
						GFP_KERNEL);

	ASSERT(dml);
	if (!dml)
		return;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		if (dc->ctx->dmub_srv)
			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);

		dc->hwss.init_hw(dc);

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
		if (dc->hwss.init_sys_ctx != NULL &&
			dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}
#endif

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		break;
	}

	kfree(dml);
}
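
/* dc_resume() - Resume every link on return from a low power state. */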
void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}
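
/*
 * Backlight queries: report the current and target PWM levels from the ABM
 * block, or 0 when the resource pool has no ABM.
 */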
unsigned int dc_get_current_backlight_pwm(struct dc *dc)
{
	struct abm *abm = dc->res_pool->abm;

	if (abm)
		return abm->funcs->get_current_backlight(abm);

	return 0;
}

unsigned int dc_get_target_backlight_pwm(struct dc *dc)
{
	struct abm *abm = dc->res_pool->abm;

	if (abm)
		return abm->funcs->get_target_backlight(abm);

	return 0;
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);

	return false;
}
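
/*
 * dc_submit_i2c() - Submit an i2c command on the DDC line of the link at
 * @link_index through the DCE i2c engine.
 */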
bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dce_i2c_submit_command(
		ddc->ctx->dc->res_pool,
		ddc->ddc_pin,
		cmd);
}
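
/*
 * Attach an already-created sink to a link's remote sink list, taking a
 * reference on the sink. Fails once MAX_SINKS_PER_LINK is reached.
 */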
static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}
/**
 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
 *
 * EDID length is in bytes
 */
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat device as no EDID device if EDID
	 * parsing fails
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status%d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}
/**
 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
 *
 * Note that this just removes the struct dc_sink - it doesn't
 * program hardware or alter other members of dc_link
 */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}
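
/*
 * get_clock_requirements_for_state() - Copy the DCN clock requirements (kHz)
 * computed for @state into @info for the caller.
 */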
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
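
/*
 * Clock override helpers: forward clock set/get requests to the hw sequencer
 * when the current ASIC implements them.
 */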
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}