/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"

#include "dc.h"
#include "dm_helpers.h"

#include "dc_link_ddc.h"

#include "i2caux_interface.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif

/* #define TRACE_DPCD */
#ifdef TRACE_DPCD
#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)

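/*
 * Note on the ranges tested above (mailbox offsets per the DP MST DPCD
 * layout in drm_dp_helper.h): DOWN_REQ sits at 0x1000, UP_REP at 0x1200,
 * DOWN_REP at 0x1400 and UP_REQ at 0x1600, each mailbox spanning 0x200
 * bytes, so comparing against the next base is enough to classify an
 * address.
 */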
static inline char *side_band_msg_type_to_str(uint32_t address)
{
	static char str[10] = {0};

	if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
		strcpy(str, "DOWN_REQ");
	else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
		strcpy(str, "UP_REP");
	else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
		strcpy(str, "DOWN_REP");
	else
		strcpy(str, "UP_REQ");

	return str;
}

static void log_dpcd(uint8_t type,
		     uint32_t address,
		     uint8_t *data,
		     uint32_t size,
		     bool res)
{
	DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
			(type == DP_AUX_NATIVE_READ) ||
			(type == DP_AUX_I2C_READ) ?
					"Read" : "Write",
			address,
			SIDE_BAND_MSG(address) ?
					side_band_msg_type_to_str(address) : "Nop",
			res ? "OK" : "Fail");

	if (res) {
		print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
	}
}
#endif

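/*
 * dm_dp_aux_transfer() bridges the DRM AUX layer to DC: the DRM request bits
 * are folded into DC's aux_payload. A clear DP_AUX_NATIVE_WRITE bit marks
 * the transaction as I2C-over-AUX, a clear DP_AUX_I2C_READ bit marks it as a
 * write, and DP_AUX_I2C_MOT carries the middle-of-transaction flag through.
 */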
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_channel_operation_result operation_result;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.defer_delay = 0;

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
				      &operation_result);

	if (payload.write)
		result = msg->size;

	if (result < 0)
		switch (operation_result) {
		case AUX_CHANNEL_OPERATION_SUCCEEDED:
			break;
		case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
		case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
			result = -EIO;
			break;
		case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
		case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

	return result;
}

static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
	struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;

	kfree(amdgpu_dm_connector->edid);
	amdgpu_dm_connector->edid = NULL;

	drm_encoder_cleanup(&amdgpu_encoder->base);
	kfree(amdgpu_encoder);
	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
	kfree(amdgpu_dm_connector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
	amdgpu_dm_connector->debugfs_dpcd_address = 0;
	amdgpu_dm_connector->debugfs_dpcd_size = 0;
#endif

	return drm_dp_mst_connector_late_register(connector, port);
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

	drm_dp_mst_connector_early_unregister(connector, port);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->port;
	u8 dsc_caps[16] = { 0 };

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, NULL,
				   &dc_sink->sink_dsc_caps.dsc_dec_caps))
		return false;

	return true;
}
#endif

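/*
 * For an MST connector the EDID is fetched over the sideband channel and a
 * "remote" dc_sink is created for DC to drive. If the DSC caps cannot be
 * validated, the sink's DSC decoder caps are simply cleared so the stream
 * falls back to the uncompressed path.
 */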
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->edid) {
		struct edid *edid;

		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
		if (!edid) {
			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);
			return ret;
		}

		aconnector->edid = edid;
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
				.link = aconnector->dc_link,
				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
					connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->sink_dsc_caps,
				       0, sizeof(aconnector->dc_sink->sink_dsc_caps));
#endif
		}
	}

	drm_connector_update_edid_property(
					&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);

	return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
			   struct drm_connector_state *connector_state)
{
	return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_dm_connector *master = aconnector->mst_port;

	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
				      aconnector->port);
}

static int dm_dp_mst_atomic_check(struct drm_connector *connector,
				  struct drm_atomic_state *state)
{
	struct drm_connector_state *new_conn_state =
			drm_atomic_get_new_connector_state(state, connector);
	struct drm_connector_state *old_conn_state =
			drm_atomic_get_old_connector_state(state, connector);
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct drm_crtc_state *new_crtc_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!old_conn_state->crtc)
		return 0;

	if (new_conn_state->crtc) {
		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state ||
		    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->enable)
			return 0;
	}

	/* The connector is being switched off: give its VCPI slots back. */
	return drm_dp_atomic_release_vcpi_slots(state,
						mst_mgr,
						mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static struct amdgpu_encoder *
dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_encoder *encoder;

	amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return NULL;

	encoder = &amdgpu_encoder->base;
	encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	drm_encoder_init(
		dev,
		&amdgpu_encoder->base,
		&amdgpu_dm_encoder_funcs,
		DRM_MODE_ENCODER_DPMST,
		NULL);

	drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);

	return amdgpu_encoder;
}

static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->port = port;
	aconnector->mst_port = master;

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}

	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
	drm_connector_attach_encoder(&aconnector->base,
				     &aconnector->mst_encoder->base);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connector to drm and
	 * framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
		 aconnector, connector->base.id, aconnector->mst_port);

	drm_dp_mst_get_port_malloc(port);

	DRM_DEBUG_KMS(":%d\n", connector->base.id);

	return connector;
}

static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_connector *connector)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
		 aconnector, connector->base.id, aconnector->mst_port);

	if (aconnector->dc_sink) {
		amdgpu_dm_update_freesync_caps(connector, NULL);
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	drm_connector_unregister(connector);
	if (adev->mode_info.rfbdev)
		drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
	drm_connector_put(connector);
}

static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->mode_info.rfbdev)
		drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
	else
		DRM_ERROR("adev->mode_info.rfbdev is NULL\n");

	drm_connector_register(connector);
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
	.destroy_connector = dm_dp_destroy_mst_connector,
	.register_connector = dm_dp_mst_register_connector
};

void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
				       struct amdgpu_dm_connector *aconnector)
{
	aconnector->dm_dp_aux.aux.name = "dmdc";
	aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);

	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		return;

	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(
		&aconnector->mst_mgr,
		dm->adev->ddev,
		&aconnector->dm_dp_aux.aux,
		16,
		4,
		aconnector->connector_id);
}

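/*
 * A quick sanity check on the divider below: an MTP carries 64 time slots
 * and one PBN corresponds to 54/64 MBps, so the PBN capacity of a single
 * slot is (link MBps * 64/54) / 64 = MBps / 54 = kbps / (8 * 1000 * 54),
 * which is exactly the expression used here.
 */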
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)

struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;
	struct dc_sink *sink;
	struct dc_dsc_bw_range bw_range;
	bool compression_possible;
	struct drm_dp_mst_port *port;
};

struct dsc_mst_fairness_vars {
	int pbn;
	bool dsc_enabled;
	int bpp_x16;
};

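/*
 * kbps_to_peak_pbn() mirrors the math in drm_dp_calc_pbn_mode(): the stream
 * rate is padded by 0.6% (1006/1000) for link overhead and then converted
 * to PBN, where one PBN stands for 54/64 MBps, hence the * 64 / (54 * 8 *
 * 1000).
 */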
static int kbps_to_peak_pbn(int kbps)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps = div_u64(peak_kbps, 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}

static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
					       struct dsc_mst_fairness_vars *vars,
					       int count)
{
	int i;

	for (i = 0; i < count; i++) {
		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->sink_dsc_caps.dsc_dec_caps,
					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
					0,
					params[i].timing,
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;
			params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
		} else {
			params[i].timing->flags.DSC = 0;
		}
	}
}

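/*
 * bpp_x16_from_pbn() inverts kbps_to_peak_pbn(): kbps is roughly
 * pbn * 54 * 8 * 1000 / 64, with 994 standing in for 1000 to undo the ~0.6%
 * margin added on the way in, before asking DC which DSC bpp fits that rate.
 */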
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
			param.sink->ctx->dc->res_pool->dscs[0],
			&param.sink->sink_dsc_caps.dsc_dec_caps,
			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
			(int) kbps, param.timing, &dsc_config);

	return dsc_config.bits_per_pixel;
}

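/*
 * Greedy "fair share" pass: streams that still have DSC headroom are grown
 * one at a time, smallest remaining slack first. Each step either hands the
 * stream an equal share of the link's free time slots or tops it up to the
 * PBN of its maximum DSC target bpp, and keeps the change only if the MST
 * atomic check still passes; otherwise the allocation is rolled back.
 */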
static void increase_dsc_bpp(struct drm_atomic_state *state,
			     struct dc_link *dc_link,
			     struct dsc_mst_fairness_params *params,
			     struct dsc_mst_fairness_vars *vars,
			     int count)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int pbn_per_timeslot;
	int link_timeslots_used;
	int fair_pbn_alloc;

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled) {
			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
			dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);

	while (remaining_to_increase) {
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);

		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

		if (initial_slack[next_index] > fair_pbn_alloc) {
			vars[next_index].pbn += fair_pbn_alloc;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  dm_mst_get_pbn_divider(dc_link)) < 0)
					return;
			}
		} else {
			vars[next_index].pbn += initial_slack[next_index];
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				vars[next_index].pbn -= initial_slack[next_index];
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  dm_mst_get_pbn_divider(dc_link)) < 0)
					return;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
}

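/*
 * Second pass: for streams already sitting at their maximum DSC target bpp,
 * try dropping compression entirely, starting with the stream that gains the
 * most bandwidth from doing so, and keep DSC disabled only if the topology
 * still passes the MST atomic check; otherwise fall back to the compressed
 * allocation.
 */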
static void try_disable_dsc(struct drm_atomic_state *state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[next_index].port->mgr,
						  params[next_index].port,
						  vars[next_index].pbn,
						  0) < 0)
			return;

		if (!drm_dp_mst_atomic_check(state)) {
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
}

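/*
 * Per-link DSC policy, roughly in three steps: first see whether every
 * stream fits uncompressed; if not, start everyone at minimum bpp (maximum
 * compression) and bail out if even that does not fit; finally let
 * increase_dsc_bpp()/try_disable_dsc() hand the spare bandwidth back out.
 */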
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					     struct dc_state *dc_state,
					     struct dc_link *dc_link)
{
	int i;
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int count = 0;

	memset(params, 0, sizeof(params));

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		if (stream->link != dc_link)
			continue;

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		params[count].port = aconnector->port;
		params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp,
				dsc_policy.max_target_bpp,
				&stream->sink->sink_dsc_caps.dsc_dec_caps,
				&stream->timing, &params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

		count++;
	}

	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
		vars[i].dsc_enabled = false;
		vars[i].bpp_x16 = 0;
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[i].port->mgr,
						  params[i].port,
						  vars[i].pbn,
						  0) < 0)
			return false;
	}
	if (!drm_dp_mst_atomic_check(state)) {
		set_dsc_configs_from_fairness_vars(params, vars, count);
		return true;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible) {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
			vars[i].dsc_enabled = true;
			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		} else {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
			vars[i].dsc_enabled = false;
			vars[i].bpp_x16 = 0;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  0) < 0)
				return false;
		}
	}
	if (drm_dp_mst_atomic_check(state))
		return false;

	/* Optimize degree of compression */
	increase_dsc_bpp(state, dc_link, params, vars, count);

	try_disable_dsc(state, dc_link, params, vars, count);

	set_dsc_configs_from_fairness_vars(params, vars, count);

	return true;
}

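/*
 * Entry point used during atomic check: streams are grouped by MST link,
 * each link is solved once under its topology manager lock, and every
 * stream that ends up with DSC enabled gets a DSC resource added to the
 * dc_state afterwards.
 */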
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				       struct dc_state *dc_state)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
	}

	return true;
}
#endif