/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"

#include "dc.h"
#include "dm_helpers.h"

#include "dc_link_ddc.h"

#include "i2caux_interface.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif

/* #define TRACE_DPCD */

#ifdef TRACE_DPCD
#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)

static inline char *side_band_msg_type_to_str(uint32_t address)
{
	static char str[10] = {0};

	if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
		strcpy(str, "DOWN_REQ");
	else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
		strcpy(str, "UP_REP");
	else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
		strcpy(str, "DOWN_REP");
	else
		strcpy(str, "UP_REQ");

	return str;
}

static void log_dpcd(uint8_t type,
		     uint32_t address,
		     uint8_t *data,
		     uint32_t size,
		     bool res)
{
	DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
			(type == DP_AUX_NATIVE_READ) ||
			(type == DP_AUX_I2C_READ) ?
					"Read" : "Write",
			address,
			SIDE_BAND_MSG(address) ?
					side_band_msg_type_to_str(address) : "Nop",
			res ? "OK" : "Fail");

	if (res)
		print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
}
#endif
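
/*
 * drm_dp_aux .transfer hook: translate a drm_dp_aux_msg into a DC aux_payload,
 * hand it to the DC link layer, and map the DC channel result back onto
 * standard errno codes for the DRM core.
 */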
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_channel_operation_result operation_result;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.defer_delay = 0;

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
					  &operation_result);

	if (payload.write)
		result = msg->size;

	if (result < 0)
		switch (operation_result) {
		case AUX_CHANNEL_OPERATION_SUCCEEDED:
			break;
		case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
		case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
			result = -EIO;
			break;
		case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
		case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

	return result;
}
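
/* Tear down an MST connector: cached EDID, fake encoder, and port reference. */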
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
	struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;

	kfree(amdgpu_dm_connector->edid);
	amdgpu_dm_connector->edid = NULL;

	drm_encoder_cleanup(&amdgpu_encoder->base);
	kfree(amdgpu_encoder);
	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
	kfree(amdgpu_dm_connector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
	int r;

	amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
	r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
	if (r)
		return r;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return drm_dp_mst_connector_late_register(connector, port);
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

	drm_dp_mst_connector_early_unregister(connector, port);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
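/*
 * Probe the DSC decoder capabilities of the sink behind this MST port and
 * cache them on the DC sink; returns false if no DSC-capable AUX endpoint
 * is found or the DPCD caps cannot be read or parsed.
 */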
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->port;
	u8 dsc_caps[16] = { 0 };

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, NULL,
				   &dc_sink->sink_dsc_caps.dsc_dec_caps))
		return false;

	return true;
}
#endif
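
/*
 * Fetch the EDID through the MST topology (cached on the connector), create
 * the remote DC sink on first use, and populate the DRM mode list from it.
 */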
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->edid) {
		struct edid *edid;

		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
		if (!edid) {
			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);
			return ret;
		}

		aconnector->edid = edid;
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
				.link = aconnector->dc_link,
				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
					connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->sink_dsc_caps,
				       0, sizeof(aconnector->dc_sink->sink_dsc_caps));
#endif
		}
	}

	drm_connector_update_edid_property(
			&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);

	return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
			   struct drm_connector_state *connector_state)
{
	return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_dm_connector *master = aconnector->mst_port;

	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
				      aconnector->port);
}
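
/*
 * When a modeset shuts off the CRTC feeding this connector, release the VCPI
 * slots its stream held so other MST streams can claim that bandwidth.
 */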
static int dm_dp_mst_atomic_check(struct drm_connector *connector,
				  struct drm_atomic_state *state)
{
	struct drm_connector_state *new_conn_state =
			drm_atomic_get_new_connector_state(state, connector);
	struct drm_connector_state *old_conn_state =
			drm_atomic_get_old_connector_state(state, connector);
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct drm_crtc_state *new_crtc_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!old_conn_state->crtc)
		return 0;

	if (new_conn_state->crtc) {
		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state ||
		    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->enable)
			return 0;
	}

	return drm_dp_atomic_release_vcpi_slots(state,
						mst_mgr,
						mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
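
/*
 * Each MST connector drives a per-connector "fake" DPMST encoder instead of
 * the SST encoder owned by the master connector.
 */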
static struct amdgpu_encoder *
dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_encoder *encoder;

	amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return NULL;

	encoder = &amdgpu_encoder->base;
	encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	drm_encoder_init(
		dev,
		&amdgpu_encoder->base,
		&amdgpu_dm_encoder_funcs,
		DRM_MODE_ENCODER_DPMST,
		NULL);

	drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);

	return amdgpu_encoder;
}
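
/* .add_connector callback: create an amdgpu_dm connector for a new MST port. */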
static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->port = port;
	aconnector->mst_port = master;

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}

	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
	drm_connector_attach_encoder(&aconnector->base,
				     &aconnector->mst_encoder->base);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connector to drm and
	 * framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
		 aconnector, connector->base.id, aconnector->mst_port);

	drm_dp_mst_get_port_malloc(port);

	DRM_DEBUG_KMS(":%d\n", connector->base.id);

	return connector;
}

static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_connector *connector)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
		 aconnector, connector->base.id, aconnector->mst_port);

	if (aconnector->dc_sink) {
		amdgpu_dm_update_freesync_caps(connector, NULL);
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	drm_connector_unregister(connector);
	if (adev->mode_info.rfbdev)
		drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
	drm_connector_put(connector);
}

static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->mode_info.rfbdev)
		drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
	else
		DRM_ERROR("adev->mode_info.rfbdev is NULL\n");

	drm_connector_register(connector);
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
	.destroy_connector = dm_dp_destroy_mst_connector,
	.register_connector = dm_dp_mst_register_connector
};
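
/*
 * Set up the AUX channel, CEC, and (for non-eDP links) the MST topology
 * manager on a newly created DP connector.
 */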
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
				       struct amdgpu_dm_connector *aconnector)
{
	aconnector->dm_dp_aux.aux.name = "dmdc";
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);

	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		return;

	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(
		&aconnector->mst_mgr,
		dm->adev->ddev,
		&aconnector->dm_dp_aux.aux,
		16,
		4,
		aconnector->connector_id);
}
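
/*
 * Convert the link's raw bandwidth into the PBN-per-time-slot divider that
 * the DRM VCPI allocation helpers expect.
 */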
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;
	struct dc_sink *sink;
	struct dc_dsc_bw_range bw_range;
	bool compression_possible;
	struct drm_dp_mst_port *port;
};

struct dsc_mst_fairness_vars {
	int pbn;
	bool dsc_enabled;
	int bpp_x16;
};
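
/*
 * Convert a stream's bandwidth in kbps into its peak PBN demand: pad by the
 * same 0.6% (1006/1000) margin drm_dp_calc_pbn_mode() applies, then scale by
 * 64/54, since one PBN unit corresponds to 54/64 MBps.
 */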
static int kbps_to_peak_pbn(int kbps)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps = div_u64(peak_kbps, 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
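
/*
 * Commit the chosen fairness variables back into each stream's timing:
 * compute the final DSC config for streams left compressed and clear the
 * DSC flag for the rest.
 */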
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
					       struct dsc_mst_fairness_vars *vars,
					       int count)
{
	int i;

	for (i = 0; i < count; i++) {
		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->sink_dsc_caps.dsc_dec_caps,
					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
					0,
					params[i].timing,
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;
			params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
		} else {
			params[i].timing->flags.DSC = 0;
		}
	}
}
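
/* Translate a PBN allocation back into a DSC target bpp, in 1/16 bpp units. */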
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
			param.sink->ctx->dc->res_pool->dscs[0],
			&param.sink->sink_dsc_caps.dsc_dec_caps,
			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
			(int) kbps, param.timing, &dsc_config);

	return dsc_config.bits_per_pixel;
}
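
/*
 * Greedily hand spare link time slots back to compressed streams: raise the
 * bpp of the stream with the least remaining headroom first, revalidating
 * the MST atomic state after every step.
 */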
static void increase_dsc_bpp(struct drm_atomic_state *state,
			     struct dc_link *dc_link,
			     struct dsc_mst_fairness_params *params,
			     struct dsc_mst_fairness_vars *vars,
			     int count)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int pbn_per_timeslot;
	int link_timeslots_used;
	int fair_pbn_alloc;

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled) {
			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
			dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);

	while (remaining_to_increase) {
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);

		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

		if (initial_slack[next_index] > fair_pbn_alloc) {
			vars[next_index].pbn += fair_pbn_alloc;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  dm_mst_get_pbn_divider(dc_link)) < 0)
					return;
			}
		} else {
			vars[next_index].pbn += initial_slack[next_index];
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				vars[next_index].pbn -= initial_slack[next_index];
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  dm_mst_get_pbn_divider(dc_link)) < 0)
					return;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
}
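
/*
 * For streams already at their maximum compressed bpp, try turning DSC off
 * entirely, starting with the stream that gains the most bandwidth, as long
 * as the topology still validates.
 */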
static void try_disable_dsc(struct drm_atomic_state *state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[next_index].port->mgr,
						  params[next_index].port,
						  vars[next_index].pbn,
						  0) < 0)
			return;

		if (!drm_dp_mst_atomic_check(state)) {
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
}
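
/*
 * Per-link DSC fairness: first try every stream uncompressed, then every
 * DSC-capable stream at maximum compression, then iteratively raise bpp and
 * drop DSC where the link budget allows.
 */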
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					     struct dc_state *dc_state,
					     struct dc_link *dc_link)
{
	int i;
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int count = 0;

	memset(params, 0, sizeof(params));

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];
		if (stream->link != dc_link)
			continue;

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		params[count].port = aconnector->port;
		params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp,
				dsc_policy.max_target_bpp,
				&stream->sink->sink_dsc_caps.dsc_dec_caps,
				&stream->timing, &params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

		count++;
	}

	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
		vars[i].dsc_enabled = false;
		vars[i].bpp_x16 = 0;
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[i].port->mgr,
						  params[i].port,
						  vars[i].pbn,
						  0) < 0)
			return false;
	}
	if (!drm_dp_mst_atomic_check(state)) {
		set_dsc_configs_from_fairness_vars(params, vars, count);
		return true;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible) {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
			vars[i].dsc_enabled = true;
			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		} else {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
			vars[i].dsc_enabled = false;
			vars[i].bpp_x16 = 0;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  0) < 0)
				return false;
		}
	}
	if (drm_dp_mst_atomic_check(state))
		return false;

	/* Optimize degree of compression */
	increase_dsc_bpp(state, dc_link, params, vars, count);

	try_disable_dsc(state, dc_link, params, vars, count);

	set_dsc_configs_from_fairness_vars(params, vars, count);

	return true;
}
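
/*
 * Entry point from atomic check: compute DSC configs once per MST link and
 * attach a DSC hardware resource to every stream that ends up compressed.
 */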
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				       struct dc_state *dc_state)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
	}

	return true;
}

#endif