drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"

#include "dc.h"
#include "dm_helpers.h"

#include "dc_link_ddc.h"

#include "i2caux_interface.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif

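/*
 * Perform a native AUX or I2C-over-AUX transaction on behalf of the DRM DP
 * helpers: translate the drm_dp_aux_msg into a DC aux_payload, hand it to
 * dc_link_aux_transfer_raw(), and map the DC operation result onto a
 * negative errno on failure.
 */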
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                                  struct drm_dp_aux_msg *msg)
{
        ssize_t result = 0;
        struct aux_payload payload;
        enum aux_channel_operation_result operation_result;

        if (WARN_ON(msg->size > 16))
                return -E2BIG;

        payload.address = msg->address;
        payload.data = msg->buffer;
        payload.length = msg->size;
        payload.reply = &msg->reply;
        payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
        payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
        payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
        payload.defer_delay = 0;

        result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
                                      &operation_result);

        if (payload.write)
                result = msg->size;

        if (result < 0)
                switch (operation_result) {
                case AUX_CHANNEL_OPERATION_SUCCEEDED:
                        break;
                case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
                case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
                        result = -EIO;
                        break;
                case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
                case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
                        result = -EBUSY;
                        break;
                case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
                        result = -ETIMEDOUT;
                        break;
                }

        return result;
}

static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector =
                to_amdgpu_dm_connector(connector);

        if (aconnector->dc_sink) {
                dc_link_remove_remote_sink(aconnector->dc_link,
                                           aconnector->dc_sink);
                dc_sink_release(aconnector->dc_sink);
        }

        kfree(aconnector->edid);

        drm_connector_cleanup(connector);
        drm_dp_mst_put_port_malloc(aconnector->port);
        kfree(aconnector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
        int r;

        r = drm_dp_mst_connector_late_register(connector,
                                               amdgpu_dm_connector->port);
        if (r < 0)
                return r;

#if defined(CONFIG_DEBUG_FS)
        connector_debugfs_init(amdgpu_dm_connector);
#endif

        return 0;
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
        struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

        drm_dp_mst_connector_early_unregister(connector, port);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = dm_dp_mst_connector_destroy,
        .reset = amdgpu_dm_connector_funcs_reset,
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
        .late_register = amdgpu_dm_mst_connector_late_register,
        .early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
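/*
 * Look up the AUX channel that handles DSC for this MST port, read the DSC
 * capability registers starting at DP_DSC_SUPPORT, and parse them into the
 * remote sink's dsc_dec_caps. Returns false if the port has no usable DSC
 * AUX, the DPCD read fails, or the capabilities cannot be parsed.
 */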
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
        struct dc_sink *dc_sink = aconnector->dc_sink;
        struct drm_dp_mst_port *port = aconnector->port;
        u8 dsc_caps[16] = { 0 };

        aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);

        if (!aconnector->dsc_aux)
                return false;

        if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
                return false;

        if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
                                   dsc_caps, NULL,
                                   &dc_sink->dsc_caps.dsc_dec_caps))
                return false;

        return true;
}
#endif

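/*
 * Probe display modes for an MST connector: fetch the EDID through the MST
 * topology manager if it is not cached yet, (re)create the remote DC sink
 * for the link, refresh FreeSync and DSC capabilities, and finally add the
 * EDID modes to the connector.
 */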
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        int ret = 0;

        if (!aconnector)
                return drm_add_edid_modes(connector, NULL);

        if (!aconnector->edid) {
                struct edid *edid;
                edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

                if (!edid) {
                        drm_connector_update_edid_property(
                                &aconnector->base,
                                NULL);
                        return ret;
                }

                aconnector->edid = edid;
        }

        if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
        }

        if (!aconnector->dc_sink) {
                struct dc_sink *dc_sink;
                struct dc_sink_init_data init_params = {
                                .link = aconnector->dc_link,
                                .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
                dc_sink = dc_link_add_remote_sink(
                        aconnector->dc_link,
                        (uint8_t *)aconnector->edid,
                        (aconnector->edid->extensions + 1) * EDID_LENGTH,
                        &init_params);

                /* dc_link_add_remote_sink() can fail; don't dereference NULL */
                if (dc_sink) {
                        dc_sink->priv = aconnector;
                        /* dc_link_add_remote_sink returns a new reference */
                        aconnector->dc_sink = dc_sink;

                        amdgpu_dm_update_freesync_caps(
                                        connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
                        if (!validate_dsc_caps_on_connector(aconnector))
                                memset(&aconnector->dc_sink->dsc_caps,
                                       0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
                }
        }

        drm_connector_update_edid_property(
                                        &aconnector->base, aconnector->edid);

        ret = drm_add_edid_modes(connector, aconnector->edid);

        return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
                           struct drm_connector_state *connector_state)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);

        return &adev->dm.mst_encoders[acrtc->crtc_id].base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
                 struct drm_modeset_acquire_ctx *ctx, bool force)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct amdgpu_dm_connector *master = aconnector->mst_port;

        return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
                                      aconnector->port);
}

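/*
 * Atomic check for an MST connector: if the connector is being disabled
 * (it had a CRTC in the old state and either loses it or gets a modeset
 * that leaves the new CRTC disabled), release the VCPI time slots it held
 * in the MST topology state.
 */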
static int dm_dp_mst_atomic_check(struct drm_connector *connector,
                                struct drm_atomic_state *state)
{
        struct drm_connector_state *new_conn_state =
                        drm_atomic_get_new_connector_state(state, connector);
        struct drm_connector_state *old_conn_state =
                        drm_atomic_get_old_connector_state(state, connector);
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct drm_crtc_state *new_crtc_state;
        struct drm_dp_mst_topology_mgr *mst_mgr;
        struct drm_dp_mst_port *mst_port;

        mst_port = aconnector->port;
        mst_mgr = &aconnector->mst_port->mst_mgr;

        if (!old_conn_state->crtc)
                return 0;

        if (new_conn_state->crtc) {
                new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
                if (!new_crtc_state ||
                    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
                    new_crtc_state->enable)
                        return 0;
        }

        return drm_dp_atomic_release_vcpi_slots(state,
                                                mst_mgr,
                                                mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
        .get_modes = dm_dp_mst_get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
        .atomic_best_encoder = dm_mst_atomic_best_encoder,
        .detect_ctx = dm_dp_mst_detect,
        .atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
        .destroy = amdgpu_dm_encoder_destroy,
};

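/*
 * Create one DPMST "fake" encoder per display index so that any CRTC can be
 * routed to an MST stream; dm_mst_atomic_best_encoder() later returns the
 * encoder that matches the CRTC chosen for the connector.
 */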
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
                struct drm_encoder *encoder = &amdgpu_encoder->base;

                encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

                drm_encoder_init(
                        dev,
                        &amdgpu_encoder->base,
                        &amdgpu_dm_encoder_funcs,
                        DRM_MODE_ENCODER_DPMST,
                        NULL);

                drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
        }
}

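/*
 * Topology-manager callback invoked when a new MST port is discovered:
 * allocate and initialize an amdgpu_dm_connector for the port, attach the
 * fake MST encoders and the max_bpc/vrr_capable/path/tile properties
 * inherited from the master connector, and take a malloc reference on the
 * port for the lifetime of the connector.
 */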
static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                        struct drm_dp_mst_port *port,
                        const char *pathprop)
{
        struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
        struct drm_device *dev = master->base.dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        int i;

        aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
        if (!aconnector)
                return NULL;

        connector = &aconnector->base;
        aconnector->port = port;
        aconnector->mst_port = master;

        if (drm_connector_init(
                dev,
                connector,
                &dm_dp_mst_connector_funcs,
                DRM_MODE_CONNECTOR_DisplayPort)) {
                kfree(aconnector);
                return NULL;
        }
        drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

        amdgpu_dm_connector_init_helper(
                &adev->dm,
                aconnector,
                DRM_MODE_CONNECTOR_DisplayPort,
                master->dc_link,
                master->connector_id);

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_connector_attach_encoder(&aconnector->base,
                                             &adev->dm.mst_encoders[i].base);
        }

        connector->max_bpc_property = master->base.max_bpc_property;
        if (connector->max_bpc_property)
                drm_connector_attach_max_bpc_property(connector, 8, 16);

        connector->vrr_capable_property = master->base.vrr_capable_property;
        if (connector->vrr_capable_property)
                drm_connector_attach_vrr_capable_property(connector);

        drm_object_attach_property(
                &connector->base,
                dev->mode_config.path_property,
                0);
        drm_object_attach_property(
                &connector->base,
                dev->mode_config.tile_property,
                0);

        drm_connector_set_path_property(connector, pathprop);

        /*
         * Initialize connector state before adding the connector to drm and
         * framebuffer lists
         */
        amdgpu_dm_connector_funcs_reset(connector);

        drm_dp_mst_get_port_malloc(port);

        return connector;
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
        .add_connector = dm_dp_add_mst_connector,
};

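/*
 * Set up the DP AUX channel and CEC adapter for a DP connector and, for
 * non-eDP connectors, initialize the MST topology manager with the
 * dm_mst_cbs callbacks so newly discovered remote ports get connectors via
 * dm_dp_add_mst_connector().
 */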
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
                                       struct amdgpu_dm_connector *aconnector,
                                       int link_index)
{
        aconnector->dm_dp_aux.aux.name =
                kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
                          link_index);
        aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
        aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

        drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
        drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
                                      &aconnector->base);

        if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
                return;

        aconnector->mst_mgr.cbs = &dm_mst_cbs;
        drm_dp_mst_topology_mgr_init(
                &aconnector->mst_mgr,
                dm->adev->ddev,
                &aconnector->dm_dp_aux.aux,
                16,
                4,
                aconnector->connector_id);
}

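/*
 * PBN divider for a link: the link bandwidth in kbps scaled down by
 * 8 * 1000 * 54, which corresponds to the PBN capacity of a single MST
 * time slot. Used as the pbn_div argument to
 * drm_dp_atomic_find_vcpi_slots() below.
 */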
int dm_mst_get_pbn_divider(struct dc_link *link)
{
        if (!link)
                return 0;

        return dc_link_bandwidth_kbps(link,
                        dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)

struct dsc_mst_fairness_params {
        struct dc_crtc_timing *timing;
        struct dc_sink *sink;
        struct dc_dsc_bw_range bw_range;
        bool compression_possible;
        struct drm_dp_mst_port *port;
};

struct dsc_mst_fairness_vars {
        int pbn;
        bool dsc_enabled;
        int bpp_x16;
};

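/*
 * Convert a stream bandwidth in kbps to its peak PBN requirement: add a
 * 0.6% margin (factor 1006/1000) and convert to PBN units, rounding up
 * (PBN = peak_kbps * 64 / (54 * 8 * 1000)).
 */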
static int kbps_to_peak_pbn(int kbps)
{
        u64 peak_kbps = kbps;

        peak_kbps *= 1006;
        peak_kbps = div_u64(peak_kbps, 1000);
        return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}

static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
                struct dsc_mst_fairness_vars *vars,
                int count)
{
        int i;

        for (i = 0; i < count; i++) {
                memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
                if (vars[i].dsc_enabled && dc_dsc_compute_config(
                                        params[i].sink->ctx->dc->res_pool->dscs[0],
                                        &params[i].sink->dsc_caps.dsc_dec_caps,
                                        params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
                                        0,
                                        params[i].timing,
                                        &params[i].timing->dsc_cfg)) {
                        params[i].timing->flags.DSC = 1;
                        params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
                } else {
                        params[i].timing->flags.DSC = 0;
                }
        }
}

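/*
 * Given a PBN allocation, compute the DSC target bits-per-pixel (in 1/16
 * bpp units) that fits it: convert the PBN back to kbps (undoing the margin
 * applied in kbps_to_peak_pbn()) and let dc_dsc_compute_config() pick the
 * bits_per_pixel for that bandwidth limit.
 */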
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
        struct dc_dsc_config dsc_config;
        u64 kbps;

        kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
        dc_dsc_compute_config(
                        param.sink->ctx->dc->res_pool->dscs[0],
                        &param.sink->dsc_caps.dsc_dec_caps,
                        param.sink->ctx->dc->debug.dsc_min_slice_height_override,
                        (int) kbps, param.timing, &dsc_config);

        return dsc_config.bits_per_pixel;
}

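/*
 * Greedily raise the DSC bpp of the compressed streams. Streams are visited
 * in order of least initial slack (the PBN gap between the current
 * allocation and the stream's maximum DSC target bpp). Each stream receives
 * either a fair share of the link's remaining time slots or, if smaller,
 * its full slack; the candidate allocation is validated with
 * drm_dp_mst_atomic_check() and rolled back if the topology cannot fit it.
 */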
static void increase_dsc_bpp(struct drm_atomic_state *state,
                             struct dc_link *dc_link,
                             struct dsc_mst_fairness_params *params,
                             struct dsc_mst_fairness_vars *vars,
                             int count)
{
        int i;
        bool bpp_increased[MAX_PIPES];
        int initial_slack[MAX_PIPES];
        int min_initial_slack;
        int next_index;
        int remaining_to_increase = 0;
        int pbn_per_timeslot;
        int link_timeslots_used;
        int fair_pbn_alloc;

        pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);

        for (i = 0; i < count; i++) {
                if (vars[i].dsc_enabled) {
                        initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
                        bpp_increased[i] = false;
                        remaining_to_increase += 1;
                } else {
                        initial_slack[i] = 0;
                        bpp_increased[i] = true;
                }
        }

        while (remaining_to_increase) {
                next_index = -1;
                min_initial_slack = -1;
                for (i = 0; i < count; i++) {
                        if (!bpp_increased[i]) {
                                if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
                                        min_initial_slack = initial_slack[i];
                                        next_index = i;
                                }
                        }
                }

                if (next_index == -1)
                        break;

                link_timeslots_used = 0;

                for (i = 0; i < count; i++)
                        link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);

                fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

                if (initial_slack[next_index] > fair_pbn_alloc) {
                        vars[next_index].pbn += fair_pbn_alloc;
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[next_index].port->mgr,
                                                          params[next_index].port,
                                                          vars[next_index].pbn,
                                                          pbn_per_timeslot) < 0)
                                return;
                        if (!drm_dp_mst_atomic_check(state)) {
                                vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
                        } else {
                                vars[next_index].pbn -= fair_pbn_alloc;
                                if (drm_dp_atomic_find_vcpi_slots(state,
                                                                  params[next_index].port->mgr,
                                                                  params[next_index].port,
                                                                  vars[next_index].pbn,
                                                                  pbn_per_timeslot) < 0)
                                        return;
                        }
                } else {
                        vars[next_index].pbn += initial_slack[next_index];
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[next_index].port->mgr,
                                                          params[next_index].port,
                                                          vars[next_index].pbn,
                                                          pbn_per_timeslot) < 0)
                                return;
                        if (!drm_dp_mst_atomic_check(state)) {
                                vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
                        } else {
                                vars[next_index].pbn -= initial_slack[next_index];
                                if (drm_dp_atomic_find_vcpi_slots(state,
                                                                  params[next_index].port->mgr,
                                                                  params[next_index].port,
                                                                  vars[next_index].pbn,
                                                                  pbn_per_timeslot) < 0)
                                        return;
                        }
                }

                bpp_increased[next_index] = true;
                remaining_to_increase--;
        }
}

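/*
 * Try to turn DSC back off for streams that are already running at their
 * maximum DSC target bpp. Candidates are visited in order of the largest
 * bandwidth increase needed to run uncompressed; each is tentatively given
 * its uncompressed PBN and DSC stays disabled only if
 * drm_dp_mst_atomic_check() still passes, otherwise the compressed
 * allocation is restored.
 */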
static void try_disable_dsc(struct drm_atomic_state *state,
                            struct dc_link *dc_link,
                            struct dsc_mst_fairness_params *params,
                            struct dsc_mst_fairness_vars *vars,
                            int count)
{
        int i;
        bool tried[MAX_PIPES];
        int kbps_increase[MAX_PIPES];
        int max_kbps_increase;
        int next_index;
        int remaining_to_try = 0;

        for (i = 0; i < count; i++) {
                if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
                        kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
                        tried[i] = false;
                        remaining_to_try += 1;
                } else {
                        kbps_increase[i] = 0;
                        tried[i] = true;
                }
        }

        while (remaining_to_try) {
                next_index = -1;
                max_kbps_increase = -1;
                for (i = 0; i < count; i++) {
                        if (!tried[i]) {
                                if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
                                        max_kbps_increase = kbps_increase[i];
                                        next_index = i;
                                }
                        }
                }

                if (next_index == -1)
                        break;

                vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
                if (drm_dp_atomic_find_vcpi_slots(state,
                                                  params[next_index].port->mgr,
                                                  params[next_index].port,
                                                  vars[next_index].pbn,
                                                  dm_mst_get_pbn_divider(dc_link)) < 0)
                        return;

                if (!drm_dp_mst_atomic_check(state)) {
                        vars[next_index].dsc_enabled = false;
                        vars[next_index].bpp_x16 = 0;
                } else {
                        vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[next_index].port->mgr,
                                                          params[next_index].port,
                                                          vars[next_index].pbn,
                                                          dm_mst_get_pbn_divider(dc_link)) < 0)
                                return;
                }

                tried[next_index] = true;
                remaining_to_try--;
        }
}

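/*
 * Decide DSC usage for all streams that share one MST link. First gather
 * per-stream timing, port and DSC bandwidth-range parameters. Then try to
 * fit every stream uncompressed; if that fails, force maximum compression
 * on every DSC-capable stream, and if even that does not pass the MST
 * atomic check, give up. Otherwise spend the remaining link bandwidth via
 * increase_dsc_bpp() and try_disable_dsc() before writing the final choices
 * back into the stream timings.
 */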
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                                             struct dc_state *dc_state,
                                             struct dc_link *dc_link)
{
        int i;
        struct dc_stream_state *stream;
        struct dsc_mst_fairness_params params[MAX_PIPES];
        struct dsc_mst_fairness_vars vars[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
        int count = 0;

        memset(params, 0, sizeof(params));

        /* Set up params */
        for (i = 0; i < dc_state->stream_count; i++) {
                struct dc_dsc_policy dsc_policy = {0};

                stream = dc_state->streams[i];

                if (stream->link != dc_link)
                        continue;

                stream->timing.flags.DSC = 0;

                params[count].timing = &stream->timing;
                params[count].sink = stream->sink;
                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
                params[count].port = aconnector->port;
                params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
                dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
                if (!dc_dsc_compute_bandwidth_range(
                                stream->sink->ctx->dc->res_pool->dscs[0],
                                stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
                                dsc_policy.min_target_bpp,
                                dsc_policy.max_target_bpp,
                                &stream->sink->dsc_caps.dsc_dec_caps,
                                &stream->timing, &params[count].bw_range))
                        params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

                count++;
        }

        /* Try no compression */
        for (i = 0; i < count; i++) {
                vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
                vars[i].dsc_enabled = false;
                vars[i].bpp_x16 = 0;
                if (drm_dp_atomic_find_vcpi_slots(state,
                                                 params[i].port->mgr,
                                                 params[i].port,
                                                 vars[i].pbn,
                                                 dm_mst_get_pbn_divider(dc_link)) < 0)
                        return false;
        }
        if (!drm_dp_mst_atomic_check(state)) {
                set_dsc_configs_from_fairness_vars(params, vars, count);
                return true;
        }

        /* Try max compression */
        for (i = 0; i < count; i++) {
                if (params[i].compression_possible) {
                        vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
                        vars[i].dsc_enabled = true;
                        vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[i].port->mgr,
                                                          params[i].port,
                                                          vars[i].pbn,
                                                          dm_mst_get_pbn_divider(dc_link)) < 0)
                                return false;
                } else {
                        vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
                        vars[i].dsc_enabled = false;
                        vars[i].bpp_x16 = 0;
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[i].port->mgr,
                                                          params[i].port,
                                                          vars[i].pbn,
                                                          dm_mst_get_pbn_divider(dc_link)) < 0)
                                return false;
                }
        }
        if (drm_dp_mst_atomic_check(state))
                return false;

        /* Optimize degree of compression */
        increase_dsc_bpp(state, dc_link, params, vars, count);

        try_disable_dsc(state, dc_link, params, vars, count);

        set_dsc_configs_from_fairness_vars(params, vars, count);

        return true;
}

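/*
 * Entry point for MST DSC configuration during atomic check: walk all MST
 * streams in the DC state, compute the DSC configs once per link (under the
 * topology manager lock), and finally attach a DSC resource to every stream
 * whose timing ended up with DSC enabled.
 */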
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                                       struct dc_state *dc_state)
{
        int i, j;
        struct dc_stream_state *stream;
        bool computed_streams[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;

        for (i = 0; i < dc_state->stream_count; i++)
                computed_streams[i] = false;

        for (i = 0; i < dc_state->stream_count; i++) {
                stream = dc_state->streams[i];

                if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
                        continue;

                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

                if (!aconnector || !aconnector->dc_sink)
                        continue;

                if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
                        continue;

                if (computed_streams[i])
                        continue;

                mutex_lock(&aconnector->mst_mgr.lock);
                if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
                        mutex_unlock(&aconnector->mst_mgr.lock);
                        return false;
                }
                mutex_unlock(&aconnector->mst_mgr.lock);

                for (j = 0; j < dc_state->stream_count; j++) {
                        if (dc_state->streams[j]->link == stream->link)
                                computed_streams[j] = true;
                }
        }

        for (i = 0; i < dc_state->stream_count; i++) {
                stream = dc_state->streams[i];

                if (stream->timing.flags.DSC == 1)
                        dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
        }

        return true;
}

#endif