drm/amdgpu/display: split dp connector registration (v4)
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm_mst_types.c
1 /*
2  * Copyright 2012-15 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 #include <linux/version.h>
27 #include <drm/drm_atomic_helper.h>
28 #include <drm/drm_dp_mst_helper.h>
29 #include "dm_services.h"
30 #include "amdgpu.h"
31 #include "amdgpu_dm.h"
32 #include "amdgpu_dm_mst_types.h"
33
34 #include "dc.h"
35 #include "dm_helpers.h"
36
37 #include "dc_link_ddc.h"
38
39 #include "i2caux_interface.h"
40 #if defined(CONFIG_DEBUG_FS)
41 #include "amdgpu_dm_debugfs.h"
42 #endif
43
44
45 #if defined(CONFIG_DRM_AMD_DC_DCN)
46 #include "dc/dcn20/dcn20_resource.h"
47 #endif
48
49 /* #define TRACE_DPCD */
50
51 #ifdef TRACE_DPCD
52 #define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
53
54 static inline char *side_band_msg_type_to_str(uint32_t address)
55 {
56         static char str[10] = {0};
57
58         if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
59                 strcpy(str, "DOWN_REQ");
60         else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
61                 strcpy(str, "UP_REP");
62         else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
63                 strcpy(str, "DOWN_REP");
64         else
65                 strcpy(str, "UP_REQ");
66
67         return str;
68 }
69
70 static void log_dpcd(uint8_t type,
71                      uint32_t address,
72                      uint8_t *data,
73                      uint32_t size,
74                      bool res)
75 {
76         DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
77                         (type == DP_AUX_NATIVE_READ) ||
78                         (type == DP_AUX_I2C_READ) ?
79                                         "Read" : "Write",
80                         address,
81                         SIDE_BAND_MSG(address) ?
82                                         side_band_msg_type_to_str(address) : "Nop",
83                         res ? "OK" : "Fail");
84
85         if (res) {
86                 print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
87         }
88 }
89 #endif
90
91 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
92                                   struct drm_dp_aux_msg *msg)
93 {
94         ssize_t result = 0;
95         struct aux_payload payload;
96         enum aux_channel_operation_result operation_result;
97
98         if (WARN_ON(msg->size > 16))
99                 return -E2BIG;
100
101         payload.address = msg->address;
102         payload.data = msg->buffer;
103         payload.length = msg->size;
104         payload.reply = &msg->reply;
105         payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
106         payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
107         payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
108         payload.defer_delay = 0;
109
110         result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
111                                       &operation_result);
112
113         if (payload.write)
114                 result = msg->size;
115
116         if (result < 0)
117                 switch (operation_result) {
118                 case AUX_CHANNEL_OPERATION_SUCCEEDED:
119                         break;
120                 case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
121                 case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
122                         result = -EIO;
123                         break;
124                 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
125                 case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
126                         result = -EBUSY;
127                         break;
128                 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
129                         result = -ETIMEDOUT;
130                         break;
131                 }
132
133         return result;
134 }
135
136 static void
137 dm_dp_mst_connector_destroy(struct drm_connector *connector)
138 {
139         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
140         struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
141
142         kfree(amdgpu_dm_connector->edid);
143         amdgpu_dm_connector->edid = NULL;
144
145         drm_encoder_cleanup(&amdgpu_encoder->base);
146         kfree(amdgpu_encoder);
147         drm_connector_cleanup(connector);
148         drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
149         kfree(amdgpu_dm_connector);
150 }
151
152 static int
153 amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
154 {
155         struct amdgpu_dm_connector *amdgpu_dm_connector =
156                 to_amdgpu_dm_connector(connector);
157         struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
158         int r;
159
160         amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
161         r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
162         if (r)
163                 return r;
164
165 #if defined(CONFIG_DEBUG_FS)
166         connector_debugfs_init(amdgpu_dm_connector);
167 #endif
168
169         return drm_dp_mst_connector_late_register(connector, port);
170 }
171
172 static void
173 amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
174 {
175         struct amdgpu_dm_connector *amdgpu_dm_connector =
176                 to_amdgpu_dm_connector(connector);
177         struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
178
179         drm_dp_mst_connector_early_unregister(connector, port);
180 }
181
/* Connector funcs for DP MST connectors. Registration work that needs the
 * connector's sysfs kdev (AUX channel, debugfs) is deferred to
 * ->late_register / ->early_unregister — hence the "split" registration.
 */
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = dm_dp_mst_connector_destroy,
        .reset = amdgpu_dm_connector_funcs_reset,
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
        .late_register = amdgpu_dm_mst_connector_late_register,
        .early_unregister = amdgpu_dm_mst_connector_early_unregister,
};
193
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Probe the DSC capability DPCD block of the port behind @aconnector and
 * parse it into the sink's decoder caps. Returns false when the port has
 * no usable DSC AUX channel, the DPCD read fails, or the caps don't parse.
 */
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
        struct dc_sink *sink = aconnector->dc_sink;
        u8 raw_caps[16] = { 0 };

        aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(aconnector->port);
        if (!aconnector->dsc_aux)
                return false;

        if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT,
                             raw_caps, 16) < 0)
                return false;

        return dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
                                     raw_caps, NULL,
                                     &sink->sink_dsc_caps.dsc_dec_caps);
}
#endif
217
218 static int dm_dp_mst_get_modes(struct drm_connector *connector)
219 {
220         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
221         int ret = 0;
222
223         if (!aconnector)
224                 return drm_add_edid_modes(connector, NULL);
225
226         if (!aconnector->edid) {
227                 struct edid *edid;
228                 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
229
230                 if (!edid) {
231                         drm_connector_update_edid_property(
232                                 &aconnector->base,
233                                 NULL);
234                         return ret;
235                 }
236
237                 aconnector->edid = edid;
238         }
239
240         if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
241                 dc_sink_release(aconnector->dc_sink);
242                 aconnector->dc_sink = NULL;
243         }
244
245         if (!aconnector->dc_sink) {
246                 struct dc_sink *dc_sink;
247                 struct dc_sink_init_data init_params = {
248                                 .link = aconnector->dc_link,
249                                 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
250                 dc_sink = dc_link_add_remote_sink(
251                         aconnector->dc_link,
252                         (uint8_t *)aconnector->edid,
253                         (aconnector->edid->extensions + 1) * EDID_LENGTH,
254                         &init_params);
255
256                 dc_sink->priv = aconnector;
257                 /* dc_link_add_remote_sink returns a new reference */
258                 aconnector->dc_sink = dc_sink;
259
260                 if (aconnector->dc_sink) {
261                         amdgpu_dm_update_freesync_caps(
262                                         connector, aconnector->edid);
263
264 #if defined(CONFIG_DRM_AMD_DC_DCN)
265                         if (!validate_dsc_caps_on_connector(aconnector))
266                                 memset(&aconnector->dc_sink->sink_dsc_caps,
267                                        0, sizeof(aconnector->dc_sink->sink_dsc_caps));
268 #endif
269                 }
270         }
271
272         drm_connector_update_edid_property(
273                                         &aconnector->base, aconnector->edid);
274
275         ret = drm_add_edid_modes(connector, aconnector->edid);
276
277         return ret;
278 }
279
280 static struct drm_encoder *
281 dm_mst_atomic_best_encoder(struct drm_connector *connector,
282                            struct drm_connector_state *connector_state)
283 {
284         return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
285 }
286
287 static int
288 dm_dp_mst_detect(struct drm_connector *connector,
289                  struct drm_modeset_acquire_ctx *ctx, bool force)
290 {
291         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
292         struct amdgpu_dm_connector *master = aconnector->mst_port;
293
294         return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
295                                       aconnector->port);
296 }
297
298 static int dm_dp_mst_atomic_check(struct drm_connector *connector,
299                                 struct drm_atomic_state *state)
300 {
301         struct drm_connector_state *new_conn_state =
302                         drm_atomic_get_new_connector_state(state, connector);
303         struct drm_connector_state *old_conn_state =
304                         drm_atomic_get_old_connector_state(state, connector);
305         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
306         struct drm_crtc_state *new_crtc_state;
307         struct drm_dp_mst_topology_mgr *mst_mgr;
308         struct drm_dp_mst_port *mst_port;
309
310         mst_port = aconnector->port;
311         mst_mgr = &aconnector->mst_port->mst_mgr;
312
313         if (!old_conn_state->crtc)
314                 return 0;
315
316         if (new_conn_state->crtc) {
317                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
318                 if (!new_crtc_state ||
319                     !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
320                     new_crtc_state->enable)
321                         return 0;
322                 }
323
324         return drm_dp_atomic_release_vcpi_slots(state,
325                                                 mst_mgr,
326                                                 mst_port);
327 }
328
/* Probe/validation helpers shared by all DP MST connectors. */
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
        .get_modes = dm_dp_mst_get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
        .atomic_best_encoder = dm_mst_atomic_best_encoder,
        .detect_ctx = dm_dp_mst_detect,
        .atomic_check = dm_dp_mst_atomic_check,
};
336
/* Encoder ->destroy hook for the fake MST encoder: clean up the DRM side
 * and free the allocation (the drm_encoder is embedded in amdgpu_encoder,
 * so freeing via the base pointer releases the whole object).
 */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}
342
/* The fake MST encoder only needs a destroy hook. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
        .destroy = amdgpu_dm_encoder_destroy,
};
346
347 static struct amdgpu_encoder *
348 dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
349 {
350         struct drm_device *dev = connector->base.dev;
351         struct amdgpu_device *adev = dev->dev_private;
352         struct amdgpu_encoder *amdgpu_encoder;
353         struct drm_encoder *encoder;
354
355         amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
356         if (!amdgpu_encoder)
357                 return NULL;
358
359         encoder = &amdgpu_encoder->base;
360         encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
361
362         drm_encoder_init(
363                 dev,
364                 &amdgpu_encoder->base,
365                 &amdgpu_dm_encoder_funcs,
366                 DRM_MODE_ENCODER_DPMST,
367                 NULL);
368
369         drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
370
371         return amdgpu_encoder;
372 }
373
/*
 * MST topology callback: create an amdgpu_dm connector (plus a fake
 * encoder) for a newly discovered MST port. Returns the new connector,
 * or NULL on allocation/init failure. The malloc reference taken on the
 * port at the end is dropped in dm_dp_mst_connector_destroy().
 */
static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                        struct drm_dp_mst_port *port,
                        const char *pathprop)
{
        struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
        struct drm_device *dev = master->base.dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;

        aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
        if (!aconnector)
                return NULL;

        connector = &aconnector->base;
        aconnector->port = port;
        aconnector->mst_port = master;

        if (drm_connector_init(
                dev,
                connector,
                &dm_dp_mst_connector_funcs,
                DRM_MODE_CONNECTOR_DisplayPort)) {
                kfree(aconnector);
                return NULL;
        }
        drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

        /* MST connectors share the master's dc_link and connector id. */
        amdgpu_dm_connector_init_helper(
                &adev->dm,
                aconnector,
                DRM_MODE_CONNECTOR_DisplayPort,
                master->dc_link,
                master->connector_id);

        /* NOTE(review): dm_dp_create_fake_mst_encoder() can return NULL on
         * allocation failure, and the attach below would dereference it —
         * confirm whether this path needs handling.
         */
        aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
        drm_connector_attach_encoder(&aconnector->base,
                                     &aconnector->mst_encoder->base);

        drm_object_attach_property(
                &connector->base,
                dev->mode_config.path_property,
                0);
        drm_object_attach_property(
                &connector->base,
                dev->mode_config.tile_property,
                0);

        drm_connector_set_path_property(connector, pathprop);

        /*
         * Initialize connector state before adding the connector to drm and
         * framebuffer lists
         */
        amdgpu_dm_connector_funcs_reset(connector);

        DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
                 aconnector, connector->base.id, aconnector->mst_port);

        /* Paired with drm_dp_mst_put_port_malloc() in the destroy hook. */
        drm_dp_mst_get_port_malloc(port);

        DRM_DEBUG_KMS(":%d\n", connector->base.id);

        return connector;
}
440
441 static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
442                                         struct drm_connector *connector)
443 {
444         struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
445         struct drm_device *dev = master->base.dev;
446         struct amdgpu_device *adev = dev->dev_private;
447         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
448
449         DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
450                  aconnector, connector->base.id, aconnector->mst_port);
451
452         if (aconnector->dc_sink) {
453                 amdgpu_dm_update_freesync_caps(connector, NULL);
454                 dc_link_remove_remote_sink(aconnector->dc_link,
455                                            aconnector->dc_sink);
456                 dc_sink_release(aconnector->dc_sink);
457                 aconnector->dc_sink = NULL;
458         }
459
460         drm_connector_unregister(connector);
461         if (adev->mode_info.rfbdev)
462                 drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
463         drm_connector_put(connector);
464 }
465
466 static void dm_dp_mst_register_connector(struct drm_connector *connector)
467 {
468         struct drm_device *dev = connector->dev;
469         struct amdgpu_device *adev = dev->dev_private;
470
471         if (adev->mode_info.rfbdev)
472                 drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
473         else
474                 DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
475
476         drm_connector_register(connector);
477 }
478
/* Callbacks handed to the DRM MST topology manager for connector
 * lifecycle events on this link.
 */
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
        .add_connector = dm_dp_add_mst_connector,
        .destroy_connector = dm_dp_destroy_mst_connector,
        .register_connector = dm_dp_mst_register_connector
};
484
/*
 * One-time DP connector setup: initialize (but do not yet register) the DP
 * AUX channel, hook up CEC, and — for non-eDP connectors — initialize the
 * MST topology manager. AUX registration itself is deferred to the
 * connector's ->late_register hook, hence the "split" registration.
 */
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
                                       struct amdgpu_dm_connector *aconnector)
{
        aconnector->dm_dp_aux.aux.name = "dmdc";
        aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
        aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

        /* Init only; drm_dp_aux_register() runs in late_register once the
         * connector's kdev exists.
         */
        drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
        drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
                                      &aconnector->base);

        /* eDP never carries MST; nothing more to set up. */
        if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
                return;

        aconnector->mst_mgr.cbs = &dm_mst_cbs;
        /* Args 16/4: max DPCD transaction bytes and max payloads.
         * NOTE(review): return value ignored — confirm callers tolerate an
         * uninitialized mst_mgr on failure.
         */
        drm_dp_mst_topology_mgr_init(
                &aconnector->mst_mgr,
                dm->adev->ddev,
                &aconnector->dm_dp_aux.aux,
                16,
                4,
                aconnector->connector_id);
}
508
/* Convert the link's bandwidth into the PBN-per-timeslot divider used by
 * the VCPI allocation helpers; 0 when no link is given.
 */
int dm_mst_get_pbn_divider(struct dc_link *link)
{
        int link_kbps;

        if (!link)
                return 0;

        link_kbps = dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link));

        return link_kbps / (8 * 1000 * 54);
}
517
518 #if defined(CONFIG_DRM_AMD_DC_DCN)
519
/* Per-stream inputs to the MST DSC fairness computation. */
struct dsc_mst_fairness_params {
        struct dc_crtc_timing *timing;          /* timing whose DSC config is (re)computed */
        struct dc_sink *sink;                   /* sink supplying DSC decoder caps */
        struct dc_dsc_bw_range bw_range;        /* feasible bandwidth range for the stream */
        bool compression_possible;              /* sink advertises DSC decode support */
        struct drm_dp_mst_port *port;           /* MST port carrying the stream */
};

/* Per-stream results of the fairness computation. */
struct dsc_mst_fairness_vars {
        int pbn;                /* PBN currently allocated to the stream */
        bool dsc_enabled;       /* whether DSC ends up enabled */
        int bpp_x16;            /* chosen bits-per-pixel in 1/16 bpp units */
};
533
534 static int kbps_to_peak_pbn(int kbps)
535 {
536         u64 peak_kbps = kbps;
537
538         peak_kbps *= 1006;
539         peak_kbps = div_u64(peak_kbps, 1000);
540         return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
541 }
542
543 static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
544                 struct dsc_mst_fairness_vars *vars,
545                 int count)
546 {
547         int i;
548
549         for (i = 0; i < count; i++) {
550                 memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
551                 if (vars[i].dsc_enabled && dc_dsc_compute_config(
552                                         params[i].sink->ctx->dc->res_pool->dscs[0],
553                                         &params[i].sink->sink_dsc_caps.dsc_dec_caps,
554                                         params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
555                                         0,
556                                         params[i].timing,
557                                         &params[i].timing->dsc_cfg)) {
558                         params[i].timing->flags.DSC = 1;
559                         params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
560                 } else {
561                         params[i].timing->flags.DSC = 0;
562                 }
563         }
564 }
565
566 static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
567 {
568         struct dc_dsc_config dsc_config;
569         u64 kbps;
570
571         kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
572         dc_dsc_compute_config(
573                         param.sink->ctx->dc->res_pool->dscs[0],
574                         &param.sink->sink_dsc_caps.dsc_dec_caps,
575                         param.sink->ctx->dc->debug.dsc_min_slice_height_override,
576                         (int) kbps, param.timing, &dsc_config);
577
578         return dsc_config.bits_per_pixel;
579 }
580
/*
 * Greedily grow the DSC bits-per-pixel of every DSC-enabled stream toward
 * its maximum: the stream with the least remaining headroom ("slack") is
 * topped up first, each stream getting at most a fair share of the unused
 * link timeslots per round. Every candidate PBN allocation is validated
 * with drm_dp_mst_atomic_check() and rolled back if it does not fit; VCPI
 * helper failures abort the whole pass, leaving the atomic state to
 * surface the error later.
 */
static void increase_dsc_bpp(struct drm_atomic_state *state,
                             struct dc_link *dc_link,
                             struct dsc_mst_fairness_params *params,
                             struct dsc_mst_fairness_vars *vars,
                             int count)
{
        int i;
        bool bpp_increased[MAX_PIPES];
        int initial_slack[MAX_PIPES];
        int min_initial_slack;
        int next_index;
        int remaining_to_increase = 0;
        int pbn_per_timeslot;
        int link_timeslots_used;
        int fair_pbn_alloc;

        /* Slack = extra PBN each DSC stream could still use up to max_kbps;
         * non-DSC streams are marked done immediately.
         */
        for (i = 0; i < count; i++) {
                if (vars[i].dsc_enabled) {
                        initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
                        bpp_increased[i] = false;
                        remaining_to_increase += 1;
                } else {
                        initial_slack[i] = 0;
                        bpp_increased[i] = true;
                }
        }

        pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
                        dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);

        while (remaining_to_increase) {
                /* Pick the not-yet-bumped stream with the smallest slack. */
                next_index = -1;
                min_initial_slack = -1;
                for (i = 0; i < count; i++) {
                        if (!bpp_increased[i]) {
                                if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
                                        min_initial_slack = initial_slack[i];
                                        next_index = i;
                                }
                        }
                }

                if (next_index == -1)
                        break;

                /* Count timeslots currently consumed by all streams. */
                link_timeslots_used = 0;

                for (i = 0; i < count; i++)
                        link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);

                /* Fair share of the remaining budget (63 slots here) per
                 * stream still waiting for an increase.
                 */
                fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

                if (initial_slack[next_index] > fair_pbn_alloc) {
                        /* Stream wants more than its fair share: grant only
                         * the share, roll back if the check fails.
                         */
                        vars[next_index].pbn += fair_pbn_alloc;
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[next_index].port->mgr,
                                                          params[next_index].port,
                                                          vars[next_index].pbn,
                                                          dm_mst_get_pbn_divider(dc_link)) < 0)
                                return;
                        if (!drm_dp_mst_atomic_check(state)) {
                                vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
                        } else {
                                /* Revert and re-reserve the previous PBN. */
                                vars[next_index].pbn -= fair_pbn_alloc;
                                if (drm_dp_atomic_find_vcpi_slots(state,
                                                                  params[next_index].port->mgr,
                                                                  params[next_index].port,
                                                                  vars[next_index].pbn,
                                                                  dm_mst_get_pbn_divider(dc_link)) < 0)
                                        return;
                        }
                } else {
                        /* Remaining slack fits in the share: try to max the
                         * stream out at its target bpp.
                         */
                        vars[next_index].pbn += initial_slack[next_index];
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[next_index].port->mgr,
                                                          params[next_index].port,
                                                          vars[next_index].pbn,
                                                          dm_mst_get_pbn_divider(dc_link)) < 0)
                                return;
                        if (!drm_dp_mst_atomic_check(state)) {
                                vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
                        } else {
                                /* Revert and re-reserve the previous PBN. */
                                vars[next_index].pbn -= initial_slack[next_index];
                                if (drm_dp_atomic_find_vcpi_slots(state,
                                                                  params[next_index].port->mgr,
                                                                  params[next_index].port,
                                                                  vars[next_index].pbn,
                                                                  dm_mst_get_pbn_divider(dc_link)) < 0)
                                        return;
                        }
                }

                bpp_increased[next_index] = true;
                remaining_to_increase--;
        }
}
677
/*
 * For streams that already run at their maximum DSC target bpp, try
 * switching DSC off entirely, starting with the stream that would gain
 * the most uncompressed bandwidth. Each attempt is validated with
 * drm_dp_mst_atomic_check(); on failure the stream's PBN is restored to
 * its compressed maximum. VCPI helper failures abort the whole pass.
 */
static void try_disable_dsc(struct drm_atomic_state *state,
                            struct dc_link *dc_link,
                            struct dsc_mst_fairness_params *params,
                            struct dsc_mst_fairness_vars *vars,
                            int count)
{
        int i;
        bool tried[MAX_PIPES];
        int kbps_increase[MAX_PIPES];
        int max_kbps_increase;
        int next_index;
        int remaining_to_try = 0;

        /* Candidates: DSC streams already at max target bpp; everything
         * else is marked as tried from the start.
         */
        for (i = 0; i < count; i++) {
                if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
                        kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
                        tried[i] = false;
                        remaining_to_try += 1;
                } else {
                        kbps_increase[i] = 0;
                        tried[i] = true;
                }
        }

        while (remaining_to_try) {
                /* Pick the untried stream with the largest potential gain. */
                next_index = -1;
                max_kbps_increase = -1;
                for (i = 0; i < count; i++) {
                        if (!tried[i]) {
                                if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
                                        max_kbps_increase = kbps_increase[i];
                                        next_index = i;
                                }
                        }
                }

                if (next_index == -1)
                        break;

                /* Tentatively reserve the full uncompressed bandwidth. */
                vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
                if (drm_dp_atomic_find_vcpi_slots(state,
                                                  params[next_index].port->mgr,
                                                  params[next_index].port,
                                                  vars[next_index].pbn,
                                                  0) < 0)
                        return;

                if (!drm_dp_mst_atomic_check(state)) {
                        /* It fits uncompressed: disable DSC for this stream. */
                        vars[next_index].dsc_enabled = false;
                        vars[next_index].bpp_x16 = 0;
                } else {
                        /* Doesn't fit: restore the compressed maximum. */
                        vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[next_index].port->mgr,
                                                          params[next_index].port,
                                                          vars[next_index].pbn,
                                                          dm_mst_get_pbn_divider(dc_link)) < 0)
                                return;
                }

                tried[next_index] = true;
                remaining_to_try--;
        }
}
742
/*
 * Compute a fair DSC configuration for every stream on one MST link.
 *
 * Strategy (each phase re-validates the topology with
 * drm_dp_mst_atomic_check(), which returns 0 when everything fits):
 *   1. Try all streams uncompressed; if that fits, DSC stays off.
 *   2. Otherwise force maximum compression on every DSC-capable stream;
 *      if even that does not fit, fail.
 *   3. Otherwise raise bpp where bandwidth allows (increase_dsc_bpp) and
 *      switch DSC back off for streams that fit without it
 *      (try_disable_dsc), then commit the result into the stream timings.
 *
 * Note that drm_dp_atomic_find_vcpi_slots() mutates @state as a side
 * effect, so the ordering of the allocation calls and the atomic checks
 * is significant.
 *
 * Returns true on success, false if VCPI slot allocation fails or the
 * link cannot carry the streams even at maximum compression.
 */
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					     struct dc_state *dc_state,
					     struct dc_link *dc_link)
{
	int i;
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	/* vars[0..count-1] are fully written in the loops below before use */
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int count = 0;

	memset(params, 0, sizeof(params));

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		/* Only streams on this link participate in the computation. */
		if (stream->link != dc_link)
			continue;

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		params[count].port = aconnector->port;
		params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
		/*
		 * On failure to compute a DSC bandwidth range, fall back to
		 * the uncompressed bandwidth requirement for this timing.
		 */
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp,
				dsc_policy.max_target_bpp,
				&stream->sink->sink_dsc_caps.dsc_dec_caps,
				&stream->timing, &params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

		count++;
	}
	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
		vars[i].dsc_enabled = false;
		vars[i].bpp_x16 = 0;
		/* pbn_div of 0: use the manager's default divider (no DSC). */
		if (drm_dp_atomic_find_vcpi_slots(state,
						 params[i].port->mgr,
						 params[i].port,
						 vars[i].pbn,
						 0) < 0)
			return false;
	}
	/* 0 from drm_dp_mst_atomic_check() means the topology fits as-is. */
	if (!drm_dp_mst_atomic_check(state)) {
		set_dsc_configs_from_fairness_vars(params, vars, count);
		return true;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible) {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
			vars[i].dsc_enabled = true;
			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			/* DSC streams need the link-specific PBN divider. */
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		} else {
			/* Non-DSC-capable sinks stay at uncompressed bandwidth. */
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
			vars[i].dsc_enabled = false;
			vars[i].bpp_x16 = 0;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  0) < 0)
				return false;
		}
	}
	/* If max compression still does not fit, the config is impossible. */
	if (drm_dp_mst_atomic_check(state))
		return false;

	/* Optimize degree of compression */
	increase_dsc_bpp(state, dc_link, params, vars, count);

	try_disable_dsc(state, dc_link, params, vars, count);

	set_dsc_configs_from_fairness_vars(params, vars, count);

	return true;
}
837
838 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
839                                        struct dc_state *dc_state)
840 {
841         int i, j;
842         struct dc_stream_state *stream;
843         bool computed_streams[MAX_PIPES];
844         struct amdgpu_dm_connector *aconnector;
845
846         for (i = 0; i < dc_state->stream_count; i++)
847                 computed_streams[i] = false;
848
849         for (i = 0; i < dc_state->stream_count; i++) {
850                 stream = dc_state->streams[i];
851
852                 if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
853                         continue;
854
855                 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
856
857                 if (!aconnector || !aconnector->dc_sink)
858                         continue;
859
860                 if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
861                         continue;
862
863                 if (computed_streams[i])
864                         continue;
865
866                 mutex_lock(&aconnector->mst_mgr.lock);
867                 if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
868                         mutex_unlock(&aconnector->mst_mgr.lock);
869                         return false;
870                 }
871                 mutex_unlock(&aconnector->mst_mgr.lock);
872
873                 for (j = 0; j < dc_state->stream_count; j++) {
874                         if (dc_state->streams[j]->link == stream->link)
875                                 computed_streams[j] = true;
876                 }
877         }
878
879         for (i = 0; i < dc_state->stream_count; i++) {
880                 stream = dc_state->streams[i];
881
882                 if (stream->timing.flags.DSC == 1)
883                         dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
884         }
885
886         return true;
887 }
888
889 #endif