/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"

#include "dc.h"
#include "dm_helpers.h"

#include "dc_link_ddc.h"
#include "ddc_service_types.h"
#include "dpcd_defs.h"

#include "i2caux_interface.h"
#include "dmub_cmd.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "dc/dcn20/dcn20_resource.h"
bool is_timing_changed(struct dc_stream_state *cur_stream,
                       struct dc_stream_state *new_stream);

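/*
 * Adapt a drm_dp_aux_msg to DC's aux_payload and hand it to DC for the
 * actual transfer. A summary of the request-flag decoding done below (not
 * an exhaustive list of DP_AUX_* request types):
 *   - i2c_over_aux is cleared when the native bit is present, so both
 *     native reads and native writes go out as native AUX transactions;
 *   - write is set unless the read bit is present;
 *   - mot marks an I2C transfer as Middle-Of-Transaction.
 *
 * For example, a request of DP_AUX_I2C_WRITE | DP_AUX_I2C_MOT decodes to
 * i2c_over_aux = true, write = true, mot = true.
 */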
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                                  struct drm_dp_aux_msg *msg)
{
        ssize_t result = 0;
        struct aux_payload payload;
        enum aux_return_code_type operation_result;
        struct amdgpu_device *adev;
        struct ddc_service *ddc;

        if (WARN_ON(msg->size > 16))
                return -E2BIG;

        payload.address = msg->address;
        payload.data = msg->buffer;
        payload.length = msg->size;
        payload.reply = &msg->reply;
        payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
        payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
        payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
        payload.write_status_update =
                        (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
        payload.defer_delay = 0;

        result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
                                      &operation_result);

        /*
         * Workaround for certain Intel platforms where HPD unexpectedly
         * pulls low during the first sideband message transaction, making
         * the transfer return AUX_RET_ERROR_HPD_DISCON. The AUX transaction
         * actually succeeds in that case, so bypass the error.
         */
        ddc = TO_DM_AUX(aux)->ddc_service;
        adev = ddc->ctx->driver_context;
        if (adev->dm.aux_hpd_discon_quirk) {
                if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
                        operation_result == AUX_RET_ERROR_HPD_DISCON) {
                        result = 0;
                        operation_result = AUX_RET_SUCCESS;
                }
        }

        if (payload.write && result >= 0)
                result = msg->size;

        if (result < 0)
                switch (operation_result) {
                case AUX_RET_SUCCESS:
                        break;
                case AUX_RET_ERROR_HPD_DISCON:
                case AUX_RET_ERROR_UNKNOWN:
                case AUX_RET_ERROR_INVALID_OPERATION:
                case AUX_RET_ERROR_PROTOCOL_ERROR:
                        result = -EIO;
                        break;
                case AUX_RET_ERROR_INVALID_REPLY:
                case AUX_RET_ERROR_ENGINE_ACQUIRE:
                        result = -EBUSY;
                        break;
                case AUX_RET_ERROR_TIMEOUT:
                        result = -ETIMEDOUT;
                        break;
                }

        return result;
}

static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector =
                to_amdgpu_dm_connector(connector);

        if (aconnector->dc_sink) {
                dc_link_remove_remote_sink(aconnector->dc_link,
                                           aconnector->dc_sink);
                dc_sink_release(aconnector->dc_sink);
        }

        kfree(aconnector->edid);

        drm_connector_cleanup(connector);
        drm_dp_mst_put_port_malloc(aconnector->port);
        kfree(aconnector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
        int r;

        r = drm_dp_mst_connector_late_register(connector,
                                               amdgpu_dm_connector->port);
        if (r < 0)
                return r;

#if defined(CONFIG_DEBUG_FS)
        connector_debugfs_init(amdgpu_dm_connector);
#endif

        return 0;
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
        struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

        drm_dp_mst_connector_early_unregister(connector, port);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = dm_dp_mst_connector_destroy,
        .reset = amdgpu_dm_connector_funcs_reset,
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
        .late_register = amdgpu_dm_mst_connector_late_register,
        .early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
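/*
 * A sketch of the condition tested below: certain branch devices
 * (branch_dev_id DP_BRANCH_DEVICE_ID_90CC24 at DPCD rev 1.2 or 1.4 with
 * more than one sink) need their DSC registers accessed through the MST
 * root port's AUX channel rather than the per-port AUX that
 * drm_dp_mst_dsc_aux_for_port() would otherwise pick.
 */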
bool needs_dsc_aux_workaround(struct dc_link *link)
{
        if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
            (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
            link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
                return true;

        return false;
}

static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
        struct dc_sink *dc_sink = aconnector->dc_sink;
        struct drm_dp_mst_port *port = aconnector->port;
        u8 dsc_caps[16] = { 0 };
        u8 dsc_branch_dec_caps_raw[3] = { 0 };  // DSC branch decoder caps 0xA0 ~ 0xA2
        u8 *dsc_branch_dec_caps = NULL;

        aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);

        /*
         * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
         * because it only checks the DSC/FEC caps of the immediate port and
         * not those of the dock as a whole.
         *
         * This case returns NULL: a DSC-capable MST dock connected to a
         * non-FEC/DSC-capable display.
         *
         * Workaround: explicitly check for that case and use the MST dock's
         * AUX as dsc_aux.
         */
        if (!aconnector->dsc_aux && !port->parent->port_parent &&
            needs_dsc_aux_workaround(aconnector->dc_link))
                aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;

        if (!aconnector->dsc_aux)
                return false;

        if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
                return false;

        if (drm_dp_dpcd_read(aconnector->dsc_aux,
                        DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
                dsc_branch_dec_caps = dsc_branch_dec_caps_raw;

        if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
                                  dsc_caps, dsc_branch_dec_caps,
                                  &dc_sink->dsc_caps.dsc_dec_caps))
                return false;

        return true;
}

static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
{
        union dp_downstream_port_present ds_port_present;

        if (!aconnector->dsc_aux)
                return false;

        if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) {
                DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n");
                return false;
        }

        aconnector->mst_downstream_port_present = ds_port_present;
        DRM_INFO("Downstream port present %d, type %d\n",
                        ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE);

        return true;
}
#endif

static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        int ret = 0;

        if (!aconnector)
                return drm_add_edid_modes(connector, NULL);

        if (!aconnector->edid) {
                struct edid *edid;

                edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

                if (!edid) {
                        drm_connector_update_edid_property(
                                &aconnector->base,
                                NULL);

                        DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
                        if (!aconnector->dc_sink) {
                                struct dc_sink *dc_sink;
                                struct dc_sink_init_data init_params = {
                                        .link = aconnector->dc_link,
                                        .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

                                dc_sink = dc_link_add_remote_sink(
                                        aconnector->dc_link,
                                        NULL,
                                        0,
                                        &init_params);

                                if (!dc_sink) {
                                        DRM_ERROR("Unable to add a remote sink\n");
                                        return 0;
                                }

                                dc_sink->priv = aconnector;
                                aconnector->dc_sink = dc_sink;
                        }

                        return ret;
                }

                aconnector->edid = edid;
        }

        if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
        }

        if (!aconnector->dc_sink) {
                struct dc_sink *dc_sink;
                struct dc_sink_init_data init_params = {
                                .link = aconnector->dc_link,
                                .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

                dc_sink = dc_link_add_remote_sink(
                        aconnector->dc_link,
                        (uint8_t *)aconnector->edid,
                        (aconnector->edid->extensions + 1) * EDID_LENGTH,
                        &init_params);

                if (!dc_sink) {
                        DRM_ERROR("Unable to add a remote sink\n");
                        return 0;
                }

                dc_sink->priv = aconnector;
                /* dc_link_add_remote_sink returns a new reference */
                aconnector->dc_sink = dc_sink;

                if (aconnector->dc_sink) {
                        amdgpu_dm_update_freesync_caps(
                                        connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
                        if (!validate_dsc_caps_on_connector(aconnector))
                                memset(&aconnector->dc_sink->dsc_caps,
                                       0, sizeof(aconnector->dc_sink->dsc_caps));

                        if (!retrieve_downstream_port_device(aconnector))
                                memset(&aconnector->mst_downstream_port_present,
                                        0, sizeof(aconnector->mst_downstream_port_present));
#endif
                }
        }

        drm_connector_update_edid_property(
                                        &aconnector->base, aconnector->edid);

        ret = drm_add_edid_modes(connector, aconnector->edid);

        return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
                           struct drm_atomic_state *state)
{
        struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
                                                                                         connector);
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);

        return &adev->dm.mst_encoders[acrtc->crtc_id].base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
                 struct drm_modeset_acquire_ctx *ctx, bool force)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct amdgpu_dm_connector *master = aconnector->mst_port;

        if (drm_connector_is_unregistered(connector))
                return connector_status_disconnected;

        return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
                                      aconnector->port);
}

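/*
 * Release this connector's VCPI slot allocation when its CRTC is being
 * disabled. A rough summary of the logic below: if the connector had no
 * CRTC in the old state there is nothing to release; if it still has a
 * CRTC in the new state, slots are only released on a full modeset that
 * leaves the CRTC disabled.
 */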
static int dm_dp_mst_atomic_check(struct drm_connector *connector,
                                struct drm_atomic_state *state)
{
        struct drm_connector_state *new_conn_state =
                        drm_atomic_get_new_connector_state(state, connector);
        struct drm_connector_state *old_conn_state =
                        drm_atomic_get_old_connector_state(state, connector);
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct drm_crtc_state *new_crtc_state;
        struct drm_dp_mst_topology_mgr *mst_mgr;
        struct drm_dp_mst_port *mst_port;

        mst_port = aconnector->port;
        mst_mgr = &aconnector->mst_port->mst_mgr;

        if (!old_conn_state->crtc)
                return 0;

        if (new_conn_state->crtc) {
                new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
                if (!new_crtc_state ||
                    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
                    new_crtc_state->enable)
                        return 0;
        }

        return drm_dp_atomic_release_vcpi_slots(state,
                                                mst_mgr,
                                                mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
        .get_modes = dm_dp_mst_get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
        .atomic_best_encoder = dm_mst_atomic_best_encoder,
        .detect_ctx = dm_dp_mst_detect,
        .atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
        .destroy = amdgpu_dm_encoder_destroy,
};

void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
                struct drm_encoder *encoder = &amdgpu_encoder->base;

                encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

                drm_encoder_init(
                        dev,
                        &amdgpu_encoder->base,
                        &amdgpu_dm_encoder_funcs,
                        DRM_MODE_ENCODER_DPMST,
                        NULL);

                drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
        }
}

static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                        struct drm_dp_mst_port *port,
                        const char *pathprop)
{
        struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
        struct drm_device *dev = master->base.dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        int i;

        aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
        if (!aconnector)
                return NULL;

        connector = &aconnector->base;
        aconnector->port = port;
        aconnector->mst_port = master;

        if (drm_connector_init(
                dev,
                connector,
                &dm_dp_mst_connector_funcs,
                DRM_MODE_CONNECTOR_DisplayPort)) {
                kfree(aconnector);
                return NULL;
        }
        drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

        amdgpu_dm_connector_init_helper(
                &adev->dm,
                aconnector,
                DRM_MODE_CONNECTOR_DisplayPort,
                master->dc_link,
                master->connector_id);

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_connector_attach_encoder(&aconnector->base,
                                             &adev->dm.mst_encoders[i].base);
        }

        connector->max_bpc_property = master->base.max_bpc_property;
        if (connector->max_bpc_property)
                drm_connector_attach_max_bpc_property(connector, 8, 16);

        connector->vrr_capable_property = master->base.vrr_capable_property;
        if (connector->vrr_capable_property)
                drm_connector_attach_vrr_capable_property(connector);

        drm_object_attach_property(
                &connector->base,
                dev->mode_config.path_property,
                0);
        drm_object_attach_property(
                &connector->base,
                dev->mode_config.tile_property,
                0);

        drm_connector_set_path_property(connector, pathprop);

        /*
         * Initialize the connector state before adding the connector to the
         * drm and framebuffer lists
         */
        amdgpu_dm_connector_funcs_reset(connector);

        drm_dp_mst_get_port_malloc(port);

        return connector;
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
        .add_connector = dm_dp_add_mst_connector,
};

void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
                                       struct amdgpu_dm_connector *aconnector,
                                       int link_index)
{
        struct dc_link_settings max_link_enc_cap = {0};

        aconnector->dm_dp_aux.aux.name =
                kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
                          link_index);
        aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
        aconnector->dm_dp_aux.aux.drm_dev = dm->ddev;
        aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

        drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
        drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
                                      &aconnector->base);

        if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
                return;

        dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
        aconnector->mst_mgr.cbs = &dm_mst_cbs;
        drm_dp_mst_topology_mgr_init(
                &aconnector->mst_mgr,
                adev_to_drm(dm->adev),
                &aconnector->dm_dp_aux.aux,
                16,
                4,
                max_link_enc_cap.lane_count,
                drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate),
                aconnector->connector_id);

        drm_connector_attach_dp_subconnector_property(&aconnector->base);
}

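/*
 * Return the PBN-per-time-slot divider for the link. One PBN unit is
 * 54/64 MBps and an MTP carries 64 time slots, so
 * link_bw_kbps / (8 * 1000 * 54) = (link bandwidth in MBps) / 54 equals
 * the PBN capacity of a single time slot.
 *
 * A hedged worked example (the exact figure depends on what
 * dc_link_bandwidth_kbps() reports): a link carrying 4,320,000 kbps of
 * payload yields 4320000 / 432000 = 10 PBN per time slot.
 */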
int dm_mst_get_pbn_divider(struct dc_link *link)
{
        if (!link)
                return 0;

        return dc_link_bandwidth_kbps(link,
                        dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)

struct dsc_mst_fairness_params {
        struct dc_crtc_timing *timing;
        struct dc_sink *sink;
        struct dc_dsc_bw_range bw_range;
        bool compression_possible;
        struct drm_dp_mst_port *port;
        enum dsc_clock_force_state clock_force_enable;
        uint32_t num_slices_h;
        uint32_t num_slices_v;
        uint32_t bpp_overwrite;
        struct amdgpu_dm_connector *aconnector;
};

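/*
 * Convert a stream's bandwidth in kbps to its peak PBN requirement.
 * The 1006/1000 factor adds a 0.6% downspread margin; the result is then
 * scaled by 64/54 and converted from kbps to MBps (hence the 54 * 8 * 1000
 * divisor), rounding up.
 *
 * Worked example: kbps_to_peak_pbn(8000000) gives
 * peak_kbps = 8000000 * 1006 / 1000 = 8048000, and
 * DIV64_U64_ROUND_UP(8048000 * 64, 432000) = 1193 PBN.
 */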
static int kbps_to_peak_pbn(int kbps)
{
        u64 peak_kbps = kbps;

        peak_kbps *= 1006;
        peak_kbps = div_u64(peak_kbps, 1000);
        return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}

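/*
 * Apply the solved fairness variables back onto the stream timings: for
 * each stream whose vars entry has DSC enabled, compute a DSC config at
 * the solved bpp (or the debugfs bpp/slice overwrites, when set) and
 * record the stream's PBN in dsc_cfg.mst_pbn. k is the index of this
 * link's first entry in the shared vars array.
 */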
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
                struct dsc_mst_fairness_vars *vars,
                int count,
                int k)
{
        int i;

        for (i = 0; i < count; i++) {
                memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
                if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
                                        params[i].sink->ctx->dc->res_pool->dscs[0],
                                        &params[i].sink->dsc_caps.dsc_dec_caps,
                                        params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
                                        params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
                                        0,
                                        params[i].timing,
                                        &params[i].timing->dsc_cfg)) {
                        params[i].timing->flags.DSC = 1;

                        if (params[i].bpp_overwrite)
                                params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
                        else
                                params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16;

                        if (params[i].num_slices_h)
                                params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;

                        if (params[i].num_slices_v)
                                params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
                } else {
                        params[i].timing->flags.DSC = 0;
                }
                params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn;
        }

        for (i = 0; i < count; i++) {
                if (params[i].sink) {
                        if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
                                params[i].sink->sink_signal != SIGNAL_TYPE_NONE)
                                DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i,
                                        params[i].sink->edid_caps.display_name);
                }

                DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n",
                        params[i].timing->flags.DSC,
                        params[i].timing->dsc_cfg.bits_per_pixel,
                        vars[i + k].pbn);
        }
}

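/*
 * Recover the DSC target bpp (in 1/16 bpp units) corresponding to a PBN
 * value: convert the PBN back to kbps (the 994/1000 factor approximately
 * removes the margin that kbps_to_peak_pbn() added) and let DC compute
 * the DSC config whose bandwidth fits that budget.
 */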
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
        struct dc_dsc_config dsc_config;
        u64 kbps;

        kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
        dc_dsc_compute_config(
                        param.sink->ctx->dc->res_pool->dscs[0],
                        &param.sink->dsc_caps.dsc_dec_caps,
                        param.sink->ctx->dc->debug.dsc_min_slice_height_override,
                        param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
                        (int) kbps, param.timing, &dsc_config);

        return dsc_config.bits_per_pixel;
}

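/*
 * Greedily distribute leftover link bandwidth among the DSC-enabled
 * streams. Each stream's "slack" is the PBN gap between its current
 * allocation and its uncompressed peak. On every pass the stream with the
 * smallest remaining slack gets either its full slack or a fair share of
 * the unused time slots (63 minus those already used), the atomic state
 * is re-checked, and the grant is rolled back if the topology no longer
 * fits.
 */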
static void increase_dsc_bpp(struct drm_atomic_state *state,
                             struct dc_link *dc_link,
                             struct dsc_mst_fairness_params *params,
                             struct dsc_mst_fairness_vars *vars,
                             int count,
                             int k)
{
        int i;
        bool bpp_increased[MAX_PIPES];
        int initial_slack[MAX_PIPES];
        int min_initial_slack;
        int next_index;
        int remaining_to_increase = 0;
        int pbn_per_timeslot;
        int link_timeslots_used;
        int fair_pbn_alloc;

        pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);

        for (i = 0; i < count; i++) {
                if (vars[i + k].dsc_enabled) {
                        initial_slack[i] =
                        kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
                        bpp_increased[i] = false;
                        remaining_to_increase += 1;
                } else {
                        initial_slack[i] = 0;
                        bpp_increased[i] = true;
                }
        }

        while (remaining_to_increase) {
                next_index = -1;
                min_initial_slack = -1;
                for (i = 0; i < count; i++) {
                        if (!bpp_increased[i]) {
                                if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
                                        min_initial_slack = initial_slack[i];
                                        next_index = i;
                                }
                        }
                }

                if (next_index == -1)
                        break;

                link_timeslots_used = 0;

                for (i = 0; i < count; i++)
                        link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot);

                fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

                if (initial_slack[next_index] > fair_pbn_alloc) {
                        vars[next_index].pbn += fair_pbn_alloc;
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[next_index].port->mgr,
                                                          params[next_index].port,
                                                          vars[next_index].pbn,
                                                          pbn_per_timeslot) < 0)
                                return;
                        if (!drm_dp_mst_atomic_check(state)) {
                                vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
                        } else {
                                vars[next_index].pbn -= fair_pbn_alloc;
                                if (drm_dp_atomic_find_vcpi_slots(state,
                                                                  params[next_index].port->mgr,
                                                                  params[next_index].port,
                                                                  vars[next_index].pbn,
                                                                  pbn_per_timeslot) < 0)
                                        return;
                        }
                } else {
                        vars[next_index].pbn += initial_slack[next_index];
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[next_index].port->mgr,
                                                          params[next_index].port,
                                                          vars[next_index].pbn,
                                                          pbn_per_timeslot) < 0)
                                return;
                        if (!drm_dp_mst_atomic_check(state)) {
                                vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
                        } else {
                                vars[next_index].pbn -= initial_slack[next_index];
                                if (drm_dp_atomic_find_vcpi_slots(state,
                                                                  params[next_index].port->mgr,
                                                                  params[next_index].port,
                                                                  vars[next_index].pbn,
                                                                  pbn_per_timeslot) < 0)
                                        return;
                        }
                }

                bpp_increased[next_index] = true;
                remaining_to_increase--;
        }
}

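/*
 * After bpp optimization, try to turn DSC back off entirely for streams
 * that reached their maximum target bpp (and were not force-enabled via
 * debugfs). Candidates are visited in order of the largest bandwidth gain
 * from going uncompressed; each attempt is validated with
 * drm_dp_mst_atomic_check() and rolled back if it does not fit.
 */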
static void try_disable_dsc(struct drm_atomic_state *state,
                            struct dc_link *dc_link,
                            struct dsc_mst_fairness_params *params,
                            struct dsc_mst_fairness_vars *vars,
                            int count,
                            int k)
{
        int i;
        bool tried[MAX_PIPES];
        int kbps_increase[MAX_PIPES];
        int max_kbps_increase;
        int next_index;
        int remaining_to_try = 0;

        for (i = 0; i < count; i++) {
                if (vars[i + k].dsc_enabled
                                && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16
                                && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
                        kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
                        tried[i] = false;
                        remaining_to_try += 1;
                } else {
                        kbps_increase[i] = 0;
                        tried[i] = true;
                }
        }

        while (remaining_to_try) {
                next_index = -1;
                max_kbps_increase = -1;
                for (i = 0; i < count; i++) {
                        if (!tried[i]) {
                                if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
                                        max_kbps_increase = kbps_increase[i];
                                        next_index = i;
                                }
                        }
                }

                if (next_index == -1)
                        break;

                vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
                if (drm_dp_atomic_find_vcpi_slots(state,
                                                  params[next_index].port->mgr,
                                                  params[next_index].port,
                                                  vars[next_index].pbn,
                                                  dm_mst_get_pbn_divider(dc_link)) < 0)
                        return;

                if (!drm_dp_mst_atomic_check(state)) {
                        vars[next_index].dsc_enabled = false;
                        vars[next_index].bpp_x16 = 0;
                } else {
                        vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[next_index].port->mgr,
                                                          params[next_index].port,
                                                          vars[next_index].pbn,
                                                          dm_mst_get_pbn_divider(dc_link)) < 0)
                                return;
                }

                tried[next_index] = true;
                remaining_to_try--;
        }
}

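/*
 * Three-phase DSC solver for a single MST physical link:
 *  1. try every stream uncompressed; if the topology fits (and no debugfs
 *     force-enable is set), stop there;
 *  2. otherwise enable maximum compression on every DSC-capable stream
 *     and fail if even that does not fit;
 *  3. then relax compression via increase_dsc_bpp() and try_disable_dsc()
 *     before committing the result to the stream timings.
 */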
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                                             struct dc_state *dc_state,
                                             struct dc_link *dc_link,
                                             struct dsc_mst_fairness_vars *vars,
                                             int *link_vars_start_index)
{
        int i, k;
        struct dc_stream_state *stream;
        struct dsc_mst_fairness_params params[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
        int count = 0;
        bool debugfs_overwrite = false;

        memset(params, 0, sizeof(params));

        /* Set up params */
        for (i = 0; i < dc_state->stream_count; i++) {
                struct dc_dsc_policy dsc_policy = {0};

                stream = dc_state->streams[i];

                if (stream->link != dc_link)
                        continue;

                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
                if (!aconnector)
                        continue;

                if (!aconnector->port)
                        continue;

                stream->timing.flags.DSC = 0;

                params[count].timing = &stream->timing;
                params[count].sink = stream->sink;
                params[count].aconnector = aconnector;
                params[count].port = aconnector->port;
                params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
                if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
                        debugfs_overwrite = true;
                params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
                params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
                params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
                params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
                dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
                if (!dc_dsc_compute_bandwidth_range(
                                stream->sink->ctx->dc->res_pool->dscs[0],
                                stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
                                dsc_policy.min_target_bpp * 16,
                                dsc_policy.max_target_bpp * 16,
                                &stream->sink->dsc_caps.dsc_dec_caps,
                                &stream->timing, &params[count].bw_range))
                        params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

                count++;
        }

        if (count == 0) {
                ASSERT(0);
                return true;
        }

        /* k is the start index in vars for the phy link currently used by the MST hub */
        k = *link_vars_start_index;
        /* set the vars start index for the next MST hub phy link */
        *link_vars_start_index += count;

        /* Try no compression */
        for (i = 0; i < count; i++) {
                vars[i + k].aconnector = params[i].aconnector;
                vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
                vars[i + k].dsc_enabled = false;
                vars[i + k].bpp_x16 = 0;
                if (drm_dp_atomic_find_vcpi_slots(state,
                                                 params[i].port->mgr,
                                                 params[i].port,
                                                 vars[i + k].pbn,
                                                 dm_mst_get_pbn_divider(dc_link)) < 0)
                        return false;
        }
        if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
                set_dsc_configs_from_fairness_vars(params, vars, count, k);
                return true;
        }

        /* Try max compression */
        for (i = 0; i < count; i++) {
                if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
                        vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
                        vars[i + k].dsc_enabled = true;
                        vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[i].port->mgr,
                                                          params[i].port,
                                                          vars[i + k].pbn,
                                                          dm_mst_get_pbn_divider(dc_link)) < 0)
                                return false;
                } else {
                        vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
                        vars[i + k].dsc_enabled = false;
                        vars[i + k].bpp_x16 = 0;
                        if (drm_dp_atomic_find_vcpi_slots(state,
                                                          params[i].port->mgr,
                                                          params[i].port,
                                                          vars[i + k].pbn,
                                                          dm_mst_get_pbn_divider(dc_link)) < 0)
                                return false;
                }
        }
        if (drm_dp_mst_atomic_check(state))
                return false;

        /* Optimize degree of compression */
        increase_dsc_bpp(state, dc_link, params, vars, count, k);

        try_disable_dsc(state, dc_link, params, vars, count, k);

        set_dsc_configs_from_fairness_vars(params, vars, count, k);

        return true;
}

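/*
 * Decide whether DSC configs on this MST link must be recomputed. Broadly,
 * recompute when (a) any stream on the link has a mode/active/connector
 * change in the new atomic state, or (b) a stream that exists on the link
 * in the current state is absent from the new request, i.e. a stream is
 * being removed.
 */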
static bool is_dsc_need_re_compute(
        struct drm_atomic_state *state,
        struct dc_state *dc_state,
        struct dc_link *dc_link)
{
        int i, j;
        bool is_dsc_need_re_compute = false;
        struct amdgpu_dm_connector *stream_on_link[MAX_PIPES];
        int new_stream_on_link_num = 0;
        struct amdgpu_dm_connector *aconnector;
        struct dc_stream_state *stream;
        const struct dc *dc = dc_link->dc;

        /* only check phys used by a DSC MST branch */
        if (dc_link->type != dc_connection_mst_branch)
                return false;

        if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
                dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
                return false;

        for (i = 0; i < MAX_PIPES; i++)
                stream_on_link[i] = NULL;

        /* check whether the new request contains a mode change */
        for (i = 0; i < dc_state->stream_count; i++) {
                struct drm_crtc_state *new_crtc_state;
                struct drm_connector_state *new_conn_state;

                stream = dc_state->streams[i];
                if (!stream)
                        continue;

                /* skip streams that do not use this MST link */
                if (stream->link != dc_link)
                        continue;

                aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
                if (!aconnector)
                        continue;

                stream_on_link[new_stream_on_link_num] = aconnector;
                new_stream_on_link_num++;

                new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
                if (!new_conn_state)
                        continue;

                if (IS_ERR(new_conn_state))
                        continue;

                if (!new_conn_state->crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
                if (!new_crtc_state)
                        continue;

                if (IS_ERR(new_crtc_state))
                        continue;

                if (new_crtc_state->enable && new_crtc_state->active) {
                        if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
                                new_crtc_state->connectors_changed)
                                return true;
                }
        }

        /*
         * check current_state for streams on this link that are missing
         * from the new request state
         */
        for (i = 0; i < dc->current_state->stream_count; i++) {
                stream = dc->current_state->streams[i];
                /* only check streams on this MST hub */
                if (stream->link != dc_link)
                        continue;

                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
                if (!aconnector)
                        continue;

                for (j = 0; j < new_stream_on_link_num; j++) {
                        if (stream_on_link[j]) {
                                if (aconnector == stream_on_link[j])
                                        break;
                        }
                }

                if (j == new_stream_on_link_num) {
                        /* not in the new state */
                        is_dsc_need_re_compute = true;
                        break;
                }
        }

        return is_dsc_need_re_compute;
}

bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                                       struct dc_state *dc_state,
                                       struct dsc_mst_fairness_vars *vars)
{
        int i, j;
        struct dc_stream_state *stream;
        bool computed_streams[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
        int link_vars_start_index = 0;

        for (i = 0; i < dc_state->stream_count; i++)
                computed_streams[i] = false;

        for (i = 0; i < dc_state->stream_count; i++) {
                stream = dc_state->streams[i];

                if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
                        continue;

                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

                if (!aconnector || !aconnector->dc_sink)
                        continue;

                if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
                        continue;

                if (computed_streams[i])
                        continue;

                if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
                        return false;

                if (!is_dsc_need_re_compute(state, dc_state, stream->link))
                        continue;

                mutex_lock(&aconnector->mst_mgr.lock);
                if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link,
                        vars, &link_vars_start_index)) {
                        mutex_unlock(&aconnector->mst_mgr.lock);
                        return false;
                }
                mutex_unlock(&aconnector->mst_mgr.lock);

                for (j = 0; j < dc_state->stream_count; j++) {
                        if (dc_state->streams[j]->link == stream->link)
                                computed_streams[j] = true;
                }
        }

        for (i = 0; i < dc_state->stream_count; i++) {
                stream = dc_state->streams[i];

                if (stream->timing.flags.DSC == 1)
                        if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
                                return false;
        }

        return true;
}

static bool
pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                                      struct dc_state *dc_state,
                                      struct dsc_mst_fairness_vars *vars)
{
        int i, j;
        struct dc_stream_state *stream;
        bool computed_streams[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
        int link_vars_start_index = 0;

        for (i = 0; i < dc_state->stream_count; i++)
                computed_streams[i] = false;

        for (i = 0; i < dc_state->stream_count; i++) {
                stream = dc_state->streams[i];

                if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
                        continue;

                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

                if (!aconnector || !aconnector->dc_sink)
                        continue;

                if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
                        continue;

                if (computed_streams[i])
                        continue;

                if (!is_dsc_need_re_compute(state, dc_state, stream->link))
                        continue;

                mutex_lock(&aconnector->mst_mgr.lock);
                if (!compute_mst_dsc_configs_for_link(state,
                                                      dc_state,
                                                      stream->link,
                                                      vars,
                                                      &link_vars_start_index)) {
                        mutex_unlock(&aconnector->mst_mgr.lock);
                        return false;
                }
                mutex_unlock(&aconnector->mst_mgr.lock);

                for (j = 0; j < dc_state->stream_count; j++) {
                        if (dc_state->streams[j]->link == stream->link)
                                computed_streams[j] = true;
                }
        }

        return true;
}

static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
                                              struct dc_stream_state *stream)
{
        int i;
        struct drm_crtc *crtc;
        struct drm_crtc_state *new_state, *old_state;

        for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) {
                struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state);

                if (dm_state->stream == stream)
                        return i;
        }
        return -1;
}

static bool is_link_to_dschub(struct dc_link *dc_link)
{
        union dpcd_dsc_basic_capabilities *dsc_caps =
                        &dc_link->dpcd_caps.dsc_caps.dsc_basic_caps;

        /* only check phys used by a DSC MST branch */
        if (dc_link->type != dc_connection_mst_branch)
                return false;

        if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT ||
              dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
                return false;
        return true;
}

static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
{
        int i;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        bool ret = false;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) {
                        ret = false;
                        break;
                }
                if (dm_crtc_state->stream && dm_crtc_state->stream->link)
                        if (is_link_to_dschub(dm_crtc_state->stream->link))
                                ret = true;
        }
        return ret;
}

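/*
 * Pre-compute DSC configs on a scratch copy of the dc_state so that, when
 * the resulting timings are unchanged from the current context, the
 * corresponding CRTCs can skip a full modeset (mode_changed is cleared).
 * Streams in the local copy that were re-created for validation are
 * released on exit.
 */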
void pre_validate_dsc(struct drm_atomic_state *state,
                      struct dm_atomic_state **dm_state_ptr,
                      struct dsc_mst_fairness_vars *vars)
{
        int i;
        struct dm_atomic_state *dm_state;
        struct dc_state *local_dc_state = NULL;

        if (!is_dsc_precompute_needed(state)) {
                DRM_INFO_ONCE("DSC precompute is not needed.\n");
                return;
        }
        if (dm_atomic_get_state(state, dm_state_ptr)) {
                DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
                return;
        }
        dm_state = *dm_state_ptr;

        /*
         * Create a local copy of the dc_state and copy the stream contents
         * of dm_state->context into it, making sure the stream pointers in
         * the local copy do not alias the streams in dm_state->context.
         */

        local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
        if (!local_dc_state)
                return;

        for (i = 0; i < local_dc_state->stream_count; i++) {
                struct dc_stream_state *stream = dm_state->context->streams[i];
                int ind = find_crtc_index_in_state_by_stream(state, stream);

                if (ind >= 0) {
                        struct amdgpu_dm_connector *aconnector;
                        struct drm_connector_state *drm_new_conn_state;
                        struct dm_connector_state *dm_new_conn_state;
                        struct dm_crtc_state *dm_old_crtc_state;

                        aconnector =
                                amdgpu_dm_find_first_crtc_matching_connector(state,
                                                                             state->crtcs[ind].ptr);
                        drm_new_conn_state =
                                drm_atomic_get_new_connector_state(state,
                                                                   &aconnector->base);
                        dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
                        dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);

                        local_dc_state->streams[i] =
                                create_validate_stream_for_sink(aconnector,
                                                                &state->crtcs[ind].new_state->mode,
                                                                dm_new_conn_state,
                                                                dm_old_crtc_state->stream);
                }
        }

        if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) {
                DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
                goto clean_exit;
        }

        /*
         * Compare each local stream's timing with dm_state->context; when
         * they match, clear crtc_state->mode_changed.
         */
        for (i = 0; i < local_dc_state->stream_count; i++) {
                struct dc_stream_state *stream = dm_state->context->streams[i];

                if (local_dc_state->streams[i] &&
                    is_timing_changed(stream, local_dc_state->streams[i])) {
                        DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
                } else {
                        int ind = find_crtc_index_in_state_by_stream(state, stream);

                        if (ind >= 0)
                                state->crtcs[ind].new_state->mode_changed = 0;
                }
        }
clean_exit:
        for (i = 0; i < local_dc_state->stream_count; i++) {
                struct dc_stream_state *stream = dm_state->context->streams[i];

                if (local_dc_state->streams[i] != stream)
                        dc_stream_release(local_dc_state->streams[i]);
        }

        kfree(local_dc_state);
}
#endif