/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>

#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

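/*
 * As a concrete (illustrative) example of that flow: a DRM atomic commit
 * lands in amdgpu_dm_atomic_commit_tail(), which builds dc_stream_state
 * and dc_plane_state objects for the affected CRTCs/planes and hands them
 * to DC through dc_commit_state()-style entry points; vblank and flip
 * completion then travel back to DRM as events from the IRQ handlers below.
 */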
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: Counter for vertical blanks, or 0 if @crtc is out of range or
 * has no stream.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

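		/*
		 * Pack reg-style: h_position in the high 16 bits,
		 * v_position in the low 16 bits.
		 */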
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

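	/*
	 * An OTG instance of -1 means the interrupt source could not be
	 * mapped to a CRTC; warn and fall back to the first CRTC instead
	 * of indexing the array with -1.
	 */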
	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters, used to look up the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

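		/*
		 * Trace the measured frame duration and the refresh rate it
		 * implies (NSEC_PER_SEC / frame_duration_ns, in Hz).
		 */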
		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping only gives valid results
		 * once scanout is past the front-porch. This will also deliver
		 * any page-flip completion events that were queued to us
		 * because the flip happened inside the front-porch.
		 */
		if (vrr_active) {
			dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only then vblank timestamping gives valid
	 * results while done in front-porch. Otherwise defer it to
	 * dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		dm_crtc_handle_vblank(acrtc);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCN HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false otherwise.
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
		return false;

	adev->dm.dmub_callback[type] = callback;
	adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;

	return true;
}

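/*
 * Example usage (illustrative; this mirrors how DM init wires up the DMUB
 * HPD handler and offloads it to the delayed HPD work thread):
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				      dmub_hpd_callback, true);
 */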
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt event.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type]) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				/* dmub_notify allocation was checked above, so copy unconditionally. */
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else {
			break;
		}

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

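	/*
	 * The ucode array in the firmware image is laid out as a PSP header
	 * (PSP_HEADER_BYTES), the instruction/constant payload, and a PSP
	 * footer (PSP_FOOTER_BYTES), all covered by inst_const_bytes, with
	 * the BSS/data region following immediately after.
	 */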
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->vm_manager.vram_base_offset;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

static void mmhub_read_system_context(struct amdgpu_device *adev,
				      struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

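	/*
	 * The system aperture addresses below are held in 256 KiB units
	 * (hence the >> 18), while the AGP window uses 16 MiB units
	 * (hence the >> 24).
	 */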
	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

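	/*
	 * GART page table addresses are split reg-style: a 4-bit high part
	 * (bits 47:44) and a 32-bit low part of the 4 KiB-aligned address
	 * (>> 12).
	 */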
	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

1347 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1348         {
1349                 .matches = {
1350                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1351                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1352                 },
1353         },
1354         {
1355                 .matches = {
1356                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1357                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1358                 },
1359         },
1360         {
1361                 .matches = {
1362                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1363                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1364                 },
1365         },
1366         {
1367                 .matches = {
1368                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1369                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
1370                 },
1371         },
1372         {
1373                 .matches = {
1374                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1375                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
1376                 },
1377         },
1378         {
1379                 .matches = {
1380                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1381                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
1382                 },
1383         },
1384         {
1385                 .matches = {
1386                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1387                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
1388                 },
1389         },
1390         {
1391                 .matches = {
1392                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1393                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
1394                 },
1395         },
1396         {
1397                 .matches = {
1398                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1399                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
1400                 },
1401         },
1402         {}
1403         /* TODO: refactor this from a fixed table to a dynamic option */
1404 };
1405
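/*
 * retrieve_dmi_info - apply DMI-based quirks; currently this only sets
 * aux_hpd_discon_quirk for systems listed in hpd_disconnect_quirk_table.
 */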
1406 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1407 {
1408         const struct dmi_system_id *dmi_id;
1409
1410         dm->aux_hpd_discon_quirk = false;
1411
1412         dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1413         if (dmi_id) {
1414                 dm->aux_hpd_discon_quirk = true;
1415                 DRM_INFO("aux_hpd_discon_quirk attached\n");
1416         }
1417 }
1418
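/*
 * amdgpu_dm_init - bring up the display manager: create the DC instance and
 * its locks, initialize DMUB hardware, per-link workqueues, optional
 * HDCP/secure-display support, and the DRM device state. Any failure unwinds
 * through amdgpu_dm_fini() and returns -EINVAL.
 */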
1419 static int amdgpu_dm_init(struct amdgpu_device *adev)
1420 {
1421         struct dc_init_data init_data;
1422 #ifdef CONFIG_DRM_AMD_DC_HDCP
1423         struct dc_callback_init init_params;
1424 #endif
1425         int r;
1426
1427         adev->dm.ddev = adev_to_drm(adev);
1428         adev->dm.adev = adev;
1429
1430         /* Zero all the fields */
1431         memset(&init_data, 0, sizeof(init_data));
1432 #ifdef CONFIG_DRM_AMD_DC_HDCP
1433         memset(&init_params, 0, sizeof(init_params));
1434 #endif
1435
1436         mutex_init(&adev->dm.dpia_aux_lock);
1437         mutex_init(&adev->dm.dc_lock);
1438         mutex_init(&adev->dm.audio_lock);
1439
1440         if (amdgpu_dm_irq_init(adev)) {
1441                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1442                 goto error;
1443         }
1444
1445         init_data.asic_id.chip_family = adev->family;
1446
1447         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1448         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1449         init_data.asic_id.chip_id = adev->pdev->device;
1450
1451         init_data.asic_id.vram_width = adev->gmc.vram_width;
1452         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1453         init_data.asic_id.atombios_base_address =
1454                 adev->mode_info.atom_context->bios;
1455
1456         init_data.driver = adev;
1457
1458         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1459
1460         if (!adev->dm.cgs_device) {
1461                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1462                 goto error;
1463         }
1464
1465         init_data.cgs_device = adev->dm.cgs_device;
1466
1467         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1468
1469         switch (adev->ip_versions[DCE_HWIP][0]) {
1470         case IP_VERSION(2, 1, 0):
1471                 switch (adev->dm.dmcub_fw_version) {
1472                 case 0: /* development */
1473                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1474                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1475                         init_data.flags.disable_dmcu = false;
1476                         break;
1477                 default:
1478                         init_data.flags.disable_dmcu = true;
1479                 }
1480                 break;
1481         case IP_VERSION(2, 0, 3):
1482                 init_data.flags.disable_dmcu = true;
1483                 break;
1484         default:
1485                 break;
1486         }
1487
1488         switch (adev->asic_type) {
1489         case CHIP_CARRIZO:
1490         case CHIP_STONEY:
1491                 init_data.flags.gpu_vm_support = true;
1492                 break;
1493         default:
1494                 switch (adev->ip_versions[DCE_HWIP][0]) {
1495                 case IP_VERSION(1, 0, 0):
1496                 case IP_VERSION(1, 0, 1):
1497                         /* enable S/G on PCO and RV2 */
1498                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1499                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1500                                 init_data.flags.gpu_vm_support = true;
1501                         break;
1502                 case IP_VERSION(2, 1, 0):
1503                 case IP_VERSION(3, 0, 1):
1504                 case IP_VERSION(3, 1, 2):
1505                 case IP_VERSION(3, 1, 3):
1506                 case IP_VERSION(3, 1, 5):
1507                 case IP_VERSION(3, 1, 6):
1508                         init_data.flags.gpu_vm_support = true;
1509                         break;
1510                 default:
1511                         break;
1512                 }
1513                 break;
1514         }
1515
1516         if (init_data.flags.gpu_vm_support)
1517                 adev->mode_info.gpu_vm_support = true;
1518
1519         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1520                 init_data.flags.fbc_support = true;
1521
1522         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1523                 init_data.flags.multi_mon_pp_mclk_switch = true;
1524
1525         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1526                 init_data.flags.disable_fractional_pwm = true;
1527
1528         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1529                 init_data.flags.edp_no_power_sequencing = true;
1530
1531         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1532                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1533         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1534                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1535
1536         init_data.flags.seamless_boot_edp_requested = false;
1537
1538         if (check_seamless_boot_capability(adev)) {
1539                 init_data.flags.seamless_boot_edp_requested = true;
1540                 init_data.flags.allow_seamless_boot_optimization = true;
1541                 DRM_INFO("Seamless boot condition check passed\n");
1542         }
1543
1544         init_data.flags.enable_mipi_converter_optimization = true;
1545
1546         init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1547         init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1548
1549         INIT_LIST_HEAD(&adev->dm.da_list);
1550
1551         retrieve_dmi_info(&adev->dm);
1552
1553         /* Display Core create. */
1554         adev->dm.dc = dc_create(&init_data);
1555
1556         if (adev->dm.dc) {
1557                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1558         } else {
1559                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1560                 goto error;
1561         }
1562
1563         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1564                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1565                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1566         }
1567
1568         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1569                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1570         if (dm_should_disable_stutter(adev->pdev))
1571                 adev->dm.dc->debug.disable_stutter = true;
1572
1573         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1574                 adev->dm.dc->debug.disable_stutter = true;
1575
1576         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1577                 adev->dm.dc->debug.disable_dsc = true;
1579
1580         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1581                 adev->dm.dc->debug.disable_clock_gate = true;
1582
1583         if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1584                 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1585
1586         adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1587
1588         /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
1589         adev->dm.dc->debug.ignore_cable_id = true;
1590
1591         r = dm_dmub_hw_init(adev);
1592         if (r) {
1593                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1594                 goto error;
1595         }
1596
1597         dc_hardware_init(adev->dm.dc);
1598
1599         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1600         if (!adev->dm.hpd_rx_offload_wq) {
1601                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1602                 goto error;
1603         }
1604
1605         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1606                 struct dc_phy_addr_space_config pa_config;
1607
1608                 mmhub_read_system_context(adev, &pa_config);
1609
1610                 // Call the DC init_memory func
1611                 dc_setup_system_context(adev->dm.dc, &pa_config);
1612         }
1613
1614         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1615         if (!adev->dm.freesync_module) {
1616                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1617         } else {
1618                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1619                                  adev->dm.freesync_module);
1620         }
1621
1622         amdgpu_dm_init_color_mod();
1623
1624         if (adev->dm.dc->caps.max_links > 0) {
1625                 adev->dm.vblank_control_workqueue =
1626                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1627                 if (!adev->dm.vblank_control_workqueue)
1628                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1629         }
1630
1631 #ifdef CONFIG_DRM_AMD_DC_HDCP
1632         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1633                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1634
1635                 if (!adev->dm.hdcp_workqueue)
1636                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1637                 else
1638                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1639
1640                 dc_init_callbacks(adev->dm.dc, &init_params);
1641         }
1642 #endif
1643 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1644         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1645 #endif
1646         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1647                 init_completion(&adev->dm.dmub_aux_transfer_done);
1648                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1649                 if (!adev->dm.dmub_notify) {
1650                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1651                         goto error;
1652                 }
1653
1654                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1655                 if (!adev->dm.delayed_hpd_wq) {
1656                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1657                         goto error;
1658                 }
1659
1660                 amdgpu_dm_outbox_init(adev);
1661                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1662                         dmub_aux_setconfig_callback, false)) {
1663                         DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1664                         goto error;
1665                 }
1666                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1667                         DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1668                         goto error;
1669                 }
1670                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1671                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1672                         goto error;
1673                 }
1674         }
1675
1676         /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1677          * It is expected that DMUB will resend any pending notifications at this point, for
1678          * example HPD from DPIA.
1679          */
1680         if (dc_is_dmub_outbox_supported(adev->dm.dc))
1681                 dc_enable_dmub_outbox(adev->dm.dc);
1682
1683         if (amdgpu_dm_initialize_drm_device(adev)) {
1684                 DRM_ERROR(
1685                 "amdgpu: failed to initialize sw for display support.\n");
1686                 goto error;
1687         }
1688
1689         /* create fake encoders for MST */
1690         dm_dp_create_fake_mst_encoders(adev);
1691
1692         /* TODO: Add_display_info? */
1693
1694         /* TODO use dynamic cursor width */
1695         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1696         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1697
1698         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1699                 DRM_ERROR(
1700                 "amdgpu: failed to initialize vblank support.\n");
1701                 goto error;
1702         }
1703
1705         DRM_DEBUG_DRIVER("KMS initialized.\n");
1706
1707         return 0;
1708 error:
1709         amdgpu_dm_fini(adev);
1710
1711         return -EINVAL;
1712 }
1713
1714 static int amdgpu_dm_early_fini(void *handle)
1715 {
1716         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1717
1718         amdgpu_dm_audio_fini(adev);
1719
1720         return 0;
1721 }
1722
1723 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1724 {
1725         int i;
1726
1727         if (adev->dm.vblank_control_workqueue) {
1728                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1729                 adev->dm.vblank_control_workqueue = NULL;
1730         }
1731
1732         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1733                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1734         }
1735
1736         amdgpu_dm_destroy_drm_device(&adev->dm);
1737
1738 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1739         if (adev->dm.crc_rd_wrk) {
1740                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1741                 kfree(adev->dm.crc_rd_wrk);
1742                 adev->dm.crc_rd_wrk = NULL;
1743         }
1744 #endif
1745 #ifdef CONFIG_DRM_AMD_DC_HDCP
1746         if (adev->dm.hdcp_workqueue) {
1747                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1748                 adev->dm.hdcp_workqueue = NULL;
1749         }
1750
1751         if (adev->dm.dc)
1752                 dc_deinit_callbacks(adev->dm.dc);
1753 #endif
1754
1755         dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1756
1757         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1758                 kfree(adev->dm.dmub_notify);
1759                 adev->dm.dmub_notify = NULL;
1760                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1761                 adev->dm.delayed_hpd_wq = NULL;
1762         }
1763
1764         if (adev->dm.dmub_bo)
1765                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1766                                       &adev->dm.dmub_bo_gpu_addr,
1767                                       &adev->dm.dmub_bo_cpu_addr);
1768
1769         if (adev->dm.hpd_rx_offload_wq) {
1770                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1771                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1772                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1773                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1774                         }
1775                 }
1776
1777                 kfree(adev->dm.hpd_rx_offload_wq);
1778                 adev->dm.hpd_rx_offload_wq = NULL;
1779         }
1780
1781         /* DC Destroy TODO: Replace destroy DAL */
1782         if (adev->dm.dc)
1783                 dc_destroy(&adev->dm.dc);
1784         /*
1785          * TODO: pageflip, vblank interrupt
1786          *
1787          * amdgpu_dm_irq_fini(adev);
1788          */
1789
1790         if (adev->dm.cgs_device) {
1791                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1792                 adev->dm.cgs_device = NULL;
1793         }
1794         if (adev->dm.freesync_module) {
1795                 mod_freesync_destroy(adev->dm.freesync_module);
1796                 adev->dm.freesync_module = NULL;
1797         }
1798
1799         mutex_destroy(&adev->dm.audio_lock);
1800         mutex_destroy(&adev->dm.dc_lock);
1801         mutex_destroy(&adev->dm.dpia_aux_lock);
1804 }
1805
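/*
 * load_dmcu_fw - request and validate the DMCU firmware for the ASICs that
 * carry one (Raven/Picasso, Navi12); DCE parts and most DCN parts return 0
 * without loading anything, and a missing image is treated as non-fatal.
 */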
1806 static int load_dmcu_fw(struct amdgpu_device *adev)
1807 {
1808         const char *fw_name_dmcu = NULL;
1809         int r;
1810         const struct dmcu_firmware_header_v1_0 *hdr;
1811
1812         switch (adev->asic_type) {
1813 #if defined(CONFIG_DRM_AMD_DC_SI)
1814         case CHIP_TAHITI:
1815         case CHIP_PITCAIRN:
1816         case CHIP_VERDE:
1817         case CHIP_OLAND:
1818 #endif
1819         case CHIP_BONAIRE:
1820         case CHIP_HAWAII:
1821         case CHIP_KAVERI:
1822         case CHIP_KABINI:
1823         case CHIP_MULLINS:
1824         case CHIP_TONGA:
1825         case CHIP_FIJI:
1826         case CHIP_CARRIZO:
1827         case CHIP_STONEY:
1828         case CHIP_POLARIS11:
1829         case CHIP_POLARIS10:
1830         case CHIP_POLARIS12:
1831         case CHIP_VEGAM:
1832         case CHIP_VEGA10:
1833         case CHIP_VEGA12:
1834         case CHIP_VEGA20:
1835                 return 0;
1836         case CHIP_NAVI12:
1837                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1838                 break;
1839         case CHIP_RAVEN:
1840                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1841                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1842                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1843                 else
1844                         return 0;
1846                 break;
1847         default:
1848                 switch (adev->ip_versions[DCE_HWIP][0]) {
1849                 case IP_VERSION(2, 0, 2):
1850                 case IP_VERSION(2, 0, 3):
1851                 case IP_VERSION(2, 0, 0):
1852                 case IP_VERSION(2, 1, 0):
1853                 case IP_VERSION(3, 0, 0):
1854                 case IP_VERSION(3, 0, 2):
1855                 case IP_VERSION(3, 0, 3):
1856                 case IP_VERSION(3, 0, 1):
1857                 case IP_VERSION(3, 1, 2):
1858                 case IP_VERSION(3, 1, 3):
1859                 case IP_VERSION(3, 1, 4):
1860                 case IP_VERSION(3, 1, 5):
1861                 case IP_VERSION(3, 1, 6):
1862                 case IP_VERSION(3, 2, 0):
1863                 case IP_VERSION(3, 2, 1):
1864                         return 0;
1865                 default:
1866                         break;
1867                 }
1868                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1869                 return -EINVAL;
1870         }
1871
1872         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1873                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1874                 return 0;
1875         }
1876
1877         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1878         if (r == -ENOENT) {
1879                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1880                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1881                 adev->dm.fw_dmcu = NULL;
1882                 return 0;
1883         }
1884         if (r) {
1885                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1886                         fw_name_dmcu);
1887                 return r;
1888         }
1889
1890         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1891         if (r) {
1892                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1893                         fw_name_dmcu);
1894                 release_firmware(adev->dm.fw_dmcu);
1895                 adev->dm.fw_dmcu = NULL;
1896                 return r;
1897         }
1898
1899         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1900         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1901         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1902         adev->firmware.fw_size +=
1903                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1904
1905         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1906         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1907         adev->firmware.fw_size +=
1908                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1909
1910         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1911
1912         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1913
1914         return 0;
1915 }
1916
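/* Thin register accessors handed to the DMUB service so that it can access
 * registers through DC's dm_read_reg()/dm_write_reg() context helpers.
 */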
1917 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1918 {
1919         struct amdgpu_device *adev = ctx;
1920
1921         return dm_read_reg(adev->dm.dc->ctx, address);
1922 }
1923
1924 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1925                                      uint32_t value)
1926 {
1927         struct amdgpu_device *adev = ctx;
1928
1929         return dm_write_reg(adev->dm.dc->ctx, address, value);
1930 }
1931
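/*
 * dm_dmub_sw_init - software-side DMUB setup: pick the per-ASIC firmware
 * image, create the dmub_srv instance, size its memory regions from the
 * firmware header, and back them with a kernel BO allocated in VRAM.
 */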
1932 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1933 {
1934         struct dmub_srv_create_params create_params;
1935         struct dmub_srv_region_params region_params;
1936         struct dmub_srv_region_info region_info;
1937         struct dmub_srv_fb_params fb_params;
1938         struct dmub_srv_fb_info *fb_info;
1939         struct dmub_srv *dmub_srv;
1940         const struct dmcub_firmware_header_v1_0 *hdr;
1941         const char *fw_name_dmub;
1942         enum dmub_asic dmub_asic;
1943         enum dmub_status status;
1944         int r;
1945
1946         switch (adev->ip_versions[DCE_HWIP][0]) {
1947         case IP_VERSION(2, 1, 0):
1948                 dmub_asic = DMUB_ASIC_DCN21;
1949                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1950                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1951                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1952                 break;
1953         case IP_VERSION(3, 0, 0):
1954                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1955                         dmub_asic = DMUB_ASIC_DCN30;
1956                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1957                 } else {
1958                         dmub_asic = DMUB_ASIC_DCN30;
1959                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1960                 }
1961                 break;
1962         case IP_VERSION(3, 0, 1):
1963                 dmub_asic = DMUB_ASIC_DCN301;
1964                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1965                 break;
1966         case IP_VERSION(3, 0, 2):
1967                 dmub_asic = DMUB_ASIC_DCN302;
1968                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1969                 break;
1970         case IP_VERSION(3, 0, 3):
1971                 dmub_asic = DMUB_ASIC_DCN303;
1972                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1973                 break;
1974         case IP_VERSION(3, 1, 2):
1975         case IP_VERSION(3, 1, 3):
1976                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1977                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1978                 break;
1979         case IP_VERSION(3, 1, 4):
1980                 dmub_asic = DMUB_ASIC_DCN314;
1981                 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
1982                 break;
1983         case IP_VERSION(3, 1, 5):
1984                 dmub_asic = DMUB_ASIC_DCN315;
1985                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1986                 break;
1987         case IP_VERSION(3, 1, 6):
1988                 dmub_asic = DMUB_ASIC_DCN316;
1989                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1990                 break;
1991         case IP_VERSION(3, 2, 0):
1992                 dmub_asic = DMUB_ASIC_DCN32;
1993                 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
1994                 break;
1995         case IP_VERSION(3, 2, 1):
1996                 dmub_asic = DMUB_ASIC_DCN321;
1997                 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
1998                 break;
1999         default:
2000                 /* ASIC doesn't support DMUB. */
2001                 return 0;
2002         }
2003
2004         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
2005         if (r) {
2006                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2007                 return 0;
2008         }
2009
2010         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2011         if (r) {
2012                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2013                 return 0;
2014         }
2015
2016         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2017         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2018
2019         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2020                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2021                         AMDGPU_UCODE_ID_DMCUB;
2022                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2023                         adev->dm.dmub_fw;
2024                 adev->firmware.fw_size +=
2025                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2026
2027                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2028                          adev->dm.dmcub_fw_version);
2029         }
2030
2032         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2033         dmub_srv = adev->dm.dmub_srv;
2034
2035         if (!dmub_srv) {
2036                 DRM_ERROR("Failed to allocate DMUB service!\n");
2037                 return -ENOMEM;
2038         }
2039
2040         memset(&create_params, 0, sizeof(create_params));
2041         create_params.user_ctx = adev;
2042         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2043         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2044         create_params.asic = dmub_asic;
2045
2046         /* Create the DMUB service. */
2047         status = dmub_srv_create(dmub_srv, &create_params);
2048         if (status != DMUB_STATUS_OK) {
2049                 DRM_ERROR("Error creating DMUB service: %d\n", status);
2050                 return -EINVAL;
2051         }
2052
2053         /* Calculate the size of all the regions for the DMUB service. */
2054         memset(&region_params, 0, sizeof(region_params));
2055
2056         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2057                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2058         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2059         region_params.vbios_size = adev->bios_size;
2060         region_params.fw_bss_data = region_params.bss_data_size ?
2061                 adev->dm.dmub_fw->data +
2062                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2063                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2064         region_params.fw_inst_const =
2065                 adev->dm.dmub_fw->data +
2066                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2067                 PSP_HEADER_BYTES;
2068
2069         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2070                                            &region_info);
2071
2072         if (status != DMUB_STATUS_OK) {
2073                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2074                 return -EINVAL;
2075         }
2076
2077         /*
2078          * Allocate a framebuffer based on the total size of all the regions.
2079          * TODO: Move this into GART.
2080          */
2081         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2082                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2083                                     &adev->dm.dmub_bo_gpu_addr,
2084                                     &adev->dm.dmub_bo_cpu_addr);
2085         if (r)
2086                 return r;
2087
2088         /* Rebase the regions on the framebuffer address. */
2089         memset(&fb_params, 0, sizeof(fb_params));
2090         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2091         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2092         fb_params.region_info = &region_info;
2093
2094         adev->dm.dmub_fb_info =
2095                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2096         fb_info = adev->dm.dmub_fb_info;
2097
2098         if (!fb_info) {
2099                 DRM_ERROR(
2100                         "Failed to allocate framebuffer info for DMUB service!\n");
2101                 return -ENOMEM;
2102         }
2103
2104         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2105         if (status != DMUB_STATUS_OK) {
2106                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2107                 return -EINVAL;
2108         }
2109
2110         return 0;
2111 }
2112
2113 static int dm_sw_init(void *handle)
2114 {
2115         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2116         int r;
2117
2118         r = dm_dmub_sw_init(adev);
2119         if (r)
2120                 return r;
2121
2122         return load_dmcu_fw(adev);
2123 }
2124
2125 static int dm_sw_fini(void *handle)
2126 {
2127         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2128
2129         kfree(adev->dm.dmub_fb_info);
2130         adev->dm.dmub_fb_info = NULL;
2131
2132         if (adev->dm.dmub_srv) {
2133                 dmub_srv_destroy(adev->dm.dmub_srv);
2134                 adev->dm.dmub_srv = NULL;
2135         }
2136
2137         release_firmware(adev->dm.dmub_fw);
2138         adev->dm.dmub_fw = NULL;
2139
2140         release_firmware(adev->dm.fw_dmcu);
2141         adev->dm.fw_dmcu = NULL;
2142
2143         return 0;
2144 }
2145
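/*
 * detect_mst_link_for_all_connectors - (re)start the MST topology manager on
 * every connector whose DC link is an MST branch; on failure the link is
 * demoted to dc_connection_single.
 */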
2146 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2147 {
2148         struct amdgpu_dm_connector *aconnector;
2149         struct drm_connector *connector;
2150         struct drm_connector_list_iter iter;
2151         int ret = 0;
2152
2153         drm_connector_list_iter_begin(dev, &iter);
2154         drm_for_each_connector_iter(connector, &iter) {
2155                 aconnector = to_amdgpu_dm_connector(connector);
2156                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2157                     aconnector->mst_mgr.aux) {
2158                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2159                                          aconnector,
2160                                          aconnector->base.base.id);
2161
2162                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2163                         if (ret < 0) {
2164                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2165                                 aconnector->dc_link->type =
2166                                         dc_connection_single;
2167                                 break;
2168                         }
2169                 }
2170         }
2171         drm_connector_list_iter_end(&iter);
2172
2173         return ret;
2174 }
2175
2176 static int dm_late_init(void *handle)
2177 {
2178         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2180         struct dmcu_iram_parameters params;
2181         unsigned int linear_lut[16];
2182         int i;
2183         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
2184
2187         for (i = 0; i < 16; i++)
2188                 linear_lut[i] = 0xFFFF * i / 15;
2189
2190         params.set = 0;
2191         params.backlight_ramping_override = false;
2192         params.backlight_ramping_start = 0xCCCC;
2193         params.backlight_ramping_reduction = 0xCCCCCCCC;
2194         params.backlight_lut_array_size = 16;
2195         params.backlight_lut_array = linear_lut;
2196
2197         /* Min backlight level after ABM reduction; don't allow below 1%.
2198          * 0xFFFF x 0.01 = 0x28F
2199          */
2200         params.min_abm_backlight = 0x28F;
2201         /* In the case where ABM is implemented on dmcub,
2202          * the dmcu object will be null.
2203          * ABM 2.4 and up are implemented on dmcub.
2204          */
2205         if (dmcu) {
2206                 if (!dmcu_load_iram(dmcu, params))
2207                         return -EINVAL;
2208         } else if (adev->dm.dc->ctx->dmub_srv) {
2209                 struct dc_link *edp_links[MAX_NUM_EDP];
2210                 int edp_num;
2211
2212                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2213                 for (i = 0; i < edp_num; i++) {
2214                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2215                                 return -EINVAL;
2216                 }
2217         }
2218
2219         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2220 }
2221
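/*
 * s3_handle_mst - suspend or resume the topology managers of all root MST
 * connectors (ports are skipped); if a manager fails to resume, its topology
 * is stopped and a hotplug event is generated so userspace re-probes.
 */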
2222 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2223 {
2224         struct amdgpu_dm_connector *aconnector;
2225         struct drm_connector *connector;
2226         struct drm_connector_list_iter iter;
2227         struct drm_dp_mst_topology_mgr *mgr;
2228         int ret;
2229         bool need_hotplug = false;
2230
2231         drm_connector_list_iter_begin(dev, &iter);
2232         drm_for_each_connector_iter(connector, &iter) {
2233                 aconnector = to_amdgpu_dm_connector(connector);
2234                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2235                     aconnector->mst_port)
2236                         continue;
2237
2238                 mgr = &aconnector->mst_mgr;
2239
2240                 if (suspend) {
2241                         drm_dp_mst_topology_mgr_suspend(mgr);
2242                 } else {
2243                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2244                         if (ret < 0) {
2245                                 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2246                                         aconnector->dc_link);
2247                                 need_hotplug = true;
2248                         }
2249                 }
2250         }
2251         drm_connector_list_iter_end(&iter);
2252
2253         if (need_hotplug)
2254                 drm_kms_helper_hotplug_event(dev);
2255 }
2256
2257 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2258 {
2259         int ret = 0;
2260
2261         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2262          * depends on the Windows driver dc implementation.
2263          * For Navi1x, the clock settings of the dcn watermarks are fixed. The
2264          * settings should be passed to smu during boot up and resume from s3.
2265          * Boot up: dc calculates the dcn watermark clock settings within
2266          * dc_create, dcn20_resource_construct, and
2267          * then calls the pplib functions below to pass the settings to smu:
2268          * smu_set_watermarks_for_clock_ranges
2269          * smu_set_watermarks_table
2270          * navi10_set_watermarks_table
2271          * smu_write_watermarks_table
2272          *
2273          * For Renoir, the clock settings of the dcn watermarks are also fixed.
2274          * dc implements a different flow for the Windows driver:
2275          * dc_hardware_init / dc_set_power_state
2276          * dcn10_init_hw
2277          * notify_wm_ranges
2278          * set_wm_ranges
2279          * -- Linux
2280          * smu_set_watermarks_for_clock_ranges
2281          * renoir_set_watermarks_table
2282          * smu_write_watermarks_table
2283          *
2284          * For Linux,
2285          * dc_hardware_init -> amdgpu_dm_init
2286          * dc_set_power_state --> dm_resume
2287          *
2288          * Therefore, this function applies to Navi10/12/14 but not Renoir.
2289          */
2291         switch (adev->ip_versions[DCE_HWIP][0]) {
2292         case IP_VERSION(2, 0, 2):
2293         case IP_VERSION(2, 0, 0):
2294                 break;
2295         default:
2296                 return 0;
2297         }
2298
2299         ret = amdgpu_dpm_write_watermarks_table(adev);
2300         if (ret) {
2301                 DRM_ERROR("Failed to update WMTABLE!\n");
2302                 return ret;
2303         }
2304
2305         return 0;
2306 }
2307
2308 /**
2309  * dm_hw_init() - Initialize DC device
2310  * @handle: The base driver device containing the amdgpu_dm device.
2311  *
2312  * Initialize the &struct amdgpu_display_manager device. This involves calling
2313  * the initializers of each DM component, then populating the struct with them.
2314  *
2315  * Although the function implies hardware initialization, both hardware and
2316  * software are initialized here. Splitting them out to their relevant init
2317  * hooks is a future TODO item.
2318  *
2319  * Some notable things that are initialized here:
2320  *
2321  * - Display Core, both software and hardware
2322  * - DC modules that we need (freesync and color management)
2323  * - DRM software states
2324  * - Interrupt sources and handlers
2325  * - Vblank support
2326  * - Debug FS entries, if enabled
2327  */
2328 static int dm_hw_init(void *handle)
2329 {
2330         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2331         /* Create DAL display manager */
2332         amdgpu_dm_init(adev);
2333         amdgpu_dm_hpd_init(adev);
2334
2335         return 0;
2336 }
2337
2338 /**
2339  * dm_hw_fini() - Teardown DC device
2340  * @handle: The base driver device containing the amdgpu_dm device.
2341  *
2342  * Teardown components within &struct amdgpu_display_manager that require
2343  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2344  * were loaded. Also flush IRQ workqueues and disable them.
2345  */
2346 static int dm_hw_fini(void *handle)
2347 {
2348         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2349
2350         amdgpu_dm_hpd_fini(adev);
2351
2352         amdgpu_dm_irq_fini(adev);
2353         amdgpu_dm_fini(adev);
2354         return 0;
2355 }
2356
2358 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2359                                  struct dc_state *state, bool enable)
2360 {
2361         enum dc_irq_source irq_source;
2362         struct amdgpu_crtc *acrtc;
2363         int rc = -EBUSY;
2364         int i = 0;
2365
2366         for (i = 0; i < state->stream_count; i++) {
2367                 acrtc = get_crtc_by_otg_inst(
2368                                 adev, state->stream_status[i].primary_otg_inst);
2369
2370                 if (acrtc && state->stream_status[i].plane_count != 0) {
2371                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2372                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2373                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2374                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2375                         if (rc)
2376                                 DRM_WARN("Failed to %s pflip interrupts\n",
2377                                          enable ? "enable" : "disable");
2378
2379                         if (enable) {
2380                                 rc = dm_enable_vblank(&acrtc->base);
2381                                 if (rc)
2382                                         DRM_WARN("Failed to enable vblank interrupts\n");
2383                         } else {
2384                                 dm_disable_vblank(&acrtc->base);
2385                         }
2387                 }
2388         }
2390 }
2391
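/*
 * amdgpu_dm_commit_zero_streams - commit a copy of the current DC state with
 * all planes and streams removed; used to quiesce the hardware, e.g. before
 * a GPU reset (see dm_suspend()).
 */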
2392 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2393 {
2394         struct dc_state *context = NULL;
2395         enum dc_status res = DC_ERROR_UNEXPECTED;
2396         int i;
2397         struct dc_stream_state *del_streams[MAX_PIPES];
2398         int del_streams_count = 0;
2399
2400         memset(del_streams, 0, sizeof(del_streams));
2401
2402         context = dc_create_state(dc);
2403         if (context == NULL)
2404                 goto context_alloc_fail;
2405
2406         dc_resource_state_copy_construct_current(dc, context);
2407
2408         /* First remove from context all streams */
2409         for (i = 0; i < context->stream_count; i++) {
2410                 struct dc_stream_state *stream = context->streams[i];
2411
2412                 del_streams[del_streams_count++] = stream;
2413         }
2414
2415         /* Remove all planes for removed streams and then remove the streams */
2416         for (i = 0; i < del_streams_count; i++) {
2417                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2418                         res = DC_FAIL_DETACH_SURFACES;
2419                         goto fail;
2420                 }
2421
2422                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2423                 if (res != DC_OK)
2424                         goto fail;
2425         }
2426
2427         res = dc_commit_state(dc, context);
2428
2429 fail:
2430         dc_release_state(context);
2431
2432 context_alloc_fail:
2433         return res;
2434 }
2435
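/* Drain all pending HPD RX offload work before the hardware goes down. */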
2436 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2437 {
2438         int i;
2439
2440         if (dm->hpd_rx_offload_wq) {
2441                 for (i = 0; i < dm->dc->caps.max_links; i++)
2442                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2443         }
2444 }
2445
2446 static int dm_suspend(void *handle)
2447 {
2448         struct amdgpu_device *adev = handle;
2449         struct amdgpu_display_manager *dm = &adev->dm;
2450         int ret = 0;
2451
2452         if (amdgpu_in_reset(adev)) {
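                /* Note: dc_lock is intentionally left held here during a GPU
                 * reset; the matching mutex_unlock() is in the
                 * amdgpu_in_reset() path of dm_resume().
                 */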
2453                 mutex_lock(&dm->dc_lock);
2454
2455                 dc_allow_idle_optimizations(adev->dm.dc, false);
2456
2457                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2458
2459                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2460
2461                 amdgpu_dm_commit_zero_streams(dm->dc);
2462
2463                 amdgpu_dm_irq_suspend(adev);
2464
2465                 hpd_rx_irq_work_suspend(dm);
2466
2467                 return ret;
2468         }
2469
2470         WARN_ON(adev->dm.cached_state);
2471         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2472
2473         s3_handle_mst(adev_to_drm(adev), true);
2474
2475         amdgpu_dm_irq_suspend(adev);
2476
2477         hpd_rx_irq_work_suspend(dm);
2478
2479         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2480
2481         return 0;
2482 }
2483
2484 struct amdgpu_dm_connector *
2485 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2486                                              struct drm_crtc *crtc)
2487 {
2488         uint32_t i;
2489         struct drm_connector_state *new_con_state;
2490         struct drm_connector *connector;
2491         struct drm_crtc *crtc_from_state;
2492
2493         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2494                 crtc_from_state = new_con_state->crtc;
2495
2496                 if (crtc_from_state == crtc)
2497                         return to_amdgpu_dm_connector(connector);
2498         }
2499
2500         return NULL;
2501 }
2502
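/*
 * emulated_link_detect - fabricate a sink for a forced connector when no
 * physical sink is detected: the link type is cleared to dc_connection_none,
 * a dc_sink matching the connector signal is created (DisplayPort is reported
 * as SIGNAL_TYPE_VIRTUAL), and the local EDID is read if one is available.
 */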
2503 static void emulated_link_detect(struct dc_link *link)
2504 {
2505         struct dc_sink_init_data sink_init_data = { 0 };
2506         struct display_sink_capability sink_caps = { 0 };
2507         enum dc_edid_status edid_status;
2508         struct dc_context *dc_ctx = link->ctx;
2509         struct dc_sink *sink = NULL;
2510         struct dc_sink *prev_sink = NULL;
2511
2512         link->type = dc_connection_none;
2513         prev_sink = link->local_sink;
2514
2515         if (prev_sink)
2516                 dc_sink_release(prev_sink);
2517
2518         switch (link->connector_signal) {
2519         case SIGNAL_TYPE_HDMI_TYPE_A: {
2520                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2521                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2522                 break;
2523         }
2524
2525         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2526                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2527                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2528                 break;
2529         }
2530
2531         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2532                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2533                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2534                 break;
2535         }
2536
2537         case SIGNAL_TYPE_LVDS: {
2538                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2539                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2540                 break;
2541         }
2542
2543         case SIGNAL_TYPE_EDP: {
2544                 sink_caps.transaction_type =
2545                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2546                 sink_caps.signal = SIGNAL_TYPE_EDP;
2547                 break;
2548         }
2549
2550         case SIGNAL_TYPE_DISPLAY_PORT: {
2551                 sink_caps.transaction_type =
2552                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2553                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2554                 break;
2555         }
2556
2557         default:
2558                 DC_ERROR("Invalid connector type! signal:%d\n",
2559                         link->connector_signal);
2560                 return;
2561         }
2562
2563         sink_init_data.link = link;
2564         sink_init_data.sink_signal = sink_caps.signal;
2565
2566         sink = dc_sink_create(&sink_init_data);
2567         if (!sink) {
2568                 DC_ERROR("Failed to create sink!\n");
2569                 return;
2570         }
2571
2572         /* dc_sink_create returns a new reference */
2573         link->local_sink = sink;
2574
2575         edid_status = dm_helpers_read_local_edid(
2576                         link->ctx,
2577                         link,
2578                         sink);
2579
2580         if (edid_status != EDID_OK)
2581                 DC_ERROR("Failed to read EDID\n");
2583 }
2584
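/*
 * dm_gpureset_commit_state - after a GPU reset, replay the cached DC state by
 * issuing a forced full update for every plane of every stream.
 */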
2585 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2586                                      struct amdgpu_display_manager *dm)
2587 {
2588         struct {
2589                 struct dc_surface_update surface_updates[MAX_SURFACES];
2590                 struct dc_plane_info plane_infos[MAX_SURFACES];
2591                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2592                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2593                 struct dc_stream_update stream_update;
2594         } *bundle;
2595         int k, m;
2596
2597         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2598
2599         if (!bundle) {
2600                 dm_error("Failed to allocate update bundle\n");
2601                 goto cleanup;
2602         }
2603
2604         for (k = 0; k < dc_state->stream_count; k++) {
2605                 bundle->stream_update.stream = dc_state->streams[k];
2606
2607                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2608                         bundle->surface_updates[m].surface =
2609                                 dc_state->stream_status[k].plane_states[m];
2610                         bundle->surface_updates[m].surface->force_full_update =
2611                                 true;
2612                 }
2613                 dc_commit_updates_for_stream(
2614                         dm->dc, bundle->surface_updates,
2615                         dc_state->stream_status[k].plane_count,
2616                         dc_state->streams[k], &bundle->stream_update, dc_state);
2617         }
2618
2619 cleanup:
2620         kfree(bundle);
2623 }
2624
2625 static int dm_resume(void *handle)
2626 {
2627         struct amdgpu_device *adev = handle;
2628         struct drm_device *ddev = adev_to_drm(adev);
2629         struct amdgpu_display_manager *dm = &adev->dm;
2630         struct amdgpu_dm_connector *aconnector;
2631         struct drm_connector *connector;
2632         struct drm_connector_list_iter iter;
2633         struct drm_crtc *crtc;
2634         struct drm_crtc_state *new_crtc_state;
2635         struct dm_crtc_state *dm_new_crtc_state;
2636         struct drm_plane *plane;
2637         struct drm_plane_state *new_plane_state;
2638         struct dm_plane_state *dm_new_plane_state;
2639         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2640         enum dc_connection_type new_connection_type = dc_connection_none;
2641         struct dc_state *dc_state;
2642         int i, r, j;
2643
2644         if (amdgpu_in_reset(adev)) {
2645                 dc_state = dm->cached_dc_state;
2646
2647                 /*
2648                  * The dc->current_state is backed up into dm->cached_dc_state
2649                  * before we commit 0 streams.
2650                  *
2651                  * DC will clear link encoder assignments on the real state
2652                  * but the changes won't propagate over to the copy we made
2653                  * before the 0 streams commit.
2654                  *
2655                  * DC expects that link encoder assignments are *not* valid
2656                  * when committing a state, so as a workaround we can copy
2657                  * off of the current state.
2658                  *
2659                  * We lose the previous assignments, but we had already
2660          * committed 0 streams anyway.
2661                  */
2662                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2663
2664                 r = dm_dmub_hw_init(adev);
2665                 if (r)
2666                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2667
2668                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2669                 dc_resume(dm->dc);
2670
2671                 amdgpu_dm_irq_resume_early(adev);
2672
2673                 for (i = 0; i < dc_state->stream_count; i++) {
2674                         dc_state->streams[i]->mode_changed = true;
2675                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2676                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2677                                         = 0xffffffff;
2678                         }
2679                 }
2680
2681                 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2682                         amdgpu_dm_outbox_init(adev);
2683                         dc_enable_dmub_outbox(adev->dm.dc);
2684                 }
2685
2686                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2687
2688                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2689
2690                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2691
2692                 dc_release_state(dm->cached_dc_state);
2693                 dm->cached_dc_state = NULL;
2694
2695                 amdgpu_dm_irq_resume_late(adev);
2696
2697                 mutex_unlock(&dm->dc_lock);
2698
2699                 return 0;
2700         }
2701         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2702         dc_release_state(dm_state->context);
2703         dm_state->context = dc_create_state(dm->dc);
2704         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2705         dc_resource_state_construct(dm->dc, dm_state->context);
2706
2707         /* Before powering on DC we need to re-initialize DMUB. */
2708         dm_dmub_hw_resume(adev);
2709
2710         /* Re-enable outbox interrupts for DPIA. */
2711         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2712                 amdgpu_dm_outbox_init(adev);
2713                 dc_enable_dmub_outbox(adev->dm.dc);
2714         }
2715
2716         /* power on hardware */
2717         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2718
2719         /* program HPD filter */
2720         dc_resume(dm->dc);
2721
2722         /*
2723          * Enable HPD Rx IRQ early; this should be done before modeset, as
2724          * short-pulse interrupts are used for MST.
2725          */
2726         amdgpu_dm_irq_resume_early(adev);
2727
2728         /* On resume we need to rewrite the MSTM control bits to enable MST */
2729         s3_handle_mst(ddev, false);
2730
2731         /* Do detection */
2732         drm_connector_list_iter_begin(ddev, &iter);
2733         drm_for_each_connector_iter(connector, &iter) {
2734                 aconnector = to_amdgpu_dm_connector(connector);
2735
2736                 /*
2737                  * This is the case when traversing through already created
2738                  * MST connectors; they should be skipped.
2739                  */
2740                 if (aconnector->dc_link &&
2741                     aconnector->dc_link->type == dc_connection_mst_branch)
2742                         continue;
2743
2744                 mutex_lock(&aconnector->hpd_lock);
2745                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2746                         DRM_ERROR("KMS: Failed to detect connector\n");
2747
2748                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2749                         emulated_link_detect(aconnector->dc_link);
2750                 } else {
2751                         mutex_lock(&dm->dc_lock);
2752                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2753                         mutex_unlock(&dm->dc_lock);
2754                 }
2755
2756                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2757                         aconnector->fake_enable = false;
2758
2759                 if (aconnector->dc_sink)
2760                         dc_sink_release(aconnector->dc_sink);
2761                 aconnector->dc_sink = NULL;
2762                 amdgpu_dm_update_connector_after_detect(aconnector);
2763                 mutex_unlock(&aconnector->hpd_lock);
2764         }
2765         drm_connector_list_iter_end(&iter);
2766
2767         /* Force mode set in atomic commit */
2768         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2769                 new_crtc_state->active_changed = true;
2770
2771         /*
2772          * atomic_check is expected to create the dc states. We need to release
2773          * them here, since they were duplicated as part of the suspend
2774          * procedure.
2775          */
2776         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2777                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2778                 if (dm_new_crtc_state->stream) {
2779                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2780                         dc_stream_release(dm_new_crtc_state->stream);
2781                         dm_new_crtc_state->stream = NULL;
2782                 }
2783         }
2784
2785         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2786                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2787                 if (dm_new_plane_state->dc_state) {
2788                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2789                         dc_plane_state_release(dm_new_plane_state->dc_state);
2790                         dm_new_plane_state->dc_state = NULL;
2791                 }
2792         }
2793
2794         drm_atomic_helper_resume(ddev, dm->cached_state);
2795
2796         dm->cached_state = NULL;
2797
2798         amdgpu_dm_irq_resume_late(adev);
2799
2800         amdgpu_dm_smu_write_watermarks_table(adev);
2801
2802         return 0;
2803 }
2804
2805 /**
2806  * DOC: DM Lifecycle
2807  *
2808  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2809  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2810  * the base driver's device list to be initialized and torn down accordingly.
2811  *
2812  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2813  */
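/*
 * Wiring sketch (illustrative; the actual call lives in the SoC setup code,
 * not in this file): the block below is typically registered with
 *
 *     amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which the base driver invokes the &struct amd_ip_funcs hooks in the
 * usual init/suspend/resume/fini order.
 */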
2814
2815 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2816         .name = "dm",
2817         .early_init = dm_early_init,
2818         .late_init = dm_late_init,
2819         .sw_init = dm_sw_init,
2820         .sw_fini = dm_sw_fini,
2821         .early_fini = amdgpu_dm_early_fini,
2822         .hw_init = dm_hw_init,
2823         .hw_fini = dm_hw_fini,
2824         .suspend = dm_suspend,
2825         .resume = dm_resume,
2826         .is_idle = dm_is_idle,
2827         .wait_for_idle = dm_wait_for_idle,
2828         .check_soft_reset = dm_check_soft_reset,
2829         .soft_reset = dm_soft_reset,
2830         .set_clockgating_state = dm_set_clockgating_state,
2831         .set_powergating_state = dm_set_powergating_state,
2832 };
2833
2834 const struct amdgpu_ip_block_version dm_ip_block =
2835 {
2836         .type = AMD_IP_BLOCK_TYPE_DCE,
2837         .major = 1,
2838         .minor = 0,
2839         .rev = 0,
2840         .funcs = &amdgpu_dm_funcs,
2841 };
2842
2843
2844 /**
2845  * DOC: atomic
2846  *
2847  * *WIP*
2848  */
2849
2850 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2851         .fb_create = amdgpu_display_user_framebuffer_create,
2852         .get_format_info = amd_get_format_info,
2853         .atomic_check = amdgpu_dm_atomic_check,
2854         .atomic_commit = drm_atomic_helper_commit,
2855 };
2856
2857 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2858         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
2859         .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
2860 };
2861
2862 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2863 {
2864         struct amdgpu_dm_backlight_caps *caps;
2865         struct amdgpu_display_manager *dm;
2866         struct drm_connector *conn_base;
2867         struct amdgpu_device *adev;
2868         struct dc_link *link = NULL;
2869         struct drm_luminance_range_info *luminance_range;
2870         int i;
2871
2872         if (!aconnector || !aconnector->dc_link)
2873                 return;
2874
2875         link = aconnector->dc_link;
2876         if (link->connector_signal != SIGNAL_TYPE_EDP)
2877                 return;
2878
2879         conn_base = &aconnector->base;
2880         adev = drm_to_adev(conn_base->dev);
2881         dm = &adev->dm;
2882         for (i = 0; i < dm->num_of_edps; i++) {
2883                 if (link == dm->backlight_link[i])
2884                         break;
2885         }
2886         if (i >= dm->num_of_edps)
2887                 return;
2888         caps = &dm->backlight_caps[i];
2889         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2890         caps->aux_support = false;
2891
2892         if (caps->ext_caps->bits.oled == 1 /*||
2893             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2894             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2895                 caps->aux_support = true;
2896
2897         if (amdgpu_backlight == 0)
2898                 caps->aux_support = false;
2899         else if (amdgpu_backlight == 1)
2900                 caps->aux_support = true;
2901
2902         luminance_range = &conn_base->display_info.luminance_range;
2903         caps->aux_min_input_signal = luminance_range->min_luminance;
2904         caps->aux_max_input_signal = luminance_range->max_luminance;
2905 }
2906
2907 void amdgpu_dm_update_connector_after_detect(
2908                 struct amdgpu_dm_connector *aconnector)
2909 {
2910         struct drm_connector *connector = &aconnector->base;
2911         struct drm_device *dev = connector->dev;
2912         struct dc_sink *sink;
2913
2914         /* MST handled by drm_mst framework */
2915         if (aconnector->mst_mgr.mst_state)
2916                 return;
2917
2918         sink = aconnector->dc_link->local_sink;
2919         if (sink)
2920                 dc_sink_retain(sink);
2921
2922         /*
2923          * Edid mgmt connector gets its first update only in the mode_valid hook, and then
2924          * the connector sink is set to either a fake or a physical sink depending on link status.
2925          * Skip if already done during boot.
2926          */
2927         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2928                         && aconnector->dc_em_sink) {
2929
2930                 /*
2931                  * For headless S3 resume, use the emulated sink (dc_em_sink) to fake the
2932                  * stream, because connector->sink is set to NULL on resume.
2933                  */
2934                 mutex_lock(&dev->mode_config.mutex);
2935
2936                 if (sink) {
2937                         if (aconnector->dc_sink) {
2938                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2939                                 /*
2940                                  * The retain and release below bump up the sink's
2941                                  * refcount, because the link no longer points to it
2942                                  * after disconnect; otherwise the next crtc-to-connector
2943                                  * reshuffle by the UMD would trigger an unwanted dc_sink release.
2944                                  */
2945                                 dc_sink_release(aconnector->dc_sink);
2946                         }
2947                         aconnector->dc_sink = sink;
2948                         dc_sink_retain(aconnector->dc_sink);
2949                         amdgpu_dm_update_freesync_caps(connector,
2950                                         aconnector->edid);
2951                 } else {
2952                         amdgpu_dm_update_freesync_caps(connector, NULL);
2953                         if (!aconnector->dc_sink) {
2954                                 aconnector->dc_sink = aconnector->dc_em_sink;
2955                                 dc_sink_retain(aconnector->dc_sink);
2956                         }
2957                 }
2958
2959                 mutex_unlock(&dev->mode_config.mutex);
2960
2961                 if (sink)
2962                         dc_sink_release(sink);
2963                 return;
2964         }
2965
2966         /*
2967          * TODO: temporary guard while looking for a proper fix.
2968          * If this sink is an MST sink, we should not do anything.
2969          */
2970         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2971                 dc_sink_release(sink);
2972                 return;
2973         }
2974
2975         if (aconnector->dc_sink == sink) {
2976                 /*
2977                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2978                  * Do nothing!!
2979                  */
2980                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2981                                 aconnector->connector_id);
2982                 if (sink)
2983                         dc_sink_release(sink);
2984                 return;
2985         }
2986
2987         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2988                 aconnector->connector_id, aconnector->dc_sink, sink);
2989
2990         mutex_lock(&dev->mode_config.mutex);
2991
2992         /*
2993          * 1. Update status of the drm connector
2994          * 2. Send an event and let userspace tell us what to do
2995          */
2996         if (sink) {
2997                 /*
2998                  * TODO: check if we still need the S3 mode update workaround.
2999                  * If yes, put it here.
3000                  */
3001                 if (aconnector->dc_sink) {
3002                         amdgpu_dm_update_freesync_caps(connector, NULL);
3003                         dc_sink_release(aconnector->dc_sink);
3004                 }
3005
3006                 aconnector->dc_sink = sink;
3007                 dc_sink_retain(aconnector->dc_sink);
3008                 if (sink->dc_edid.length == 0) {
3009                         aconnector->edid = NULL;
3010                         if (aconnector->dc_link->aux_mode) {
3011                                 drm_dp_cec_unset_edid(
3012                                         &aconnector->dm_dp_aux.aux);
3013                         }
3014                 } else {
3015                         aconnector->edid =
3016                                 (struct edid *)sink->dc_edid.raw_edid;
3017
3018                         if (aconnector->dc_link->aux_mode)
3019                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3020                                                     aconnector->edid);
3021                 }
3022
3023                 drm_connector_update_edid_property(connector, aconnector->edid);
3024                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3025                 update_connector_ext_caps(aconnector);
3026         } else {
3027                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3028                 amdgpu_dm_update_freesync_caps(connector, NULL);
3029                 drm_connector_update_edid_property(connector, NULL);
3030                 aconnector->num_modes = 0;
3031                 dc_sink_release(aconnector->dc_sink);
3032                 aconnector->dc_sink = NULL;
3033                 aconnector->edid = NULL;
3034 #ifdef CONFIG_DRM_AMD_DC_HDCP
3035                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
3036                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3037                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3038 #endif
3039         }
3040
3041         mutex_unlock(&dev->mode_config.mutex);
3042
3043         update_subconnector_property(aconnector);
3044
3045         if (sink)
3046                 dc_sink_release(sink);
3047 }
3048
3049 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3050 {
3051         struct drm_connector *connector = &aconnector->base;
3052         struct drm_device *dev = connector->dev;
3053         enum dc_connection_type new_connection_type = dc_connection_none;
3054         struct amdgpu_device *adev = drm_to_adev(dev);
3055 #ifdef CONFIG_DRM_AMD_DC_HDCP
3056         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3057 #endif
3058         bool ret = false;
3059
3060         if (adev->dm.disable_hpd_irq)
3061                 return;
3062
3063         /*
3064          * In case of failure or MST, there is no need to update the connector status
3065          * or notify the OS, since (in the MST case) MST does this in its own context.
3066          */
3067         mutex_lock(&aconnector->hpd_lock);
3068
3069 #ifdef CONFIG_DRM_AMD_DC_HDCP
3070         if (adev->dm.hdcp_workqueue) {
3071                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3072                 dm_con_state->update_hdcp = true;
3073         }
3074 #endif
3075         if (aconnector->fake_enable)
3076                 aconnector->fake_enable = false;
3077
3078         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3079                 DRM_ERROR("KMS: Failed to detect connector\n");
3080
3081         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3082                 emulated_link_detect(aconnector->dc_link);
3083
3084                 drm_modeset_lock_all(dev);
3085                 dm_restore_drm_connector_state(dev, connector);
3086                 drm_modeset_unlock_all(dev);
3087
3088                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3089                         drm_kms_helper_connector_hotplug_event(connector);
3090         } else {
3091                 mutex_lock(&adev->dm.dc_lock);
3092                 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3093                 mutex_unlock(&adev->dm.dc_lock);
3094                 if (ret) {
3095                         amdgpu_dm_update_connector_after_detect(aconnector);
3096
3097                         drm_modeset_lock_all(dev);
3098                         dm_restore_drm_connector_state(dev, connector);
3099                         drm_modeset_unlock_all(dev);
3100
3101                         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3102                                 drm_kms_helper_connector_hotplug_event(connector);
3103                 }
3104         }
3105         mutex_unlock(&aconnector->hpd_lock);
3107 }
3108
3109 static void handle_hpd_irq(void *param)
3110 {
3111         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3112
3113         handle_hpd_irq_helper(aconnector);
3115 }
3116
3117 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3118 {
3119         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3120         uint8_t dret;
3121         bool new_irq_handled = false;
3122         int dpcd_addr;
3123         int dpcd_bytes_to_read;
3124
3125         const int max_process_count = 30;
3126         int process_count = 0;
3127
3128         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3129
3130         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3131                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3132                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3133                 dpcd_addr = DP_SINK_COUNT;
3134         } else {
3135                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3136                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3137                 dpcd_addr = DP_SINK_COUNT_ESI;
3138         }
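        /*
         * Byte-count check (DPCD addresses from drm_dp.h): for DPCD rev >= 1.2,
         * DP_PSR_ERROR_STATUS (0x2006) - DP_SINK_COUNT_ESI (0x2002) = 4 bytes,
         * covering 0x2002-0x2005; otherwise DP_LANE0_1_STATUS (0x202) -
         * DP_SINK_COUNT (0x200) = 2 bytes, covering 0x200-0x201.
         */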
3139
3140         dret = drm_dp_dpcd_read(
3141                 &aconnector->dm_dp_aux.aux,
3142                 dpcd_addr,
3143                 esi,
3144                 dpcd_bytes_to_read);
3145
3146         while (dret == dpcd_bytes_to_read &&
3147                 process_count < max_process_count) {
3148                 uint8_t retry;
3149                 dret = 0;
3150
3151                 process_count++;
3152
3153                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3154                 /* handle HPD short pulse irq */
3155                 if (aconnector->mst_mgr.mst_state)
3156                         drm_dp_mst_hpd_irq(
3157                                 &aconnector->mst_mgr,
3158                                 esi,
3159                                 &new_irq_handled);
3160
3161                 if (new_irq_handled) {
3162                         /* ACK at DPCD to notify downstream */
3163                         const int ack_dpcd_bytes_to_write =
3164                                 dpcd_bytes_to_read - 1;
3165
3166                         for (retry = 0; retry < 3; retry++) {
3167                                 uint8_t wret;
3168
3169                                 wret = drm_dp_dpcd_write(
3170                                         &aconnector->dm_dp_aux.aux,
3171                                         dpcd_addr + 1,
3172                                         &esi[1],
3173                                         ack_dpcd_bytes_to_write);
3174                                 if (wret == ack_dpcd_bytes_to_write)
3175                                         break;
3176                         }
3177
3178                         /* check if there is new irq to be handled */
3179                         dret = drm_dp_dpcd_read(
3180                                 &aconnector->dm_dp_aux.aux,
3181                                 dpcd_addr,
3182                                 esi,
3183                                 dpcd_bytes_to_read);
3184
3185                         new_irq_handled = false;
3186                 } else {
3187                         break;
3188                 }
3189         }
3190
3191         if (process_count == max_process_count)
3192                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3193 }
3194
3195 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3196                                                         union hpd_irq_data hpd_irq_data)
3197 {
3198         struct hpd_rx_irq_offload_work *offload_work =
3199                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3200
3201         if (!offload_work) {
3202                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3203                 return;
3204         }
3205
3206         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3207         offload_work->data = hpd_irq_data;
3208         offload_work->offload_wq = offload_wq;
3209
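        /*
         * Ownership of offload_work passes to the work queue here; the handler
         * (dm_handle_hpd_rx_offload_work) is expected to free it after
         * servicing the IRQ data (assumption based on this allocation pattern).
         */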
3210         queue_work(offload_wq->wq, &offload_work->work);
3211         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3212 }
3213
3214 static void handle_hpd_rx_irq(void *param)
3215 {
3216         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3217         struct drm_connector *connector = &aconnector->base;
3218         struct drm_device *dev = connector->dev;
3219         struct dc_link *dc_link = aconnector->dc_link;
3220         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3221         bool result = false;
3222         enum dc_connection_type new_connection_type = dc_connection_none;
3223         struct amdgpu_device *adev = drm_to_adev(dev);
3224         union hpd_irq_data hpd_irq_data;
3225         bool link_loss = false;
3226         bool has_left_work = false;
3227         int idx = aconnector->base.index;
3228         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3229
3230         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3231
3232         if (adev->dm.disable_hpd_irq)
3233                 return;
3234
3235         /*
3236          * TODO: This mutex temporarily guards hpd interrupt handling against a
3237          * gpio conflict; once the i2c helper is implemented, this mutex should
3238          * be retired.
3239          */
3240         mutex_lock(&aconnector->hpd_lock);
3241
3242         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3243                                                 &link_loss, true, &has_left_work);
3244
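        /* If DC serviced the IRQ entirely itself, there is nothing left to offload. */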
3245         if (!has_left_work)
3246                 goto out;
3247
3248         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3249                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3250                 goto out;
3251         }
3252
3253         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3254                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3255                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3256                         dm_handle_mst_sideband_msg(aconnector);
3257                         goto out;
3258                 }
3259
3260                 if (link_loss) {
3261                         bool skip = false;
3262
3263                         spin_lock(&offload_wq->offload_lock);
3264                         skip = offload_wq->is_handling_link_loss;
3265
3266                         if (!skip)
3267                                 offload_wq->is_handling_link_loss = true;
3268
3269                         spin_unlock(&offload_wq->offload_lock);
3270
3271                         if (!skip)
3272                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3273
3274                         goto out;
3275                 }
3276         }
3277
3278 out:
3279         if (result && !is_mst_root_connector) {
3280                 /* Downstream Port status changed. */
3281                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3282                         DRM_ERROR("KMS: Failed to detect connector\n");
3283
3284                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3285                         emulated_link_detect(dc_link);
3286
3287                         if (aconnector->fake_enable)
3288                                 aconnector->fake_enable = false;
3289
3290                         amdgpu_dm_update_connector_after_detect(aconnector);
3291
3293                         drm_modeset_lock_all(dev);
3294                         dm_restore_drm_connector_state(dev, connector);
3295                         drm_modeset_unlock_all(dev);
3296
3297                         drm_kms_helper_connector_hotplug_event(connector);
3298                 } else {
3299                         bool ret = false;
3300
3301                         mutex_lock(&adev->dm.dc_lock);
3302                         ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3303                         mutex_unlock(&adev->dm.dc_lock);
3304
3305                         if (ret) {
3306                                 if (aconnector->fake_enable)
3307                                         aconnector->fake_enable = false;
3308
3309                                 amdgpu_dm_update_connector_after_detect(aconnector);
3310
3311                                 drm_modeset_lock_all(dev);
3312                                 dm_restore_drm_connector_state(dev, connector);
3313                                 drm_modeset_unlock_all(dev);
3314
3315                                 drm_kms_helper_connector_hotplug_event(connector);
3316                         }
3317                 }
3318         }
3319 #ifdef CONFIG_DRM_AMD_DC_HDCP
3320         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3321                 if (adev->dm.hdcp_workqueue)
3322                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3323         }
3324 #endif
3325
3326         if (dc_link->type != dc_connection_mst_branch)
3327                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3328
3329         mutex_unlock(&aconnector->hpd_lock);
3330 }
3331
3332 static void register_hpd_handlers(struct amdgpu_device *adev)
3333 {
3334         struct drm_device *dev = adev_to_drm(adev);
3335         struct drm_connector *connector;
3336         struct amdgpu_dm_connector *aconnector;
3337         const struct dc_link *dc_link;
3338         struct dc_interrupt_params int_params = {0};
3339
3340         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3341         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3342
3343         list_for_each_entry(connector,
3344                         &dev->mode_config.connector_list, head) {
3345
3346                 aconnector = to_amdgpu_dm_connector(connector);
3347                 dc_link = aconnector->dc_link;
3348
3349                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3350                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3351                         int_params.irq_source = dc_link->irq_source_hpd;
3352
3353                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3354                                         handle_hpd_irq,
3355                                         (void *) aconnector);
3356                 }
3357
3358                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3359
3360                         /* Also register for DP short pulse (hpd_rx). */
3361                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3362                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3363
3364                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3365                                         handle_hpd_rx_irq,
3366                                         (void *) aconnector);
3367
3368                         if (adev->dm.hpd_rx_offload_wq)
3369                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3370                                         aconnector;
3371                 }
3372         }
3373 }
3374
3375 #if defined(CONFIG_DRM_AMD_DC_SI)
3376 /* Register IRQ sources and initialize IRQ callbacks */
3377 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3378 {
3379         struct dc *dc = adev->dm.dc;
3380         struct common_irq_params *c_irq_params;
3381         struct dc_interrupt_params int_params = {0};
3382         int r;
3383         int i;
3384         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3385
3386         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3387         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3388
3389         /*
3390          * Actions of amdgpu_irq_add_id():
3391          * 1. Register a set() function with base driver.
3392          *    Base driver will call set() function to enable/disable an
3393          *    interrupt in DC hardware.
3394          * 2. Register amdgpu_dm_irq_handler().
3395          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3396          *    coming from DC hardware.
3397          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3398          *    for acknowledging and handling. */
3399
3400         /* Use VBLANK interrupt */
3401         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3402                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3403                 if (r) {
3404                         DRM_ERROR("Failed to add crtc irq id!\n");
3405                         return r;
3406                 }
3407
3408                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3409                 int_params.irq_source =
3410                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3411
3412                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3413
3414                 c_irq_params->adev = adev;
3415                 c_irq_params->irq_src = int_params.irq_source;
3416
3417                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3418                                 dm_crtc_high_irq, c_irq_params);
3419         }
3420
3421         /* Use GRPH_PFLIP interrupt */
3422         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3423                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3424                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3425                 if (r) {
3426                         DRM_ERROR("Failed to add page flip irq id!\n");
3427                         return r;
3428                 }
3429
3430                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3431                 int_params.irq_source =
3432                         dc_interrupt_to_irq_source(dc, i, 0);
3433
3434                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3435
3436                 c_irq_params->adev = adev;
3437                 c_irq_params->irq_src = int_params.irq_source;
3438
3439                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3440                                 dm_pflip_high_irq, c_irq_params);
3441
3443
3444         /* HPD */
3445         r = amdgpu_irq_add_id(adev, client_id,
3446                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3447         if (r) {
3448                 DRM_ERROR("Failed to add hpd irq id!\n");
3449                 return r;
3450         }
3451
3452         register_hpd_handlers(adev);
3453
3454         return 0;
3455 }
3456 #endif
3457
3458 /* Register IRQ sources and initialize IRQ callbacks */
3459 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3460 {
3461         struct dc *dc = adev->dm.dc;
3462         struct common_irq_params *c_irq_params;
3463         struct dc_interrupt_params int_params = {0};
3464         int r;
3465         int i;
3466         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3467
3468         if (adev->family >= AMDGPU_FAMILY_AI)
3469                 client_id = SOC15_IH_CLIENTID_DCE;
3470
3471         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3472         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3473
3474         /*
3475          * Actions of amdgpu_irq_add_id():
3476          * 1. Register a set() function with base driver.
3477          *    Base driver will call set() function to enable/disable an
3478          *    interrupt in DC hardware.
3479          * 2. Register amdgpu_dm_irq_handler().
3480          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3481          *    coming from DC hardware.
3482          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3483          *    for acknowledging and handling. */
3484
3485         /* Use VBLANK interrupt */
3486         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3487                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3488                 if (r) {
3489                         DRM_ERROR("Failed to add crtc irq id!\n");
3490                         return r;
3491                 }
3492
3493                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3494                 int_params.irq_source =
3495                         dc_interrupt_to_irq_source(dc, i, 0);
3496
3497                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3498
3499                 c_irq_params->adev = adev;
3500                 c_irq_params->irq_src = int_params.irq_source;
3501
3502                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3503                                 dm_crtc_high_irq, c_irq_params);
3504         }
3505
3506         /* Use VUPDATE interrupt */
3507         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3508                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3509                 if (r) {
3510                         DRM_ERROR("Failed to add vupdate irq id!\n");
3511                         return r;
3512                 }
3513
3514                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3515                 int_params.irq_source =
3516                         dc_interrupt_to_irq_source(dc, i, 0);
3517
3518                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3519
3520                 c_irq_params->adev = adev;
3521                 c_irq_params->irq_src = int_params.irq_source;
3522
3523                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3524                                 dm_vupdate_high_irq, c_irq_params);
3525         }
3526
3527         /* Use GRPH_PFLIP interrupt */
3528         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3529                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3530                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3531                 if (r) {
3532                         DRM_ERROR("Failed to add page flip irq id!\n");
3533                         return r;
3534                 }
3535
3536                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3537                 int_params.irq_source =
3538                         dc_interrupt_to_irq_source(dc, i, 0);
3539
3540                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3541
3542                 c_irq_params->adev = adev;
3543                 c_irq_params->irq_src = int_params.irq_source;
3544
3545                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3546                                 dm_pflip_high_irq, c_irq_params);
3548         }
3549
3550         /* HPD */
3551         r = amdgpu_irq_add_id(adev, client_id,
3552                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3553         if (r) {
3554                 DRM_ERROR("Failed to add hpd irq id!\n");
3555                 return r;
3556         }
3557
3558         register_hpd_handlers(adev);
3559
3560         return 0;
3561 }
3562
3563 /* Register IRQ sources and initialize IRQ callbacks */
3564 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3565 {
3566         struct dc *dc = adev->dm.dc;
3567         struct common_irq_params *c_irq_params;
3568         struct dc_interrupt_params int_params = {0};
3569         int r;
3570         int i;
3571 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3572         static const unsigned int vrtl_int_srcid[] = {
3573                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3574                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3575                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3576                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3577                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3578                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3579         };
3580 #endif
3581
3582         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3583         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3584
3585         /*
3586          * Actions of amdgpu_irq_add_id():
3587          * 1. Register a set() function with base driver.
3588          *    Base driver will call set() function to enable/disable an
3589          *    interrupt in DC hardware.
3590          * 2. Register amdgpu_dm_irq_handler().
3591          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3592          *    coming from DC hardware.
3593          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3594          *    for acknowledging and handling.
3595          */
3596
3597         /* Use VSTARTUP interrupt */
3598         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3599                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3600                         i++) {
3601                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3602
3603                 if (r) {
3604                         DRM_ERROR("Failed to add crtc irq id!\n");
3605                         return r;
3606                 }
3607
3608                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3609                 int_params.irq_source =
3610                         dc_interrupt_to_irq_source(dc, i, 0);
3611
3612                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3613
3614                 c_irq_params->adev = adev;
3615                 c_irq_params->irq_src = int_params.irq_source;
3616
3617                 amdgpu_dm_irq_register_interrupt(
3618                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3619         }
3620
3621         /* Use otg vertical line interrupt */
3622 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3623         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3624                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3625                                 vrtl_int_srcid[i], &adev->vline0_irq);
3626
3627                 if (r) {
3628                         DRM_ERROR("Failed to add vline0 irq id!\n");
3629                         return r;
3630                 }
3631
3632                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3633                 int_params.irq_source =
3634                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3635
3636                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3637                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3638                         break;
3639                 }
3640
3641                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3642                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3643
3644                 c_irq_params->adev = adev;
3645                 c_irq_params->irq_src = int_params.irq_source;
3646
3647                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3648                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3649         }
3650 #endif
3651
3652         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3653          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3654          * to trigger at end of each vblank, regardless of state of the lock,
3655          * matching DCE behaviour.
3656          */
3657         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3658              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3659              i++) {
3660                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3661
3662                 if (r) {
3663                         DRM_ERROR("Failed to add vupdate irq id!\n");
3664                         return r;
3665                 }
3666
3667                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3668                 int_params.irq_source =
3669                         dc_interrupt_to_irq_source(dc, i, 0);
3670
3671                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3672
3673                 c_irq_params->adev = adev;
3674                 c_irq_params->irq_src = int_params.irq_source;
3675
3676                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3677                                 dm_vupdate_high_irq, c_irq_params);
3678         }
3679
3680         /* Use GRPH_PFLIP interrupt */
3681         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3682                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3683                         i++) {
3684                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3685                 if (r) {
3686                         DRM_ERROR("Failed to add page flip irq id!\n");
3687                         return r;
3688                 }
3689
3690                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3691                 int_params.irq_source =
3692                         dc_interrupt_to_irq_source(dc, i, 0);
3693
3694                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3695
3696                 c_irq_params->adev = adev;
3697                 c_irq_params->irq_src = int_params.irq_source;
3698
3699                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3700                                 dm_pflip_high_irq, c_irq_params);
3702         }
3703
3704         /* HPD */
3705         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3706                         &adev->hpd_irq);
3707         if (r) {
3708                 DRM_ERROR("Failed to add hpd irq id!\n");
3709                 return r;
3710         }
3711
3712         register_hpd_handlers(adev);
3713
3714         return 0;
3715 }
3716 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3717 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3718 {
3719         struct dc *dc = adev->dm.dc;
3720         struct common_irq_params *c_irq_params;
3721         struct dc_interrupt_params int_params = {0};
3722         int r, i;
3723
3724         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3725         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3726
3727         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3728                         &adev->dmub_outbox_irq);
3729         if (r) {
3730                 DRM_ERROR("Failed to add outbox irq id!\n");
3731                 return r;
3732         }
3733
3734         if (dc->ctx->dmub_srv) {
3735                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3736                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3737                 int_params.irq_source =
3738                 dc_interrupt_to_irq_source(dc, i, 0);
3739
3740                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3741
3742                 c_irq_params->adev = adev;
3743                 c_irq_params->irq_src = int_params.irq_source;
3744
3745                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3746                                 dm_dmub_outbox1_low_irq, c_irq_params);
3747         }
3748
3749         return 0;
3750 }
3751
3752 /*
3753  * Acquires the lock for the atomic state object and returns
3754  * the new atomic state.
3755  *
3756  * This should only be called during atomic check.
3757  */
3758 int dm_atomic_get_state(struct drm_atomic_state *state,
3759                         struct dm_atomic_state **dm_state)
3760 {
3761         struct drm_device *dev = state->dev;
3762         struct amdgpu_device *adev = drm_to_adev(dev);
3763         struct amdgpu_display_manager *dm = &adev->dm;
3764         struct drm_private_state *priv_state;
3765
3766         if (*dm_state)
3767                 return 0;
3768
3769         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3770         if (IS_ERR(priv_state))
3771                 return PTR_ERR(priv_state);
3772
3773         *dm_state = to_dm_atomic_state(priv_state);
3774
3775         return 0;
3776 }
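/*
 * Usage sketch (illustrative only), as seen from an atomic_check path:
 *
 *     struct dm_atomic_state *dm_state = NULL;
 *     int ret = dm_atomic_get_state(state, &dm_state);
 *
 *     if (ret)
 *             return ret;
 *     ... dm_state->context now refers to the dc_state for this commit ...
 */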
3777
3778 static struct dm_atomic_state *
3779 dm_atomic_get_new_state(struct drm_atomic_state *state)
3780 {
3781         struct drm_device *dev = state->dev;
3782         struct amdgpu_device *adev = drm_to_adev(dev);
3783         struct amdgpu_display_manager *dm = &adev->dm;
3784         struct drm_private_obj *obj;
3785         struct drm_private_state *new_obj_state;
3786         int i;
3787
3788         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3789                 if (obj->funcs == dm->atomic_obj.funcs)
3790                         return to_dm_atomic_state(new_obj_state);
3791         }
3792
3793         return NULL;
3794 }
3795
3796 static struct drm_private_state *
3797 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3798 {
3799         struct dm_atomic_state *old_state, *new_state;
3800
3801         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3802         if (!new_state)
3803                 return NULL;
3804
3805         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3806
3807         old_state = to_dm_atomic_state(obj->state);
3808
3809         if (old_state && old_state->context)
3810                 new_state->context = dc_copy_state(old_state->context);
3811
3812         if (!new_state->context) {
3813                 kfree(new_state);
3814                 return NULL;
3815         }
3816
3817         return &new_state->base;
3818 }
3819
3820 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3821                                     struct drm_private_state *state)
3822 {
3823         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3824
3825         if (dm_state && dm_state->context)
3826                 dc_release_state(dm_state->context);
3827
3828         kfree(dm_state);
3829 }
3830
3831 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3832         .atomic_duplicate_state = dm_atomic_duplicate_state,
3833         .atomic_destroy_state = dm_atomic_destroy_state,
3834 };
3835
3836 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3837 {
3838         struct dm_atomic_state *state;
3839         int r;
3840
3841         adev->mode_info.mode_config_initialized = true;
3842
3843         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3844         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3845
3846         adev_to_drm(adev)->mode_config.max_width = 16384;
3847         adev_to_drm(adev)->mode_config.max_height = 16384;
3848
3849         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3850         if (adev->asic_type == CHIP_HAWAII)
3851                 /* disable prefer shadow for now due to hibernation issues */
3852                 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3853         else
3854                 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3855         /* indicates support for immediate flip */
3856         adev_to_drm(adev)->mode_config.async_page_flip = true;
3857
3858         state = kzalloc(sizeof(*state), GFP_KERNEL);
3859         if (!state)
3860                 return -ENOMEM;
3861
3862         state->context = dc_create_state(adev->dm.dc);
3863         if (!state->context) {
3864                 kfree(state);
3865                 return -ENOMEM;
3866         }
3867
3868         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3869
3870         drm_atomic_private_obj_init(adev_to_drm(adev),
3871                                     &adev->dm.atomic_obj,
3872                                     &state->base,
3873                                     &dm_atomic_state_funcs);
3874
3875         r = amdgpu_display_modeset_create_props(adev);
3876         if (r) {
3877                 dc_release_state(state->context);
3878                 kfree(state);
3879                 return r;
3880         }
3881
3882         r = amdgpu_dm_audio_init(adev);
3883         if (r) {
3884                 dc_release_state(state->context);
3885                 kfree(state);
3886                 return r;
3887         }
3888
3889         return 0;
3890 }
3891
3892 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3893 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3894 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3895
3896 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3897                                             int bl_idx)
3898 {
3899 #if defined(CONFIG_ACPI)
3900         struct amdgpu_dm_backlight_caps caps;
3901
3902         memset(&caps, 0, sizeof(caps));
3903
3904         if (dm->backlight_caps[bl_idx].caps_valid)
3905                 return;
3906
3907         amdgpu_acpi_get_backlight_caps(&caps);
3908         if (caps.caps_valid) {
3909                 dm->backlight_caps[bl_idx].caps_valid = true;
3910                 if (caps.aux_support)
3911                         return;
3912                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3913                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3914         } else {
3915                 dm->backlight_caps[bl_idx].min_input_signal =
3916                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3917                 dm->backlight_caps[bl_idx].max_input_signal =
3918                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3919         }
3920 #else
3921         if (dm->backlight_caps[bl_idx].aux_support)
3922                 return;
3923
3924         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3925         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3926 #endif
3927 }
3928
3929 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3930                                 unsigned *min, unsigned *max)
3931 {
3932         if (!caps)
3933                 return 0;
3934
3935         if (caps->aux_support) {
3936                 // Firmware limits are in nits, DC API wants millinits.
3937                 *max = 1000 * caps->aux_max_input_signal;
3938                 *min = 1000 * caps->aux_min_input_signal;
3939         } else {
3940                 // Firmware limits are 8-bit, PWM control is 16-bit.
3941                 *max = 0x101 * caps->max_input_signal;
3942                 *min = 0x101 * caps->min_input_signal;
3943         }
3944         return 1;
3945 }
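/*
 * Worked example for the PWM path above: 0x101 == 257 and 257 * 255 == 0xFFFF,
 * so an 8-bit firmware limit of 255 expands to the full 16-bit PWM range,
 * while a min_input_signal of 12 becomes 257 * 12 = 3084.
 */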
3946
3947 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3948                                         uint32_t brightness)
3949 {
3950         unsigned min, max;
3951
3952         if (!get_brightness_range(caps, &min, &max))
3953                 return brightness;
3954
3955         // Rescale 0..255 to min..max
3956         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3957                                        AMDGPU_MAX_BL_LEVEL);
3958 }
3959
3960 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3961                                       uint32_t brightness)
3962 {
3963         unsigned min, max;
3964
3965         if (!get_brightness_range(caps, &min, &max))
3966                 return brightness;
3967
3968         if (brightness < min)
3969                 return 0;
3970         // Rescale min..max to 0..255
3971         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3972                                  max - min);
3973 }
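/*
 * Round-trip example with min = 3084 and max = 65535: a user level of 128 maps
 * to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432 in
 * convert_brightness_from_user(), and feeding 34432 back through
 * convert_brightness_to_user() yields 128 again.
 */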
3974
3975 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3976                                          int bl_idx,
3977                                          u32 user_brightness)
3978 {
3979         struct amdgpu_dm_backlight_caps caps;
3980         struct dc_link *link;
3981         u32 brightness;
3982         bool rc;
3983
3984         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3985         caps = dm->backlight_caps[bl_idx];
3986
3987         dm->brightness[bl_idx] = user_brightness;
3988         /* update scratch register */
3989         if (bl_idx == 0)
3990                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3991         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3992         link = (struct dc_link *)dm->backlight_link[bl_idx];
3993
3994         /* Change brightness based on AUX property */
3995         if (caps.aux_support) {
3996                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3997                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3998                 if (!rc)
3999                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4000         } else {
4001                 rc = dc_link_set_backlight_level(link, brightness, 0);
4002                 if (!rc)
4003                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4004         }
4005
4006         if (rc)
4007                 dm->actual_brightness[bl_idx] = user_brightness;
4008 }
4009
4010 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4011 {
4012         struct amdgpu_display_manager *dm = bl_get_data(bd);
4013         int i;
4014
4015         for (i = 0; i < dm->num_of_edps; i++) {
4016                 if (bd == dm->backlight_dev[i])
4017                         break;
4018         }
4019         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4020                 i = 0;
4021         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4022
4023         return 0;
4024 }
4025
4026 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4027                                          int bl_idx)
4028 {
4029         struct amdgpu_dm_backlight_caps caps;
4030         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4031
4032         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4033         caps = dm->backlight_caps[bl_idx];
4034
4035         if (caps.aux_support) {
4036                 u32 avg, peak;
4037                 bool rc;
4038
4039                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4040                 if (!rc)
4041                         return dm->brightness[bl_idx];
4042                 return convert_brightness_to_user(&caps, avg);
4043         } else {
4044                 int ret = dc_link_get_backlight_level(link);
4045
4046                 if (ret == DC_ERROR_UNEXPECTED)
4047                         return dm->brightness[bl_idx];
4048                 return convert_brightness_to_user(&caps, ret);
4049         }
4050 }
4051
4052 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4053 {
4054         struct amdgpu_display_manager *dm = bl_get_data(bd);
4055         int i;
4056
4057         for (i = 0; i < dm->num_of_edps; i++) {
4058                 if (bd == dm->backlight_dev[i])
4059                         break;
4060         }
4061         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4062                 i = 0;
4063         return amdgpu_dm_backlight_get_level(dm, i);
4064 }
4065
4066 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4067         .options = BL_CORE_SUSPENDRESUME,
4068         .get_brightness = amdgpu_dm_backlight_get_brightness,
4069         .update_status  = amdgpu_dm_backlight_update_status,
4070 };
4071
4072 static void
4073 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4074 {
4075         char bl_name[16];
4076         struct backlight_properties props = { 0 };
4077
4078         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4079         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4080
4081         if (!acpi_video_backlight_use_native()) {
4082                 drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n");
4083                 /* Try registering an ACPI video backlight device instead. */
4084                 acpi_video_register_backlight();
4085                 return;
4086         }
4087
4088         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4089         props.brightness = AMDGPU_MAX_BL_LEVEL;
4090         props.type = BACKLIGHT_RAW;
4091
4092         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4093                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4094
4095         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4096                                                                        adev_to_drm(dm->adev)->dev,
4097                                                                        dm,
4098                                                                        &amdgpu_dm_backlight_ops,
4099                                                                        &props);
4100
4101         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4102                 DRM_ERROR("DM: Backlight registration failed!\n");
4103         else
4104                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4105 }
4106
4107 static int initialize_plane(struct amdgpu_display_manager *dm,
4108                             struct amdgpu_mode_info *mode_info, int plane_id,
4109                             enum drm_plane_type plane_type,
4110                             const struct dc_plane_cap *plane_cap)
4111 {
4112         struct drm_plane *plane;
4113         unsigned long possible_crtcs;
4114         int ret = 0;
4115
4116         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4117         if (!plane) {
4118                 DRM_ERROR("KMS: Failed to allocate plane\n");
4119                 return -ENOMEM;
4120         }
4121         plane->type = plane_type;
4122
4123         /*
4124          * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC on planes that will not be used as a primary plane
	 * for a CRTC, such as overlay or underlay planes.
4128          */
4129         possible_crtcs = 1 << plane_id;
4130         if (plane_id >= dm->dc->caps.max_streams)
4131                 possible_crtcs = 0xff;
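	/*
	 * Example: plane_id 0 gets possible_crtcs 0x1 (CRTC 0 only),
	 * while a plane_id at or beyond max_streams gets 0xff (any of
	 * the first eight CRTCs).
	 */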
4132
4133         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4134
4135         if (ret) {
4136                 DRM_ERROR("KMS: Failed to initialize plane\n");
4137                 kfree(plane);
4138                 return ret;
4139         }
4140
4141         if (mode_info)
4142                 mode_info->planes[plane_id] = plane;
4143
4144         return ret;
4145 }
4146
4147
4148 static void register_backlight_device(struct amdgpu_display_manager *dm,
4149                                       struct dc_link *link)
4150 {
4151         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4152             link->type != dc_connection_none) {
4153                 /*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having backlight control
		 * is better than a black screen.
4157                  */
4158                 if (!dm->backlight_dev[dm->num_of_edps])
4159                         amdgpu_dm_register_backlight_device(dm);
4160
4161                 if (dm->backlight_dev[dm->num_of_edps]) {
4162                         dm->backlight_link[dm->num_of_edps] = link;
4163                         dm->num_of_edps++;
4164                 }
4165         }
4166 }
4167
4168 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4169
4170 /*
4171  * In this architecture, the association
4172  * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
4175  *
4176  * Returns 0 on success
4177  */
4178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4179 {
4180         struct amdgpu_display_manager *dm = &adev->dm;
4181         int32_t i;
4182         struct amdgpu_dm_connector *aconnector = NULL;
4183         struct amdgpu_encoder *aencoder = NULL;
4184         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4185         uint32_t link_cnt;
4186         int32_t primary_planes;
4187         enum dc_connection_type new_connection_type = dc_connection_none;
4188         const struct dc_plane_cap *plane;
4189         bool psr_feature_enabled = false;
4190
4191         dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs in use */
4193         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4194
4195         link_cnt = dm->dc->caps.max_links;
4196         if (amdgpu_dm_mode_config_init(dm->adev)) {
4197                 DRM_ERROR("DM: Failed to initialize mode config\n");
4198                 return -EINVAL;
4199         }
4200
4201         /* There is one primary plane per CRTC */
4202         primary_planes = dm->dc->caps.max_streams;
4203         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4204
4205         /*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4207          * Order is reversed to match iteration order in atomic check.
4208          */
4209         for (i = (primary_planes - 1); i >= 0; i--) {
4210                 plane = &dm->dc->caps.planes[i];
4211
4212                 if (initialize_plane(dm, mode_info, i,
4213                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4214                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4215                         goto fail;
4216                 }
4217         }
4218
4219         /*
4220          * Initialize overlay planes, index starting after primary planes.
4221          * These planes have a higher DRM index than the primary planes since
4222          * they should be considered as having a higher z-order.
4223          * Order is reversed to match iteration order in atomic check.
4224          *
4225          * Only support DCN for now, and only expose one so we don't encourage
4226          * userspace to use up all the pipes.
4227          */
4228         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4229                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4230
4231                 /* Do not create overlay if MPO disabled */
4232                 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4233                         break;
4234
4235                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4236                         continue;
4237
4238                 if (!plane->blends_with_above || !plane->blends_with_below)
4239                         continue;
4240
4241                 if (!plane->pixel_format_support.argb8888)
4242                         continue;
4243
4244                 if (initialize_plane(dm, NULL, primary_planes + i,
4245                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4246                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4247                         goto fail;
4248                 }
4249
4250                 /* Only create one overlay plane. */
4251                 break;
4252         }
4253
4254         for (i = 0; i < dm->dc->caps.max_streams; i++)
4255                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4256                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4257                         goto fail;
4258                 }
4259
4260         /* Use Outbox interrupt */
4261         switch (adev->ip_versions[DCE_HWIP][0]) {
4262         case IP_VERSION(3, 0, 0):
4263         case IP_VERSION(3, 1, 2):
4264         case IP_VERSION(3, 1, 3):
4265         case IP_VERSION(3, 1, 4):
4266         case IP_VERSION(3, 1, 5):
4267         case IP_VERSION(3, 1, 6):
4268         case IP_VERSION(3, 2, 0):
4269         case IP_VERSION(3, 2, 1):
4270         case IP_VERSION(2, 1, 0):
4271                 if (register_outbox_irq_handlers(dm->adev)) {
4272                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4273                         goto fail;
4274                 }
4275                 break;
4276         default:
4277                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4278                               adev->ip_versions[DCE_HWIP][0]);
4279         }
4280
4281         /* Determine whether to enable PSR support by default. */
4282         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4283                 switch (adev->ip_versions[DCE_HWIP][0]) {
4284                 case IP_VERSION(3, 1, 2):
4285                 case IP_VERSION(3, 1, 3):
4286                 case IP_VERSION(3, 1, 4):
4287                 case IP_VERSION(3, 1, 5):
4288                 case IP_VERSION(3, 1, 6):
4289                 case IP_VERSION(3, 2, 0):
4290                 case IP_VERSION(3, 2, 1):
4291                         psr_feature_enabled = true;
4292                         break;
4293                 default:
4294                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4295                         break;
4296                 }
4297         }
4298
	/* Loop over all connectors on the board */
4300         for (i = 0; i < link_cnt; i++) {
4301                 struct dc_link *link = NULL;
4302
4303                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4304                         DRM_ERROR(
4305                                 "KMS: Cannot support more than %d display indexes\n",
4306                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4307                         continue;
4308                 }
4309
4310                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4311                 if (!aconnector)
4312                         goto fail;
4313
4314                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4315                 if (!aencoder)
4316                         goto fail;
4317
4318                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4319                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4320                         goto fail;
4321                 }
4322
4323                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4324                         DRM_ERROR("KMS: Failed to initialize connector\n");
4325                         goto fail;
4326                 }
4327
4328                 link = dc_get_link_at_index(dm->dc, i);
4329
4330                 if (!dc_link_detect_sink(link, &new_connection_type))
4331                         DRM_ERROR("KMS: Failed to detect connector\n");
4332
4333                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4334                         emulated_link_detect(link);
4335                         amdgpu_dm_update_connector_after_detect(aconnector);
4336                 } else {
4337                         bool ret = false;
4338
4339                         mutex_lock(&dm->dc_lock);
4340                         ret = dc_link_detect(link, DETECT_REASON_BOOT);
4341                         mutex_unlock(&dm->dc_lock);
4342
4343                         if (ret) {
4344                                 amdgpu_dm_update_connector_after_detect(aconnector);
4345                                 register_backlight_device(dm, link);
4346
4347                                 if (dm->num_of_edps)
4348                                         update_connector_ext_caps(aconnector);
4349
4350                                 if (psr_feature_enabled)
4351                                         amdgpu_dm_set_psr_caps(link);
4352
4353                                 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4354                                  * PSR is also supported.
4355                                  */
4356                                 if (link->psr_settings.psr_feature_enabled)
4357                                         adev_to_drm(adev)->vblank_disable_immediate = false;
4358                         }
4359                 }
4360                 amdgpu_set_panel_orientation(&aconnector->base);
4361         }
4362
4363         /* Software is initialized. Now we can register interrupt handlers. */
4364         switch (adev->asic_type) {
4365 #if defined(CONFIG_DRM_AMD_DC_SI)
4366         case CHIP_TAHITI:
4367         case CHIP_PITCAIRN:
4368         case CHIP_VERDE:
4369         case CHIP_OLAND:
4370                 if (dce60_register_irq_handlers(dm->adev)) {
4371                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4372                         goto fail;
4373                 }
4374                 break;
4375 #endif
4376         case CHIP_BONAIRE:
4377         case CHIP_HAWAII:
4378         case CHIP_KAVERI:
4379         case CHIP_KABINI:
4380         case CHIP_MULLINS:
4381         case CHIP_TONGA:
4382         case CHIP_FIJI:
4383         case CHIP_CARRIZO:
4384         case CHIP_STONEY:
4385         case CHIP_POLARIS11:
4386         case CHIP_POLARIS10:
4387         case CHIP_POLARIS12:
4388         case CHIP_VEGAM:
4389         case CHIP_VEGA10:
4390         case CHIP_VEGA12:
4391         case CHIP_VEGA20:
4392                 if (dce110_register_irq_handlers(dm->adev)) {
4393                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4394                         goto fail;
4395                 }
4396                 break;
4397         default:
4398                 switch (adev->ip_versions[DCE_HWIP][0]) {
4399                 case IP_VERSION(1, 0, 0):
4400                 case IP_VERSION(1, 0, 1):
4401                 case IP_VERSION(2, 0, 2):
4402                 case IP_VERSION(2, 0, 3):
4403                 case IP_VERSION(2, 0, 0):
4404                 case IP_VERSION(2, 1, 0):
4405                 case IP_VERSION(3, 0, 0):
4406                 case IP_VERSION(3, 0, 2):
4407                 case IP_VERSION(3, 0, 3):
4408                 case IP_VERSION(3, 0, 1):
4409                 case IP_VERSION(3, 1, 2):
4410                 case IP_VERSION(3, 1, 3):
4411                 case IP_VERSION(3, 1, 4):
4412                 case IP_VERSION(3, 1, 5):
4413                 case IP_VERSION(3, 1, 6):
4414                 case IP_VERSION(3, 2, 0):
4415                 case IP_VERSION(3, 2, 1):
4416                         if (dcn10_register_irq_handlers(dm->adev)) {
4417                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4418                                 goto fail;
4419                         }
4420                         break;
4421                 default:
4422                         DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4423                                         adev->ip_versions[DCE_HWIP][0]);
4424                         goto fail;
4425                 }
4426                 break;
4427         }
4428
4429         return 0;
4430 fail:
4431         kfree(aencoder);
4432         kfree(aconnector);
4433
4434         return -EINVAL;
4435 }
4436
4437 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4438 {
4439         drm_atomic_private_obj_fini(&dm->atomic_obj);
4441 }
4442
4443 /******************************************************************************
4444  * amdgpu_display_funcs functions
4445  *****************************************************************************/
4446
4447 /*
4448  * dm_bandwidth_update - program display watermarks
4449  *
4450  * @adev: amdgpu_device pointer
4451  *
4452  * Calculate and program the display watermarks and line buffer allocation.
4453  */
4454 static void dm_bandwidth_update(struct amdgpu_device *adev)
4455 {
4456         /* TODO: implement later */
4457 }
4458
4459 static const struct amdgpu_display_funcs dm_display_funcs = {
4460         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4462         .backlight_set_level = NULL, /* never called for DC */
4463         .backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
4465         .hpd_set_polarity = NULL, /* called unconditionally */
4466         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4467         .page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
4469         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4470         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4471 };
4472
4473 #if defined(CONFIG_DEBUG_KERNEL_DC)
4474
4475 static ssize_t s3_debug_store(struct device *device,
4476                               struct device_attribute *attr,
4477                               const char *buf,
4478                               size_t count)
4479 {
4480         int ret;
4481         int s3_state;
4482         struct drm_device *drm_dev = dev_get_drvdata(device);
4483         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4484
4485         ret = kstrtoint(buf, 0, &s3_state);
4486
4487         if (ret == 0) {
4488                 if (s3_state) {
4489                         dm_resume(adev);
4490                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4491                 } else
4492                         dm_suspend(adev);
4493         }
4494
4495         return ret == 0 ? count : 0;
4496 }
4497
4498 DEVICE_ATTR_WO(s3_debug);
4499
4500 #endif
4501
4502 static int dm_early_init(void *handle)
4503 {
4504         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4505
4506         switch (adev->asic_type) {
4507 #if defined(CONFIG_DRM_AMD_DC_SI)
4508         case CHIP_TAHITI:
4509         case CHIP_PITCAIRN:
4510         case CHIP_VERDE:
4511                 adev->mode_info.num_crtc = 6;
4512                 adev->mode_info.num_hpd = 6;
4513                 adev->mode_info.num_dig = 6;
4514                 break;
4515         case CHIP_OLAND:
4516                 adev->mode_info.num_crtc = 2;
4517                 adev->mode_info.num_hpd = 2;
4518                 adev->mode_info.num_dig = 2;
4519                 break;
4520 #endif
4521         case CHIP_BONAIRE:
4522         case CHIP_HAWAII:
4523                 adev->mode_info.num_crtc = 6;
4524                 adev->mode_info.num_hpd = 6;
4525                 adev->mode_info.num_dig = 6;
4526                 break;
4527         case CHIP_KAVERI:
4528                 adev->mode_info.num_crtc = 4;
4529                 adev->mode_info.num_hpd = 6;
4530                 adev->mode_info.num_dig = 7;
4531                 break;
4532         case CHIP_KABINI:
4533         case CHIP_MULLINS:
4534                 adev->mode_info.num_crtc = 2;
4535                 adev->mode_info.num_hpd = 6;
4536                 adev->mode_info.num_dig = 6;
4537                 break;
4538         case CHIP_FIJI:
4539         case CHIP_TONGA:
4540                 adev->mode_info.num_crtc = 6;
4541                 adev->mode_info.num_hpd = 6;
4542                 adev->mode_info.num_dig = 7;
4543                 break;
4544         case CHIP_CARRIZO:
4545                 adev->mode_info.num_crtc = 3;
4546                 adev->mode_info.num_hpd = 6;
4547                 adev->mode_info.num_dig = 9;
4548                 break;
4549         case CHIP_STONEY:
4550                 adev->mode_info.num_crtc = 2;
4551                 adev->mode_info.num_hpd = 6;
4552                 adev->mode_info.num_dig = 9;
4553                 break;
4554         case CHIP_POLARIS11:
4555         case CHIP_POLARIS12:
4556                 adev->mode_info.num_crtc = 5;
4557                 adev->mode_info.num_hpd = 5;
4558                 adev->mode_info.num_dig = 5;
4559                 break;
4560         case CHIP_POLARIS10:
4561         case CHIP_VEGAM:
4562                 adev->mode_info.num_crtc = 6;
4563                 adev->mode_info.num_hpd = 6;
4564                 adev->mode_info.num_dig = 6;
4565                 break;
4566         case CHIP_VEGA10:
4567         case CHIP_VEGA12:
4568         case CHIP_VEGA20:
4569                 adev->mode_info.num_crtc = 6;
4570                 adev->mode_info.num_hpd = 6;
4571                 adev->mode_info.num_dig = 6;
4572                 break;
4573         default:
4574
4575                 switch (adev->ip_versions[DCE_HWIP][0]) {
4576                 case IP_VERSION(2, 0, 2):
4577                 case IP_VERSION(3, 0, 0):
4578                         adev->mode_info.num_crtc = 6;
4579                         adev->mode_info.num_hpd = 6;
4580                         adev->mode_info.num_dig = 6;
4581                         break;
4582                 case IP_VERSION(2, 0, 0):
4583                 case IP_VERSION(3, 0, 2):
4584                         adev->mode_info.num_crtc = 5;
4585                         adev->mode_info.num_hpd = 5;
4586                         adev->mode_info.num_dig = 5;
4587                         break;
4588                 case IP_VERSION(2, 0, 3):
4589                 case IP_VERSION(3, 0, 3):
4590                         adev->mode_info.num_crtc = 2;
4591                         adev->mode_info.num_hpd = 2;
4592                         adev->mode_info.num_dig = 2;
4593                         break;
4594                 case IP_VERSION(1, 0, 0):
4595                 case IP_VERSION(1, 0, 1):
4596                 case IP_VERSION(3, 0, 1):
4597                 case IP_VERSION(2, 1, 0):
4598                 case IP_VERSION(3, 1, 2):
4599                 case IP_VERSION(3, 1, 3):
4600                 case IP_VERSION(3, 1, 4):
4601                 case IP_VERSION(3, 1, 5):
4602                 case IP_VERSION(3, 1, 6):
4603                 case IP_VERSION(3, 2, 0):
4604                 case IP_VERSION(3, 2, 1):
4605                         adev->mode_info.num_crtc = 4;
4606                         adev->mode_info.num_hpd = 4;
4607                         adev->mode_info.num_dig = 4;
4608                         break;
4609                 default:
4610                         DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4611                                         adev->ip_versions[DCE_HWIP][0]);
4612                         return -EINVAL;
4613                 }
4614                 break;
4615         }
4616
4617         amdgpu_dm_set_irq_funcs(adev);
4618
4619         if (adev->mode_info.funcs == NULL)
4620                 adev->mode_info.funcs = &dm_display_funcs;
4621
4622         /*
4623          * Note: Do NOT change adev->audio_endpt_rreg and
4624          * adev->audio_endpt_wreg because they are initialised in
4625          * amdgpu_device_init()
4626          */
4627 #if defined(CONFIG_DEBUG_KERNEL_DC)
4628         device_create_file(
4629                 adev_to_drm(adev)->dev,
4630                 &dev_attr_s3_debug);
4631 #endif
4632         adev->dc_enabled = true;
4633
4634         return 0;
4635 }
4636
4637 static bool modereset_required(struct drm_crtc_state *crtc_state)
4638 {
4639         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4640 }
4641
4642 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4643 {
4644         drm_encoder_cleanup(encoder);
4645         kfree(encoder);
4646 }
4647
4648 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4649         .destroy = amdgpu_dm_encoder_destroy,
4650 };
4651
4652 static int
4653 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4654                             const enum surface_pixel_format format,
4655                             enum dc_color_space *color_space)
4656 {
4657         bool full_range;
4658
4659         *color_space = COLOR_SPACE_SRGB;
4660
4661         /* DRM color properties only affect non-RGB formats. */
4662         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4663                 return 0;
4664
4665         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
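	/*
	 * Example: an NV12 plane with DRM_COLOR_YCBCR_BT709 and limited
	 * range maps to COLOR_SPACE_YCBCR709_LIMITED below.
	 */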
4666
4667         switch (plane_state->color_encoding) {
4668         case DRM_COLOR_YCBCR_BT601:
4669                 if (full_range)
4670                         *color_space = COLOR_SPACE_YCBCR601;
4671                 else
4672                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4673                 break;
4674
4675         case DRM_COLOR_YCBCR_BT709:
4676                 if (full_range)
4677                         *color_space = COLOR_SPACE_YCBCR709;
4678                 else
4679                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4680                 break;
4681
4682         case DRM_COLOR_YCBCR_BT2020:
4683                 if (full_range)
4684                         *color_space = COLOR_SPACE_2020_YCBCR;
4685                 else
4686                         return -EINVAL;
4687                 break;
4688
4689         default:
4690                 return -EINVAL;
4691         }
4692
4693         return 0;
4694 }
4695
4696 static int
4697 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4698                             const struct drm_plane_state *plane_state,
4699                             const uint64_t tiling_flags,
4700                             struct dc_plane_info *plane_info,
4701                             struct dc_plane_address *address,
4702                             bool tmz_surface,
4703                             bool force_disable_dcc)
4704 {
4705         const struct drm_framebuffer *fb = plane_state->fb;
4706         const struct amdgpu_framebuffer *afb =
4707                 to_amdgpu_framebuffer(plane_state->fb);
4708         int ret;
4709
4710         memset(plane_info, 0, sizeof(*plane_info));
4711
4712         switch (fb->format->format) {
4713         case DRM_FORMAT_C8:
4714                 plane_info->format =
4715                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4716                 break;
4717         case DRM_FORMAT_RGB565:
4718                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4719                 break;
4720         case DRM_FORMAT_XRGB8888:
4721         case DRM_FORMAT_ARGB8888:
4722                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4723                 break;
4724         case DRM_FORMAT_XRGB2101010:
4725         case DRM_FORMAT_ARGB2101010:
4726                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4727                 break;
4728         case DRM_FORMAT_XBGR2101010:
4729         case DRM_FORMAT_ABGR2101010:
4730                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4731                 break;
4732         case DRM_FORMAT_XBGR8888:
4733         case DRM_FORMAT_ABGR8888:
4734                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4735                 break;
4736         case DRM_FORMAT_NV21:
4737                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4738                 break;
4739         case DRM_FORMAT_NV12:
4740                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4741                 break;
4742         case DRM_FORMAT_P010:
4743                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4744                 break;
4745         case DRM_FORMAT_XRGB16161616F:
4746         case DRM_FORMAT_ARGB16161616F:
4747                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4748                 break;
4749         case DRM_FORMAT_XBGR16161616F:
4750         case DRM_FORMAT_ABGR16161616F:
4751                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4752                 break;
4753         case DRM_FORMAT_XRGB16161616:
4754         case DRM_FORMAT_ARGB16161616:
4755                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4756                 break;
4757         case DRM_FORMAT_XBGR16161616:
4758         case DRM_FORMAT_ABGR16161616:
4759                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4760                 break;
4761         default:
4762                 DRM_ERROR(
4763                         "Unsupported screen format %p4cc\n",
4764                         &fb->format->format);
4765                 return -EINVAL;
4766         }
4767
4768         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4769         case DRM_MODE_ROTATE_0:
4770                 plane_info->rotation = ROTATION_ANGLE_0;
4771                 break;
4772         case DRM_MODE_ROTATE_90:
4773                 plane_info->rotation = ROTATION_ANGLE_90;
4774                 break;
4775         case DRM_MODE_ROTATE_180:
4776                 plane_info->rotation = ROTATION_ANGLE_180;
4777                 break;
4778         case DRM_MODE_ROTATE_270:
4779                 plane_info->rotation = ROTATION_ANGLE_270;
4780                 break;
4781         default:
4782                 plane_info->rotation = ROTATION_ANGLE_0;
4783                 break;
4784         }
4785
4786
4787         plane_info->visible = true;
4788         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4789
4790         plane_info->layer_index = plane_state->normalized_zpos;
4791
4792         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4793                                           &plane_info->color_space);
4794         if (ret)
4795                 return ret;
4796
4797         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4798                                            plane_info->rotation, tiling_flags,
4799                                            &plane_info->tiling_info,
4800                                            &plane_info->plane_size,
4801                                            &plane_info->dcc, address,
4802                                            tmz_surface, force_disable_dcc);
4803         if (ret)
4804                 return ret;
4805
4806         fill_blending_from_plane_state(
4807                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
4808                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4809
4810         return 0;
4811 }
4812
4813 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4814                                     struct dc_plane_state *dc_plane_state,
4815                                     struct drm_plane_state *plane_state,
4816                                     struct drm_crtc_state *crtc_state)
4817 {
4818         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4819         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4820         struct dc_scaling_info scaling_info;
4821         struct dc_plane_info plane_info;
4822         int ret;
4823         bool force_disable_dcc = false;
4824
4825         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
4826         if (ret)
4827                 return ret;
4828
4829         dc_plane_state->src_rect = scaling_info.src_rect;
4830         dc_plane_state->dst_rect = scaling_info.dst_rect;
4831         dc_plane_state->clip_rect = scaling_info.clip_rect;
4832         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4833
4834         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4835         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4836                                           afb->tiling_flags,
4837                                           &plane_info,
4838                                           &dc_plane_state->address,
4839                                           afb->tmz_surface,
4840                                           force_disable_dcc);
4841         if (ret)
4842                 return ret;
4843
4844         dc_plane_state->format = plane_info.format;
4845         dc_plane_state->color_space = plane_info.color_space;
4847         dc_plane_state->plane_size = plane_info.plane_size;
4848         dc_plane_state->rotation = plane_info.rotation;
4849         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4850         dc_plane_state->stereo_format = plane_info.stereo_format;
4851         dc_plane_state->tiling_info = plane_info.tiling_info;
4852         dc_plane_state->visible = plane_info.visible;
4853         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4854         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
4855         dc_plane_state->global_alpha = plane_info.global_alpha;
4856         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4857         dc_plane_state->dcc = plane_info.dcc;
4858         dc_plane_state->layer_index = plane_info.layer_index;
4859         dc_plane_state->flip_int_enabled = true;
4860
4861         /*
4862          * Always set input transfer function, since plane state is refreshed
4863          * every time.
4864          */
4865         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4866         if (ret)
4867                 return ret;
4868
4869         return 0;
4870 }
4871
4872 static inline void fill_dc_dirty_rect(struct drm_plane *plane,
4873                                       struct rect *dirty_rect, int32_t x,
4874                                       int32_t y, int32_t width, int32_t height,
4875                                       int *i, bool ffu)
4876 {
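	/*
	 * Once *i exceeds DC_MAX_DIRTY_RECTS nothing more is stored, but
	 * the count is still advanced one past the limit so the caller
	 * can detect the overflow and fall back to a full-frame update.
	 */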
4877         if (*i > DC_MAX_DIRTY_RECTS)
4878                 return;
4879
4880         if (*i == DC_MAX_DIRTY_RECTS)
4881                 goto out;
4882
4883         dirty_rect->x = x;
4884         dirty_rect->y = y;
4885         dirty_rect->width = width;
4886         dirty_rect->height = height;
4887
4888         if (ffu)
4889                 drm_dbg(plane->dev,
4890                         "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
4891                         plane->base.id, width, height);
4892         else
4893                 drm_dbg(plane->dev,
4894                         "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
4895                         plane->base.id, x, y, width, height);
4896
4897 out:
4898         (*i)++;
4899 }
4900
4901 /**
4902  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
4903  *
4904  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
4905  *         remote fb
4906  * @old_plane_state: Old state of @plane
4907  * @new_plane_state: New state of @plane
4908  * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
4910  *
4911  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
4912  * (referred to as "damage clips" in DRM nomenclature) that require updating on
4913  * the eDP remote buffer. The responsibility of specifying the dirty regions is
4914  * amdgpu_dm's.
4915  *
4916  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
4917  * plane with regions that require flushing to the eDP remote buffer. In
4918  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
4919  * implicitly provide damage clips without any client support via the plane
4920  * bounds.
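 *
 * Illustrative example (hypothetical numbers): a client that redrew a
 * 100x50 region at (10,20) attaches one struct drm_mode_rect
 * { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 70 } via FB_DAMAGE_CLIPS;
 * this function converts it to a struct rect at (10,20) of width
 * x2 - x1 = 100 and height y2 - y1 = 50.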
4921  */
4922 static void fill_dc_dirty_rects(struct drm_plane *plane,
4923                                 struct drm_plane_state *old_plane_state,
4924                                 struct drm_plane_state *new_plane_state,
4925                                 struct drm_crtc_state *crtc_state,
4926                                 struct dc_flip_addrs *flip_addrs)
4927 {
4928         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4929         struct rect *dirty_rects = flip_addrs->dirty_rects;
4930         uint32_t num_clips;
4931         struct drm_mode_rect *clips;
4932         bool bb_changed;
4933         bool fb_changed;
4934         uint32_t i = 0;
4935
4936         /*
	 * Cursor plane has its own dirty rect update interface. See
4938          * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
4939          */
4940         if (plane->type == DRM_PLANE_TYPE_CURSOR)
4941                 return;
4942
4943         num_clips = drm_plane_get_damage_clips_count(new_plane_state);
4944         clips = drm_plane_get_damage_clips(new_plane_state);
4945
4946         if (!dm_crtc_state->mpo_requested) {
4947                 if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
4948                         goto ffu;
4949
		/* Index by the running count so each clip fills its own rect. */
		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
			fill_dc_dirty_rect(new_plane_state->plane,
					   &dirty_rects[flip_addrs->dirty_rect_count],
					   clips->x1, clips->y1,
					   clips->x2 - clips->x1,
					   clips->y2 - clips->y1,
					   &flip_addrs->dirty_rect_count,
					   false);
4957                 return;
4958         }
4959
4960         /*
	 * MPO is requested. Add the entire plane bounding box to the dirty
	 * rects if the plane was flipped to or damaged.
	 *
	 * If the plane moved or was resized, also add the old bounding box
	 * to the dirty rects.
4966          */
4967         fb_changed = old_plane_state->fb->base.id !=
4968                      new_plane_state->fb->base.id;
4969         bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
4970                       old_plane_state->crtc_y != new_plane_state->crtc_y ||
4971                       old_plane_state->crtc_w != new_plane_state->crtc_w ||
4972                       old_plane_state->crtc_h != new_plane_state->crtc_h);
4973
4974         drm_dbg(plane->dev,
4975                 "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
4976                 new_plane_state->plane->base.id,
4977                 bb_changed, fb_changed, num_clips);
4978
4979         if (bb_changed) {
4980                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
4981                                    new_plane_state->crtc_x,
4982                                    new_plane_state->crtc_y,
4983                                    new_plane_state->crtc_w,
4984                                    new_plane_state->crtc_h, &i, false);
4985
4986                 /* Add old plane bounding-box if plane is moved or resized */
4987                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
4988                                    old_plane_state->crtc_x,
4989                                    old_plane_state->crtc_y,
4990                                    old_plane_state->crtc_w,
4991                                    old_plane_state->crtc_h, &i, false);
4992         }
4993
4994         if (num_clips) {
4995                 for (; i < num_clips; clips++)
4996                         fill_dc_dirty_rect(new_plane_state->plane,
4997                                            &dirty_rects[i], clips->x1,
4998                                            clips->y1, clips->x2 - clips->x1,
4999                                            clips->y2 - clips->y1, &i, false);
5000         } else if (fb_changed && !bb_changed) {
5001                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5002                                    new_plane_state->crtc_x,
5003                                    new_plane_state->crtc_y,
5004                                    new_plane_state->crtc_w,
5005                                    new_plane_state->crtc_h, &i, false);
5006         }
5007
5008         if (i > DC_MAX_DIRTY_RECTS)
5009                 goto ffu;
5010
5011         flip_addrs->dirty_rect_count = i;
5012         return;
5013
5014 ffu:
5015         fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
5016                            dm_crtc_state->base.mode.crtc_hdisplay,
5017                            dm_crtc_state->base.mode.crtc_vdisplay,
5018                            &flip_addrs->dirty_rect_count, true);
5019 }
5020
5021 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5022                                            const struct dm_connector_state *dm_state,
5023                                            struct dc_stream_state *stream)
5024 {
5025         enum amdgpu_rmx_type rmx_type;
5026
	struct rect src = { 0 }; /* viewport in composition space */
5028         struct rect dst = { 0 }; /* stream addressable area */
5029
5030         /* no mode. nothing to be done */
5031         if (!mode)
5032                 return;
5033
5034         /* Full screen scaling by default */
5035         src.width = mode->hdisplay;
5036         src.height = mode->vdisplay;
5037         dst.width = stream->timing.h_addressable;
5038         dst.height = stream->timing.v_addressable;
5039
5040         if (dm_state) {
5041                 rmx_type = dm_state->scaling;
5042                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5043                         if (src.width * dst.height <
5044                                         src.height * dst.width) {
5045                                 /* height needs less upscaling/more downscaling */
5046                                 dst.width = src.width *
5047                                                 dst.height / src.height;
5048                         } else {
5049                                 /* width needs less upscaling/more downscaling */
5050                                 dst.height = src.height *
5051                                                 dst.width / src.width;
5052                         }
5053                 } else if (rmx_type == RMX_CENTER) {
5054                         dst = src;
5055                 }
5056
5057                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5058                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5059
5060                 if (dm_state->underscan_enable) {
5061                         dst.x += dm_state->underscan_hborder / 2;
5062                         dst.y += dm_state->underscan_vborder / 2;
5063                         dst.width -= dm_state->underscan_hborder;
5064                         dst.height -= dm_state->underscan_vborder;
5065                 }
5066         }
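	/*
	 * Worked example (hypothetical panel): a 1920x1080 mode on a
	 * 1600x1200 native panel with RMX_ASPECT gives dst = 1600x900,
	 * centered at dst.x = 0, dst.y = 150.
	 */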
5067
5068         stream->src = src;
5069         stream->dst = dst;
5070
5071         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5072                       dst.x, dst.y, dst.width, dst.height);
5073
5074 }
5075
5076 static enum dc_color_depth
5077 convert_color_depth_from_display_info(const struct drm_connector *connector,
5078                                       bool is_y420, int requested_bpc)
5079 {
5080         uint8_t bpc;
5081
5082         if (is_y420) {
5083                 bpc = 8;
5084
5085                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5086                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5087                         bpc = 16;
5088                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5089                         bpc = 12;
5090                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5091                         bpc = 10;
5092         } else {
5093                 bpc = (uint8_t)connector->display_info.bpc;
5094                 /* Assume 8 bpc by default if no bpc is specified. */
5095                 bpc = bpc ? bpc : 8;
5096         }
5097
5098         if (requested_bpc > 0) {
5099                 /*
5100                  * Cap display bpc based on the user requested value.
5101                  *
		 * The value for state->max_bpc may not be correctly updated
5103                  * depending on when the connector gets added to the state
5104                  * or if this was called outside of atomic check, so it
5105                  * can't be used directly.
5106                  */
5107                 bpc = min_t(u8, bpc, requested_bpc);
5108
5109                 /* Round down to the nearest even number. */
5110                 bpc = bpc - (bpc & 1);
5111         }
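	/*
	 * Example: a 12 bpc sink with requested_bpc = 11 is capped to 11
	 * and then rounded down to 10 bpc.
	 */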
5112
5113         switch (bpc) {
5114         case 0:
5115                 /*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
5118                  * TODO: Fix edid parsing
5119                  */
5120                 return COLOR_DEPTH_888;
5121         case 6:
5122                 return COLOR_DEPTH_666;
5123         case 8:
5124                 return COLOR_DEPTH_888;
5125         case 10:
5126                 return COLOR_DEPTH_101010;
5127         case 12:
5128                 return COLOR_DEPTH_121212;
5129         case 14:
5130                 return COLOR_DEPTH_141414;
5131         case 16:
5132                 return COLOR_DEPTH_161616;
5133         default:
5134                 return COLOR_DEPTH_UNDEFINED;
5135         }
5136 }
5137
5138 static enum dc_aspect_ratio
5139 get_aspect_ratio(const struct drm_display_mode *mode_in)
5140 {
5141         /* 1-1 mapping, since both enums follow the HDMI spec. */
5142         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5143 }
5144
5145 static enum dc_color_space
5146 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5147 {
5148         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5149
5150         switch (dc_crtc_timing->pixel_encoding) {
5151         case PIXEL_ENCODING_YCBCR422:
5152         case PIXEL_ENCODING_YCBCR444:
5153         case PIXEL_ENCODING_YCBCR420:
5154         {
5155                 /*
		 * 27.03 MHz (270300 in 100 Hz units) is the separation point
		 * between HDTV and SDTV according to the HDMI spec; we use
		 * YCbCr709 and YCbCr601 respectively.
5159                  */
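		/*
		 * Example: 720p60 (74.25 MHz) selects YCbCr709, while
		 * 480p60 (27 MHz) selects YCbCr601.
		 */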
5160                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5161                         if (dc_crtc_timing->flags.Y_ONLY)
5162                                 color_space =
5163                                         COLOR_SPACE_YCBCR709_LIMITED;
5164                         else
5165                                 color_space = COLOR_SPACE_YCBCR709;
5166                 } else {
5167                         if (dc_crtc_timing->flags.Y_ONLY)
5168                                 color_space =
5169                                         COLOR_SPACE_YCBCR601_LIMITED;
5170                         else
5171                                 color_space = COLOR_SPACE_YCBCR601;
5172                 }
5173
5174         }
5175         break;
5176         case PIXEL_ENCODING_RGB:
5177                 color_space = COLOR_SPACE_SRGB;
5178                 break;
5179
5180         default:
5181                 WARN_ON(1);
5182                 break;
5183         }
5184
5185         return color_space;
5186 }
5187
5188 static bool adjust_colour_depth_from_display_info(
5189         struct dc_crtc_timing *timing_out,
5190         const struct drm_display_info *info)
5191 {
5192         enum dc_color_depth depth = timing_out->display_color_depth;
5193         int normalized_clk;
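	/*
	 * Worked example (hypothetical sink): 4k60 at 594000 kHz with
	 * COLOR_DEPTH_121212 normalizes to 594000 * 36 / 24 = 891000 kHz;
	 * with a max_tmds_clock of 600000 kHz the loop steps down through
	 * 10 bpc (742500 kHz) until 8 bpc (594000 kHz) fits.
	 */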
5194         do {
5195                 normalized_clk = timing_out->pix_clk_100hz / 10;
5196                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5197                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5198                         normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec based on colour depth */
5200                 switch (depth) {
5201                 case COLOR_DEPTH_888:
5202                         break;
5203                 case COLOR_DEPTH_101010:
5204                         normalized_clk = (normalized_clk * 30) / 24;
5205                         break;
5206                 case COLOR_DEPTH_121212:
5207                         normalized_clk = (normalized_clk * 36) / 24;
5208                         break;
5209                 case COLOR_DEPTH_161616:
5210                         normalized_clk = (normalized_clk * 48) / 24;
5211                         break;
5212                 default:
5213                         /* The above depths are the only ones valid for HDMI. */
5214                         return false;
5215                 }
5216                 if (normalized_clk <= info->max_tmds_clock) {
5217                         timing_out->display_color_depth = depth;
5218                         return true;
5219                 }
5220         } while (--depth > COLOR_DEPTH_666);
5221         return false;
5222 }
5223
5224 static void fill_stream_properties_from_drm_display_mode(
5225         struct dc_stream_state *stream,
5226         const struct drm_display_mode *mode_in,
5227         const struct drm_connector *connector,
5228         const struct drm_connector_state *connector_state,
5229         const struct dc_stream_state *old_stream,
5230         int requested_bpc)
5231 {
5232         struct dc_crtc_timing *timing_out = &stream->timing;
5233         const struct drm_display_info *info = &connector->display_info;
5234         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5235         struct hdmi_vendor_infoframe hv_frame;
5236         struct hdmi_avi_infoframe avi_frame;
5237
5238         memset(&hv_frame, 0, sizeof(hv_frame));
5239         memset(&avi_frame, 0, sizeof(avi_frame));
5240
5241         timing_out->h_border_left = 0;
5242         timing_out->h_border_right = 0;
5243         timing_out->v_border_top = 0;
5244         timing_out->v_border_bottom = 0;
5245         /* TODO: un-hardcode */
5246         if (drm_mode_is_420_only(info, mode_in)
5247                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5248                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5249         else if (drm_mode_is_420_also(info, mode_in)
5250                         && aconnector->force_yuv420_output)
5251                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5252         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5253                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5254                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5255         else
5256                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5257
5258         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5259         timing_out->display_color_depth = convert_color_depth_from_display_info(
5260                 connector,
5261                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5262                 requested_bpc);
5263         timing_out->scan_type = SCANNING_TYPE_NODATA;
5264         timing_out->hdmi_vic = 0;
5265
5266         if (old_stream) {
5267                 timing_out->vic = old_stream->timing.vic;
5268                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5269                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5270         } else {
5271                 timing_out->vic = drm_match_cea_mode(mode_in);
5272                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5273                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5274                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5275                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5276         }
5277
5278         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5279                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5280                 timing_out->vic = avi_frame.video_code;
5281                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5282                 timing_out->hdmi_vic = hv_frame.vic;
5283         }
5284
5285         if (is_freesync_video_mode(mode_in, aconnector)) {
5286                 timing_out->h_addressable = mode_in->hdisplay;
5287                 timing_out->h_total = mode_in->htotal;
5288                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5289                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5290                 timing_out->v_total = mode_in->vtotal;
5291                 timing_out->v_addressable = mode_in->vdisplay;
5292                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5293                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5294                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5295         } else {
5296                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5297                 timing_out->h_total = mode_in->crtc_htotal;
5298                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5299                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5300                 timing_out->v_total = mode_in->crtc_vtotal;
5301                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5302                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5303                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5304                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5305         }
5306
5307         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5308
5309         stream->output_color_space = get_output_color_space(timing_out);
5310
5311         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5312         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5313         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5314                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5315                     drm_mode_is_420_also(info, mode_in) &&
5316                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5317                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5318                         adjust_colour_depth_from_display_info(timing_out, info);
5319                 }
5320         }
5321 }
5322
5323 static void fill_audio_info(struct audio_info *audio_info,
5324                             const struct drm_connector *drm_connector,
5325                             const struct dc_sink *dc_sink)
5326 {
5327         int i = 0;
5328         int cea_revision = 0;
5329         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5330
5331         audio_info->manufacture_id = edid_caps->manufacturer_id;
5332         audio_info->product_id = edid_caps->product_id;
5333
5334         cea_revision = drm_connector->display_info.cea_rev;
5335
5336         strscpy(audio_info->display_name,
5337                 edid_caps->display_name,
5338                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5339
5340         if (cea_revision >= 3) {
5341                 audio_info->mode_count = edid_caps->audio_mode_count;
5342
5343                 for (i = 0; i < audio_info->mode_count; ++i) {
5344                         audio_info->modes[i].format_code =
5345                                         (enum audio_format_code)
5346                                         (edid_caps->audio_modes[i].format_code);
5347                         audio_info->modes[i].channel_count =
5348                                         edid_caps->audio_modes[i].channel_count;
5349                         audio_info->modes[i].sample_rates.all =
5350                                         edid_caps->audio_modes[i].sample_rate;
5351                         audio_info->modes[i].sample_size =
5352                                         edid_caps->audio_modes[i].sample_size;
5353                 }
5354         }
5355
5356         audio_info->flags.all = edid_caps->speaker_flags;
5357
5358         /* TODO: We only check for the progressive mode, check for interlace mode too */
5359         if (drm_connector->latency_present[0]) {
5360                 audio_info->video_latency = drm_connector->video_latency[0];
5361                 audio_info->audio_latency = drm_connector->audio_latency[0];
5362         }
5363
5364         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5365
5366 }

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
                                      struct drm_display_mode *dst_mode)
{
        dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
        dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
        dst_mode->crtc_clock = src_mode->crtc_clock;
        dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
        dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
        dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
        dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
        dst_mode->crtc_htotal = src_mode->crtc_htotal;
        dst_mode->crtc_hskew = src_mode->crtc_hskew;
        dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
        dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
        dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
        dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
        dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
                                        const struct drm_display_mode *native_mode,
                                        bool scale_enabled)
{
        if (scale_enabled) {
                copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
        } else if (native_mode->clock == drm_mode->clock &&
                        native_mode->htotal == drm_mode->htotal &&
                        native_mode->vtotal == drm_mode->vtotal) {
                copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
        } else {
                /* neither scaling nor an amdgpu-inserted mode: no need to patch */
        }
}

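/*
 * Create a virtual sink on the connector's link so that a stream can still
 * be constructed when no physical sink is attached (e.g. for headless or
 * force-enabled connectors).
 */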
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
        struct dc_sink_init_data sink_init_data = { 0 };
        struct dc_sink *sink = NULL;

        sink_init_data.link = aconnector->dc_link;
        sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

        sink = dc_sink_create(&sink_init_data);
        if (!sink) {
                DRM_ERROR("Failed to create sink!\n");
                return NULL;
        }
        sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

        return sink;
}

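/*
 * Configure the CRTC-reset trigger for a multisync stream: latch on the
 * master stream's VSYNC edge (rising or falling, depending on its polarity)
 * and apply the reset on the next pixel.
 */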
static void set_multisync_trigger_params(
                struct dc_stream_state *stream)
{
        struct dc_stream_state *master = NULL;

        if (stream->triggered_crtc_reset.enabled) {
                master = stream->triggered_crtc_reset.event_source;
                stream->triggered_crtc_reset.event =
                        master->timing.flags.VSYNC_POSITIVE_POLARITY ?
                        CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
                stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
        }
}

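/*
 * Pick the stream with the highest refresh rate as the multisync master and
 * point every stream's CRTC-reset event source at it.
 */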
static void set_master_stream(struct dc_stream_state *stream_set[],
                              int stream_count)
{
        int j, highest_rfr = 0, master_stream = 0;

        for (j = 0; j < stream_count; j++) {
                if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
                        int refresh_rate = 0;

                        refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
                                (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
                        if (refresh_rate > highest_rfr) {
                                highest_rfr = refresh_rate;
                                master_stream = j;
                        }
                }
        }
        for (j = 0; j < stream_count; j++) {
                if (stream_set[j])
                        stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
        }
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
        int i = 0;
        struct dc_stream_state *stream;

        if (context->stream_count < 2)
                return;
        for (i = 0; i < context->stream_count; i++) {
                if (!context->streams[i])
                        continue;
                /*
                 * TODO: add a function to read AMD VSDB bits and set
                 * crtc_sync_master.multi_sync_enabled flag
                 * For now it's set to false
                 */
        }

        set_master_stream(context->streams, context->stream_count);

        for (i = 0; i < context->stream_count; i++) {
                stream = context->streams[i];

                if (!stream)
                        continue;

                set_multisync_trigger_params(stream);
        }
}

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS,
 * respectively:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch happens seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid a blink during the
 * transition. For example, the video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
 * causing any display blink. This same concept can be applied to a mode
 * setting change.
 */
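
/*
 * Illustrative sketch only (not used by the driver): the FreeSync video
 * switch described above boils down to stretching the vertical front porch
 * while keeping the pixel clock and horizontal timing fixed. Given a base
 * mode, the vtotal needed for a lower target refresh rate follows from
 * vrefresh = clock * 1000 / (htotal * vtotal). The helper name below is
 * hypothetical.
 */
static inline int example_vtotal_for_target_refresh(
                const struct drm_display_mode *base, int target_vrefresh)
{
        /* base->clock is in kHz, so scale to Hz before dividing */
        return DIV_ROUND_UP(base->clock * 1000,
                            base->htotal * target_vrefresh);
}
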
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
                bool use_probed_modes)
{
        struct drm_display_mode *m, *m_pref = NULL;
        u16 current_refresh, highest_refresh;
        struct list_head *list_head = use_probed_modes ?
                &aconnector->base.probed_modes :
                &aconnector->base.modes;

        if (aconnector->freesync_vid_base.clock != 0)
                return &aconnector->freesync_vid_base;

        /* Find the preferred mode */
        list_for_each_entry(m, list_head, head) {
                if (m->type & DRM_MODE_TYPE_PREFERRED) {
                        m_pref = m;
                        break;
                }
        }

        if (!m_pref) {
                /* Probably an EDID with no preferred mode. Fall back to the first entry */
                m_pref = list_first_entry_or_null(
                                &aconnector->base.modes, struct drm_display_mode, head);
                if (!m_pref) {
                        DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
                        return NULL;
                }
        }

        highest_refresh = drm_mode_vrefresh(m_pref);

        /*
         * Find the mode with the highest refresh rate at the same resolution.
         * For some monitors, the preferred mode is not the mode with the
         * highest supported refresh rate.
         */
        list_for_each_entry(m, list_head, head) {
                current_refresh = drm_mode_vrefresh(m);

                if (m->hdisplay == m_pref->hdisplay &&
                    m->vdisplay == m_pref->vdisplay &&
                    highest_refresh < current_refresh) {
                        highest_refresh = current_refresh;
                        m_pref = m;
                }
        }

        drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
        return m_pref;
}

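/*
 * A mode counts as a FreeSync video mode when it matches the highest-refresh
 * base mode exactly except for a vertical front porch extension: same clock
 * and horizontal timing, with vsync start/end shifted by exactly the vtotal
 * delta.
 */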
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                struct amdgpu_dm_connector *aconnector)
{
        struct drm_display_mode *high_mode;
        int timing_diff;

        high_mode = get_highest_refresh_rate_mode(aconnector, false);
        if (!high_mode || !mode)
                return false;

        timing_diff = high_mode->vtotal - mode->vtotal;

        if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
            high_mode->hdisplay != mode->hdisplay ||
            high_mode->vdisplay != mode->vdisplay ||
            high_mode->hsync_start != mode->hsync_start ||
            high_mode->hsync_end != mode->hsync_end ||
            high_mode->htotal != mode->htotal ||
            high_mode->hskew != mode->hskew ||
            high_mode->vscan != mode->vscan ||
            high_mode->vsync_start - mode->vsync_start != timing_diff ||
            high_mode->vsync_end - mode->vsync_end != timing_diff)
                return false;
        else
                return true;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
                            struct dc_sink *sink, struct dc_stream_state *stream,
                            struct dsc_dec_dpcd_caps *dsc_caps)
{
        stream->timing.flags.DSC = 0;
        dsc_caps->is_dsc_supported = false;

        if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
            sink->sink_signal == SIGNAL_TYPE_EDP)) {
                if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
                        sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
                        dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
                                aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
                                aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
                                dsc_caps);
        }
}

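/*
 * eDP DSC policy: compute the DSC bandwidth range for a target of 8 bpp
 * (clamped to the panel's maximum). If even the top of that range fits the
 * verified link bandwidth, program a fixed-bpp DSC config; otherwise fall
 * back to a config bounded by the available link bandwidth.
 */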
static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
                                    struct dc_sink *sink, struct dc_stream_state *stream,
                                    struct dsc_dec_dpcd_caps *dsc_caps,
                                    uint32_t max_dsc_target_bpp_limit_override)
{
        const struct dc_link_settings *verified_link_cap = NULL;
        uint32_t link_bw_in_kbps;
        uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
        struct dc *dc = sink->ctx->dc;
        struct dc_dsc_bw_range bw_range = {0};
        struct dc_dsc_config dsc_cfg = {0};

        verified_link_cap = dc_link_get_link_cap(stream->link);
        link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
        edp_min_bpp_x16 = 8 * 16;
        edp_max_bpp_x16 = 8 * 16;

        if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
                edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;

        if (edp_max_bpp_x16 < edp_min_bpp_x16)
                edp_min_bpp_x16 = edp_max_bpp_x16;

        if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
                                dc->debug.dsc_min_slice_height_override,
                                edp_min_bpp_x16, edp_max_bpp_x16,
                                dsc_caps,
                                &stream->timing,
                                &bw_range)) {
                if (bw_range.max_kbps < link_bw_in_kbps) {
                        if (dc_dsc_compute_config(dc->res_pool->dscs[0],
                                        dsc_caps,
                                        dc->debug.dsc_min_slice_height_override,
                                        max_dsc_target_bpp_limit_override,
                                        0,
                                        &stream->timing,
                                        &dsc_cfg)) {
                                stream->timing.dsc_cfg = dsc_cfg;
                                stream->timing.flags.DSC = 1;
                                stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
                        }
                        return;
                }
        }

        if (dc_dsc_compute_config(dc->res_pool->dscs[0],
                                dsc_caps,
                                dc->debug.dsc_min_slice_height_override,
                                max_dsc_target_bpp_limit_override,
                                link_bw_in_kbps,
                                &stream->timing,
                                &dsc_cfg)) {
                stream->timing.dsc_cfg = dsc_cfg;
                stream->timing.flags.DSC = 1;
        }
}

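/*
 * SST DSC policy: decide whether the stream needs DSC based on the sink type
 * (eDP, SST DP, or a DP-to-HDMI PCON) and the link bandwidth, then apply any
 * debugfs overrides for slice counts and bits per pixel.
 */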
static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
                                        struct dc_sink *sink, struct dc_stream_state *stream,
                                        struct dsc_dec_dpcd_caps *dsc_caps)
{
        struct drm_connector *drm_connector = &aconnector->base;
        uint32_t link_bandwidth_kbps;
        struct dc *dc = sink->ctx->dc;
        uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
        uint32_t dsc_max_supported_bw_in_kbps;
        uint32_t max_dsc_target_bpp_limit_override =
                drm_connector->display_info.max_dsc_bpp;

        link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
                                                        dc_link_get_link_cap(aconnector->dc_link));

        /* Set DSC policy according to dsc_clock_en */
        dc_dsc_policy_set_enable_dsc_when_not_needed(
                aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

        if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
            !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
            dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {

                apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);

        } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
                if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
                        if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
                                                dsc_caps,
                                                aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
                                                max_dsc_target_bpp_limit_override,
                                                link_bandwidth_kbps,
                                                &stream->timing,
                                                &stream->timing.dsc_cfg)) {
                                stream->timing.flags.DSC = 1;
                                DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
                        }
                } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
                        timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
                        max_supported_bw_in_kbps = link_bandwidth_kbps;
                        dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;

                        if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
                                        max_supported_bw_in_kbps > 0 &&
                                        dsc_max_supported_bw_in_kbps > 0) {
                                if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
                                                dsc_caps,
                                                aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
                                                max_dsc_target_bpp_limit_override,
                                                dsc_max_supported_bw_in_kbps,
                                                &stream->timing,
                                                &stream->timing.dsc_cfg)) {
                                        stream->timing.flags.DSC = 1;
                                        DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
                                                                         __func__, drm_connector->name);
                                }
                        }
                }
        }

        /* Overwrite the stream flag if DSC is enabled through debugfs */
        if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
                stream->timing.flags.DSC = 1;

        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
                stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
                stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
                stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                       const struct drm_display_mode *drm_mode,
                       const struct dm_connector_state *dm_state,
                       const struct dc_stream_state *old_stream,
                       int requested_bpc)
{
        struct drm_display_mode *preferred_mode = NULL;
        struct drm_connector *drm_connector;
        const struct drm_connector_state *con_state =
                dm_state ? &dm_state->base : NULL;
        struct dc_stream_state *stream = NULL;
        struct drm_display_mode mode;
        struct drm_display_mode saved_mode;
        struct drm_display_mode *freesync_mode = NULL;
        bool native_mode_found = false;
        bool recalculate_timing = false;
        bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;
        enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dsc_dec_dpcd_caps dsc_caps;
#endif

        struct dc_sink *sink = NULL;

        drm_mode_init(&mode, drm_mode);
        memset(&saved_mode, 0, sizeof(saved_mode));

        if (aconnector == NULL) {
                DRM_ERROR("aconnector is NULL!\n");
                return stream;
        }

        drm_connector = &aconnector->base;

        if (!aconnector->dc_sink) {
                sink = create_fake_sink(aconnector);
                if (!sink)
                        return stream;
        } else {
                sink = aconnector->dc_sink;
                dc_sink_retain(sink);
        }

        stream = dc_create_stream_for_sink(sink);

        if (stream == NULL) {
                DRM_ERROR("Failed to create stream for sink!\n");
                goto finish;
        }

        stream->dm_stream_context = aconnector;

        stream->timing.flags.LTE_340MCSC_SCRAMBLE =
                drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

        list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
                /* Search for preferred mode */
                if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
                        native_mode_found = true;
                        break;
                }
        }
        if (!native_mode_found)
                preferred_mode = list_first_entry_or_null(
                                &aconnector->base.modes,
                                struct drm_display_mode,
                                head);

        mode_refresh = drm_mode_vrefresh(&mode);

        if (preferred_mode == NULL) {
                /*
                 * This may not be an error: the use case is when we have no
                 * usermode calls to reset and set the mode upon hotplug. In
                 * this case, we call set mode ourselves to restore the
                 * previous mode, and the modelist may not be filled in yet.
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
                recalculate_timing = is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
                        drm_mode_copy(&saved_mode, &mode);
                        drm_mode_copy(&mode, freesync_mode);
                } else {
                        decide_crtc_timing_for_drm_display_mode(
                                        &mode, preferred_mode, scale);

                        preferred_refresh = drm_mode_vrefresh(preferred_mode);
                }
        }

        if (recalculate_timing)
                drm_mode_set_crtcinfo(&saved_mode, 0);
        else if (!dm_state)
                drm_mode_set_crtcinfo(&mode, 0);

        /*
         * If scaling is enabled and the refresh rate didn't change,
         * we copy the vic and polarities of the old timings.
         */
        if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(
                        stream, &mode, &aconnector->base, con_state, NULL,
                        requested_bpc);
        else
                fill_stream_properties_from_drm_display_mode(
                        stream, &mode, &aconnector->base, con_state, old_stream,
                        requested_bpc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* SST DSC determination policy */
        update_dsc_caps(aconnector, sink, stream, &dsc_caps);
        if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
                apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
#endif

        update_stream_scaling_settings(&mode, dm_state, stream);

        fill_audio_info(
                &stream->audio_info,
                drm_connector,
                sink);

        update_stream_signal(stream, sink);

        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

        if (stream->link->psr_settings.psr_feature_enabled) {
                /*
                 * Decide whether the stream supports VSC SDP colorimetry
                 * before building the VSC info packet.
                 */
                stream->use_vsc_sdp_for_colorimetry = false;
                if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
                        stream->use_vsc_sdp_for_colorimetry =
                                aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
                } else {
                        if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
                                stream->use_vsc_sdp_for_colorimetry = true;
                }
                if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
                        tf = TRANSFER_FUNC_GAMMA_22;
                mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
                aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
        }
finish:
        dc_sink_release(sink);

        return stream;
}

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
        bool connected;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

        /*
         * Notes:
         * 1. This interface is NOT called in the context of an HPD irq.
         * 2. This interface *is called* in the context of a user-mode ioctl,
         * which makes it a bad place for *any* MST-related activity.
         */

        if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
            !aconnector->fake_enable)
                connected = (aconnector->dc_sink != NULL);
        else
                connected = (aconnector->base.force == DRM_FORCE_ON ||
                                aconnector->base.force == DRM_FORCE_ON_DIGITAL);

        update_subconnector_property(aconnector);

        return (connected ? connector_status_connected :
                        connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
                                            struct drm_connector_state *connector_state,
                                            struct drm_property *property,
                                            uint64_t val)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_connector_state *dm_old_state =
                to_dm_connector_state(connector->state);
        struct dm_connector_state *dm_new_state =
                to_dm_connector_state(connector_state);

        int ret = -EINVAL;

        if (property == dev->mode_config.scaling_mode_property) {
                enum amdgpu_rmx_type rmx_type;

                switch (val) {
                case DRM_MODE_SCALE_CENTER:
                        rmx_type = RMX_CENTER;
                        break;
                case DRM_MODE_SCALE_ASPECT:
                        rmx_type = RMX_ASPECT;
                        break;
                case DRM_MODE_SCALE_FULLSCREEN:
                        rmx_type = RMX_FULL;
                        break;
                case DRM_MODE_SCALE_NONE:
                default:
                        rmx_type = RMX_OFF;
                        break;
                }

                if (dm_old_state->scaling == rmx_type)
                        return 0;

                dm_new_state->scaling = rmx_type;
                ret = 0;
        } else if (property == adev->mode_info.underscan_hborder_property) {
                dm_new_state->underscan_hborder = val;
                ret = 0;
        } else if (property == adev->mode_info.underscan_vborder_property) {
                dm_new_state->underscan_vborder = val;
                ret = 0;
        } else if (property == adev->mode_info.underscan_property) {
                dm_new_state->underscan_enable = val;
                ret = 0;
        } else if (property == adev->mode_info.abm_level_property) {
                dm_new_state->abm_level = val;
                ret = 0;
        }

        return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
                                            const struct drm_connector_state *state,
                                            struct drm_property *property,
                                            uint64_t *val)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_connector_state *dm_state =
                to_dm_connector_state(state);
        int ret = -EINVAL;

        if (property == dev->mode_config.scaling_mode_property) {
                switch (dm_state->scaling) {
                case RMX_CENTER:
                        *val = DRM_MODE_SCALE_CENTER;
                        break;
                case RMX_ASPECT:
                        *val = DRM_MODE_SCALE_ASPECT;
                        break;
                case RMX_FULL:
                        *val = DRM_MODE_SCALE_FULLSCREEN;
                        break;
                case RMX_OFF:
                default:
                        *val = DRM_MODE_SCALE_NONE;
                        break;
                }
                ret = 0;
        } else if (property == adev->mode_info.underscan_hborder_property) {
                *val = dm_state->underscan_hborder;
                ret = 0;
        } else if (property == adev->mode_info.underscan_vborder_property) {
                *val = dm_state->underscan_vborder;
                ret = 0;
        } else if (property == adev->mode_info.underscan_property) {
                *val = dm_state->underscan_enable;
                ret = 0;
        } else if (property == adev->mode_info.abm_level_property) {
                *val = dm_state->abm_level;
                ret = 0;
        }

        return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

        drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        const struct dc_link *link = aconnector->dc_link;
        struct amdgpu_device *adev = drm_to_adev(connector->dev);
        struct amdgpu_display_manager *dm = &adev->dm;
        int i;

        /*
         * Call this only if mst_mgr was initialized before, since that is
         * not done for all connector types.
         */
        if (aconnector->mst_mgr.dev)
                drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
        for (i = 0; i < dm->num_of_edps; i++) {
                if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
                        backlight_device_unregister(dm->backlight_dev[i]);
                        dm->backlight_dev[i] = NULL;
                }
        }
#endif

        if (aconnector->dc_em_sink)
                dc_sink_release(aconnector->dc_em_sink);
        aconnector->dc_em_sink = NULL;
        if (aconnector->dc_sink)
                dc_sink_release(aconnector->dc_sink);
        aconnector->dc_sink = NULL;

        drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        if (aconnector->i2c) {
                i2c_del_adapter(&aconnector->i2c->base);
                kfree(aconnector->i2c);
        }
        kfree(aconnector->dm_dp_aux.aux.name);

        kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
        struct dm_connector_state *state =
                to_dm_connector_state(connector->state);

        if (connector->state)
                __drm_atomic_helper_connector_destroy_state(connector->state);

        kfree(state);

        state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (state) {
                state->scaling = RMX_OFF;
                state->underscan_enable = false;
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;
                state->base.max_requested_bpc = 8;
                state->vcpi_slots = 0;
                state->pbn = 0;

                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                        state->abm_level = amdgpu_dm_abm_level;

                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
        struct dm_connector_state *state =
                to_dm_connector_state(connector->state);

        struct dm_connector_state *new_state =
                        kmemdup(state, sizeof(*state), GFP_KERNEL);

        if (!new_state)
                return NULL;

        __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

        new_state->freesync_capable = state->freesync_capable;
        new_state->abm_level = state->abm_level;
        new_state->scaling = state->scaling;
        new_state->underscan_enable = state->underscan_enable;
        new_state->underscan_hborder = state->underscan_hborder;
        new_state->underscan_vborder = state->underscan_vborder;
        new_state->vcpi_slots = state->vcpi_slots;
        new_state->pbn = state->pbn;
        return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
        int r;

        if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
            (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
                amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
                r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
                if (r)
                        return r;
        }

#if defined(CONFIG_DEBUG_FS)
        connector_debugfs_init(amdgpu_dm_connector);
#endif

        return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
        .reset = amdgpu_dm_connector_funcs_reset,
        .detect = amdgpu_dm_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = amdgpu_dm_connector_destroy,
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
        .late_register = amdgpu_dm_connector_late_register,
        .early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
        return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
        struct edid *edid;

        if (!aconnector->base.edid_blob_ptr) {
                DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
                                aconnector->base.name);

                aconnector->base.force = DRM_FORCE_OFF;
                return;
        }

        edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

        aconnector->edid = edid;

        aconnector->dc_em_sink = dc_link_add_remote_sink(
                aconnector->dc_link,
                (uint8_t *)edid,
                (edid->extensions + 1) * EDID_LENGTH,
                &init_params);

        if (aconnector->base.force == DRM_FORCE_ON) {
                aconnector->dc_sink = aconnector->dc_link->local_sink ?
                        aconnector->dc_link->local_sink :
                        aconnector->dc_em_sink;
                dc_sink_retain(aconnector->dc_sink);
        }
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = (struct dc_link *)aconnector->dc_link;

        /*
         * In case of a headless boot with force-on for a DP managed
         * connector, these settings have to be != 0 to get an initial
         * modeset.
         */
        if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
                link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
                link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
        }

        create_eml_sink(aconnector);
}

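/*
 * Validate a stream against a throwaway DC state: attach a single
 * stream-sized, linear ARGB8888 plane and run stream, plane and global
 * validation, so callers can reject modes the hardware cannot actually
 * drive.
 */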
static enum dc_status dm_validate_stream_and_context(struct dc *dc,
                                                struct dc_stream_state *stream)
{
        enum dc_status dc_result = DC_ERROR_UNEXPECTED;
        struct dc_plane_state *dc_plane_state = NULL;
        struct dc_state *dc_state = NULL;

        if (!stream)
                goto cleanup;

        dc_plane_state = dc_create_plane_state(dc);
        if (!dc_plane_state)
                goto cleanup;

        dc_state = dc_create_state(dc);
        if (!dc_state)
                goto cleanup;

        /* populate stream to plane */
        dc_plane_state->src_rect.height  = stream->src.height;
        dc_plane_state->src_rect.width   = stream->src.width;
        dc_plane_state->dst_rect.height  = stream->src.height;
        dc_plane_state->dst_rect.width   = stream->src.width;
        dc_plane_state->clip_rect.height = stream->src.height;
        dc_plane_state->clip_rect.width  = stream->src.width;
        dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
        dc_plane_state->plane_size.surface_size.height = stream->src.height;
        dc_plane_state->plane_size.surface_size.width  = stream->src.width;
        dc_plane_state->plane_size.chroma_size.height  = stream->src.height;
        dc_plane_state->plane_size.chroma_size.width   = stream->src.width;
        dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
        dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
        dc_plane_state->rotation = ROTATION_ANGLE_0;
        dc_plane_state->is_tiling_rotated = false;
        dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;

        dc_result = dc_validate_stream(dc, stream);
        if (dc_result == DC_OK)
                dc_result = dc_validate_plane(dc, dc_plane_state);

        if (dc_result == DC_OK)
                dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);

        if (dc_result == DC_OK && !dc_add_plane_to_context(
                                                dc,
                                                stream,
                                                dc_plane_state,
                                                dc_state))
                dc_result = DC_FAIL_ATTACH_SURFACES;

        if (dc_result == DC_OK)
                dc_result = dc_validate_global_state(dc, dc_state, true);

cleanup:
        if (dc_state)
                dc_release_state(dc_state);

        if (dc_plane_state)
                dc_plane_state_release(dc_plane_state);

        return dc_result;
}

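/*
 * Create a stream for the sink and validate it with DC, stepping the
 * requested bpc down by 2 (to a floor of 6) on each validation failure, and
 * finally retrying once with forced YCbCr420 output if encoder validation
 * failed.
 */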
struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                                const struct drm_display_mode *drm_mode,
                                const struct dm_connector_state *dm_state,
                                const struct dc_stream_state *old_stream)
{
        struct drm_connector *connector = &aconnector->base;
        struct amdgpu_device *adev = drm_to_adev(connector->dev);
        struct dc_stream_state *stream;
        const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
        int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
        enum dc_status dc_result = DC_OK;

        do {
                stream = create_stream_for_sink(aconnector, drm_mode,
                                                dm_state, old_stream,
                                                requested_bpc);
                if (stream == NULL) {
                        DRM_ERROR("Failed to create stream for sink!\n");
                        break;
                }

                dc_result = dc_validate_stream(adev->dm.dc, stream);
                if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                        dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);

                if (dc_result == DC_OK)
                        dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);

                if (dc_result != DC_OK) {
                        DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
                                      drm_mode->hdisplay,
                                      drm_mode->vdisplay,
                                      drm_mode->clock,
                                      dc_result,
                                      dc_status_to_str(dc_result));

                        dc_stream_release(stream);
                        stream = NULL;
                        requested_bpc -= 2; /* lower bpc to retry validation */
                }

        } while (stream == NULL && requested_bpc >= 6);

        if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
                DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

                aconnector->force_yuv420_output = true;
                stream = create_validate_stream_for_sink(aconnector, drm_mode,
                                                dm_state, old_stream);
                aconnector->force_yuv420_output = false;
        }

        return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
                                   struct drm_display_mode *mode)
{
        int result = MODE_ERROR;
        struct dc_sink *dc_sink;
        /* TODO: Unhardcode stream count */
        struct dc_stream_state *stream;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

        if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
                        (mode->flags & DRM_MODE_FLAG_DBLSCAN))
                return result;

        /*
         * Only run this the first time mode_valid is called to initialize
         * EDID mgmt
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
                !aconnector->dc_em_sink)
                handle_edid_mgmt(aconnector);

        dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

        if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
                                aconnector->base.force != DRM_FORCE_ON) {
                DRM_ERROR("dc_sink is NULL!\n");
                goto fail;
        }

        stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
        if (stream) {
                dc_stream_release(stream);
                result = MODE_OK;
        }

fail:
        /* TODO: error handling */
        return result;
}

static int fill_hdr_info_packet(const struct drm_connector_state *state,
                                struct dc_info_packet *out)
{
        struct hdmi_drm_infoframe frame;
        unsigned char buf[30]; /* 26 + 4 */
        ssize_t len;
        int ret, i;

        memset(out, 0, sizeof(*out));

        if (!state->hdr_output_metadata)
                return 0;

        ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
        if (ret)
                return ret;

        len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
        if (len < 0)
                return (int)len;

        /* Static metadata is a fixed 26 bytes + 4 byte header. */
        if (len != 30)
                return -EINVAL;

        /* Prepare the infopacket for DC. */
        switch (state->connector->connector_type) {
        case DRM_MODE_CONNECTOR_HDMIA:
                out->hb0 = 0x87; /* type */
                out->hb1 = 0x01; /* version */
                out->hb2 = 0x1A; /* length */
                out->sb[0] = buf[3]; /* checksum */
                i = 1;
                break;

        case DRM_MODE_CONNECTOR_DisplayPort:
        case DRM_MODE_CONNECTOR_eDP:
                out->hb0 = 0x00; /* sdp id, zero */
                out->hb1 = 0x87; /* type */
                out->hb2 = 0x1D; /* payload len - 1 */
                out->hb3 = (0x13 << 2); /* sdp version */
                out->sb[0] = 0x01; /* version */
                out->sb[1] = 0x1A; /* length */
                i = 2;
                break;

        default:
                return -EINVAL;
        }

        memcpy(&out->sb[i], &buf[4], 26);
        out->valid = true;

        print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
                       sizeof(out->sb), false);

        return 0;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
                                 struct drm_atomic_state *state)
{
        struct drm_connector_state *new_con_state =
                drm_atomic_get_new_connector_state(state, conn);
        struct drm_connector_state *old_con_state =
                drm_atomic_get_old_connector_state(state, conn);
        struct drm_crtc *crtc = new_con_state->crtc;
        struct drm_crtc_state *new_crtc_state;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
        int ret;

        trace_amdgpu_dm_connector_atomic_check(new_con_state);

        if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
                ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
                if (ret < 0)
                        return ret;
        }

        if (!crtc)
                return 0;

        if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
                struct dc_info_packet hdr_infopacket;

                ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
                if (ret)
                        return ret;

                new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
                if (IS_ERR(new_crtc_state))
                        return PTR_ERR(new_crtc_state);

                /*
                 * DC considers the stream backends changed if the
                 * static metadata changes. Forcing the modeset also
                 * gives a simple way for userspace to switch from
                 * 8bpc to 10bpc when setting the metadata to enter
                 * or exit HDR.
                 *
                 * Changing the static metadata after it's been
                 * set is permissible, however. So only force a
                 * modeset if we're entering or exiting HDR.
                 */
                new_crtc_state->mode_changed =
                        !old_con_state->hdr_output_metadata ||
                        !new_con_state->hdr_output_metadata;
        }

        return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
        /*
         * If a second, bigger display is hotplugged in fbcon mode, its
         * higher-resolution modes are filtered out by drm_mode_validate_size()
         * and are missing after the user starts lightdm. So we need to renew
         * the mode list in the get_modes callback, not just return the mode
         * count.
         */
        .get_modes = get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
        .atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
        switch (display_color_depth) {
        case COLOR_DEPTH_666:
                return 6;
        case COLOR_DEPTH_888:
                return 8;
        case COLOR_DEPTH_101010:
                return 10;
        case COLOR_DEPTH_121212:
                return 12;
        case COLOR_DEPTH_141414:
                return 14;
        case COLOR_DEPTH_161616:
                return 16;
        default:
                break;
        }
        return 0;
}

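/*
 * For MST connectors, (re)compute the PBN required by the adjusted mode from
 * its pixel clock and effective bits per pixel, then reserve the matching
 * number of MST time slots in the topology state.
 */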
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
                                          struct drm_crtc_state *crtc_state,
                                          struct drm_connector_state *conn_state)
{
        struct drm_atomic_state *state = crtc_state->state;
        struct drm_connector *connector = conn_state->connector;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
        const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
        struct drm_dp_mst_topology_mgr *mst_mgr;
        struct drm_dp_mst_port *mst_port;
        struct drm_dp_mst_topology_state *mst_state;
        enum dc_color_depth color_depth;
        int clock, bpp = 0;
        bool is_y420 = false;

        if (!aconnector->port || !aconnector->dc_sink)
                return 0;

        mst_port = aconnector->port;
        mst_mgr = &aconnector->mst_port->mst_mgr;

        if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
                return 0;

        mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
        if (IS_ERR(mst_state))
                return PTR_ERR(mst_state);

        if (!mst_state->pbn_div)
                mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);

        if (!state->duplicated) {
                int max_bpc = conn_state->max_requested_bpc;

                is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
                          aconnector->force_yuv420_output;
                color_depth = convert_color_depth_from_display_info(connector,
                                                                    is_y420,
                                                                    max_bpc);
                bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
                clock = adjusted_mode->clock;
                dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
        }

        dm_new_connector_state->vcpi_slots =
                drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
                                              dm_new_connector_state->pbn);
        if (dm_new_connector_state->vcpi_slots < 0) {
                DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
                return dm_new_connector_state->vcpi_slots;
        }
        return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
        .disable = dm_encoder_helper_disable,
        .atomic_check = dm_encoder_helper_atomic_check
};

6599 #if defined(CONFIG_DRM_AMD_DC_DCN)
6600 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6601                                             struct dc_state *dc_state,
6602                                             struct dsc_mst_fairness_vars *vars)
6603 {
6604         struct dc_stream_state *stream = NULL;
6605         struct drm_connector *connector;
6606         struct drm_connector_state *new_con_state;
6607         struct amdgpu_dm_connector *aconnector;
6608         struct dm_connector_state *dm_conn_state;
6609         int i, j, ret;
6610         int vcpi, pbn_div, pbn, slot_num = 0;
6611
6612         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6613
6614                 aconnector = to_amdgpu_dm_connector(connector);
6615
6616                 if (!aconnector->port)
6617                         continue;
6618
6619                 if (!new_con_state || !new_con_state->crtc)
6620                         continue;
6621
6622                 dm_conn_state = to_dm_connector_state(new_con_state);
6623
6624                 for (j = 0; j < dc_state->stream_count; j++) {
6625                         stream = dc_state->streams[j];
6626                         if (!stream)
6627                                 continue;
6628
6629                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6630                                 break;
6631
6632                         stream = NULL;
6633                 }
6634
6635                 if (!stream)
6636                         continue;
6637
6638                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6639                 /* pbn is calculated by compute_mst_dsc_configs_for_state*/
6640                 for (j = 0; j < dc_state->stream_count; j++) {
6641                         if (vars[j].aconnector == aconnector) {
6642                                 pbn = vars[j].pbn;
6643                                 break;
6644                         }
6645                 }
6646
6647                 if (j == dc_state->stream_count)
6648                         continue;
6649
6650                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
6651
6652                 if (stream->timing.flags.DSC != 1) {
6653                         dm_conn_state->pbn = pbn;
6654                         dm_conn_state->vcpi_slots = slot_num;
6655
6656                         ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
6657                                                            dm_conn_state->pbn, false);
6658                         if (ret < 0)
6659                                 return ret;
6660
6661                         continue;
6662                 }
6663
6664                 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true);
6665                 if (vcpi < 0)
6666                         return vcpi;
6667
6668                 dm_conn_state->pbn = pbn;
6669                 dm_conn_state->vcpi_slots = vcpi;
6670         }
6671         return 0;
6672 }
6673 #endif
6674
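     /* Map a DC signal type to the DRM connector type it is exposed as. */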
6675 static int to_drm_connector_type(enum signal_type st)
6676 {
6677         switch (st) {
6678         case SIGNAL_TYPE_HDMI_TYPE_A:
6679                 return DRM_MODE_CONNECTOR_HDMIA;
6680         case SIGNAL_TYPE_EDP:
6681                 return DRM_MODE_CONNECTOR_eDP;
6682         case SIGNAL_TYPE_LVDS:
6683                 return DRM_MODE_CONNECTOR_LVDS;
6684         case SIGNAL_TYPE_RGB:
6685                 return DRM_MODE_CONNECTOR_VGA;
6686         case SIGNAL_TYPE_DISPLAY_PORT:
6687         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6688                 return DRM_MODE_CONNECTOR_DisplayPort;
6689         case SIGNAL_TYPE_DVI_DUAL_LINK:
6690         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6691                 return DRM_MODE_CONNECTOR_DVID;
6692         case SIGNAL_TYPE_VIRTUAL:
6693                 return DRM_MODE_CONNECTOR_VIRTUAL;
6694
6695         default:
6696                 return DRM_MODE_CONNECTOR_Unknown;
6697         }
6698 }
6699
6700 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6701 {
6702         struct drm_encoder *encoder;
6703
6704         /* There is only one encoder per connector */
6705         drm_connector_for_each_possible_encoder(connector, encoder)
6706                 return encoder;
6707
6708         return NULL;
6709 }
6710
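     /*
      * Cache the preferred display mode (the head of the sorted probed-mode
      * list) as the encoder's native mode; common scaled modes are derived
      * from it.
      */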
6711 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6712 {
6713         struct drm_encoder *encoder;
6714         struct amdgpu_encoder *amdgpu_encoder;
6715
6716         encoder = amdgpu_dm_connector_to_encoder(connector);
6717
6718         if (encoder == NULL)
6719                 return;
6720
6721         amdgpu_encoder = to_amdgpu_encoder(encoder);
6722
6723         amdgpu_encoder->native_mode.clock = 0;
6724
6725         if (!list_empty(&connector->probed_modes)) {
6726                 struct drm_display_mode *preferred_mode = NULL;
6727
6728                 list_for_each_entry(preferred_mode,
6729                                     &connector->probed_modes,
6730                                     head) {
6731                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6732                                 amdgpu_encoder->native_mode = *preferred_mode;
6733
6734                         break;
6735                 }
6736
6737         }
6738 }
6739
6740 static struct drm_display_mode *
6741 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6742                              char *name,
6743                              int hdisplay, int vdisplay)
6744 {
6745         struct drm_device *dev = encoder->dev;
6746         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6747         struct drm_display_mode *mode = NULL;
6748         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6749
6750         mode = drm_mode_duplicate(dev, native_mode);
6751
6752         if (mode == NULL)
6753                 return NULL;
6754
6755         mode->hdisplay = hdisplay;
6756         mode->vdisplay = vdisplay;
6757         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6758         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6759
6760         return mode;
6762 }
6763
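     /*
      * Add a set of common modes that fit within the native mode, so that
      * scaled resolutions are available even when the EDID only reports
      * the native timing.
      */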
6764 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6765                                                  struct drm_connector *connector)
6766 {
6767         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6768         struct drm_display_mode *mode = NULL;
6769         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6770         struct amdgpu_dm_connector *amdgpu_dm_connector =
6771                                 to_amdgpu_dm_connector(connector);
6772         int i;
6773         int n;
6774         struct mode_size {
6775                 char name[DRM_DISPLAY_MODE_LEN];
6776                 int w;
6777                 int h;
6778         } common_modes[] = {
6779                 {  "640x480",  640,  480},
6780                 {  "800x600",  800,  600},
6781                 { "1024x768", 1024,  768},
6782                 { "1280x720", 1280,  720},
6783                 { "1280x800", 1280,  800},
6784                 {"1280x1024", 1280, 1024},
6785                 { "1440x900", 1440,  900},
6786                 {"1680x1050", 1680, 1050},
6787                 {"1600x1200", 1600, 1200},
6788                 {"1920x1080", 1920, 1080},
6789                 {"1920x1200", 1920, 1200}
6790         };
6791
6792         n = ARRAY_SIZE(common_modes);
6793
6794         for (i = 0; i < n; i++) {
6795                 struct drm_display_mode *curmode = NULL;
6796                 bool mode_existed = false;
6797
6798                 if (common_modes[i].w > native_mode->hdisplay ||
6799                     common_modes[i].h > native_mode->vdisplay ||
6800                    (common_modes[i].w == native_mode->hdisplay &&
6801                     common_modes[i].h == native_mode->vdisplay))
6802                         continue;
6803
6804                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6805                         if (common_modes[i].w == curmode->hdisplay &&
6806                             common_modes[i].h == curmode->vdisplay) {
6807                                 mode_existed = true;
6808                                 break;
6809                         }
6810                 }
6811
6812                 if (mode_existed)
6813                         continue;
6814
6815                 mode = amdgpu_dm_create_common_mode(encoder,
6816                                 common_modes[i].name, common_modes[i].w,
6817                                 common_modes[i].h);
6818                 if (!mode)
6819                         continue;
6820
6821                 drm_mode_probed_add(connector, mode);
6822                 amdgpu_dm_connector->num_modes++;
6823         }
6824 }
6825
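     /*
      * For internal panels (eDP/LVDS), probe the native mode and apply any
      * DMI panel-orientation quirk that matches its resolution.
      */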
6826 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
6827 {
6828         struct drm_encoder *encoder;
6829         struct amdgpu_encoder *amdgpu_encoder;
6830         const struct drm_display_mode *native_mode;
6831
6832         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
6833             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
6834                 return;
6835
6836         mutex_lock(&connector->dev->mode_config.mutex);
6837         amdgpu_dm_connector_get_modes(connector);
6838         mutex_unlock(&connector->dev->mode_config.mutex);
6839
6840         encoder = amdgpu_dm_connector_to_encoder(connector);
6841         if (!encoder)
6842                 return;
6843
6844         amdgpu_encoder = to_amdgpu_encoder(encoder);
6845
6846         native_mode = &amdgpu_encoder->native_mode;
6847         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
6848                 return;
6849
6850         drm_connector_set_panel_orientation_with_quirk(connector,
6851                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
6852                                                        native_mode->hdisplay,
6853                                                        native_mode->vdisplay);
6854 }
6855
6856 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6857                                               struct edid *edid)
6858 {
6859         struct amdgpu_dm_connector *amdgpu_dm_connector =
6860                         to_amdgpu_dm_connector(connector);
6861
6862         if (edid) {
6863                 /* empty probed_modes */
6864                 INIT_LIST_HEAD(&connector->probed_modes);
6865                 amdgpu_dm_connector->num_modes =
6866                                 drm_add_edid_modes(connector, edid);
6867
6868                 /* Sort the probed modes before calling
6869                  * amdgpu_dm_get_native_mode(), since an EDID can contain
6870                  * more than one preferred mode. Modes later in the probed
6871                  * list may have a higher preferred resolution: for example,
6872                  * 3840x2160 in the base EDID preferred timing and 4096x2160
6873                  * as the preferred resolution in a later DID extension
6874                  * block.
6875                  */
6876                 drm_mode_sort(&connector->probed_modes);
6877                 amdgpu_dm_get_native_mode(connector);
6878
6879                 /* Freesync capabilities are reset by calling
6880                  * drm_add_edid_modes() and need to be
6881                  * restored here.
6882                  */
6883                 amdgpu_dm_update_freesync_caps(connector, edid);
6884         } else {
6885                 amdgpu_dm_connector->num_modes = 0;
6886         }
6887 }
6888
6889 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
6890                               struct drm_display_mode *mode)
6891 {
6892         struct drm_display_mode *m;
6893
6894         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
6895                 if (drm_mode_equal(m, mode))
6896                         return true;
6897         }
6898
6899         return false;
6900 }
6901
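     /*
      * Synthesize fixed-refresh "FreeSync video" modes by stretching the
      * vertical front porch of the highest-refresh probed mode down to a
      * set of common media rates within the panel's VRR range.
      */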
6902 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
6903 {
6904         const struct drm_display_mode *m;
6905         struct drm_display_mode *new_mode;
6906         uint i;
6907         uint32_t new_modes_count = 0;
6908
6909         /* Standard FPS values
6910          *
6911          * 23.976       - TV/NTSC
6912          * 24           - Cinema
6913          * 25           - TV/PAL
6914          * 29.97        - TV/NTSC
6915          * 30           - TV/NTSC
6916          * 48           - Cinema HFR
6917          * 50           - TV/PAL
6918          * 60           - Commonly used
6919          * 48,72,96,120 - Multiples of 24
6920          */
6921         static const uint32_t common_rates[] = {
6922                 23976, 24000, 25000, 29970, 30000,
6923                 48000, 50000, 60000, 72000, 96000, 120000
6924         };
6925
6926         /*
6927          * Find the mode with the highest refresh rate at the preferred
6928          * mode's resolution; some monitors report a preferred mode that
6929          * is not the highest-refresh-rate mode they support.
6930          */
6931
6932         m = get_highest_refresh_rate_mode(aconnector, true);
6933         if (!m)
6934                 return 0;
6935
6936         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
6937                 uint64_t target_vtotal, target_vtotal_diff;
6938                 uint64_t num, den;
6939
6940                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
6941                         continue;
6942
6943                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
6944                     common_rates[i] > aconnector->max_vfreq * 1000)
6945                         continue;
6946
6947                 num = (unsigned long long)m->clock * 1000 * 1000;
6948                 den = common_rates[i] * (unsigned long long)m->htotal;
6949                 target_vtotal = div_u64(num, den);
6950                 target_vtotal_diff = target_vtotal - m->vtotal;
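                 /*
                  * Worked example (hypothetical timing): a 1920x1080@60 mode
                  * with clock = 148500 kHz, htotal = 2200 and vtotal = 1125,
                  * retargeted to 48000 mHz, gives target_vtotal =
                  * 148500 * 10^6 / (48000 * 2200) = 1406, i.e. about 281
                  * extra lines of vertical front porch.
                  */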
6951
6952                 /* Check for illegal modes */
6953                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
6954                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
6955                     m->vtotal + target_vtotal_diff < m->vsync_end)
6956                         continue;
6957
6958                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
6959                 if (!new_mode)
6960                         goto out;
6961
6962                 new_mode->vtotal += (u16)target_vtotal_diff;
6963                 new_mode->vsync_start += (u16)target_vtotal_diff;
6964                 new_mode->vsync_end += (u16)target_vtotal_diff;
6965                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6966                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
6967
6968                 if (!is_duplicate_mode(aconnector, new_mode)) {
6969                         drm_mode_probed_add(&aconnector->base, new_mode);
6970                         new_modes_count += 1;
6971                 } else
6972                         drm_mode_destroy(aconnector->base.dev, new_mode);
6973         }
6974  out:
6975         return new_modes_count;
6976 }
6977
6978 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
6979                                                    struct edid *edid)
6980 {
6981         struct amdgpu_dm_connector *amdgpu_dm_connector =
6982                 to_amdgpu_dm_connector(connector);
6983
6984         if (!edid)
6985                 return;
6986
6987         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
6988                 amdgpu_dm_connector->num_modes +=
6989                         add_fs_modes(amdgpu_dm_connector);
6990 }
6991
6992 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6993 {
6994         struct amdgpu_dm_connector *amdgpu_dm_connector =
6995                         to_amdgpu_dm_connector(connector);
6996         struct drm_encoder *encoder;
6997         struct edid *edid = amdgpu_dm_connector->edid;
6998
6999         encoder = amdgpu_dm_connector_to_encoder(connector);
7000
7001         if (!drm_edid_is_valid(edid)) {
7002                 amdgpu_dm_connector->num_modes =
7003                                 drm_add_modes_noedid(connector, 640, 480);
7004         } else {
7005                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7006                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7007                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7008         }
7009         amdgpu_dm_fbc_init(connector);
7010
7011         return amdgpu_dm_connector->num_modes;
7012 }
7013
7014 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7015                                      struct amdgpu_dm_connector *aconnector,
7016                                      int connector_type,
7017                                      struct dc_link *link,
7018                                      int link_index)
7019 {
7020         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7021
7022         /*
7023          * Some of the properties below require access to state, like bpc.
7024          * Allocate some default initial connector state with our reset helper.
7025          */
7026         if (aconnector->base.funcs->reset)
7027                 aconnector->base.funcs->reset(&aconnector->base);
7028
7029         aconnector->connector_id = link_index;
7030         aconnector->dc_link = link;
7031         aconnector->base.interlace_allowed = false;
7032         aconnector->base.doublescan_allowed = false;
7033         aconnector->base.stereo_allowed = false;
7034         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7035         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7036         aconnector->audio_inst = -1;
7037         mutex_init(&aconnector->hpd_lock);
7038
7039         /*
7040          * Configure HPD hot-plug support: connector->polled defaults to 0,
7041          * which means HPD hot plug is not supported.
7042          */
7043         switch (connector_type) {
7044         case DRM_MODE_CONNECTOR_HDMIA:
7045                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7046                 aconnector->base.ycbcr_420_allowed =
7047                         link->link_enc->features.hdmi_ycbcr420_supported;
7048                 break;
7049         case DRM_MODE_CONNECTOR_DisplayPort:
7050                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7051                 link->link_enc = link_enc_cfg_get_link_enc(link);
7052                 ASSERT(link->link_enc);
7053                 if (link->link_enc)
7054                         aconnector->base.ycbcr_420_allowed =
7055                         link->link_enc->features.dp_ycbcr420_supported;
7056                 break;
7057         case DRM_MODE_CONNECTOR_DVID:
7058                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7059                 break;
7060         default:
7061                 break;
7062         }
7063
7064         drm_object_attach_property(&aconnector->base.base,
7065                                 dm->ddev->mode_config.scaling_mode_property,
7066                                 DRM_MODE_SCALE_NONE);
7067
7068         drm_object_attach_property(&aconnector->base.base,
7069                                 adev->mode_info.underscan_property,
7070                                 UNDERSCAN_OFF);
7071         drm_object_attach_property(&aconnector->base.base,
7072                                 adev->mode_info.underscan_hborder_property,
7073                                 0);
7074         drm_object_attach_property(&aconnector->base.base,
7075                                 adev->mode_info.underscan_vborder_property,
7076                                 0);
7077
7078         if (!aconnector->mst_port)
7079                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7080
7081         /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
7082         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7083         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7084
7085         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7086             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7087                 drm_object_attach_property(&aconnector->base.base,
7088                                 adev->mode_info.abm_level_property, 0);
7089         }
7090
7091         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7092             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7093             connector_type == DRM_MODE_CONNECTOR_eDP) {
7094                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7095
7096                 if (!aconnector->mst_port)
7097                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7098
7099 #ifdef CONFIG_DRM_AMD_DC_HDCP
7100                 if (adev->dm.hdcp_workqueue)
7101                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7102 #endif
7103         }
7104 }
7105
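     /*
      * i2c_algorithm.master_xfer hook: repackage the i2c_msg array as a DC
      * i2c_command and submit it over the link's DDC hardware engine.
      */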
7106 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7107                               struct i2c_msg *msgs, int num)
7108 {
7109         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7110         struct ddc_service *ddc_service = i2c->ddc_service;
7111         struct i2c_command cmd;
7112         int i;
7113         int result = -EIO;
7114
7115         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7116
7117         if (!cmd.payloads)
7118                 return result;
7119
7120         cmd.number_of_payloads = num;
7121         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7122         cmd.speed = 100;
7123
7124         for (i = 0; i < num; i++) {
7125                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7126                 cmd.payloads[i].address = msgs[i].addr;
7127                 cmd.payloads[i].length = msgs[i].len;
7128                 cmd.payloads[i].data = msgs[i].buf;
7129         }
7130
7131         if (dc_submit_i2c(
7132                         ddc_service->ctx->dc,
7133                         ddc_service->link->link_index,
7134                         &cmd))
7135                 result = num;
7136
7137         kfree(cmd.payloads);
7138         return result;
7139 }
7140
7141 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7142 {
7143         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7144 }
7145
7146 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7147         .master_xfer = amdgpu_dm_i2c_xfer,
7148         .functionality = amdgpu_dm_i2c_func,
7149 };
7150
7151 static struct amdgpu_i2c_adapter *
7152 create_i2c(struct ddc_service *ddc_service,
7153            int link_index,
7154            int *res)
7155 {
7156         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7157         struct amdgpu_i2c_adapter *i2c;
7158
7159         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7160         if (!i2c)
7161                 return NULL;
7162         i2c->base.owner = THIS_MODULE;
7163         i2c->base.class = I2C_CLASS_DDC;
7164         i2c->base.dev.parent = &adev->pdev->dev;
7165         i2c->base.algo = &amdgpu_dm_i2c_algo;
7166         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7167         i2c_set_adapdata(&i2c->base, i2c);
7168         i2c->ddc_service = ddc_service;
7169
7170         return i2c;
7171 }
7172
7173
7174 /*
7175  * Note: this function assumes that dc_link_detect() was called for the
7176  * dc_link which will be represented by this aconnector.
7177  */
7178 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7179                                     struct amdgpu_dm_connector *aconnector,
7180                                     uint32_t link_index,
7181                                     struct amdgpu_encoder *aencoder)
7182 {
7183         int res = 0;
7184         int connector_type;
7185         struct dc *dc = dm->dc;
7186         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7187         struct amdgpu_i2c_adapter *i2c;
7188
7189         link->priv = aconnector;
7190
7191         DRM_DEBUG_DRIVER("%s()\n", __func__);
7192
7193         i2c = create_i2c(link->ddc, link->link_index, &res);
7194         if (!i2c) {
7195                 DRM_ERROR("Failed to create i2c adapter data\n");
7196                 return -ENOMEM;
7197         }
7198
7199         aconnector->i2c = i2c;
7200         res = i2c_add_adapter(&i2c->base);
7201
7202         if (res) {
7203                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7204                 goto out_free;
7205         }
7206
7207         connector_type = to_drm_connector_type(link->connector_signal);
7208
7209         res = drm_connector_init_with_ddc(
7210                         dm->ddev,
7211                         &aconnector->base,
7212                         &amdgpu_dm_connector_funcs,
7213                         connector_type,
7214                         &i2c->base);
7215
7216         if (res) {
7217                 DRM_ERROR("connector_init failed\n");
7218                 aconnector->connector_id = -1;
7219                 goto out_free;
7220         }
7221
7222         drm_connector_helper_add(
7223                         &aconnector->base,
7224                         &amdgpu_dm_connector_helper_funcs);
7225
7226         amdgpu_dm_connector_init_helper(
7227                 dm,
7228                 aconnector,
7229                 connector_type,
7230                 link,
7231                 link_index);
7232
7233         drm_connector_attach_encoder(
7234                 &aconnector->base, &aencoder->base);
7235
7236         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7237             connector_type == DRM_MODE_CONNECTOR_eDP)
7238                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7239
7240 out_free:
7241         if (res) {
7242                 kfree(i2c);
7243                 aconnector->i2c = NULL;
7244         }
7245         return res;
7246 }
7247
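     /*
      * Any encoder here can drive any CRTC, so return a mask with one bit
      * set per CRTC instance, e.g. 0x7 for three CRTCs, capped at six.
      */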
7248 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7249 {
7250         switch (adev->mode_info.num_crtc) {
7251         case 1:
7252                 return 0x1;
7253         case 2:
7254                 return 0x3;
7255         case 3:
7256                 return 0x7;
7257         case 4:
7258                 return 0xf;
7259         case 5:
7260                 return 0x1f;
7261         case 6:
7262         default:
7263                 return 0x3f;
7264         }
7265 }
7266
7267 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7268                                   struct amdgpu_encoder *aencoder,
7269                                   uint32_t link_index)
7270 {
7271         struct amdgpu_device *adev = drm_to_adev(dev);
7272
7273         int res = drm_encoder_init(dev,
7274                                    &aencoder->base,
7275                                    &amdgpu_dm_encoder_funcs,
7276                                    DRM_MODE_ENCODER_TMDS,
7277                                    NULL);
7278
7279         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7280
7281         if (!res)
7282                 aencoder->encoder_id = link_index;
7283         else
7284                 aencoder->encoder_id = -1;
7285
7286         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7287
7288         return res;
7289 }
7290
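     /*
      * Enable or disable the page-flip (and, with secure display, vline0)
      * interrupt sources for a CRTC, bracketed by the matching
      * drm_crtc_vblank_on()/_off() calls.
      */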
7291 static void manage_dm_interrupts(struct amdgpu_device *adev,
7292                                  struct amdgpu_crtc *acrtc,
7293                                  bool enable)
7294 {
7295         /*
7296          * We have no guarantee that the frontend index maps to the same
7297          * backend index - some even map to more than one.
7298          *
7299          * TODO: Use a different interrupt or check DC itself for the mapping.
7300          */
7301         int irq_type =
7302                 amdgpu_display_crtc_idx_to_irq_type(
7303                         adev,
7304                         acrtc->crtc_id);
7305
7306         if (enable) {
7307                 drm_crtc_vblank_on(&acrtc->base);
7308                 amdgpu_irq_get(
7309                         adev,
7310                         &adev->pageflip_irq,
7311                         irq_type);
7312 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7313                 amdgpu_irq_get(
7314                         adev,
7315                         &adev->vline0_irq,
7316                         irq_type);
7317 #endif
7318         } else {
7319 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7320                 amdgpu_irq_put(
7321                         adev,
7322                         &adev->vline0_irq,
7323                         irq_type);
7324 #endif
7325                 amdgpu_irq_put(
7326                         adev,
7327                         &adev->pageflip_irq,
7328                         irq_type);
7329                 drm_crtc_vblank_off(&acrtc->base);
7330         }
7331 }
7332
7333 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7334                                       struct amdgpu_crtc *acrtc)
7335 {
7336         int irq_type =
7337                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7338
7339         /*
7340          * This reads the current state for the IRQ and forcibly reapplies
7341          * the setting to hardware.
7342          */
7343         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7344 }
7345
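     /*
      * Report whether the scaling mode or the effective underscan borders
      * differ between two connector states.
      */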
7346 static bool
7347 is_scaling_state_different(const struct dm_connector_state *dm_state,
7348                            const struct dm_connector_state *old_dm_state)
7349 {
7350         if (dm_state->scaling != old_dm_state->scaling)
7351                 return true;
7352         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7353                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7354                         return true;
7355         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7356                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7357                         return true;
7358         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7359                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7360                 return true;
7361         return false;
7362 }
7363
7364 #ifdef CONFIG_DRM_AMD_DC_HDCP
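     /*
      * Decide whether HDCP needs to be (re)enabled for this connector,
      * normalizing the content_protection property across the state
      * transitions documented case by case below.
      */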
7365 static bool is_content_protection_different(struct drm_connector_state *state,
7366                                             const struct drm_connector_state *old_state,
7367                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7368 {
7369         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7370         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7371
7372         /* Handle: Type0/1 change */
7373         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7374             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7375                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7376                 return true;
7377         }
7378
7379         /* CP is being re-enabled, ignore this
7380          *
7381          * Handles:     ENABLED -> DESIRED
7382          */
7383         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7384             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7385                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7386                 return false;
7387         }
7388
7389         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7390          *
7391          * Handles:     UNDESIRED -> ENABLED
7392          */
7393         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7394             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7395                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7396
7397         /* Stream removed and re-enabled
7398          *
7399          * Can sometimes overlap with the HPD case,
7400          * thus set update_hdcp to false to avoid
7401          * setting HDCP multiple times.
7402          *
7403          * Handles:     DESIRED -> DESIRED (Special case)
7404          */
7405         if (!(old_state->crtc && old_state->crtc->enabled) &&
7406                 state->crtc && state->crtc->enabled &&
7407                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7408                 dm_con_state->update_hdcp = false;
7409                 return true;
7410         }
7411
7412         /* Hot-plug, headless s3, dpms
7413          *
7414          * Only start HDCP if the display is connected/enabled.
7415          * update_hdcp flag will be set to false until the next
7416          * HPD comes in.
7417          *
7418          * Handles:     DESIRED -> DESIRED (Special case)
7419          */
7420         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7421             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7422                 dm_con_state->update_hdcp = false;
7423                 return true;
7424         }
7425
7426         /*
7427          * Handles:     UNDESIRED -> UNDESIRED
7428          *              DESIRED -> DESIRED
7429          *              ENABLED -> ENABLED
7430          */
7431         if (old_state->content_protection == state->content_protection)
7432                 return false;
7433
7434         /*
7435          * Handles:     UNDESIRED -> DESIRED
7436          *              DESIRED -> UNDESIRED
7437          *              ENABLED -> UNDESIRED
7438          */
7439         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7440                 return true;
7441
7442         /*
7443          * Handles:     DESIRED -> ENABLED
7444          */
7445         return false;
7446 }
7447
7448 #endif
7449 static void remove_stream(struct amdgpu_device *adev,
7450                           struct amdgpu_crtc *acrtc,
7451                           struct dc_stream_state *stream)
7452 {
7453         /* this is the update mode case */
7454
7455         acrtc->otg_inst = -1;
7456         acrtc->enabled = false;
7457 }
7458
7459 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7460 {
7462         assert_spin_locked(&acrtc->base.dev->event_lock);
7463         WARN_ON(acrtc->event);
7464
7465         acrtc->event = acrtc->base.state->event;
7466
7467         /* Set the flip status */
7468         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7469
7470         /* Mark this event as consumed */
7471         acrtc->base.state->event = NULL;
7472
7473         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7474                      acrtc->crtc_id);
7475 }
7476
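     /*
      * Flip-time path: update the VRR state for the stream, rebuild the
      * VRR infopacket, and note whether it changed so the commit can send
      * an updated packet to the sink.
      */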
7477 static void update_freesync_state_on_stream(
7478         struct amdgpu_display_manager *dm,
7479         struct dm_crtc_state *new_crtc_state,
7480         struct dc_stream_state *new_stream,
7481         struct dc_plane_state *surface,
7482         u32 flip_timestamp_in_us)
7483 {
7484         struct mod_vrr_params vrr_params;
7485         struct dc_info_packet vrr_infopacket = {0};
7486         struct amdgpu_device *adev = dm->adev;
7487         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7488         unsigned long flags;
7489         bool pack_sdp_v1_3 = false;
7490
7491         if (!new_stream)
7492                 return;
7493
7494         /*
7495          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7496          * For now it's sufficient to just guard against these conditions.
7497          */
7498
7499         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7500                 return;
7501
7502         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7503         vrr_params = acrtc->dm_irq_params.vrr_params;
7504
7505         if (surface) {
7506                 mod_freesync_handle_preflip(
7507                         dm->freesync_module,
7508                         surface,
7509                         new_stream,
7510                         flip_timestamp_in_us,
7511                         &vrr_params);
7512
7513                 if (adev->family < AMDGPU_FAMILY_AI &&
7514                     amdgpu_dm_vrr_active(new_crtc_state)) {
7515                         mod_freesync_handle_v_update(dm->freesync_module,
7516                                                      new_stream, &vrr_params);
7517
7518                         /* Need to call this before the frame ends. */
7519                         dc_stream_adjust_vmin_vmax(dm->dc,
7520                                                    new_crtc_state->stream,
7521                                                    &vrr_params.adjust);
7522                 }
7523         }
7524
7525         mod_freesync_build_vrr_infopacket(
7526                 dm->freesync_module,
7527                 new_stream,
7528                 &vrr_params,
7529                 PACKET_TYPE_VRR,
7530                 TRANSFER_FUNC_UNKNOWN,
7531                 &vrr_infopacket,
7532                 pack_sdp_v1_3);
7533
7534         new_crtc_state->freesync_vrr_info_changed |=
7535                 (memcmp(&new_crtc_state->vrr_infopacket,
7536                         &vrr_infopacket,
7537                         sizeof(vrr_infopacket)) != 0);
7538
7539         acrtc->dm_irq_params.vrr_params = vrr_params;
7540         new_crtc_state->vrr_infopacket = vrr_infopacket;
7541
7542         new_stream->vrr_infopacket = vrr_infopacket;
7543
7544         if (new_crtc_state->freesync_vrr_info_changed)
7545                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7546                               new_crtc_state->base.crtc->base.id,
7547                               (int)new_crtc_state->base.vrr_enabled,
7548                               (int)vrr_params.state);
7549
7550         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7551 }
7552
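     /*
      * Recompute the VRR parameters for a stream from its FreeSync config
      * and mirror them into the CRTC's dm_irq_params under the event lock,
      * so the IRQ handlers see a consistent copy.
      */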
7553 static void update_stream_irq_parameters(
7554         struct amdgpu_display_manager *dm,
7555         struct dm_crtc_state *new_crtc_state)
7556 {
7557         struct dc_stream_state *new_stream = new_crtc_state->stream;
7558         struct mod_vrr_params vrr_params;
7559         struct mod_freesync_config config = new_crtc_state->freesync_config;
7560         struct amdgpu_device *adev = dm->adev;
7561         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7562         unsigned long flags;
7563
7564         if (!new_stream)
7565                 return;
7566
7567         /*
7568          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7569          * For now it's sufficient to just guard against these conditions.
7570          */
7571         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7572                 return;
7573
7574         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7575         vrr_params = acrtc->dm_irq_params.vrr_params;
7576
7577         if (new_crtc_state->vrr_supported &&
7578             config.min_refresh_in_uhz &&
7579             config.max_refresh_in_uhz) {
7580                 /*
7581                  * if freesync compatible mode was set, config.state will be set
7582                  * in atomic check
7583                  */
7584                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7585                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7586                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7587                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7588                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7589                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7590                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7591                 } else {
7592                         config.state = new_crtc_state->base.vrr_enabled ?
7593                                                      VRR_STATE_ACTIVE_VARIABLE :
7594                                                      VRR_STATE_INACTIVE;
7595                 }
7596         } else {
7597                 config.state = VRR_STATE_UNSUPPORTED;
7598         }
7599
7600         mod_freesync_build_vrr_params(dm->freesync_module,
7601                                       new_stream,
7602                                       &config, &vrr_params);
7603
7604         new_crtc_state->freesync_config = config;
7605         /* Copy state for access from DM IRQ handler */
7606         acrtc->dm_irq_params.freesync_config = config;
7607         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7608         acrtc->dm_irq_params.vrr_params = vrr_params;
7609         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7610 }
7611
7612 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7613                                             struct dm_crtc_state *new_state)
7614 {
7615         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7616         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7617
7618         if (!old_vrr_active && new_vrr_active) {
7619                 /* Transition VRR inactive -> active:
7620                  * While VRR is active, we must not disable the vblank irq:
7621                  * a reenable after a disable would compute bogus vblank/pflip
7622                  * timestamps if the toggle fell inside the display front porch.
7623                  *
7624                  * We also need vupdate irq for the actual core vblank handling
7625                  * at end of vblank.
7626                  */
7627                 WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
7628                 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
7629                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7630                                  __func__, new_state->base.crtc->base.id);
7631         } else if (old_vrr_active && !new_vrr_active) {
7632                 /* Transition VRR active -> inactive:
7633                  * Allow vblank irq disable again for fixed refresh rate.
7634                  */
7635                 WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
7636                 drm_crtc_vblank_put(new_state->base.crtc);
7637                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7638                                  __func__, new_state->base.crtc->base.id);
7639         }
7640 }
7641
7642 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7643 {
7644         struct drm_plane *plane;
7645         struct drm_plane_state *old_plane_state;
7646         int i;
7647
7648         /*
7649          * TODO: Make this per-stream so we don't issue redundant updates for
7650          * commits with multiple streams.
7651          */
7652         for_each_old_plane_in_state(state, plane, old_plane_state, i)
7653                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7654                         handle_cursor_update(plane, old_plane_state);
7655 }
7656
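     /*
      * Program all updated planes for one CRTC as a single DC update
      * bundle: fill per-plane surface, scaling and flip-address data,
      * throttle page flips against the target vblank, then commit the
      * bundled stream update to DC while managing PSR and page-flip
      * interrupt state.
      */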
7657 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7658                                     struct dc_state *dc_state,
7659                                     struct drm_device *dev,
7660                                     struct amdgpu_display_manager *dm,
7661                                     struct drm_crtc *pcrtc,
7662                                     bool wait_for_vblank)
7663 {
7664         uint32_t i;
7665         uint64_t timestamp_ns;
7666         struct drm_plane *plane;
7667         struct drm_plane_state *old_plane_state, *new_plane_state;
7668         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7669         struct drm_crtc_state *new_pcrtc_state =
7670                         drm_atomic_get_new_crtc_state(state, pcrtc);
7671         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7672         struct dm_crtc_state *dm_old_crtc_state =
7673                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7674         int planes_count = 0, vpos, hpos;
7675         unsigned long flags;
7676         uint32_t target_vblank, last_flip_vblank;
7677         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7678         bool cursor_update = false;
7679         bool pflip_present = false;
7680         struct {
7681                 struct dc_surface_update surface_updates[MAX_SURFACES];
7682                 struct dc_plane_info plane_infos[MAX_SURFACES];
7683                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7684                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7685                 struct dc_stream_update stream_update;
7686         } *bundle;
7687
7688         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7689
7690         if (!bundle) {
7691                 dm_error("Failed to allocate update bundle\n");
7692                 goto cleanup;
7693         }
7694
7695         /*
7696          * Disable the cursor first if we're disabling all the planes.
7697          * It'll remain on the screen after the planes are re-enabled
7698          * if we don't.
7699          */
7700         if (acrtc_state->active_planes == 0)
7701                 amdgpu_dm_commit_cursors(state);
7702
7703         /* update planes when needed */
7704         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7705                 struct drm_crtc *crtc = new_plane_state->crtc;
7706                 struct drm_crtc_state *new_crtc_state;
7707                 struct drm_framebuffer *fb = new_plane_state->fb;
7708                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7709                 bool plane_needs_flip;
7710                 struct dc_plane_state *dc_plane;
7711                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7712
7713                 /* Cursor plane is handled after stream updates */
7714                 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7715                         if ((fb && crtc == pcrtc) ||
7716                             (old_plane_state->fb && old_plane_state->crtc == pcrtc))
7717                                 cursor_update = true;
7718
7719                         continue;
7720                 }
7721
7722                 if (!fb || !crtc || pcrtc != crtc)
7723                         continue;
7724
7725                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7726                 if (!new_crtc_state->active)
7727                         continue;
7728
7729                 dc_plane = dm_new_plane_state->dc_state;
7730
7731                 bundle->surface_updates[planes_count].surface = dc_plane;
7732                 if (new_pcrtc_state->color_mgmt_changed) {
7733                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7734                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7735                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7736                 }
7737
7738                 fill_dc_scaling_info(dm->adev, new_plane_state,
7739                                      &bundle->scaling_infos[planes_count]);
7740
7741                 bundle->surface_updates[planes_count].scaling_info =
7742                         &bundle->scaling_infos[planes_count];
7743
7744                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7745
7746                 pflip_present = pflip_present || plane_needs_flip;
7747
7748                 if (!plane_needs_flip) {
7749                         planes_count += 1;
7750                         continue;
7751                 }
7752
7753                 fill_dc_plane_info_and_addr(
7754                         dm->adev, new_plane_state,
7755                         afb->tiling_flags,
7756                         &bundle->plane_infos[planes_count],
7757                         &bundle->flip_addrs[planes_count].address,
7758                         afb->tmz_surface, false);
7759
7760                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
7761                                  new_plane_state->plane->index,
7762                                  bundle->plane_infos[planes_count].dcc.enable);
7763
7764                 bundle->surface_updates[planes_count].plane_info =
7765                         &bundle->plane_infos[planes_count];
7766
7767                 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7768                         fill_dc_dirty_rects(plane, old_plane_state,
7769                                             new_plane_state, new_crtc_state,
7770                                             &bundle->flip_addrs[planes_count]);
7771
7772                 /*
7773                  * Only allow immediate flips for fast updates that don't
7774                  * change FB pitch, DCC state, rotation or mirroring.
7775                  */
7776                 bundle->flip_addrs[planes_count].flip_immediate =
7777                         crtc->state->async_flip &&
7778                         acrtc_state->update_type == UPDATE_TYPE_FAST;
7779
7780                 timestamp_ns = ktime_get_ns();
7781                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7782                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7783                 bundle->surface_updates[planes_count].surface = dc_plane;
7784
7785                 if (!bundle->surface_updates[planes_count].surface) {
7786                         DRM_ERROR("No surface for CRTC: id=%d\n",
7787                                         acrtc_attach->crtc_id);
7788                         continue;
7789                 }
7790
7791                 if (plane == pcrtc->primary)
7792                         update_freesync_state_on_stream(
7793                                 dm,
7794                                 acrtc_state,
7795                                 acrtc_state->stream,
7796                                 dc_plane,
7797                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7798
7799                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
7800                                  __func__,
7801                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7802                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7803
7804                 planes_count += 1;
7805
7806         }
7807
7808         if (pflip_present) {
7809                 if (!vrr_active) {
7810                         /* Use old throttling in non-vrr fixed refresh rate mode
7811                          * to keep flip scheduling based on target vblank counts
7812                          * working in a backwards compatible way, e.g., for
7813                          * clients using the GLX_OML_sync_control extension or
7814                          * DRI3/Present extension with defined target_msc.
7815                          */
7816                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7817                 } else {
7819                         /* For variable refresh rate mode only:
7820                          * Get vblank of last completed flip to avoid > 1 vrr
7821                          * flips per video frame by use of throttling, but allow
7822                          * flip programming anywhere in the possibly large
7823                          * variable vrr vblank interval for fine-grained flip
7824                          * timing control and more opportunity to avoid stutter
7825                          * on late submission of flips.
7826                          */
7827                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7828                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7829                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7830                 }
7831
7832                 target_vblank = last_flip_vblank + wait_for_vblank;
7833
7834                 /*
7835                  * Wait until we're out of the vertical blank period before the one
7836                  * targeted by the flip
7837                  */
7838                 while ((acrtc_attach->enabled &&
7839                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7840                                                             0, &vpos, &hpos, NULL,
7841                                                             NULL, &pcrtc->hwmode)
7842                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7843                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7844                         (int)(target_vblank -
7845                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7846                         usleep_range(1000, 1100);
7847                 }
7848
7849                 /*
7850                  * Prepare the flip event for the pageflip interrupt to handle.
7851                  *
7852                  * This only works in the case where we've already turned on the
7853                  * appropriate hardware blocks (e.g. HUBP), so in the transition
7854                  * case from 0 -> n planes we have to skip a hardware-generated
7855                  * event and rely on sending it from software.
7856                  */
7857                 if (acrtc_attach->base.state->event &&
7858                     acrtc_state->active_planes > 0) {
7859                         drm_crtc_vblank_get(pcrtc);
7860
7861                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7862
7863                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7864                         prepare_flip_isr(acrtc_attach);
7865
7866                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7867                 }
7868
7869                 if (acrtc_state->stream) {
7870                         if (acrtc_state->freesync_vrr_info_changed)
7871                                 bundle->stream_update.vrr_infopacket =
7872                                         &acrtc_state->stream->vrr_infopacket;
7873                 }
7874         } else if (cursor_update && acrtc_state->active_planes > 0 &&
7875                    acrtc_attach->base.state->event) {
7876                 drm_crtc_vblank_get(pcrtc);
7877
7878                 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7879
7880                 acrtc_attach->event = acrtc_attach->base.state->event;
7881                 acrtc_attach->base.state->event = NULL;
7882
7883                 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7884         }
7885
7886         /* Update the planes if changed or disable if we don't have any. */
7887         if ((planes_count || acrtc_state->active_planes == 0) &&
7888                 acrtc_state->stream) {
7889                 /*
7890                  * If PSR or idle optimizations are enabled then flush out
7891                  * any pending work before hardware programming.
7892                  */
7893                 if (dm->vblank_control_workqueue)
7894                         flush_workqueue(dm->vblank_control_workqueue);
7895
7896                 bundle->stream_update.stream = acrtc_state->stream;
7897                 if (new_pcrtc_state->mode_changed) {
7898                         bundle->stream_update.src = acrtc_state->stream->src;
7899                         bundle->stream_update.dst = acrtc_state->stream->dst;
7900                 }
7901
7902                 if (new_pcrtc_state->color_mgmt_changed) {
7903                         /*
7904                          * TODO: This isn't fully correct since we've actually
7905                          * already modified the stream in place.
7906                          */
7907                         bundle->stream_update.gamut_remap =
7908                                 &acrtc_state->stream->gamut_remap_matrix;
7909                         bundle->stream_update.output_csc_transform =
7910                                 &acrtc_state->stream->csc_color_matrix;
7911                         bundle->stream_update.out_transfer_func =
7912                                 acrtc_state->stream->out_transfer_func;
7913                 }
7914
7915                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7916                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7917                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7918
7919                 /*
7920                  * If FreeSync state on the stream has changed then we need to
7921                  * re-adjust the min/max bounds now that DC doesn't handle this
7922                  * as part of commit.
7923                  */
7924                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
7925                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7926                         dc_stream_adjust_vmin_vmax(
7927                                 dm->dc, acrtc_state->stream,
7928                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7929                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7930                 }
7931                 mutex_lock(&dm->dc_lock);
7932                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7933                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7934                         amdgpu_dm_psr_disable(acrtc_state->stream);
7935
7936                 dc_commit_updates_for_stream(dm->dc,
7937                                                      bundle->surface_updates,
7938                                                      planes_count,
7939                                                      acrtc_state->stream,
7940                                                      &bundle->stream_update,
7941                                                      dc_state);
7942
7943                 /*
7944                  * Enable or disable the interrupts on the backend.
7945                  *
7946                  * Most pipes are put into power gating when unused.
7947                  *
                 * When power gating is enabled on a pipe, we lose the
                 * interrupt enablement state once power gating is disabled
                 * again.
7950                  *
7951                  * So we need to update the IRQ control state in hardware
7952                  * whenever the pipe turns on (since it could be previously
7953                  * power gated) or off (since some pipes can't be power gated
7954                  * on some ASICs).
7955                  */
7956                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7957                         dm_update_pflip_irq_state(drm_to_adev(dev),
7958                                                   acrtc_attach);
7959
7960                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7961                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7962                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7963                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7964
7965                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
7966                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
7967                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
7968                         struct amdgpu_dm_connector *aconn =
7969                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
7970
7971                         if (aconn->psr_skip_count > 0)
7972                                 aconn->psr_skip_count--;
7973
7974                         /* Allow PSR when skip count is 0. */
7975                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
7976
7977                         /*
7978                          * If sink supports PSR SU, there is no need to rely on
7979                          * a vblank event disable request to enable PSR. PSR SU
7980                          * can be enabled immediately once OS demonstrates an
7981                          * adequate number of fast atomic commits to notify KMD
7982                          * of update events. See `vblank_control_worker()`.
7983                          */
7984                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
7985                             acrtc_attach->dm_irq_params.allow_psr_entry &&
7986 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
7987                             !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
7988 #endif
7989                             !acrtc_state->stream->link->psr_settings.psr_allow_active)
7990                                 amdgpu_dm_psr_enable(acrtc_state->stream);
7991                 } else {
7992                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
7993                 }
7994
7995                 mutex_unlock(&dm->dc_lock);
7996         }
7997
7998         /*
7999          * Update cursor state *after* programming all the planes.
         * This avoids redundant programming in the case where we're going
         * to be disabling a single plane, since those pipes are being
         * disabled anyway.
8002          */
8003         if (acrtc_state->active_planes)
8004                 amdgpu_dm_commit_cursors(state);
8005
8006 cleanup:
8007         kfree(bundle);
8008 }
8009
8010 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8011                                    struct drm_atomic_state *state)
8012 {
8013         struct amdgpu_device *adev = drm_to_adev(dev);
8014         struct amdgpu_dm_connector *aconnector;
8015         struct drm_connector *connector;
8016         struct drm_connector_state *old_con_state, *new_con_state;
8017         struct drm_crtc_state *new_crtc_state;
8018         struct dm_crtc_state *new_dm_crtc_state;
8019         const struct dc_stream_status *status;
8020         int i, inst;
8021
        /* Notify audio device removals. */
8023         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8024                 if (old_con_state->crtc != new_con_state->crtc) {
8025                         /* CRTC changes require notification. */
8026                         goto notify;
8027                 }
8028
8029                 if (!new_con_state->crtc)
8030                         continue;
8031
8032                 new_crtc_state = drm_atomic_get_new_crtc_state(
8033                         state, new_con_state->crtc);
8034
8035                 if (!new_crtc_state)
8036                         continue;
8037
8038                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8039                         continue;
8040
8041         notify:
8042                 aconnector = to_amdgpu_dm_connector(connector);
8043
8044                 mutex_lock(&adev->dm.audio_lock);
8045                 inst = aconnector->audio_inst;
8046                 aconnector->audio_inst = -1;
8047                 mutex_unlock(&adev->dm.audio_lock);
8048
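                /*
                 * Notify using the stale instance: audio_inst is now -1, so
                 * when the audio client re-queries this port it should find
                 * no ELD and treat the pin as unplugged.
                 */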
8049                 amdgpu_dm_audio_eld_notify(adev, inst);
8050         }
8051
8052         /* Notify audio device additions. */
8053         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8054                 if (!new_con_state->crtc)
8055                         continue;
8056
8057                 new_crtc_state = drm_atomic_get_new_crtc_state(
8058                         state, new_con_state->crtc);
8059
8060                 if (!new_crtc_state)
8061                         continue;
8062
8063                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8064                         continue;
8065
8066                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8067                 if (!new_dm_crtc_state->stream)
8068                         continue;
8069
8070                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8071                 if (!status)
8072                         continue;
8073
8074                 aconnector = to_amdgpu_dm_connector(connector);
8075
8076                 mutex_lock(&adev->dm.audio_lock);
8077                 inst = status->audio_inst;
8078                 aconnector->audio_inst = inst;
8079                 mutex_unlock(&adev->dm.audio_lock);
8080
8081                 amdgpu_dm_audio_eld_notify(adev, inst);
8082         }
8083 }
8084
8085 /*
8086  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8087  * @crtc_state: the DRM CRTC state
8088  * @stream_state: the DC stream state.
8089  *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8091  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8092  */
8093 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8094                                                 struct dc_stream_state *stream_state)
8095 {
8096         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8097 }
8098
8099 /**
 * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
8101  * @state: The atomic state to commit
8102  *
8103  * This will tell DC to commit the constructed DC state from atomic_check,
8104  * programming the hardware. Any failures here implies a hardware failure, since
 * programming the hardware. Any failure here implies a hardware failure, since
8106  */
8107 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8108 {
8109         struct drm_device *dev = state->dev;
8110         struct amdgpu_device *adev = drm_to_adev(dev);
8111         struct amdgpu_display_manager *dm = &adev->dm;
8112         struct dm_atomic_state *dm_state;
8113         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8114         uint32_t i, j;
8115         struct drm_crtc *crtc;
8116         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8117         unsigned long flags;
8118         bool wait_for_vblank = true;
8119         struct drm_connector *connector;
8120         struct drm_connector_state *old_con_state, *new_con_state;
8121         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8122         int crtc_disable_count = 0;
8123         bool mode_set_reset_required = false;
8124         int r;
8125
8126         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8127
8128         r = drm_atomic_helper_wait_for_fences(dev, state, false);
8129         if (unlikely(r))
8130                 DRM_ERROR("Waiting for fences timed out!");
8131
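        /*
         * Sync the legacy (non-atomic) state and serialize against earlier
         * commits whose MST payload allocations this commit depends on.
         */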
8132         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8133         drm_dp_mst_atomic_wait_for_dependencies(state);
8134
8135         dm_state = dm_atomic_get_new_state(state);
8136         if (dm_state && dm_state->context) {
8137                 dc_state = dm_state->context;
8138         } else {
8139                 /* No state changes, retain current state. */
8140                 dc_state_temp = dc_create_state(dm->dc);
8141                 ASSERT(dc_state_temp);
8142                 dc_state = dc_state_temp;
8143                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8144         }
8145
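        /*
         * First pass: for every CRTC that is being disabled or undergoing a
         * full modeset, turn its interrupts off and drop the old stream
         * reference before DC programs the new state.
         */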
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8147                                        new_crtc_state, i) {
8148                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8149
8150                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8151
8152                 if (old_crtc_state->active &&
8153                     (!new_crtc_state->active ||
8154                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8155                         manage_dm_interrupts(adev, acrtc, false);
8156                         dc_stream_release(dm_old_crtc_state->stream);
8157                 }
8158         }
8159
8160         drm_atomic_helper_calc_timestamping_constants(state);
8161
8162         /* update changed items */
8163         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8164                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8165
8166                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8167                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8168
8169                 drm_dbg_state(state->dev,
8170                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8172                         "connectors_changed:%d\n",
8173                         acrtc->crtc_id,
8174                         new_crtc_state->enable,
8175                         new_crtc_state->active,
8176                         new_crtc_state->planes_changed,
8177                         new_crtc_state->mode_changed,
8178                         new_crtc_state->active_changed,
8179                         new_crtc_state->connectors_changed);
8180
8181                 /* Disable cursor if disabling crtc */
8182                 if (old_crtc_state->active && !new_crtc_state->active) {
8183                         struct dc_cursor_position position;
8184
8185                         memset(&position, 0, sizeof(position));
8186                         mutex_lock(&dm->dc_lock);
8187                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8188                         mutex_unlock(&dm->dc_lock);
8189                 }
8190
8191                 /* Copy all transient state flags into dc state */
8192                 if (dm_new_crtc_state->stream) {
8193                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8194                                                             dm_new_crtc_state->stream);
8195                 }
8196
                /*
                 * Handles the headless hotplug case, updating new_state and
                 * aconnector as needed.
                 */
8200
8201                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8202
8203                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8204
8205                         if (!dm_new_crtc_state->stream) {
8206                                 /*
                                 * This can happen because of issues with
                                 * userspace notification delivery: userspace
                                 * tries to set a mode on a display that is in
                                 * fact already disconnected, so dc_sink is
                                 * NULL on the aconnector. We expect a mode
                                 * reset to come shortly.
                                 *
                                 * It can also happen when an unplug occurs
                                 * while the resume sequence is still running.
                                 *
                                 * In both cases we want to pretend we still
                                 * have a sink to keep the pipe running so that
                                 * hw state is consistent with the sw state.
8220                                  */
8221                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8222                                                 __func__, acrtc->base.base.id);
8223                                 continue;
8224                         }
8225
8226                         if (dm_old_crtc_state->stream)
8227                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8228
8229                         pm_runtime_get_noresume(dev->dev);
8230
8231                         acrtc->enabled = true;
8232                         acrtc->hw_mode = new_crtc_state->mode;
8233                         crtc->hwmode = new_crtc_state->mode;
8234                         mode_set_reset_required = true;
8235                 } else if (modereset_required(new_crtc_state)) {
8236                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8237                         /* i.e. reset mode */
8238                         if (dm_old_crtc_state->stream)
8239                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8240
8241                         mode_set_reset_required = true;
8242                 }
8243         } /* for_each_crtc_in_state() */
8244
8245         if (dc_state) {
                /* If there is a mode set or reset, disable eDP PSR. */
8247                 if (mode_set_reset_required) {
8248                         if (dm->vblank_control_workqueue)
8249                                 flush_workqueue(dm->vblank_control_workqueue);
8250
8251                         amdgpu_dm_psr_disable_all(dm);
8252                 }
8253
8254                 dm_enable_per_frame_crtc_master_sync(dc_state);
8255                 mutex_lock(&dm->dc_lock);
8256                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8257
                /*
                 * Allow idle optimizations when no vblank IRQs are active,
                 * i.e. all displays are off.
                 */
8259                 if (dm->active_vblank_irq_count == 0)
8260                         dc_allow_idle_optimizations(dm->dc, true);
8261                 mutex_unlock(&dm->dc_lock);
8262         }
8263
8264         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8265                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8266
8267                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8268
8269                 if (dm_new_crtc_state->stream != NULL) {
8270                         const struct dc_stream_status *status =
8271                                         dc_stream_get_status(dm_new_crtc_state->stream);
8272
8273                         if (!status)
8274                                 status = dc_stream_get_status_from_state(dc_state,
8275                                                                          dm_new_crtc_state->stream);
8276                         if (!status)
                                DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
8278                         else
8279                                 acrtc->otg_inst = status->primary_otg_inst;
8280                 }
8281         }
8282 #ifdef CONFIG_DRM_AMD_DC_HDCP
8283         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8284                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8285                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8286                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8287
8288                 new_crtc_state = NULL;
8289
8290                 if (acrtc)
8291                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8292
8293                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8294
8295                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8296                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8297                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8298                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8299                         dm_new_con_state->update_hdcp = true;
8300                         continue;
8301                 }
8302
8303                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8304                         hdcp_update_display(
8305                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8306                                 new_con_state->hdcp_content_type,
8307                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8308         }
8309 #endif
8310
8311         /* Handle connector state changes */
8312         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8313                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8314                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8315                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8316                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8317                 struct dc_stream_update stream_update;
8318                 struct dc_info_packet hdr_packet;
8319                 struct dc_stream_status *status = NULL;
8320                 bool abm_changed, hdr_changed, scaling_changed;
8321
8322                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8323                 memset(&stream_update, 0, sizeof(stream_update));
8324
8325                 if (acrtc) {
8326                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8327                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8328                 }
8329
8330                 /* Skip any modesets/resets */
8331                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8332                         continue;
8333
8334                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8335                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8336
8337                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8338                                                              dm_old_con_state);
8339
8340                 abm_changed = dm_new_crtc_state->abm_level !=
8341                               dm_old_crtc_state->abm_level;
8342
8343                 hdr_changed =
8344                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8345
8346                 if (!scaling_changed && !abm_changed && !hdr_changed)
8347                         continue;
8348
8349                 stream_update.stream = dm_new_crtc_state->stream;
8350                 if (scaling_changed) {
8351                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8352                                         dm_new_con_state, dm_new_crtc_state->stream);
8353
8354                         stream_update.src = dm_new_crtc_state->stream->src;
8355                         stream_update.dst = dm_new_crtc_state->stream->dst;
8356                 }
8357
8358                 if (abm_changed) {
8359                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8360
8361                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8362                 }
8363
8364                 if (hdr_changed) {
8365                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8366                         stream_update.hdr_static_metadata = &hdr_packet;
8367                 }
8368
8369                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8370
8371                 if (WARN_ON(!status))
8372                         continue;
8373
8374                 WARN_ON(!status->plane_count);
8375
8376                 /*
8377                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8378                  * Here we create an empty update on each plane.
8379                  * To fix this, DC should permit updating only stream properties.
8380                  */
8381                 for (j = 0; j < status->plane_count; j++)
8382                         dummy_updates[j].surface = status->plane_states[0];
8383
8385                 mutex_lock(&dm->dc_lock);
8386                 dc_commit_updates_for_stream(dm->dc,
8387                                                      dummy_updates,
8388                                                      status->plane_count,
8389                                                      dm_new_crtc_state->stream,
8390                                                      &stream_update,
8391                                                      dc_state);
8392                 mutex_unlock(&dm->dc_lock);
8393         }
8394
        /*
8396          * Enable interrupts for CRTCs that are newly enabled or went through
8397          * a modeset. It was intentionally deferred until after the front end
8398          * state was modified to wait until the OTG was on and so the IRQ
8399          * handlers didn't access stale or invalid state.
8400          */
8401         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8402                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8403 #ifdef CONFIG_DEBUG_FS
8404                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8405 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8406                 struct crc_rd_work *crc_rd_wrk;
8407 #endif
8408 #endif
8409                 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8410                 if (old_crtc_state->active && !new_crtc_state->active)
8411                         crtc_disable_count++;
8412
8413                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8414                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8415
                /*
                 * Update the freesync config on the crtc state and the
                 * parameters consumed by the IRQ handlers.
                 */
8417                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8418
8419 #ifdef CONFIG_DEBUG_FS
8420 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8421                 crc_rd_wrk = dm->crc_rd_wrk;
8422 #endif
8423                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8424                 cur_crc_src = acrtc->dm_irq_params.crc_src;
8425                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8426 #endif
8427
8428                 if (new_crtc_state->active &&
8429                     (!old_crtc_state->active ||
8430                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8431                         dc_stream_retain(dm_new_crtc_state->stream);
8432                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8433                         manage_dm_interrupts(adev, acrtc, true);
8434                 }
8435                 /* Handle vrr on->off / off->on transitions */
8436                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
8437
8438 #ifdef CONFIG_DEBUG_FS
8439                 if (new_crtc_state->active &&
8440                     (!old_crtc_state->active ||
8441                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
                        /*
8443                          * Frontend may have changed so reapply the CRC capture
8444                          * settings for the stream.
8445                          */
8446                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8447 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8448                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
8449                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8450                                         acrtc->dm_irq_params.window_param.update_win = true;
8451                                         acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
8452                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
8453                                         crc_rd_wrk->crtc = crtc;
8454                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
8455                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8456                                 }
8457 #endif
8458                                 if (amdgpu_dm_crtc_configure_crc_source(
8459                                         crtc, dm_new_crtc_state, cur_crc_src))
8460                                         DRM_DEBUG_DRIVER("Failed to configure crc source");
8461                         }
8462                 }
8463 #endif
8464         }
8465
8466         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8467                 if (new_crtc_state->async_flip)
8468                         wait_for_vblank = false;
8469
        /* Update planes when needed, per CRTC */
8471         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8472                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8473
8474                 if (dm_new_crtc_state->stream)
8475                         amdgpu_dm_commit_planes(state, dc_state, dev,
8476                                                 dm, crtc, wait_for_vblank);
8477         }
8478
8479         /* Update audio instances for each connector. */
8480         amdgpu_dm_commit_audio(dev, state);
8481
        /*
         * Restore the backlight level on eDPs where the cached brightness
         * was not actually applied to the panel yet.
         */
8483         for (i = 0; i < dm->num_of_edps; i++) {
8484                 if (dm->backlight_dev[i] &&
8485                     (dm->actual_brightness[i] != dm->brightness[i]))
8486                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8487         }
8488
8489         /*
         * Send a vblank event for any event not handled in the flip path, and
         * mark the event consumed for drm_atomic_helper_commit_hw_done.
8492          */
8493         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8494         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8495
8496                 if (new_crtc_state->event)
8497                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8498
8499                 new_crtc_state->event = NULL;
8500         }
8501         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8502
8503         /* Signal HW programming completion */
8504         drm_atomic_helper_commit_hw_done(state);
8505
8506         if (wait_for_vblank)
8507                 drm_atomic_helper_wait_for_flip_done(dev, state);
8508
8509         drm_atomic_helper_cleanup_planes(dev, state);
8510
8511         /* return the stolen vga memory back to VRAM */
8512         if (!adev->mman.keep_stolen_vga_memory)
8513                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8514         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8515
8516         /*
8517          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8518          * so we can put the GPU into runtime suspend if we're not driving any
8519          * displays anymore
8520          */
8521         for (i = 0; i < crtc_disable_count; i++)
8522                 pm_runtime_put_autosuspend(dev->dev);
8523         pm_runtime_mark_last_busy(dev->dev);
8524
8525         if (dc_state_temp)
8526                 dc_release_state(dc_state_temp);
8527 }
8528
8529 static int dm_force_atomic_commit(struct drm_connector *connector)
8530 {
8531         int ret = 0;
8532         struct drm_device *ddev = connector->dev;
8533         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8534         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8535         struct drm_plane *plane = disconnected_acrtc->base.primary;
8536         struct drm_connector_state *conn_state;
8537         struct drm_crtc_state *crtc_state;
8538         struct drm_plane_state *plane_state;
8539
8540         if (!state)
8541                 return -ENOMEM;
8542
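        /*
         * Reuse the caller's acquire context; this path is expected to run
         * with the mode_config locks already held.
         */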
8543         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8544
        /* Construct an atomic state to restore the previous display settings */
8546
8547         /*
8548          * Attach connectors to drm_atomic_state
8549          */
8550         conn_state = drm_atomic_get_connector_state(state, connector);
8551
8552         ret = PTR_ERR_OR_ZERO(conn_state);
8553         if (ret)
8554                 goto out;
8555
        /* Attach crtc to drm_atomic_state */
8557         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8558
8559         ret = PTR_ERR_OR_ZERO(crtc_state);
8560         if (ret)
8561                 goto out;
8562
8563         /* force a restore */
8564         crtc_state->mode_changed = true;
8565
8566         /* Attach plane to drm_atomic_state */
8567         plane_state = drm_atomic_get_plane_state(state, plane);
8568
8569         ret = PTR_ERR_OR_ZERO(plane_state);
8570         if (ret)
8571                 goto out;
8572
8573         /* Call commit internally with the state we just constructed */
8574         ret = drm_atomic_commit(state);
8575
8576 out:
8577         drm_atomic_state_put(state);
8578         if (ret)
8579                 DRM_ERROR("Restoring old state failed with %i\n", ret);
8580
8581         return ret;
8582 }
8583
8584 /*
 * This function handles all cases where a set mode does not arrive upon
 * hotplug: for example when a display is unplugged and then plugged back
 * into the same port, or when running without userspace desktop manager
 * support.
8588  */
8589 void dm_restore_drm_connector_state(struct drm_device *dev,
8590                                     struct drm_connector *connector)
8591 {
8592         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8593         struct amdgpu_crtc *disconnected_acrtc;
8594         struct dm_crtc_state *acrtc_state;
8595
8596         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8597                 return;
8598
8599         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8600         if (!disconnected_acrtc)
8601                 return;
8602
8603         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8604         if (!acrtc_state->stream)
8605                 return;
8606
8607         /*
         * If the previous sink has not been released and differs from the
         * current one, we deduce that we cannot rely on a usermode call to
         * turn the display back on, so we do it here.
8611          */
8612         if (acrtc_state->stream->sink != aconnector->dc_sink)
8613                 dm_force_atomic_commit(&aconnector->base);
8614 }
8615
8616 /*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
8619  */
8620 static int do_aquire_global_lock(struct drm_device *dev,
8621                                  struct drm_atomic_state *state)
8622 {
8623         struct drm_crtc *crtc;
8624         struct drm_crtc_commit *commit;
8625         long ret;
8626
8627         /*
         * Adding all modeset locks to acquire_ctx ensures that when the
         * framework releases it, the extra locks we are taking here will
         * get released too.
8631          */
8632         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8633         if (ret)
8634                 return ret;
8635
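        /*
         * Take a reference on the newest commit of each CRTC (if any) so it
         * cannot be freed while we wait for its hw_done/flip_done
         * completions outside of commit_lock.
         */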
8636         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8637                 spin_lock(&crtc->commit_lock);
8638                 commit = list_first_entry_or_null(&crtc->commit_list,
8639                                 struct drm_crtc_commit, commit_entry);
8640                 if (commit)
8641                         drm_crtc_commit_get(commit);
8642                 spin_unlock(&crtc->commit_lock);
8643
8644                 if (!commit)
8645                         continue;
8646
8647                 /*
                 * Make sure all pending HW programming has completed and
                 * all page flips are done.
8650                  */
8651                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8652
8653                 if (ret > 0)
8654                         ret = wait_for_completion_interruptible_timeout(
8655                                         &commit->flip_done, 10*HZ);
8656
8657                 if (ret == 0)
8658                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8659                                   "timed out\n", crtc->base.id, crtc->name);
8660
8661                 drm_crtc_commit_put(commit);
8662         }
8663
8664         return ret < 0 ? ret : 0;
8665 }
8666
8667 static void get_freesync_config_for_crtc(
8668         struct dm_crtc_state *new_crtc_state,
8669         struct dm_connector_state *new_con_state)
8670 {
8671         struct mod_freesync_config config = {0};
8672         struct amdgpu_dm_connector *aconnector =
8673                         to_amdgpu_dm_connector(new_con_state->base.connector);
8674         struct drm_display_mode *mode = &new_crtc_state->base.mode;
8675         int vrefresh = drm_mode_vrefresh(mode);
8676         bool fs_vid_mode = false;
8677
8678         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8679                                         vrefresh >= aconnector->min_vfreq &&
8680                                         vrefresh <= aconnector->max_vfreq;
8681
8682         if (new_crtc_state->vrr_supported) {
8683                 new_crtc_state->stream->ignore_msa_timing_param = true;
8684                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
8685
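                /*
                 * DC expects the refresh bounds in micro-Hz;
                 * min/max_vfreq are whole Hz.
                 */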
8686                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
8687                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
8688                 config.vsif_supported = true;
8689                 config.btr = true;
8690
8691                 if (fs_vid_mode) {
8692                         config.state = VRR_STATE_ACTIVE_FIXED;
8693                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
8694                         goto out;
8695                 } else if (new_crtc_state->base.vrr_enabled) {
8696                         config.state = VRR_STATE_ACTIVE_VARIABLE;
8697                 } else {
8698                         config.state = VRR_STATE_INACTIVE;
8699                 }
8700         }
8701 out:
8702         new_crtc_state->freesync_config = config;
8703 }
8704
8705 static void reset_freesync_config_for_crtc(
8706         struct dm_crtc_state *new_crtc_state)
8707 {
8708         new_crtc_state->vrr_supported = false;
8709
8710         memset(&new_crtc_state->vrr_infopacket, 0,
8711                sizeof(new_crtc_state->vrr_infopacket));
8712 }
8713
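/*
 * Returns true when the two modes differ only in vertical blanking: same
 * pixel clock, same horizontal timing and same vsync pulse width, but a
 * different vtotal/vsync placement. That is the signature of a mode that
 * merely stretches the vertical front porch, which freesync can absorb
 * without a full modeset.
 */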
8714 static bool
8715 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
8716                                  struct drm_crtc_state *new_crtc_state)
8717 {
8718         const struct drm_display_mode *old_mode, *new_mode;
8719
8720         if (!old_crtc_state || !new_crtc_state)
8721                 return false;
8722
8723         old_mode = &old_crtc_state->mode;
8724         new_mode = &new_crtc_state->mode;
8725
8726         if (old_mode->clock       == new_mode->clock &&
8727             old_mode->hdisplay    == new_mode->hdisplay &&
8728             old_mode->vdisplay    == new_mode->vdisplay &&
8729             old_mode->htotal      == new_mode->htotal &&
8730             old_mode->vtotal      != new_mode->vtotal &&
8731             old_mode->hsync_start == new_mode->hsync_start &&
8732             old_mode->vsync_start != new_mode->vsync_start &&
8733             old_mode->hsync_end   == new_mode->hsync_end &&
8734             old_mode->vsync_end   != new_mode->vsync_end &&
8735             old_mode->hskew       == new_mode->hskew &&
8736             old_mode->vscan       == new_mode->vscan &&
8737             (old_mode->vsync_end - old_mode->vsync_start) ==
8738             (new_mode->vsync_end - new_mode->vsync_start))
8739                 return true;
8740
8741         return false;
8742 }
8743
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
8745         uint64_t num, den, res;
8746         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
8747
8748         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
8749
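        /*
         * Fixed refresh rate = pixel clock / (htotal * vtotal). mode.clock
         * is in kHz, so scale by 1000 for Hz and by another 10^6 to store
         * the result in micro-Hz, the unit freesync_config uses.
         */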
8750         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
8751         den = (unsigned long long)new_crtc_state->mode.htotal *
8752               (unsigned long long)new_crtc_state->mode.vtotal;
8753
8754         res = div_u64(num, den);
8755         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
8756 }
8757
8758 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8759                          struct drm_atomic_state *state,
8760                          struct drm_crtc *crtc,
8761                          struct drm_crtc_state *old_crtc_state,
8762                          struct drm_crtc_state *new_crtc_state,
8763                          bool enable,
8764                          bool *lock_and_validation_needed)
8765 {
8766         struct dm_atomic_state *dm_state = NULL;
8767         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8768         struct dc_stream_state *new_stream;
8769         int ret = 0;
8770
8771         /*
8772          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8773          * update changed items
8774          */
8775         struct amdgpu_crtc *acrtc = NULL;
8776         struct amdgpu_dm_connector *aconnector = NULL;
8777         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8778         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8779
8780         new_stream = NULL;
8781
8782         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8783         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8784         acrtc = to_amdgpu_crtc(crtc);
8785         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8786
8787         /* TODO This hack should go away */
8788         if (aconnector && enable) {
8789                 /* Make sure fake sink is created in plug-in scenario */
8790                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8791                                                             &aconnector->base);
8792                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8793                                                             &aconnector->base);
8794
8795                 if (IS_ERR(drm_new_conn_state)) {
8796                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8797                         goto fail;
8798                 }
8799
8800                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8801                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8802
8803                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8804                         goto skip_modeset;
8805
8806                 new_stream = create_validate_stream_for_sink(aconnector,
8807                                                              &new_crtc_state->mode,
8808                                                              dm_new_conn_state,
8809                                                              dm_old_crtc_state->stream);
8810
8811                 /*
                 * We can end up with no stream on ACTION_SET if a display
                 * was disconnected during S3. This is not an error: the OS
                 * will be updated after detection and will do the right
                 * thing on the next atomic commit.
8816                  */
8817
8818                 if (!new_stream) {
8819                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8820                                         __func__, acrtc->base.base.id);
8821                         ret = -ENOMEM;
8822                         goto fail;
8823                 }
8824
8825                 /*
8826                  * TODO: Check VSDB bits to decide whether this should
8827                  * be enabled or not.
8828                  */
8829                 new_stream->triggered_crtc_reset.enabled =
8830                         dm->force_timing_sync;
8831
8832                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8833
8834                 ret = fill_hdr_info_packet(drm_new_conn_state,
8835                                            &new_stream->hdr_static_metadata);
8836                 if (ret)
8837                         goto fail;
8838
8839                 /*
8840                  * If we already removed the old stream from the context
8841                  * (and set the new stream to NULL) then we can't reuse
8842                  * the old stream even if the stream and scaling are unchanged.
8843                  * We'll hit the BUG_ON and black screen.
8844                  *
8845                  * TODO: Refactor this function to allow this check to work
8846                  * in all conditions.
8847                  */
8848                 if (dm_new_crtc_state->stream &&
8849                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
8850                         goto skip_modeset;
8851
8852                 if (dm_new_crtc_state->stream &&
8853                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8854                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8855                         new_crtc_state->mode_changed = false;
8856                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8857                                          new_crtc_state->mode_changed);
8858                 }
8859         }
8860
8861         /* mode_changed flag may get updated above, need to check again */
8862         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8863                 goto skip_modeset;
8864
8865         drm_dbg_state(state->dev,
8866                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8868                 "connectors_changed:%d\n",
8869                 acrtc->crtc_id,
8870                 new_crtc_state->enable,
8871                 new_crtc_state->active,
8872                 new_crtc_state->planes_changed,
8873                 new_crtc_state->mode_changed,
8874                 new_crtc_state->active_changed,
8875                 new_crtc_state->connectors_changed);
8876
8877         /* Remove stream for any changed/disabled CRTC */
8878         if (!enable) {
8879
8880                 if (!dm_old_crtc_state->stream)
8881                         goto skip_modeset;
8882
8883                 if (dm_new_crtc_state->stream &&
8884                     is_timing_unchanged_for_freesync(new_crtc_state,
8885                                                      old_crtc_state)) {
8886                         new_crtc_state->mode_changed = false;
8887                         DRM_DEBUG_DRIVER(
8888                                 "Mode change not required for front porch change, "
8889                                 "setting mode_changed to %d",
8890                                 new_crtc_state->mode_changed);
8891
8892                         set_freesync_fixed_config(dm_new_crtc_state);
8893
8894                         goto skip_modeset;
8895                 } else if (aconnector &&
8896                            is_freesync_video_mode(&new_crtc_state->mode,
8897                                                   aconnector)) {
8898                         struct drm_display_mode *high_mode;
8899
8900                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
                        if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
                                set_freesync_fixed_config(dm_new_crtc_state);
8904                 }
8905
8906                 ret = dm_atomic_get_state(state, &dm_state);
8907                 if (ret)
8908                         goto fail;
8909
8910                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8911                                 crtc->base.id);
8912
8913                 /* i.e. reset mode */
8914                 if (dc_remove_stream_from_ctx(
8915                                 dm->dc,
8916                                 dm_state->context,
8917                                 dm_old_crtc_state->stream) != DC_OK) {
8918                         ret = -EINVAL;
8919                         goto fail;
8920                 }
8921
8922                 dc_stream_release(dm_old_crtc_state->stream);
8923                 dm_new_crtc_state->stream = NULL;
8924
8925                 reset_freesync_config_for_crtc(dm_new_crtc_state);
8926
8927                 *lock_and_validation_needed = true;
8928
8929         } else {/* Add stream for any updated/enabled CRTC */
8930                 /*
                 * Quick fix to prevent a NULL pointer dereference on new_stream
                 * when MST connectors added in chained mode are not found in
                 * the existing crtc_state.
                 * TODO: need to dig out the root cause of this.
8934                  */
8935                 if (!aconnector)
8936                         goto skip_modeset;
8937
8938                 if (modereset_required(new_crtc_state))
8939                         goto skip_modeset;
8940
8941                 if (modeset_required(new_crtc_state, new_stream,
8942                                      dm_old_crtc_state->stream)) {
8943
8944                         WARN_ON(dm_new_crtc_state->stream);
8945
8946                         ret = dm_atomic_get_state(state, &dm_state);
8947                         if (ret)
8948                                 goto fail;
8949
8950                         dm_new_crtc_state->stream = new_stream;
8951
8952                         dc_stream_retain(new_stream);
8953
8954                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
8955                                          crtc->base.id);
8956
8957                         if (dc_add_stream_to_ctx(
8958                                         dm->dc,
8959                                         dm_state->context,
8960                                         dm_new_crtc_state->stream) != DC_OK) {
8961                                 ret = -EINVAL;
8962                                 goto fail;
8963                         }
8964
8965                         *lock_and_validation_needed = true;
8966                 }
8967         }
8968
8969 skip_modeset:
8970         /* Release extra reference */
8971         if (new_stream)
                dc_stream_release(new_stream);
8973
8974         /*
8975          * We want to do dc stream updates that do not require a
8976          * full modeset below.
8977          */
8978         if (!(enable && aconnector && new_crtc_state->active))
8979                 return 0;
8980         /*
8981          * Given above conditions, the dc state cannot be NULL because:
8982          * 1. We're in the process of enabling CRTCs (just been added
8983          *    to the dc context, or already is on the context)
8984          * 2. Has a valid connector attached, and
8985          * 3. Is currently active and enabled.
8986          * => The dc stream state currently exists.
8987          */
8988         BUG_ON(dm_new_crtc_state->stream == NULL);
8989
8990         /* Scaling or underscan settings */
8991         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8992                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
8993                 update_stream_scaling_settings(
8994                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8995
8996         /* ABM settings */
8997         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8998
8999         /*
9000          * Color management settings. We also update color properties
9001          * when a modeset is needed, to ensure it gets reprogrammed.
9002          */
9003         if (dm_new_crtc_state->base.color_mgmt_changed ||
9004             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9005                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9006                 if (ret)
9007                         goto fail;
9008         }
9009
9010         /* Update Freesync settings. */
9011         get_freesync_config_for_crtc(dm_new_crtc_state,
9012                                      dm_new_conn_state);
9013
9014         return ret;
9015
9016 fail:
9017         if (new_stream)
9018                 dc_stream_release(new_stream);
9019         return ret;
9020 }
9021
9022 static bool should_reset_plane(struct drm_atomic_state *state,
9023                                struct drm_plane *plane,
9024                                struct drm_plane_state *old_plane_state,
9025                                struct drm_plane_state *new_plane_state)
9026 {
9027         struct drm_plane *other;
9028         struct drm_plane_state *old_other_state, *new_other_state;
9029         struct drm_crtc_state *new_crtc_state;
9030         int i;
9031
9032         /*
         * TODO: Remove this hack once the checks below are sufficient to
         * determine when we need to reset all the planes on the stream.
9036          */
9037         if (state->allow_modeset)
9038                 return true;
9039
9040         /* Exit early if we know that we're adding or removing the plane. */
9041         if (old_plane_state->crtc != new_plane_state->crtc)
9042                 return true;
9043
9044         /* old crtc == new_crtc == NULL, plane not in context. */
9045         if (!new_plane_state->crtc)
9046                 return false;
9047
9048         new_crtc_state =
9049                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9050
9051         if (!new_crtc_state)
9052                 return true;
9053
9054         /* CRTC Degamma changes currently require us to recreate planes. */
9055         if (new_crtc_state->color_mgmt_changed)
9056                 return true;
9057
9058         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9059                 return true;
9060
9061         /*
9062          * If there are any new primary or overlay planes being added or
9063          * removed then the z-order can potentially change. To ensure
9064          * correct z-order and pipe acquisition the current DC architecture
9065          * requires us to remove and recreate all existing planes.
9066          *
9067          * TODO: Come up with a more elegant solution for this.
9068          */
9069         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
                struct amdgpu_framebuffer *old_afb, *new_afb;

                if (other->type == DRM_PLANE_TYPE_CURSOR)
9072                         continue;
9073
9074                 if (old_other_state->crtc != new_plane_state->crtc &&
9075                     new_other_state->crtc != new_plane_state->crtc)
9076                         continue;
9077
9078                 if (old_other_state->crtc != new_other_state->crtc)
9079                         return true;
9080
9081                 /* Src/dst size and scaling updates. */
9082                 if (old_other_state->src_w != new_other_state->src_w ||
9083                     old_other_state->src_h != new_other_state->src_h ||
9084                     old_other_state->crtc_w != new_other_state->crtc_w ||
9085                     old_other_state->crtc_h != new_other_state->crtc_h)
9086                         return true;
9087
9088                 /* Rotation / mirroring updates. */
9089                 if (old_other_state->rotation != new_other_state->rotation)
9090                         return true;
9091
9092                 /* Blending updates. */
9093                 if (old_other_state->pixel_blend_mode !=
9094                     new_other_state->pixel_blend_mode)
9095                         return true;
9096
9097                 /* Alpha updates. */
9098                 if (old_other_state->alpha != new_other_state->alpha)
9099                         return true;
9100
9101                 /* Colorspace changes. */
9102                 if (old_other_state->color_range != new_other_state->color_range ||
9103                     old_other_state->color_encoding != new_other_state->color_encoding)
9104                         return true;
9105
9106                 /* Framebuffer checks fall at the end. */
9107                 if (!old_other_state->fb || !new_other_state->fb)
9108                         continue;
9109
9110                 /* Pixel format changes can require bandwidth updates. */
9111                 if (old_other_state->fb->format != new_other_state->fb->format)
9112                         return true;
9113
9114                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9115                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9116
9117                 /* Tiling and DCC changes also require bandwidth updates. */
9118                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9119                     old_afb->base.modifier != new_afb->base.modifier)
9120                         return true;
9121         }
9122
9123         return false;
9124 }
9125
9126 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9127                               struct drm_plane_state *new_plane_state,
9128                               struct drm_framebuffer *fb)
9129 {
9130         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9131         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9132         unsigned int pitch;
9133         bool linear;
9134
9135         if (fb->width > new_acrtc->max_cursor_width ||
9136             fb->height > new_acrtc->max_cursor_height) {
9137                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9138                                  new_plane_state->fb->width,
9139                                  new_plane_state->fb->height);
9140                 return -EINVAL;
9141         }
9142         if (new_plane_state->src_w != fb->width << 16 ||
9143             new_plane_state->src_h != fb->height << 16) {
9144                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9145                 return -EINVAL;
9146         }
9147
9148         /* Pitch in pixels */
9149         pitch = fb->pitches[0] / fb->format->cpp[0];
9150
9151         if (fb->width != pitch) {
9152                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9153                                  fb->width, pitch);
9154                 return -EINVAL;
9155         }
9156
9157         switch (pitch) {
9158         case 64:
9159         case 128:
9160         case 256:
9161                 /* FB pitch is supported by cursor plane */
9162                 break;
9163         default:
9164                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9165                 return -EINVAL;
9166         }
9167
9168         /* Core DRM takes care of checking FB modifiers, so we only need to
9169          * check tiling flags when the FB doesn't have a modifier. */
9170         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9171                 if (adev->family < AMDGPU_FAMILY_AI) {
9172                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9173                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9174                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9175                 } else {
9176                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9177                 }
9178                 if (!linear) {
9179                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
9180                         return -EINVAL;
9181                 }
9182         }
9183
9184         return 0;
9185 }
9186
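/*
 * Validate a single plane in the atomic state and build the matching DC plane
 * state. atomic_check calls this twice for every plane: first with
 * enable == false to remove changed planes from the DC context, then with
 * enable == true to re-add them. *lock_and_validation_needed is set whenever
 * the change requires full DC validation.
 */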
9187 static int dm_update_plane_state(struct dc *dc,
9188                                  struct drm_atomic_state *state,
9189                                  struct drm_plane *plane,
9190                                  struct drm_plane_state *old_plane_state,
9191                                  struct drm_plane_state *new_plane_state,
9192                                  bool enable,
9193                                  bool *lock_and_validation_needed)
9194 {
9196         struct dm_atomic_state *dm_state = NULL;
9197         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9198         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9199         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9200         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9201         struct amdgpu_crtc *new_acrtc;
9202         bool needs_reset;
9203         int ret = 0;
9204
9206         new_plane_crtc = new_plane_state->crtc;
9207         old_plane_crtc = old_plane_state->crtc;
9208         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9209         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9210
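        /*
         * The cursor is not a DC plane and is never added to the DC context;
         * it is handled separately at commit time, so only its cropping and
         * FB need to be validated here.
         */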
9211         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9212                 if (!enable || !new_plane_crtc ||
9213                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9214                         return 0;
9215
9216                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9217
9218                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9219                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9220                         return -EINVAL;
9221                 }
9222
9223                 if (new_plane_state->fb) {
9224                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9225                                                  new_plane_state->fb);
9226                         if (ret)
9227                                 return ret;
9228                 }
9229
9230                 return 0;
9231         }
9232
9233         needs_reset = should_reset_plane(state, plane, old_plane_state,
9234                                          new_plane_state);
9235
9236         /* Remove any changed/removed planes */
9237         if (!enable) {
9238                 if (!needs_reset)
9239                         return 0;
9240
9241                 if (!old_plane_crtc)
9242                         return 0;
9243
9244                 old_crtc_state = drm_atomic_get_old_crtc_state(
9245                                 state, old_plane_crtc);
9246                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9247
9248                 if (!dm_old_crtc_state->stream)
9249                         return 0;
9250
9251                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9252                                 plane->base.id, old_plane_crtc->base.id);
9253
9254                 ret = dm_atomic_get_state(state, &dm_state);
9255                 if (ret)
9256                         return ret;
9257
9258                 if (!dc_remove_plane_from_context(
9259                                 dc,
9260                                 dm_old_crtc_state->stream,
9261                                 dm_old_plane_state->dc_state,
9262                                 dm_state->context)) {
9263                         return -EINVAL;
9264                 }

9268                 dc_plane_state_release(dm_old_plane_state->dc_state);
9269                 dm_new_plane_state->dc_state = NULL;
9270
9271                 *lock_and_validation_needed = true;
9272
9273         } else { /* Add new planes */
9274                 struct dc_plane_state *dc_new_plane_state;
9275
9276                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9277                         return 0;
9278
9279                 if (!new_plane_crtc)
9280                         return 0;
9281
9282                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9283                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9284
9285                 if (!dm_new_crtc_state->stream)
9286                         return 0;
9287
9288                 if (!needs_reset)
9289                         return 0;
9290
9291                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9292                 if (ret)
9293                         return ret;
9294
9295                 WARN_ON(dm_new_plane_state->dc_state);
9296
9297                 dc_new_plane_state = dc_create_plane_state(dc);
9298                 if (!dc_new_plane_state)
9299                         return -ENOMEM;
9300
9301                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9302                                  plane->base.id, new_plane_crtc->base.id);
9303
9304                 ret = fill_dc_plane_attributes(
9305                         drm_to_adev(new_plane_crtc->dev),
9306                         dc_new_plane_state,
9307                         new_plane_state,
9308                         new_crtc_state);
9309                 if (ret) {
9310                         dc_plane_state_release(dc_new_plane_state);
9311                         return ret;
9312                 }
9313
9314                 ret = dm_atomic_get_state(state, &dm_state);
9315                 if (ret) {
9316                         dc_plane_state_release(dc_new_plane_state);
9317                         return ret;
9318                 }
9319
9320                 /*
9321                  * Any atomic check errors that occur after this will
9322                  * not need a release. The plane state will be attached
9323                  * to the stream, and therefore part of the atomic
9324                  * state. It'll be released when the atomic state is
9325                  * cleaned.
9326                  */
9327                 if (!dc_add_plane_to_context(
9328                                 dc,
9329                                 dm_new_crtc_state->stream,
9330                                 dc_new_plane_state,
9331                                 dm_state->context)) {
9332                         dc_plane_state_release(dc_new_plane_state);
9333                         return -EINVAL;
9334                 }
9336
9337                 dm_new_plane_state->dc_state = dc_new_plane_state;
9338
9339                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9340
9341                 /* Tell DC to do a full surface update every time there
9342                  * is a plane change. Inefficient, but works for now.
9343                  */
9344                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9345
9346                 *lock_and_validation_needed = true;
9347         }
9348
9350         return ret;
9351 }
9352
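/*
 * Return the plane's source size in whole pixels, swapping width and height
 * when a 90/270 degree rotation is applied. src_w/src_h are 16.16 fixed
 * point, hence the shift.
 */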
9353 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9354                                        int *src_w, int *src_h)
9355 {
9356         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9357         case DRM_MODE_ROTATE_90:
9358         case DRM_MODE_ROTATE_270:
9359                 *src_w = plane_state->src_h >> 16;
9360                 *src_h = plane_state->src_w >> 16;
9361                 break;
9362         case DRM_MODE_ROTATE_0:
9363         case DRM_MODE_ROTATE_180:
9364         default:
9365                 *src_w = plane_state->src_w >> 16;
9366                 *src_h = plane_state->src_h >> 16;
9367                 break;
9368         }
9369 }
9370
9371 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9372                                 struct drm_crtc *crtc,
9373                                 struct drm_crtc_state *new_crtc_state)
9374 {
9375         struct drm_plane *cursor = crtc->cursor, *underlying;
9376         struct drm_plane_state *new_cursor_state, *new_underlying_state;
9377         int i;
9378         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9379         int cursor_src_w, cursor_src_h;
9380         int underlying_src_w, underlying_src_h;
9381
9382         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9383          * cursor per pipe but it's going to inherit the scaling and
9384          * positioning from the underlying pipe. Check the cursor plane's
9385          * blending properties match the underlying planes'. */
9386
9387         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
9388         if (!new_cursor_state || !new_cursor_state->fb)
9389                 return 0;
9391
9392         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
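        /*
         * Express the scale as crtc_size * 1000 / src_size so the ratios can
         * be compared using integer math.
         */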
9393         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
9394         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
9395
9396         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9397                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
9398                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9399                         continue;
9400
9401                 /* Ignore disabled planes */
9402                 if (!new_underlying_state->fb)
9403                         continue;
9404
9405                 dm_get_oriented_plane_size(new_underlying_state,
9406                                            &underlying_src_w, &underlying_src_h);
9407                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
9408                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
9409
9410                 if (cursor_scale_w != underlying_scale_w ||
9411                     cursor_scale_h != underlying_scale_h) {
9412                         drm_dbg_atomic(crtc->dev,
9413                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9414                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9415                         return -EINVAL;
9416                 }
9417
9418                 /* If this plane covers the whole CRTC, no need to check planes underneath */
9419                 if (new_underlying_state->crtc_x <= 0 &&
9420                     new_underlying_state->crtc_y <= 0 &&
9421                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9422                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9423                         break;
9424         }
9425
9426         return 0;
9427 }
9428
9429 #if defined(CONFIG_DRM_AMD_DC_DCN)
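/*
 * Find the MST connector driving this CRTC, falling back to the old connector
 * state when the connector is being disabled, and add every CRTC sharing that
 * connector's MST topology to the atomic state, since a DSC recompute can
 * change the bandwidth of all streams on the link.
 */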
9430 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9431 {
9432         struct drm_connector *connector;
9433         struct drm_connector_state *conn_state, *old_conn_state;
9434         struct amdgpu_dm_connector *aconnector = NULL;
9435         int i;

9436         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9437                 if (!conn_state->crtc)
9438                         conn_state = old_conn_state;
9439
9440                 if (conn_state->crtc != crtc)
9441                         continue;
9442
9443                 aconnector = to_amdgpu_dm_connector(connector);
9444                 if (!aconnector->port || !aconnector->mst_port)
9445                         aconnector = NULL;
9446                 else
9447                         break;
9448         }
9449
9450         if (!aconnector)
9451                 return 0;
9452
9453         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9454 }
9455 #endif
9456
9457 /**
9458  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDGPU DM.
9459  *
9460  * @dev: The DRM device
9461  * @state: The atomic state to commit
9462  *
9463  * Validate that the given atomic state is programmable by DC into hardware.
9464  * This involves constructing a &struct dc_state reflecting the new hardware
9465  * state we wish to commit, then querying DC to see if it is programmable. It's
9466  * important not to modify the existing DC state, since otherwise atomic_check
9467  * may unexpectedly commit hardware changes.
9468  *
9469  * When validating the DC state, it's important that the right locks are
9470  * acquired. For a full update, which removes/adds/updates streams on one CRTC
9471  * while flipping on another CRTC, acquiring the global lock guarantees that
9472  * any such full-update commit will wait for completion of any outstanding
9473  * flip using DRM's synchronization events.
9474  *
9475  * Note that DM adds the affected connectors for all CRTCs in state, when that
9476  * might not seem necessary. This is because DC stream creation requires the
9477  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9478  * be possible but non-trivial - a possible TODO item.
9479  *
9480  * Return: 0 on success, or a negative error code if validation failed.
9481  */
9482 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9483                                   struct drm_atomic_state *state)
9484 {
9485         struct amdgpu_device *adev = drm_to_adev(dev);
9486         struct dm_atomic_state *dm_state = NULL;
9487         struct dc *dc = adev->dm.dc;
9488         struct drm_connector *connector;
9489         struct drm_connector_state *old_con_state, *new_con_state;
9490         struct drm_crtc *crtc;
9491         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9492         struct drm_plane *plane;
9493         struct drm_plane_state *old_plane_state, *new_plane_state;
9494         enum dc_status status;
9495         int ret, i;
9496         bool lock_and_validation_needed = false;
9497         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9498 #if defined(CONFIG_DRM_AMD_DC_DCN)
9499         struct dsc_mst_fairness_vars vars[MAX_PIPES];
9500 #endif
9501
9502         trace_amdgpu_dm_atomic_check_begin(state);
9503
9504         ret = drm_atomic_helper_check_modeset(dev, state);
9505         if (ret) {
9506                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
9507                 goto fail;
9508         }
9509
9510         /* Check connector changes */
9511         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9512                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9513                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9514
9515                 /* Skip connectors that are disabled or already part of a modeset. */
9516                 if (!new_con_state->crtc)
9517                         continue;
9518
9519                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9520                 if (IS_ERR(new_crtc_state)) {
9521                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
9522                         ret = PTR_ERR(new_crtc_state);
9523                         goto fail;
9524                 }
9525
9526                 if (dm_old_con_state->abm_level !=
9527                     dm_new_con_state->abm_level)
9528                         new_crtc_state->connectors_changed = true;
9529         }
9530
9531 #if defined(CONFIG_DRM_AMD_DC_DCN)
9532         if (dc_resource_is_dsc_encoding_supported(dc)) {
9533                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9534                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9535                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9536                                 if (ret) {
9537                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
9538                                         goto fail;
9539                                 }
9540                         }
9541                 }
9542         }
9543 #endif
9544         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9545                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9546
9547                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9548                     !new_crtc_state->color_mgmt_changed &&
9549                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9550                     !dm_old_crtc_state->dsc_force_changed)
9551                         continue;
9552
9553                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
9554                 if (ret) {
9555                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
9556                         goto fail;
9557                 }
9558
9559                 if (!new_crtc_state->enable)
9560                         continue;
9561
9562                 ret = drm_atomic_add_affected_connectors(state, crtc);
9563                 if (ret) {
9564                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
9565                         goto fail;
9566                 }
9567
9568                 ret = drm_atomic_add_affected_planes(state, crtc);
9569                 if (ret) {
9570                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
9571                         goto fail;
9572                 }
9573
9574                 if (dm_old_crtc_state->dsc_force_changed)
9575                         new_crtc_state->mode_changed = true;
9576         }
9577
9578         /*
9579          * Add all primary and overlay planes on the CRTC to the state
9580          * whenever a plane is enabled to maintain correct z-ordering
9581          * and to enable fast surface updates.
9582          */
9583         drm_for_each_crtc(crtc, dev) {
9584                 bool modified = false;
9585
9586                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9587                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9588                                 continue;
9589
9590                         if (new_plane_state->crtc == crtc ||
9591                             old_plane_state->crtc == crtc) {
9592                                 modified = true;
9593                                 break;
9594                         }
9595                 }
9596
9597                 if (!modified)
9598                         continue;
9599
9600                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9601                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9602                                 continue;
9603
9604                         new_plane_state =
9605                                 drm_atomic_get_plane_state(state, plane);
9606
9607                         if (IS_ERR(new_plane_state)) {
9608                                 ret = PTR_ERR(new_plane_state);
9609                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
9610                                 goto fail;
9611                         }
9612                 }
9613         }
9614
9615         /*
9616          * DC consults the zpos (layer_index in DC terminology) to determine the
9617          * hw plane on which to enable the hw cursor (see
9618          * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
9619          * atomic state, so call drm helper to normalize zpos.
9620          */
9621         drm_atomic_normalize_zpos(dev, state);
9622
9623         /* Remove existing planes if they are modified */
9624         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9625                 ret = dm_update_plane_state(dc, state, plane,
9626                                             old_plane_state,
9627                                             new_plane_state,
9628                                             false,
9629                                             &lock_and_validation_needed);
9630                 if (ret) {
9631                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9632                         goto fail;
9633                 }
9634         }
9635
9636         /* Disable all crtcs which require disable */
9637         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9638                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9639                                            old_crtc_state,
9640                                            new_crtc_state,
9641                                            false,
9642                                            &lock_and_validation_needed);
9643                 if (ret) {
9644                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
9645                         goto fail;
9646                 }
9647         }
9648
9649         /* Enable all crtcs which require enable */
9650         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9651                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9652                                            old_crtc_state,
9653                                            new_crtc_state,
9654                                            true,
9655                                            &lock_and_validation_needed);
9656                 if (ret) {
9657                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
9658                         goto fail;
9659                 }
9660         }
9661
9662         /* Add new/modified planes */
9663         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9664                 ret = dm_update_plane_state(dc, state, plane,
9665                                             old_plane_state,
9666                                             new_plane_state,
9667                                             true,
9668                                             &lock_and_validation_needed);
9669                 if (ret) {
9670                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9671                         goto fail;
9672                 }
9673         }
9674
9675 #if defined(CONFIG_DRM_AMD_DC_DCN)
9676         if (dc_resource_is_dsc_encoding_supported(dc)) {
9677                 ret = pre_validate_dsc(state, &dm_state, vars);
9678                 if (ret != 0)
9679                         goto fail;
9680         }
9681 #endif
9682
9683         /* Run this here since we want to validate the streams we created */
9684         ret = drm_atomic_helper_check_planes(dev, state);
9685         if (ret) {
9686                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
9687                 goto fail;
9688         }
9689
9690         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9691                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9692                 if (dm_new_crtc_state->mpo_requested)
9693                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
9694         }
9695
9696         /* Check cursor plane scaling */
9697         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9698                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9699                 if (ret) {
9700                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
9701                         goto fail;
9702                 }
9703         }
9704
9705         if (state->legacy_cursor_update) {
9706                 /*
9707                  * This is a fast cursor update coming from the plane update
9708                  * helper, check if it can be done asynchronously for better
9709                  * performance.
9710                  */
9711                 state->async_update =
9712                         !drm_atomic_helper_async_check(dev, state);
9713
9714                 /*
9715                  * Skip the remaining global validation if this is an async
9716                  * update. Cursor updates can be done without affecting
9717                  * state or bandwidth calcs and this avoids the performance
9718                  * penalty of locking the private state object and
9719                  * allocating a new dc_state.
9720                  */
9721                 if (state->async_update)
9722                         return 0;
9723         }
9724
9725         /* Check scaling and underscan changes */
9726         /* TODO Removed scaling changes validation due to inability to commit
9727          * new stream into context w/o causing full reset. Need to
9728          * decide how to handle.
9729          */
9730         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9731                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9732                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9733                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9734
9735                 /* Skip any modesets/resets */
9736                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9737                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9738                         continue;
9739
9740                 /* Skip anything that is not a scaling or underscan change */
9741                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9742                         continue;
9743
9744                 lock_and_validation_needed = true;
9745         }
9746
9747         /*
9748          * Streams and planes are reset when there are changes that affect
9749          * bandwidth. Anything that affects bandwidth needs to go through
9750          * DC global validation to ensure that the configuration can be applied
9751          * to hardware.
9752          *
9753          * We have to currently stall out here in atomic_check for outstanding
9754          * commits to finish in this case because our IRQ handlers reference
9755          * DRM state directly - we can end up disabling interrupts too early
9756          * if we don't.
9757          *
9758          * TODO: Remove this stall and drop DM state private objects.
9759          */
9760         if (lock_and_validation_needed) {
9761                 ret = dm_atomic_get_state(state, &dm_state);
9762                 if (ret) {
9763                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
9764                         goto fail;
9765                 }
9766
9767                 ret = do_aquire_global_lock(dev, state);
9768                 if (ret) {
9769                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
9770                         goto fail;
9771                 }
9772
9773 #if defined(CONFIG_DRM_AMD_DC_DCN)
9774                 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
9775                 if (ret) {
9776                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
9777                         goto fail;
9778                 }
9779
9780                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
9781                 if (ret) {
9782                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
9783                         goto fail;
9784                 }
9785 #endif
9786
9787                 /*
9788                  * Perform validation of MST topology in the state:
9789                  * We need to perform MST atomic check before calling
9790                  * dc_validate_global_state(), or there is a chance
9791                  * to get stuck in an infinite loop and hang eventually.
9792                  */
9793                 ret = drm_dp_mst_atomic_check(state);
9794                 if (ret) {
9795                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
9796                         goto fail;
9797                 }
9798                 status = dc_validate_global_state(dc, dm_state->context, true);
9799                 if (status != DC_OK) {
9800                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
9801                                          dc_status_to_str(status), status);
9802                         ret = -EINVAL;
9803                         goto fail;
9804                 }
9805         } else {
9806                 /*
9807                  * The commit is a fast update. Fast updates shouldn't change
9808                  * the DC context, affect global validation, and can have their
9809                  * commit work done in parallel with other commits not touching
9810                  * the same resource. If we have a new DC context as part of
9811                  * the DM atomic state from validation we need to free it and
9812                  * retain the existing one instead.
9813                  *
9814                  * Furthermore, since the DM atomic state only contains the DC
9815                  * context and can safely be annulled, we can free the state
9816                  * and clear the associated private object now to free
9817                  * some memory and avoid a possible use-after-free later.
9818                  */
9819
9820                 for (i = 0; i < state->num_private_objs; i++) {
9821                         struct drm_private_obj *obj = state->private_objs[i].ptr;
9822
9823                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
9824                                 int j = state->num_private_objs - 1;
9825
9826                                 dm_atomic_destroy_state(obj,
9827                                                 state->private_objs[i].state);
9828
9829                                 /* If i is not at the end of the array then the
9830                                  * last element needs to be moved to where i was
9831                                  * before the array can safely be truncated.
9832                                  */
9833                                 if (i != j)
9834                                         state->private_objs[i] =
9835                                                 state->private_objs[j];
9836
9837                                 state->private_objs[j].ptr = NULL;
9838                                 state->private_objs[j].state = NULL;
9839                                 state->private_objs[j].old_state = NULL;
9840                                 state->private_objs[j].new_state = NULL;
9841
9842                                 state->num_private_objs = j;
9843                                 break;
9844                         }
9845                 }
9846         }
9847
9848         /* Store the overall update type for use later in atomic check. */
9849         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9850                 struct dm_crtc_state *dm_new_crtc_state =
9851                         to_dm_crtc_state(new_crtc_state);
9852
9853                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9854                                                          UPDATE_TYPE_FULL :
9855                                                          UPDATE_TYPE_FAST;
9856         }
9857
9858         /* Must be success */
9859         WARN_ON(ret);
9860
9861         trace_amdgpu_dm_atomic_check_finish(state, ret);
9862
9863         return ret;
9864
9865 fail:
9866         if (ret == -EDEADLK)
9867                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9868         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9869                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9870         else
9871                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9872
9873         trace_amdgpu_dm_atomic_check_finish(state, ret);
9874
9875         return ret;
9876 }
9877
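/*
 * Check whether the DP sink can ignore the MSA timing parameters
 * (DP_MSA_TIMING_PAR_IGNORED set in the DPCD). Sinks that can are able to
 * accept the adjusted timings needed for variable refresh, so this gates the
 * FreeSync range scan of the EDID below.
 */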
9878 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9879                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
9880 {
9881         uint8_t dpcd_data;
9882         bool capable = false;
9883
9884         if (amdgpu_dm_connector->dc_link &&
9885                 dm_helpers_dp_read_dpcd(
9886                                 NULL,
9887                                 amdgpu_dm_connector->dc_link,
9888                                 DP_DOWN_STREAM_PORT_COUNT,
9889                                 &dpcd_data,
9890                                 sizeof(dpcd_data))) {
9891                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9892         }
9893
9894         return capable;
9895 }
9896
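/*
 * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA extension
 * block to DMUB for parsing. DMUB replies either with an ACK for the chunk
 * or, once the last chunk has been received, with the contents of any AMD
 * VSDB it found.
 */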
9897 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
9898                 unsigned int offset,
9899                 unsigned int total_length,
9900                 uint8_t *data,
9901                 unsigned int length,
9902                 struct amdgpu_hdmi_vsdb_info *vsdb)
9903 {
9904         bool res;
9905         union dmub_rb_cmd cmd;
9906         struct dmub_cmd_send_edid_cea *input;
9907         struct dmub_cmd_edid_cea_output *output;
9908
9909         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
9910                 return false;
9911
9912         memset(&cmd, 0, sizeof(cmd));
9913
9914         input = &cmd.edid_cea.data.input;
9915
9916         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
9917         cmd.edid_cea.header.sub_type = 0;
9918         cmd.edid_cea.header.payload_bytes =
9919                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
9920         input->offset = offset;
9921         input->length = length;
9922         input->cea_total_length = total_length;
9923         memcpy(input->payload, data, length);
9924
9925         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
9926         if (!res) {
9927                 DRM_ERROR("EDID CEA parser failed\n");
9928                 return false;
9929         }
9930
9931         output = &cmd.edid_cea.data.output;
9932
9933         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
9934                 if (!output->ack.success) {
9935                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
9936                                         output->ack.offset);
9937                 }
9938         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
9939                 if (!output->amd_vsdb.vsdb_found)
9940                         return false;
9941
9942                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
9943                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
9944                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
9945                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
9946         } else {
9947                 DRM_WARN("Unknown EDID CEA parser results\n");
9948                 return false;
9949         }
9950
9951         return true;
9952 }
9953
9954 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
9955                 uint8_t *edid_ext, int len,
9956                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
9957 {
9958         int i;
9959
9960         /* send extension block to DMCU for parsing */
9961         for (i = 0; i < len; i += 8) {
9962                 bool res;
9963                 int offset;
9964
9965                 /* send 8 bytes at a time */
9966                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
9967                         return false;
9968
9969                 if (i + 8 == len) {
9970                         /* entire EDID block sent, expect the parsed result */
9971                         int version, min_rate, max_rate;
9972
9973                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
9974                         if (res) {
9975                                 /* amd vsdb found */
9976                                 vsdb_info->freesync_supported = 1;
9977                                 vsdb_info->amd_vsdb_version = version;
9978                                 vsdb_info->min_refresh_rate_hz = min_rate;
9979                                 vsdb_info->max_refresh_rate_hz = max_rate;
9980                                 return true;
9981                         }
9982                         /* not amd vsdb */
9983                         return false;
9984                 }
9985
9986                 /* check for ack */
9987                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
9988                 if (!res)
9989                         return false;
9990         }
9991
9992         return false;
9993 }
9994
9995 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
9996                 uint8_t *edid_ext, int len,
9997                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
9998 {
9999         int i;
10000
10001         /* send extension block to DMUB for parsing */
10002         for (i = 0; i < len; i += 8) {
10003                 /* send 8 bytes at a time */
10004                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10005                         return false;
10006         }
10007
10008         return vsdb_info->freesync_supported;
10009 }
10010
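/*
 * Parse the CEA extension block with whichever firmware is present: DMUB when
 * available, otherwise the legacy DMCU path.
 */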
10011 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10012                 uint8_t *edid_ext, int len,
10013                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10014 {
10015         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10016
10017         if (adev->dm.dmub_srv)
10018                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10019         else
10020                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10021 }
10022
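/*
 * Scan the EDID for a CEA extension block and hand it to the firmware parser.
 * Returns the index of the CEA extension block when an AMD VSDB with FreeSync
 * information was found, or -ENODEV otherwise.
 */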
10023 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10024                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10025 {
10026         uint8_t *edid_ext = NULL;
10027         int i;
10028         bool valid_vsdb_found = false;
10029
10030         /*----- drm_find_cea_extension() -----*/
10031         /* No EDID or EDID extensions */
10032         if (edid == NULL || edid->extensions == 0)
10033                 return -ENODEV;
10034
10035         /* Find CEA extension */
10036         for (i = 0; i < edid->extensions; i++) {
10037                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10038                 if (edid_ext[0] == CEA_EXT)
10039                         break;
10040         }
10041
10042         if (i == edid->extensions)
10043                 return -ENODEV;
10044
10045         /*----- cea_db_offsets() -----*/
10046         if (edid_ext[0] != CEA_EXT)
10047                 return -ENODEV;
10048
10049         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10050
10051         return valid_vsdb_found ? i : -ENODEV;
10052 }
10053
10054 /**
10055  * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
10056  *
10057  * @connector: Connector to query.
10058  * @edid: EDID from monitor
10059  *
10060  * AMDGPU supports FreeSync on both DP and HDMI displays, and needs to keep
10061  * track of some of the display information in the internal data struct used
10062  * by amdgpu_dm. This function checks the connector type and sets the
10063  * FreeSync parameters accordingly.
10064  */
10065 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10066                                     struct edid *edid)
10067 {
10068         int i = 0;
10069         struct detailed_timing *timing;
10070         struct detailed_non_pixel *data;
10071         struct detailed_data_monitor_range *range;
10072         struct amdgpu_dm_connector *amdgpu_dm_connector =
10073                         to_amdgpu_dm_connector(connector);
10074         struct dm_connector_state *dm_con_state = NULL;
10075         struct dc_sink *sink;
10076
10077         struct drm_device *dev = connector->dev;
10078         struct amdgpu_device *adev = drm_to_adev(dev);
10079         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10080         bool freesync_capable = false;
10081
10082         if (!connector->state) {
10083                 DRM_ERROR("%s - Connector has no state", __func__);
10084                 goto update;
10085         }
10086
10087         sink = amdgpu_dm_connector->dc_sink ?
10088                 amdgpu_dm_connector->dc_sink :
10089                 amdgpu_dm_connector->dc_em_sink;
10090
10091         if (!edid || !sink) {
10092                 dm_con_state = to_dm_connector_state(connector->state);
10093
10094                 amdgpu_dm_connector->min_vfreq = 0;
10095                 amdgpu_dm_connector->max_vfreq = 0;
10096                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10097                 connector->display_info.monitor_range.min_vfreq = 0;
10098                 connector->display_info.monitor_range.max_vfreq = 0;
10099                 freesync_capable = false;
10100
10101                 goto update;
10102         }
10103
10104         dm_con_state = to_dm_connector_state(connector->state);
10105
10106         if (!adev->dm.freesync_module)
10107                 goto update;
10108
10109         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
10110             sink->sink_signal == SIGNAL_TYPE_EDP) {
10111                 bool edid_check_required = false;
10112
10113                 if (edid) {
10114                         edid_check_required = is_dp_capable_without_timing_msa(
10115                                                 adev->dm.dc,
10116                                                 amdgpu_dm_connector);
10117                 }
10118
10119                 if (edid_check_required && (edid->version > 1 ||
10120                     (edid->version == 1 && edid->revision > 1))) {
10121                         for (i = 0; i < 4; i++) {
10122
10123                                 timing  = &edid->detailed_timings[i];
10124                                 data    = &timing->data.other_data;
10125                                 range   = &data->data.range;
10126                                 /*
10127                                  * Check if monitor has continuous frequency mode
10128                                  */
10129                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10130                                         continue;
10131                                 /*
10132                                  * Check for flag range limits only. If flag == 1 then
10133                                  * no additional timing information provided.
10134                                  * Default GTF, GTF Secondary curve and CVT are not
10135                                  * supported
10136                                  */
10137                                 if (range->flags != 1)
10138                                         continue;
10139
10140                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10141                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10142                                 amdgpu_dm_connector->pixel_clock_mhz =
10143                                         range->pixel_clock_mhz * 10;
10144
10145                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10146                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10147
10148                                 break;
10149                         }
10150
10151                         if (amdgpu_dm_connector->max_vfreq -
10152                             amdgpu_dm_connector->min_vfreq > 10)
10153                                 freesync_capable = true;
10156                 }
10157         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10158                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10159                 if (i >= 0 && vsdb_info.freesync_supported) {
10160                         timing  = &edid->detailed_timings[i];
10161                         data    = &timing->data.other_data;
10162
10163                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10164                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10165                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10166                                 freesync_capable = true;
10167
10168                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10169                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10170                 }
10171         }
10172
10173 update:
10174         if (dm_con_state)
10175                 dm_con_state->freesync_capable = freesync_capable;
10176
10177         if (connector->vrr_capable_property)
10178                 drm_connector_set_vrr_capable_property(connector,
10179                                                        freesync_capable);
10180 }
10181
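/*
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and retrigger CRTC timing synchronization.
 */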
10182 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10183 {
10184         struct amdgpu_device *adev = drm_to_adev(dev);
10185         struct dc *dc = adev->dm.dc;
10186         int i;
10187
10188         mutex_lock(&adev->dm.dc_lock);
10189         if (dc->current_state) {
10190                 for (i = 0; i < dc->current_state->stream_count; ++i)
10191                         dc->current_state->streams[i]
10192                                 ->triggered_crtc_reset.enabled =
10193                                 adev->dm.force_timing_sync;
10194
10195                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10196                 dc_trigger_sync(dc, dc->current_state);
10197         }
10198         mutex_unlock(&adev->dm.dc_lock);
10199 }
10200
10201 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10202                        uint32_t value, const char *func_name)
10203 {
10204 #ifdef DM_CHECK_ADDR_0
10205         if (address == 0) {
10206                 DC_ERR("invalid register write. address = 0");
10207                 return;
10208         }
10209 #endif
10210         cgs_write_register(ctx->cgs_device, address, value);
10211         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10212 }
10213
10214 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10215                           const char *func_name)
10216 {
10217         uint32_t value;
10218 #ifdef DM_CHECK_ADDR_0
10219         if (address == 0) {
10220                 DC_ERR("invalid register read; address = 0\n");
10221                 return 0;
10222         }
10223 #endif
10224
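        /*
         * Register reads are not supported while DMUB is gathering register
         * writes for offload unless burst writes are in flight; treat such a
         * read as a programming error and return a dummy value.
         */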
10225         if (ctx->dmub_srv &&
10226             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10227             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10228                 ASSERT(false);
10229                 return 0;
10230         }
10231
10232         value = cgs_read_register(ctx->cgs_device, address);
10233
10234         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10235
10236         return value;
10237 }
10238
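/*
 * Submit an AUX transaction on a DPIA link through DMUB and wait up to 10
 * seconds for the reply. Returns the reply length on success or -1 on
 * failure, with a more detailed code in *operation_result.
 */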
10239 int amdgpu_dm_process_dmub_aux_transfer_sync(
10240                 struct dc_context *ctx,
10241                 unsigned int link_index,
10242                 struct aux_payload *payload,
10243                 enum aux_return_code_type *operation_result)
10244 {
10245         struct amdgpu_device *adev = ctx->driver_context;
10246         struct dmub_notification *p_notify = adev->dm.dmub_notify;
10247         int ret = -1;
10248
10249         mutex_lock(&adev->dm.dpia_aux_lock);
10250         if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
10251                 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
10252                 goto out;
10253         }
10254
10255         if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
10256                 DRM_ERROR("wait_for_completion_timeout timed out!");
10257                 *operation_result = AUX_RET_ERROR_TIMEOUT;
10258                 goto out;
10259         }
10260
10261         if (p_notify->result != AUX_RET_SUCCESS) {
10262                 /*
10263                  * Transient states before tunneling is enabled could
10264                  * lead to this error. We can ignore this for now.
10265                  */
10266                 if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
10267                         DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
10268                                         payload->address, payload->length,
10269                                         p_notify->result);
10270                 }
10271                 *operation_result = AUX_RET_ERROR_INVALID_REPLY;
10272                 goto out;
10273         }
10274
10275
10276         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
10277         if (!payload->write && p_notify->aux_reply.length &&
10278                         (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
10279
10280                 if (payload->length != p_notify->aux_reply.length) {
10281                         DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
10282                                  p_notify->aux_reply.length,
10283                                  payload->address, payload->length);
10284                         *operation_result = AUX_RET_ERROR_INVALID_REPLY;
10285                         goto out;
10286                 }
10287
10288                 memcpy(payload->data, p_notify->aux_reply.data,
10289                                 p_notify->aux_reply.length);
10290         }
10291
10292         /* success */
10293         ret = p_notify->aux_reply.length;
10294         *operation_result = p_notify->result;
10295 out:
10296         mutex_unlock(&adev->dm.dpia_aux_lock);
10297         return ret;
10298 }
10299
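/*
 * Send a DPIA SET_CONFIG request through DMUB and wait for it to complete.
 * Returns 0 on success, with the firmware status in *operation_result, or -1
 * on timeout.
 */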
10300 int amdgpu_dm_process_dmub_set_config_sync(
10301                 struct dc_context *ctx,
10302                 unsigned int link_index,
10303                 struct set_config_cmd_payload *payload,
10304                 enum set_config_status *operation_result)
10305 {
10306         struct amdgpu_device *adev = ctx->driver_context;
10307         bool is_cmd_complete;
10308         int ret;
10309
10310         mutex_lock(&adev->dm.dpia_aux_lock);
10311         is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
10312                         link_index, payload, adev->dm.dmub_notify);
10313
10314         if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
10315                 ret = 0;
10316                 *operation_result = adev->dm.dmub_notify->sc_status;
10317         } else {
10318                 DRM_ERROR("wait_for_completion_timeout timed out!");
10319                 ret = -1;
10320                 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
10321         }
10322
10323         mutex_unlock(&adev->dm.dpia_aux_lock);
10324         return ret;
10325 }
10326
10327 /*
10328  * Check whether seamless boot is supported.
10329  *
10330  * So far we only support seamless boot on CHIP_VANGOGH.
10331  * If everything goes well, we may consider expanding
10332  * seamless boot to other ASICs.
10333  */
10334 bool check_seamless_boot_capability(struct amdgpu_device *adev)
10335 {
10336         switch (adev->ip_versions[DCE_HWIP][0]) {
10337         case IP_VERSION(3, 0, 1):
10338                 if (!adev->mman.keep_stolen_vga_memory)
10339                         return true;
10340                 break;
10341         default:
10342                 break;
10343         }
10344
10345         return false;
10346 }