drm/amdgpu/display: give aux i2c buses more meaningful names
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57
58 #include "ivsrcid/ivsrcid_vislands30.h"
59
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97
98 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
109
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
119
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123
124 /*
125  * initializes drm_device display related structures, based on the information
126  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
127  * drm_encoder, drm_mode_config
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136                                 struct drm_plane *plane,
137                                 unsigned long possible_crtcs,
138                                 const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140                                struct drm_plane *plane,
141                                uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
144                                     uint32_t link_index,
145                                     struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147                                   struct amdgpu_encoder *aencoder,
148                                   uint32_t link_index);
149
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153                                    struct drm_atomic_state *state,
154                                    bool nonblock);
155
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159                                   struct drm_atomic_state *state);
160
161 static void handle_cursor_update(struct drm_plane *plane,
162                                  struct drm_plane_state *old_plane_state);
163
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168
169
170 /*
171  * dm_vblank_get_counter
172  *
173  * @brief
174  * Get counter for number of vertical blanks
175  *
176  * @param
177  * struct amdgpu_device *adev - [in] desired amdgpu device
178  * int disp_idx - [in] which CRTC to get the counter from
179  *
180  * @return
181  * Counter for vertical blanks
182  */
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
184 {
185         if (crtc >= adev->mode_info.num_crtc)
186                 return 0;
187         else {
188                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
190                                 acrtc->base.state);
191
192
193                 if (acrtc_state->stream == NULL) {
194                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
195                                   crtc);
196                         return 0;
197                 }
198
199                 return dc_stream_get_vblank_counter(acrtc_state->stream);
200         }
201 }
202
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204                                   u32 *vbl, u32 *position)
205 {
206         uint32_t v_blank_start, v_blank_end, h_position, v_position;
207
208         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209                 return -EINVAL;
210         else {
211                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213                                                 acrtc->base.state);
214
215                 if (acrtc_state->stream ==  NULL) {
216                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217                                   crtc);
218                         return 0;
219                 }
220
221                 /*
222                  * TODO rework base driver to use values directly.
223                  * for now parse it back into reg-format
224                  */
225                 dc_stream_get_scanoutpos(acrtc_state->stream,
226                                          &v_blank_start,
227                                          &v_blank_end,
228                                          &h_position,
229                                          &v_position);
230
231                 *position = v_position | (h_position << 16);
232                 *vbl = v_blank_start | (v_blank_end << 16);
233         }
234
235         return 0;
236 }
237
/* amd_ip_funcs .is_idle hook; DM has no idle state to poll yet. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
243
/* amd_ip_funcs .wait_for_idle hook; nothing to wait for yet. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
249
/* amd_ip_funcs .check_soft_reset hook; DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
254
/* amd_ip_funcs .soft_reset hook; no soft-reset handling implemented yet. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
260
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263                      int otg_inst)
264 {
265         struct drm_device *dev = adev->ddev;
266         struct drm_crtc *crtc;
267         struct amdgpu_crtc *amdgpu_crtc;
268
269         if (otg_inst == -1) {
270                 WARN_ON(1);
271                 return adev->mode_info.crtcs[0];
272         }
273
274         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275                 amdgpu_crtc = to_amdgpu_crtc(crtc);
276
277                 if (amdgpu_crtc->otg_inst == otg_inst)
278                         return amdgpu_crtc;
279         }
280
281         return NULL;
282 }
283
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289
290 /**
291  * dm_pflip_high_irq() - Handle pageflip interrupt
292  * @interrupt_params: pointer to the &struct common_irq_params for this source
293  *
294  * Handles the pageflip interrupt by notifying all interested parties
295  * that the pageflip has been completed.
296  */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	/* Map the pageflip IRQ source back to the CRTC that completed. */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	/* event_lock guards pflip_status/event against the flip submit path. */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	/* Spurious interrupt: no flip was submitted on this CRTC. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	/* A submitted flip is expected to carry a completion event. */
	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* !e means the event was either sent or queued above. */
	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
396
/*
 * dm_vupdate_high_irq - VUPDATE interrupt handler
 * @interrupt_params: pointer to the &struct common_irq_params for this source
 *
 * Runs after end of front-porch. In VRR mode this is where core vblank
 * handling (and BTR processing on pre-DCE12 ASICs) is performed, since
 * vblank timestamping is only valid there once the front-porch has ended.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				/* event_lock protects the shared vrr_params. */
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
441
442 /**
443  * dm_crtc_high_irq() - Handles CRTC interrupt
444  * @interrupt_params: pointer to the &struct common_irq_params for this source
445  *
446  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
447  * event handler.
448  */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	/* Map the vblank IRQ source back to its CRTC. */
	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		/* VRR/BTR handling for DCE12+ (AI family and later) ASICs. */
		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			/* event_lock protects the shared vrr_params. */
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}
496
497 #if defined(CONFIG_DRM_AMD_DC_DCN)
498 /**
499  * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
500  * @interrupt_params: interrupt parameters
501  *
502  * Notify DRM's vblank event handler at VSTARTUP
503  *
504  * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
505  * * We are close enough to VUPDATE - the point of no return for hw
506  * * We are in the fixed portion of variable front porch when vrr is enabled
507  * * We are before VUPDATE, where double-buffered vrr registers are swapped
508  *
509  * It is therefore the correct place to signal vblank, send user flip events,
510  * and update VRR.
511  */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
			 amdgpu_dm_vrr_active(acrtc_state),
			 acrtc_state->active_planes);

	/* CRC capture and vblank delivery both happen at VSTARTUP here. */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	/* event_lock protects vrr_params and the pending pageflip event. */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
		adev->dm.freesync_module,
		acrtc_state->stream,
		&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
			adev->dm.dc,
			acrtc_state->stream,
			&acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
571 #endif
572
/* amd_ip_funcs .set_clockgating_state hook; clock gating is managed by DC. */
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}
578
/* amd_ip_funcs .set_powergating_state hook; power gating is managed by DC. */
static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}
584
585 /* Prototypes of private functions */
586 static int dm_early_init(void* handle);
587
588 /* Allocate memory for FBC compressed data  */
589 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
590 {
591         struct drm_device *dev = connector->dev;
592         struct amdgpu_device *adev = dev->dev_private;
593         struct dm_comressor_info *compressor = &adev->dm.compressor;
594         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
595         struct drm_display_mode *mode;
596         unsigned long max_size = 0;
597
598         if (adev->dm.dc->fbc_compressor == NULL)
599                 return;
600
601         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
602                 return;
603
604         if (compressor->bo_ptr)
605                 return;
606
607
608         list_for_each_entry(mode, &connector->modes, head) {
609                 if (max_size < mode->htotal * mode->vtotal)
610                         max_size = mode->htotal * mode->vtotal;
611         }
612
613         if (max_size) {
614                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
615                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
616                             &compressor->gpu_addr, &compressor->cpu_addr);
617
618                 if (r)
619                         DRM_ERROR("DM: Failed to initialize FBC\n");
620                 else {
621                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
622                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
623                 }
624
625         }
626
627 }
628
/*
 * amdgpu_dm_audio_component_get_eld - drm_audio_component .get_eld hook
 * @kdev: DRM device's struct device
 * @port: audio pin instance to look up
 * @pipe: unused
 * @enabled: out; true when a connector with that audio instance was found
 * @buf: out; receives up to @max_bytes of the connector's ELD
 * @max_bytes: capacity of @buf
 *
 * Return: the full ELD size of the matching connector (which may exceed the
 * number of bytes copied), or 0 when no connector matches @port.
 */
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	/* audio_lock serializes ELD access against hotplug updates. */
	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		/* Copy only as much as the caller's buffer can hold. */
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
664
/* Ops exposed to the HDA audio component: ELD retrieval only. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
668
/*
 * amdgpu_dm_audio_component_bind - component framework bind callback
 * @kdev: DRM device's struct device
 * @hda_kdev: audio device (unused here)
 * @data: the shared &struct drm_audio_component
 *
 * Wires our ops into the audio component and remembers it for ELD notifies.
 * Return: always 0.
 */
static int amdgpu_dm_audio_component_bind(struct device *kdev,
				       struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}
682
/*
 * amdgpu_dm_audio_component_unbind - component framework unbind callback
 * @kdev: DRM device's struct device
 * @hda_kdev: audio device (unused here)
 * @data: the shared &struct drm_audio_component
 *
 * Undoes amdgpu_dm_audio_component_bind(): clears the ops and our cached
 * component pointer.
 */
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}
694
/* Component-framework callbacks registered via component_add(). */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind   = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
699
700 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
701 {
702         int i, ret;
703
704         if (!amdgpu_audio)
705                 return 0;
706
707         adev->mode_info.audio.enabled = true;
708
709         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
710
711         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
712                 adev->mode_info.audio.pin[i].channels = -1;
713                 adev->mode_info.audio.pin[i].rate = -1;
714                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
715                 adev->mode_info.audio.pin[i].status_bits = 0;
716                 adev->mode_info.audio.pin[i].category_code = 0;
717                 adev->mode_info.audio.pin[i].connected = false;
718                 adev->mode_info.audio.pin[i].id =
719                         adev->dm.dc->res_pool->audios[i]->inst;
720                 adev->mode_info.audio.pin[i].offset = 0;
721         }
722
723         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
724         if (ret < 0)
725                 return ret;
726
727         adev->dm.audio_registered = true;
728
729         return 0;
730 }
731
732 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 {
734         if (!amdgpu_audio)
735                 return;
736
737         if (!adev->mode_info.audio.enabled)
738                 return;
739
740         if (adev->dm.audio_registered) {
741                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
742                 adev->dm.audio_registered = false;
743         }
744
745         /* TODO: Disable audio? */
746
747         adev->mode_info.audio.enabled = false;
748 }
749
750 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
751 {
752         struct drm_audio_component *acomp = adev->dm.audio_component;
753
754         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
755                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
756
757                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
758                                                  pin, -1);
759         }
760 }
761
/*
 * dm_dmub_hw_init() - Copy the DMUB firmware into its framebuffer windows
 * and bring up the DMUB hardware service.
 *
 * Returns 0 on success (including when the ASIC simply has no DMUB
 * support), -EINVAL on validation/initialization failure and -ENOMEM if
 * the DC-side DMUB server cannot be allocated.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/* Instruction/constant data follows the PSP header in the image. */
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	/* BSS/data follows the inst/const region. */
	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	/* Hand every framebuffer window to the DMUB service. */
	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish; auto-load failure is non-fatal. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
888
/*
 * amdgpu_dm_init() - Create and initialize the display manager.
 *
 * Builds the dc_init_data from the adev, creates the CGS device and the
 * Display Core instance, brings up DMUB, freesync, color management,
 * (optionally) HDCP, the DRM device state and vblank support.
 *
 * On any failure it jumps to the common error label, which tears down
 * whatever was already created via amdgpu_dm_fini(), and returns -EINVAL.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to Display Core. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* APUs (and Renoir) get GPU VM support for display buffers. */
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	/* Translate the amdgpu_dc_feature_mask module parameter into flags. */
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	/* DMUB must be running before dc_hardware_init() below. */
	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	/* Freesync failure is logged but not fatal. */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* HDCP workqueue is only created on Raven and newer ASICs. */
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	/* Common teardown; amdgpu_dm_fini() tolerates partial initialization. */
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
1030
1031 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1032 {
1033         amdgpu_dm_audio_fini(adev);
1034
1035         amdgpu_dm_destroy_drm_device(&adev->dm);
1036
1037 #ifdef CONFIG_DRM_AMD_DC_HDCP
1038         if (adev->dm.hdcp_workqueue) {
1039                 hdcp_destroy(adev->dm.hdcp_workqueue);
1040                 adev->dm.hdcp_workqueue = NULL;
1041         }
1042
1043         if (adev->dm.dc)
1044                 dc_deinit_callbacks(adev->dm.dc);
1045 #endif
1046         if (adev->dm.dc->ctx->dmub_srv) {
1047                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1048                 adev->dm.dc->ctx->dmub_srv = NULL;
1049         }
1050
1051         if (adev->dm.dmub_bo)
1052                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1053                                       &adev->dm.dmub_bo_gpu_addr,
1054                                       &adev->dm.dmub_bo_cpu_addr);
1055
1056         /* DC Destroy TODO: Replace destroy DAL */
1057         if (adev->dm.dc)
1058                 dc_destroy(&adev->dm.dc);
1059         /*
1060          * TODO: pageflip, vlank interrupt
1061          *
1062          * amdgpu_dm_irq_fini(adev);
1063          */
1064
1065         if (adev->dm.cgs_device) {
1066                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1067                 adev->dm.cgs_device = NULL;
1068         }
1069         if (adev->dm.freesync_module) {
1070                 mod_freesync_destroy(adev->dm.freesync_module);
1071                 adev->dm.freesync_module = NULL;
1072         }
1073
1074         mutex_destroy(&adev->dm.audio_lock);
1075         mutex_destroy(&adev->dm.dc_lock);
1076
1077         return;
1078 }
1079
/*
 * load_dmcu_fw() - Request and register the DMCU firmware for this ASIC.
 *
 * Returns 0 when the ASIC needs no DMCU firmware, when the firmware is
 * optional and absent, or on success; a negative errno on real failures.
 * On success the ERAM and INTV ucode entries are registered for PSP
 * loading and adev->dm.dmcu_fw_version is recorded.
 */
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	/* ASICs listed before the first break ship without loadable DMCU fw. */
	switch(adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		/* Picasso and Raven2 currently share the same DMCU image;
		 * original Raven needs none. */
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	/* DMCU firmware is only loaded through the PSP. */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	/* Register the ERAM (code minus interrupt vectors) ucode entry. */
	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	/* Register the interrupt-vector (INTV) ucode entry. */
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
1167
1168 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1169 {
1170         struct amdgpu_device *adev = ctx;
1171
1172         return dm_read_reg(adev->dm.dc->ctx, address);
1173 }
1174
1175 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1176                                      uint32_t value)
1177 {
1178         struct amdgpu_device *adev = ctx;
1179
1180         return dm_write_reg(adev->dm.dc->ctx, address, value);
1181 }
1182
1183 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1184 {
1185         struct dmub_srv_create_params create_params;
1186         struct dmub_srv_region_params region_params;
1187         struct dmub_srv_region_info region_info;
1188         struct dmub_srv_fb_params fb_params;
1189         struct dmub_srv_fb_info *fb_info;
1190         struct dmub_srv *dmub_srv;
1191         const struct dmcub_firmware_header_v1_0 *hdr;
1192         const char *fw_name_dmub;
1193         enum dmub_asic dmub_asic;
1194         enum dmub_status status;
1195         int r;
1196
1197         switch (adev->asic_type) {
1198         case CHIP_RENOIR:
1199                 dmub_asic = DMUB_ASIC_DCN21;
1200                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1201                 break;
1202
1203         default:
1204                 /* ASIC doesn't support DMUB. */
1205                 return 0;
1206         }
1207
1208         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1209         if (r) {
1210                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1211                 return 0;
1212         }
1213
1214         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1215         if (r) {
1216                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1217                 return 0;
1218         }
1219
1220         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1221
1222         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1223                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1224                         AMDGPU_UCODE_ID_DMCUB;
1225                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1226                         adev->dm.dmub_fw;
1227                 adev->firmware.fw_size +=
1228                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1229
1230                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1231                          adev->dm.dmcub_fw_version);
1232         }
1233
1234         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1235
1236         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1237         dmub_srv = adev->dm.dmub_srv;
1238
1239         if (!dmub_srv) {
1240                 DRM_ERROR("Failed to allocate DMUB service!\n");
1241                 return -ENOMEM;
1242         }
1243
1244         memset(&create_params, 0, sizeof(create_params));
1245         create_params.user_ctx = adev;
1246         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1247         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1248         create_params.asic = dmub_asic;
1249
1250         /* Create the DMUB service. */
1251         status = dmub_srv_create(dmub_srv, &create_params);
1252         if (status != DMUB_STATUS_OK) {
1253                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1254                 return -EINVAL;
1255         }
1256
1257         /* Calculate the size of all the regions for the DMUB service. */
1258         memset(&region_params, 0, sizeof(region_params));
1259
1260         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1261                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1262         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1263         region_params.vbios_size = adev->bios_size;
1264         region_params.fw_bss_data =
1265                 adev->dm.dmub_fw->data +
1266                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1267                 le32_to_cpu(hdr->inst_const_bytes);
1268
1269         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1270                                            &region_info);
1271
1272         if (status != DMUB_STATUS_OK) {
1273                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1274                 return -EINVAL;
1275         }
1276
1277         /*
1278          * Allocate a framebuffer based on the total size of all the regions.
1279          * TODO: Move this into GART.
1280          */
1281         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1282                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1283                                     &adev->dm.dmub_bo_gpu_addr,
1284                                     &adev->dm.dmub_bo_cpu_addr);
1285         if (r)
1286                 return r;
1287
1288         /* Rebase the regions on the framebuffer address. */
1289         memset(&fb_params, 0, sizeof(fb_params));
1290         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1291         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1292         fb_params.region_info = &region_info;
1293
1294         adev->dm.dmub_fb_info =
1295                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1296         fb_info = adev->dm.dmub_fb_info;
1297
1298         if (!fb_info) {
1299                 DRM_ERROR(
1300                         "Failed to allocate framebuffer info for DMUB service!\n");
1301                 return -ENOMEM;
1302         }
1303
1304         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1305         if (status != DMUB_STATUS_OK) {
1306                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1307                 return -EINVAL;
1308         }
1309
1310         return 0;
1311 }
1312
/* IP-block sw_init hook: set up DMUB software state, then load DMCU fw. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dm_dmub_sw_init(adev);
	if (ret)
		return ret;

	ret = load_dmcu_fw(adev);

	return ret;
}
1324
1325 static int dm_sw_fini(void *handle)
1326 {
1327         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328
1329         kfree(adev->dm.dmub_fb_info);
1330         adev->dm.dmub_fb_info = NULL;
1331
1332         if (adev->dm.dmub_srv) {
1333                 dmub_srv_destroy(adev->dm.dmub_srv);
1334                 adev->dm.dmub_srv = NULL;
1335         }
1336
1337         if (adev->dm.dmub_fw) {
1338                 release_firmware(adev->dm.dmub_fw);
1339                 adev->dm.dmub_fw = NULL;
1340         }
1341
1342         if(adev->dm.fw_dmcu) {
1343                 release_firmware(adev->dm.fw_dmcu);
1344                 adev->dm.fw_dmcu = NULL;
1345         }
1346
1347         return 0;
1348 }
1349
/*
 * detect_mst_link_for_all_connectors() - Start MST topology management on
 * every connector whose link was detected as an MST branch device.
 *
 * On the first failure the link is downgraded to a single-stream
 * connection and iteration stops.  Returns 0 on success or the negative
 * error from drm_dp_mst_topology_mgr_set_mst().
 */
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		/* Only MST branch links with a valid AUX channel qualify. */
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				/* Fall back to treating the link as SST. */
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
1379
1380 static int dm_late_init(void *handle)
1381 {
1382         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1383
1384         struct dmcu_iram_parameters params;
1385         unsigned int linear_lut[16];
1386         int i;
1387         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1388         bool ret = false;
1389
1390         for (i = 0; i < 16; i++)
1391                 linear_lut[i] = 0xFFFF * i / 15;
1392
1393         params.set = 0;
1394         params.backlight_ramping_start = 0xCCCC;
1395         params.backlight_ramping_reduction = 0xCCCCCCCC;
1396         params.backlight_lut_array_size = 16;
1397         params.backlight_lut_array = linear_lut;
1398
1399         /* Min backlight level after ABM reduction,  Don't allow below 1%
1400          * 0xFFFF x 0.01 = 0x28F
1401          */
1402         params.min_abm_backlight = 0x28F;
1403
1404         /* todo will enable for navi10 */
1405         if (adev->asic_type <= CHIP_RAVEN) {
1406                 ret = dmcu_load_iram(dmcu, params);
1407
1408                 if (!ret)
1409                         return -EINVAL;
1410         }
1411
1412         return detect_mst_link_for_all_connectors(adev->ddev);
1413 }
1414
/*
 * s3_handle_mst() - Suspend or resume MST topology managers around S3.
 * @dev: DRM device whose connectors are walked.
 * @suspend: true to suspend the managers, false to resume them.
 *
 * Only top-level MST branch connectors are handled (connectors with an
 * mst_port are downstream ports and are skipped).  If resuming a manager
 * fails, MST is disabled on that connector and a hotplug event is sent
 * afterwards so userspace re-probes the displays.
 */
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		/* Skip non-MST links and downstream MST ports. */
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				/* Topology gone; tear down MST and re-probe. */
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
1448
1449 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1450 {
1451         struct smu_context *smu = &adev->smu;
1452         int ret = 0;
1453
1454         if (!is_support_sw_smu(adev))
1455                 return 0;
1456
1457         /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1458          * on window driver dc implementation.
1459          * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1460          * should be passed to smu during boot up and resume from s3.
1461          * boot up: dc calculate dcn watermark clock settings within dc_create,
1462          * dcn20_resource_construct
1463          * then call pplib functions below to pass the settings to smu:
1464          * smu_set_watermarks_for_clock_ranges
1465          * smu_set_watermarks_table
1466          * navi10_set_watermarks_table
1467          * smu_write_watermarks_table
1468          *
1469          * For Renoir, clock settings of dcn watermark are also fixed values.
1470          * dc has implemented different flow for window driver:
1471          * dc_hardware_init / dc_set_power_state
1472          * dcn10_init_hw
1473          * notify_wm_ranges
1474          * set_wm_ranges
1475          * -- Linux
1476          * smu_set_watermarks_for_clock_ranges
1477          * renoir_set_watermarks_table
1478          * smu_write_watermarks_table
1479          *
1480          * For Linux,
1481          * dc_hardware_init -> amdgpu_dm_init
1482          * dc_set_power_state --> dm_resume
1483          *
1484          * therefore, this function apply to navi10/12/14 but not Renoir
1485          * *
1486          */
1487         switch(adev->asic_type) {
1488         case CHIP_NAVI10:
1489         case CHIP_NAVI14:
1490         case CHIP_NAVI12:
1491                 break;
1492         default:
1493                 return 0;
1494         }
1495
1496         mutex_lock(&smu->mutex);
1497
1498         /* pass data to smu controller */
1499         if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1500                         !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1501                 ret = smu_write_watermarks_table(smu);
1502
1503                 if (ret) {
1504                         mutex_unlock(&smu->mutex);
1505                         DRM_ERROR("Failed to update WMTABLE!\n");
1506                         return ret;
1507                 }
1508                 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1509         }
1510
1511         mutex_unlock(&smu->mutex);
1512
1513         return 0;
1514 }
1515
1516 /**
1517  * dm_hw_init() - Initialize DC device
1518  * @handle: The base driver device containing the amdgpu_dm device.
1519  *
1520  * Initialize the &struct amdgpu_display_manager device. This involves calling
1521  * the initializers of each DM component, then populating the struct with them.
1522  *
1523  * Although the function implies hardware initialization, both hardware and
1524  * software are initialized here. Splitting them out to their relevant init
1525  * hooks is a future TODO item.
1526  *
1527  * Some notable things that are initialized here:
1528  *
1529  * - Display Core, both software and hardware
1530  * - DC modules that we need (freesync and color management)
1531  * - DRM software states
1532  * - Interrupt sources and handlers
1533  * - Vblank support
1534  * - Debug FS entries, if enabled
1535  */
1536 static int dm_hw_init(void *handle)
1537 {
1538         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1539         /* Create DAL display manager */
1540         amdgpu_dm_init(adev);
1541         amdgpu_dm_hpd_init(adev);
1542
1543         return 0;
1544 }
1545
1546 /**
1547  * dm_hw_fini() - Teardown DC device
1548  * @handle: The base driver device containing the amdgpu_dm device.
1549  *
1550  * Teardown components within &struct amdgpu_display_manager that require
1551  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1552  * were loaded. Also flush IRQ workqueues and disable them.
1553  */
1554 static int dm_hw_fini(void *handle)
1555 {
1556         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1557
1558         amdgpu_dm_hpd_fini(adev);
1559
1560         amdgpu_dm_irq_fini(adev);
1561         amdgpu_dm_fini(adev);
1562         return 0;
1563 }
1564
/*
 * dm_suspend() - IP-block suspend hook.
 *
 * Caches the current atomic state for dm_resume(), suspends the MST
 * topology managers and DM interrupts, then puts DC into ACPI D3.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	/* A leftover cached state indicates an unbalanced suspend/resume. */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);


	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}
1583
1584 static struct amdgpu_dm_connector *
1585 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1586                                              struct drm_crtc *crtc)
1587 {
1588         uint32_t i;
1589         struct drm_connector_state *new_con_state;
1590         struct drm_connector *connector;
1591         struct drm_crtc *crtc_from_state;
1592
1593         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1594                 crtc_from_state = new_con_state->crtc;
1595
1596                 if (crtc_from_state == crtc)
1597                         return to_amdgpu_dm_connector(connector);
1598         }
1599
1600         return NULL;
1601 }
1602
/*
 * emulated_link_detect() - Fake a link detection without touching hardware.
 *
 * Marks the link as disconnected (dc_connection_none), builds sink
 * capabilities purely from the connector's signal type, creates a new
 * dc_sink for the link and attempts an EDID read through the DC helpers.
 * Presumably used when real detection is unavailable — see callers.
 */
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	/* NOTE(review): prev_sink is retained here but never released or
	 * used again below — looks like a leaked reference; verify against
	 * dc_sink refcounting rules. */
	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	/* Map the connector signal to a DDC transaction type and sink signal. */
	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		/* NOTE(review): DP deliberately maps to SIGNAL_TYPE_VIRTUAL
		 * here (unlike every other case) — confirm this is intended
		 * for the emulated path. */
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	/* EDID failure is logged but not fatal for the emulated path. */
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
1684
/**
 * dm_resume - DM resume hook (amd_ip_funcs.resume).
 * @handle: struct amdgpu_device pointer.
 *
 * Rebuilds the dc_state that DC invalidated during suspend, re-inits
 * DMUB, powers DC back on, re-runs connector detection, and finally
 * replays the atomic state that dm_suspend cached via
 * drm_atomic_helper_resume().
 *
 * Returns 0 (a DMUB init failure is logged but not propagated).
 */
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced-on connector with nothing attached: emulate detection. */
		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		/* Drop our sink reference; detection above installed a fresh one. */
		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
1798
1799 /**
1800  * DOC: DM Lifecycle
1801  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1803  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1804  * the base driver's device list to be initialized and torn down accordingly.
1805  *
1806  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1807  */
1808
/* IP-block hooks through which the amdgpu base driver drives DM/DC. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
1826
1827 const struct amdgpu_ip_block_version dm_ip_block =
1828 {
1829         .type = AMD_IP_BLOCK_TYPE_DCE,
1830         .major = 1,
1831         .minor = 0,
1832         .rev = 0,
1833         .funcs = &amdgpu_dm_funcs,
1834 };
1835
1836
1837 /**
1838  * DOC: atomic
1839  *
1840  * *WIP*
1841  */
1842
/*
 * Top-level KMS mode-config hooks; all display updates funnel through
 * the atomic check/commit pair below.
 */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};
1849
1850 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1851         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1852 };
1853
1854 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1855 {
1856         u32 max_cll, min_cll, max, min, q, r;
1857         struct amdgpu_dm_backlight_caps *caps;
1858         struct amdgpu_display_manager *dm;
1859         struct drm_connector *conn_base;
1860         struct amdgpu_device *adev;
1861         static const u8 pre_computed_values[] = {
1862                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1863                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1864
1865         if (!aconnector || !aconnector->dc_link)
1866                 return;
1867
1868         conn_base = &aconnector->base;
1869         adev = conn_base->dev->dev_private;
1870         dm = &adev->dm;
1871         caps = &dm->backlight_caps;
1872         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1873         caps->aux_support = false;
1874         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1875         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1876
1877         if (caps->ext_caps->bits.oled == 1 ||
1878             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1879             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1880                 caps->aux_support = true;
1881
1882         /* From the specification (CTA-861-G), for calculating the maximum
1883          * luminance we need to use:
1884          *      Luminance = 50*2**(CV/32)
1885          * Where CV is a one-byte value.
1886          * For calculating this expression we may need float point precision;
1887          * to avoid this complexity level, we take advantage that CV is divided
1888          * by a constant. From the Euclids division algorithm, we know that CV
1889          * can be written as: CV = 32*q + r. Next, we replace CV in the
1890          * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
1891          * need to pre-compute the value of r/32. For pre-computing the values
1892          * We just used the following Ruby line:
1893          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1894          * The results of the above expressions can be verified at
1895          * pre_computed_values.
1896          */
1897         q = max_cll >> 5;
1898         r = max_cll % 32;
1899         max = (1 << q) * pre_computed_values[r];
1900
1901         // min luminance: maxLum * (CV/255)^2 / 100
1902         q = DIV_ROUND_CLOSEST(min_cll, 255);
1903         min = max * DIV_ROUND_CLOSEST((q * q), 100);
1904
1905         caps->aux_max_input_signal = max;
1906         caps->aux_min_input_signal = min;
1907 }
1908
/**
 * amdgpu_dm_update_connector_after_detect - sync connector state with the
 * dc_link's sink after a (real or emulated) detection pass.
 * @aconnector: connector whose DRM-side state should be refreshed.
 *
 * Transfers the link's local_sink into aconnector->dc_sink with the
 * appropriate retain/release calls, then updates the EDID property,
 * DP CEC and freesync capabilities to match; on disconnect everything
 * is torn down.  MST connectors are left to the DRM MST framework, and
 * forced connectors fall back to their emulated sink when headless.
 */
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	/* Hold a local reference while connector/link ownership is juggled. */
	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* Sink has no EDID: clear the property and CEC state. */
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_connector_update_edid_property(connector,
					aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		/* Disconnected: tear down EDID/CEC/freesync and drop the sink. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	/* Drop the local reference taken at the top of the function. */
	if (sink)
		dc_sink_release(sink);
}
2044
/*
 * handle_hpd_irq - deferred handler for HPD long pulses (connect/disconnect).
 * @param: the amdgpu_dm_connector registered for this HPD line.
 *
 * Runs in low (deferred) IRQ context, registered via
 * register_hpd_handlers().  Re-detects the link, refreshes connector
 * state, and sends a hotplug uevent unless the connector is forced.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Any hotplug invalidates the current HDCP state for this display. */
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	/* Forced connector with nothing attached: emulate the detection. */
	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
2096
/*
 * dm_handle_hpd_rx_irq - drain MST sideband (ESI) interrupts on a DP link.
 * @aconnector: MST root connector whose AUX channel should be serviced.
 *
 * Reads the DPCD interrupt vector (legacy 0x200 block for DPCD < 1.2,
 * ESI 0x2002 block otherwise), hands it to the MST manager, ACKs the
 * handled bits back to the sink and re-reads, looping until no new IRQ
 * is reported or the iteration cap is hit.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	/* Bail-out bound in case the sink keeps raising new IRQs. */
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* The AUX write can fail transiently; retry a few times. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
2174
/*
 * handle_hpd_rx_irq - deferred handler for DP short pulses (hpd_rx).
 * @param: the amdgpu_dm_connector registered for this link.
 *
 * Lets DC decode the short-pulse cause; on a downstream port status
 * change (non-MST) it re-runs detection and raises a hotplug event,
 * then services MST sideband messages, HDCP CP_IRQ and CEC as needed.
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = dev->dev_private;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);


#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connector with nothing attached: emulate detection. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Content-protection IRQ from the sink: kick the HDCP workqueue. */
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
	}
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		/* CEC traffic may arrive on the same short pulse. */
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
2253
2254 static void register_hpd_handlers(struct amdgpu_device *adev)
2255 {
2256         struct drm_device *dev = adev->ddev;
2257         struct drm_connector *connector;
2258         struct amdgpu_dm_connector *aconnector;
2259         const struct dc_link *dc_link;
2260         struct dc_interrupt_params int_params = {0};
2261
2262         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2263         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2264
2265         list_for_each_entry(connector,
2266                         &dev->mode_config.connector_list, head) {
2267
2268                 aconnector = to_amdgpu_dm_connector(connector);
2269                 dc_link = aconnector->dc_link;
2270
2271                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2272                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2273                         int_params.irq_source = dc_link->irq_source_hpd;
2274
2275                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2276                                         handle_hpd_irq,
2277                                         (void *) aconnector);
2278                 }
2279
2280                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2281
2282                         /* Also register for DP short pulse (hpd_rx). */
2283                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2284                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2285
2286                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2287                                         handle_hpd_rx_irq,
2288                                         (void *) aconnector);
2289                 }
2290         }
2291 }
2292
2293 /* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	/* Pre-Vega ASICs use the legacy IRQ client id. */
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC handler context, indexed by DC irq source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	/* VUPDATE src ids are spaced two apart per CRTC, hence i += 2. */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
2397
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Register IRQ sources and initialize IRQ callbacks for DCN ASICs.
 *
 * One VSTARTUP and one pageflip interrupt is registered per CRTC, plus a
 * single HPD handler set.  Returns 0 on success or the amdgpu_irq_add_id()
 * error code on failure.
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling.
         */

        /* Use VSTARTUP interrupt */
        for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
                        i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* Index the per-CRTC params by source offset from VBLANK1. */
                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_dcn_crtc_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
                        i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* Index the per-pipe params by source offset from PFLIP_FIRST. */
                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
                        &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
#endif
2484
2485 /*
2486  * Acquires the lock for the atomic state object and returns
2487  * the new atomic state.
2488  *
2489  * This should only be called during atomic check.
2490  */
2491 static int dm_atomic_get_state(struct drm_atomic_state *state,
2492                                struct dm_atomic_state **dm_state)
2493 {
2494         struct drm_device *dev = state->dev;
2495         struct amdgpu_device *adev = dev->dev_private;
2496         struct amdgpu_display_manager *dm = &adev->dm;
2497         struct drm_private_state *priv_state;
2498
2499         if (*dm_state)
2500                 return 0;
2501
2502         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2503         if (IS_ERR(priv_state))
2504                 return PTR_ERR(priv_state);
2505
2506         *dm_state = to_dm_atomic_state(priv_state);
2507
2508         return 0;
2509 }
2510
2511 struct dm_atomic_state *
2512 dm_atomic_get_new_state(struct drm_atomic_state *state)
2513 {
2514         struct drm_device *dev = state->dev;
2515         struct amdgpu_device *adev = dev->dev_private;
2516         struct amdgpu_display_manager *dm = &adev->dm;
2517         struct drm_private_obj *obj;
2518         struct drm_private_state *new_obj_state;
2519         int i;
2520
2521         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2522                 if (obj->funcs == dm->atomic_obj.funcs)
2523                         return to_dm_atomic_state(new_obj_state);
2524         }
2525
2526         return NULL;
2527 }
2528
2529 struct dm_atomic_state *
2530 dm_atomic_get_old_state(struct drm_atomic_state *state)
2531 {
2532         struct drm_device *dev = state->dev;
2533         struct amdgpu_device *adev = dev->dev_private;
2534         struct amdgpu_display_manager *dm = &adev->dm;
2535         struct drm_private_obj *obj;
2536         struct drm_private_state *old_obj_state;
2537         int i;
2538
2539         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2540                 if (obj->funcs == dm->atomic_obj.funcs)
2541                         return to_dm_atomic_state(old_obj_state);
2542         }
2543
2544         return NULL;
2545 }
2546
2547 static struct drm_private_state *
2548 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2549 {
2550         struct dm_atomic_state *old_state, *new_state;
2551
2552         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2553         if (!new_state)
2554                 return NULL;
2555
2556         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2557
2558         old_state = to_dm_atomic_state(obj->state);
2559
2560         if (old_state && old_state->context)
2561                 new_state->context = dc_copy_state(old_state->context);
2562
2563         if (!new_state->context) {
2564                 kfree(new_state);
2565                 return NULL;
2566         }
2567
2568         return &new_state->base;
2569 }
2570
2571 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2572                                     struct drm_private_state *state)
2573 {
2574         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2575
2576         if (dm_state && dm_state->context)
2577                 dc_release_state(dm_state->context);
2578
2579         kfree(dm_state);
2580 }
2581
/* Lifecycle hooks for the DM private atomic object. */
static struct drm_private_state_funcs dm_atomic_state_funcs = {
        .atomic_duplicate_state = dm_atomic_duplicate_state,
        .atomic_destroy_state = dm_atomic_destroy_state,
};
2586
/*
 * Set up the DRM mode-config limits and hooks for the device and create
 * the DM private atomic object, seeded with DC's current resource state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * property/audio initialization.
 */
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
        struct dm_atomic_state *state;
        int r;

        adev->mode_info.mode_config_initialized = true;

        adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
        adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

        adev->ddev->mode_config.max_width = 16384;
        adev->ddev->mode_config.max_height = 16384;

        adev->ddev->mode_config.preferred_depth = 24;
        adev->ddev->mode_config.prefer_shadow = 1;
        /* indicates support for immediate flip */
        adev->ddev->mode_config.async_page_flip = true;

        adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        state->context = dc_create_state(adev->dm.dc);
        if (!state->context) {
                kfree(state);
                return -ENOMEM;
        }

        /* Seed the private state with DC's current resource state. */
        dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

        /* Ownership of @state passes to the private object here. */
        drm_atomic_private_obj_init(adev->ddev,
                                    &adev->dm.atomic_obj,
                                    &state->base,
                                    &dm_atomic_state_funcs);

        /*
         * NOTE(review): failures below return with the private object still
         * initialized — presumably amdgpu_dm_destroy_drm_device() handles the
         * teardown on the caller's error path; confirm.
         */
        r = amdgpu_display_modeset_create_props(adev);
        if (r)
                return r;

        r = amdgpu_dm_audio_init(adev);
        if (r)
                return r;

        return 0;
}
2634
/* Fallback backlight input-signal range used when ACPI provides no caps */
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
/* Transition time (ms) passed to dc_link_set_backlight_level_nits() */
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2638
2639 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2640         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2641
/*
 * Populate dm->backlight_caps, querying ACPI when available and falling
 * back to the driver defaults otherwise.  Caps are only fetched once
 * (caps_valid is sticky).
 */
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
        struct amdgpu_dm_backlight_caps caps;

        /* Already resolved on a previous call. */
        if (dm->backlight_caps.caps_valid)
                return;

        amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
        if (caps.caps_valid) {
                dm->backlight_caps.caps_valid = true;
                /*
                 * NOTE(review): for aux_support the min/max input signals
                 * are not copied here — presumably the aux_* fields are
                 * filled in elsewhere; confirm.
                 */
                if (caps.aux_support)
                        return;
                dm->backlight_caps.min_input_signal = caps.min_input_signal;
                dm->backlight_caps.max_input_signal = caps.max_input_signal;
        } else {
                dm->backlight_caps.min_input_signal =
                                AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
                dm->backlight_caps.max_input_signal =
                                AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
        }
#else
        if (dm->backlight_caps.aux_support)
                return;

        dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
        dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
2671
2672 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2673 {
2674         bool rc;
2675
2676         if (!link)
2677                 return 1;
2678
2679         rc = dc_link_set_backlight_level_nits(link, true, brightness,
2680                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2681
2682         return rc ? 0 : 1;
2683 }
2684
2685 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2686                               const uint32_t user_brightness)
2687 {
2688         u32 min, max, conversion_pace;
2689         u32 brightness = user_brightness;
2690
2691         if (!caps)
2692                 goto out;
2693
2694         if (!caps->aux_support) {
2695                 max = caps->max_input_signal;
2696                 min = caps->min_input_signal;
2697                 /*
2698                  * The brightness input is in the range 0-255
2699                  * It needs to be rescaled to be between the
2700                  * requested min and max input signal
2701                  * It also needs to be scaled up by 0x101 to
2702                  * match the DC interface which has a range of
2703                  * 0 to 0xffff
2704                  */
2705                 conversion_pace = 0x101;
2706                 brightness =
2707                         user_brightness
2708                         * conversion_pace
2709                         * (max - min)
2710                         / AMDGPU_MAX_BL_LEVEL
2711                         + min * conversion_pace;
2712         } else {
2713                 /* TODO
2714                  * We are doing a linear interpolation here, which is OK but
2715                  * does not provide the optimal result. We probably want
2716                  * something close to the Perceptual Quantizer (PQ) curve.
2717                  */
2718                 max = caps->aux_max_input_signal;
2719                 min = caps->aux_min_input_signal;
2720
2721                 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2722                                + user_brightness * max;
2723                 // Multiple the value by 1000 since we use millinits
2724                 brightness *= 1000;
2725                 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2726         }
2727
2728 out:
2729         return brightness;
2730 }
2731
2732 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2733 {
2734         struct amdgpu_display_manager *dm = bl_get_data(bd);
2735         struct amdgpu_dm_backlight_caps caps;
2736         struct dc_link *link = NULL;
2737         u32 brightness;
2738         bool rc;
2739
2740         amdgpu_dm_update_backlight_caps(dm);
2741         caps = dm->backlight_caps;
2742
2743         link = (struct dc_link *)dm->backlight_link;
2744
2745         brightness = convert_brightness(&caps, bd->props.brightness);
2746         // Change brightness based on AUX property
2747         if (caps.aux_support)
2748                 return set_backlight_via_aux(link, brightness);
2749
2750         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2751
2752         return rc ? 0 : 1;
2753 }
2754
2755 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2756 {
2757         struct amdgpu_display_manager *dm = bl_get_data(bd);
2758         int ret = dc_link_get_backlight_level(dm->backlight_link);
2759
2760         if (ret == DC_ERROR_UNEXPECTED)
2761                 return bd->props.brightness;
2762         return ret;
2763 }
2764
/* Backlight-class hooks; BL_CORE_SUSPENDRESUME enables the core's PM option. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
        .options = BL_CORE_SUSPENDRESUME,
        .get_brightness = amdgpu_dm_backlight_get_brightness,
        .update_status  = amdgpu_dm_backlight_update_status,
};
2770
2771 static void
2772 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2773 {
2774         char bl_name[16];
2775         struct backlight_properties props = { 0 };
2776
2777         amdgpu_dm_update_backlight_caps(dm);
2778
2779         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2780         props.brightness = AMDGPU_MAX_BL_LEVEL;
2781         props.type = BACKLIGHT_RAW;
2782
2783         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2784                         dm->adev->ddev->primary->index);
2785
2786         dm->backlight_dev = backlight_device_register(bl_name,
2787                         dm->adev->ddev->dev,
2788                         dm,
2789                         &amdgpu_dm_backlight_ops,
2790                         &props);
2791
2792         if (IS_ERR(dm->backlight_dev))
2793                 DRM_ERROR("DM: Backlight registration failed!\n");
2794         else
2795                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2796 }
2797
2798 #endif
2799
2800 static int initialize_plane(struct amdgpu_display_manager *dm,
2801                             struct amdgpu_mode_info *mode_info, int plane_id,
2802                             enum drm_plane_type plane_type,
2803                             const struct dc_plane_cap *plane_cap)
2804 {
2805         struct drm_plane *plane;
2806         unsigned long possible_crtcs;
2807         int ret = 0;
2808
2809         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2810         if (!plane) {
2811                 DRM_ERROR("KMS: Failed to allocate plane\n");
2812                 return -ENOMEM;
2813         }
2814         plane->type = plane_type;
2815
2816         /*
2817          * HACK: IGT tests expect that the primary plane for a CRTC
2818          * can only have one possible CRTC. Only expose support for
2819          * any CRTC if they're not going to be used as a primary plane
2820          * for a CRTC - like overlay or underlay planes.
2821          */
2822         possible_crtcs = 1 << plane_id;
2823         if (plane_id >= dm->dc->caps.max_streams)
2824                 possible_crtcs = 0xff;
2825
2826         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2827
2828         if (ret) {
2829                 DRM_ERROR("KMS: Failed to initialize plane\n");
2830                 kfree(plane);
2831                 return ret;
2832         }
2833
2834         if (mode_info)
2835                 mode_info->planes[plane_id] = plane;
2836
2837         return ret;
2838 }
2839
2840
/*
 * Register a backlight device for @link when it drives a connected
 * eDP/LVDS panel, and record the link in dm->backlight_link.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
                                      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
            link->type != dc_connection_none) {
                /*
                 * Even if registration failed, we should continue with
                 * DM initialization because not having a backlight control
                 * is better than a black screen.
                 */
                amdgpu_dm_register_backlight_device(dm);

                if (dm->backlight_dev)
                        dm->backlight_link = link;
        }
#endif
}
2861
2862
2863 /*
2864  * In this architecture, the association
2865  * connector -> encoder -> crtc
2866  * id not really requried. The crtc and connector will hold the
2867  * display_index as an abstraction to use with DAL component
2868  *
2869  * Returns 0 on success
2870  */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
        struct amdgpu_display_manager *dm = &adev->dm;
        int32_t i;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct amdgpu_encoder *aencoder = NULL;
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        uint32_t link_cnt;
        int32_t primary_planes;
        enum dc_connection_type new_connection_type = dc_connection_none;
        const struct dc_plane_cap *plane;

        /* Mode config must exist before any planes/CRTCs/connectors. */
        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
                DRM_ERROR("DM: Failed to initialize mode config\n");
                return -EINVAL;
        }

        /* There is one primary plane per CRTC */
        primary_planes = dm->dc->caps.max_streams;
        ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

        /*
         * Initialize primary planes, implicit planes for legacy IOCTLS.
         * Order is reversed to match iteration order in atomic check.
         */
        for (i = (primary_planes - 1); i >= 0; i--) {
                plane = &dm->dc->caps.planes[i];

                if (initialize_plane(dm, mode_info, i,
                                     DRM_PLANE_TYPE_PRIMARY, plane)) {
                        DRM_ERROR("KMS: Failed to initialize primary plane\n");
                        goto fail;
                }
        }

        /*
         * Initialize overlay planes, index starting after primary planes.
         * These planes have a higher DRM index than the primary planes since
         * they should be considered as having a higher z-order.
         * Order is reversed to match iteration order in atomic check.
         *
         * Only support DCN for now, and only expose one so we don't encourage
         * userspace to use up all the pipes.
         */
        for (i = 0; i < dm->dc->caps.max_planes; ++i) {
                struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

                /* Only universal DCN planes that can blend and do ARGB8888. */
                if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
                        continue;

                if (!plane->blends_with_above || !plane->blends_with_below)
                        continue;

                if (!plane->pixel_format_support.argb8888)
                        continue;

                if (initialize_plane(dm, NULL, primary_planes + i,
                                     DRM_PLANE_TYPE_OVERLAY, plane)) {
                        DRM_ERROR("KMS: Failed to initialize overlay plane\n");
                        goto fail;
                }

                /* Only create one overlay plane. */
                break;
        }

        /* One CRTC per stream, each bound to its primary plane. */
        for (i = 0; i < dm->dc->caps.max_streams; i++)
                if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
                        DRM_ERROR("KMS: Failed to initialize crtc\n");
                        goto fail;
                }

        dm->display_indexes_num = dm->dc->caps.max_streams;

        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;

                if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
                        DRM_ERROR(
                                "KMS: Cannot support more than %d display indexes\n",
                                        AMDGPU_DM_MAX_DISPLAY_INDEX);
                        continue;
                }

                aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
                if (!aconnector)
                        goto fail;

                aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
                if (!aencoder)
                        goto fail;

                if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
                        DRM_ERROR("KMS: Failed to initialize encoder\n");
                        goto fail;
                }

                if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
                        DRM_ERROR("KMS: Failed to initialize connector\n");
                        goto fail;
                }

                link = dc_get_link_at_index(dm->dc, i);

                if (!dc_link_detect_sink(link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                /* Forced connectors with nothing attached get an emulated sink. */
                if (aconnector->base.force && new_connection_type == dc_connection_none) {
                        emulated_link_detect(link);
                        amdgpu_dm_update_connector_after_detect(aconnector);

                } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);
                        register_backlight_device(dm, link);
                        if (amdgpu_dc_feature_mask & DC_PSR_MASK)
                                amdgpu_dm_set_psr_caps(link);
                }


        }

        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                if (dce110_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_RAVEN:
        case CHIP_NAVI12:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
                if (dcn10_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;
#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                goto fail;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        /* No userspace support. */
        dm->dc->debug.disable_tri_buf = true;

        return 0;
fail:
        /*
         * NOTE(review): only the most recently allocated connector/encoder
         * are freed here; objects already registered with DRM are presumably
         * torn down via drm_mode_config_cleanup() — confirm there is no
         * double free when a later iteration fails.
         */
        kfree(aencoder);
        kfree(aconnector);

        return -EINVAL;
}
3047
3048 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3049 {
3050         drm_mode_config_cleanup(dm->ddev);
3051         drm_atomic_private_obj_fini(&dm->atomic_obj);
3052         return;
3053 }
3054
3055 /******************************************************************************
3056  * amdgpu_display_funcs functions
3057  *****************************************************************************/
3058
3059 /*
3060  * dm_bandwidth_update - program display watermarks
3061  *
3062  * @adev: amdgpu_device pointer
3063  *
3064  * Calculate and program the display watermarks and line buffer allocation.
3065  */
3066 static void dm_bandwidth_update(struct amdgpu_device *adev)
3067 {
3068         /* TODO: implement later */
3069 }
3070
/*
 * Display function table handed to the amdgpu core.  Several hooks are
 * NULL because DC/DAL performs the corresponding work internally; only
 * the entry points the core calls unconditionally are populated.
 */
static const struct amdgpu_display_funcs dm_display_funcs = {
        .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
        .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
        .backlight_set_level = NULL, /* never called for DC */
        .backlight_get_level = NULL, /* never called for DC */
        .hpd_sense = NULL,/* called unconditionally */
        .hpd_set_polarity = NULL, /* called unconditionally */
        .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
        .page_flip_get_scanoutpos =
                dm_crtc_get_scanoutpos,/* called unconditionally */
        .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
        .add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3084
#if defined(CONFIG_DEBUG_KERNEL_DC)

/*
 * Debug-only sysfs hook: writing a nonzero value resumes DM and fires a
 * hotplug event; writing zero suspends it.  Lets S3-style suspend/resume
 * paths be exercised without a full system suspend.
 */
static ssize_t s3_debug_store(struct device *device,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t count)
{
        int ret;
        int s3_state;
        struct drm_device *drm_dev = dev_get_drvdata(device);
        struct amdgpu_device *adev = drm_dev->dev_private;

        ret = kstrtoint(buf, 0, &s3_state);

        if (ret == 0) {
                if (s3_state) {
                        dm_resume(adev);
                        drm_kms_helper_hotplug_event(adev->ddev);
                } else
                        dm_suspend(adev);
        }

        /* Consume the whole write on success; report 0 on parse failure. */
        return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
3113
/*
 * IP-block early_init: record the per-ASIC CRTC/HPD/DIG counts in
 * mode_info, install the DM IRQ funcs and display function table.
 * Returns -EINVAL for ASICs DM does not support.
 */
static int dm_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Per-ASIC hardware resource counts. */
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_KAVERI:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_FIJI:
        case CHIP_TONGA:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                break;
        case CHIP_CARRIZO:
                adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_STONEY:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
        case CHIP_POLARIS10:
        case CHIP_VEGAM:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_RAVEN:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
                adev->mode_info.num_dig = 4;
                break;
#endif
        case CHIP_NAVI10:
        case CHIP_NAVI12:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_NAVI14:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
        case CHIP_RENOIR:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
                adev->mode_info.num_dig = 4;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        amdgpu_dm_set_irq_funcs(adev);

        if (adev->mode_info.funcs == NULL)
                adev->mode_info.funcs = &dm_display_funcs;

        /*
         * Note: Do NOT change adev->audio_endpt_rreg and
         * adev->audio_endpt_wreg because they are initialised in
         * amdgpu_device_init()
         */
#if defined(CONFIG_DEBUG_KERNEL_DC)
        device_create_file(
                adev->ddev->dev,
                &dev_attr_s3_debug);
#endif

        return 0;
}
3217
3218 static bool modeset_required(struct drm_crtc_state *crtc_state,
3219                              struct dc_stream_state *new_stream,
3220                              struct dc_stream_state *old_stream)
3221 {
3222         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3223                 return false;
3224
3225         if (!crtc_state->enable)
3226                 return false;
3227
3228         return crtc_state->active;
3229 }
3230
3231 static bool modereset_required(struct drm_crtc_state *crtc_state)
3232 {
3233         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3234                 return false;
3235
3236         return !crtc_state->enable || !crtc_state->active;
3237 }
3238
/* Tear down the DRM bookkeeping for the encoder, then free its memory. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3244
/* Only destruction is hooked; encoder setup happens at creation time. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
3248
3249
3250 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3251                                 struct dc_scaling_info *scaling_info)
3252 {
3253         int scale_w, scale_h;
3254
3255         memset(scaling_info, 0, sizeof(*scaling_info));
3256
3257         /* Source is fixed 16.16 but we ignore mantissa for now... */
3258         scaling_info->src_rect.x = state->src_x >> 16;
3259         scaling_info->src_rect.y = state->src_y >> 16;
3260
3261         scaling_info->src_rect.width = state->src_w >> 16;
3262         if (scaling_info->src_rect.width == 0)
3263                 return -EINVAL;
3264
3265         scaling_info->src_rect.height = state->src_h >> 16;
3266         if (scaling_info->src_rect.height == 0)
3267                 return -EINVAL;
3268
3269         scaling_info->dst_rect.x = state->crtc_x;
3270         scaling_info->dst_rect.y = state->crtc_y;
3271
3272         if (state->crtc_w == 0)
3273                 return -EINVAL;
3274
3275         scaling_info->dst_rect.width = state->crtc_w;
3276
3277         if (state->crtc_h == 0)
3278                 return -EINVAL;
3279
3280         scaling_info->dst_rect.height = state->crtc_h;
3281
3282         /* DRM doesn't specify clipping on destination output. */
3283         scaling_info->clip_rect = scaling_info->dst_rect;
3284
3285         /* TODO: Validate scaling per-format with DC plane caps */
3286         scale_w = scaling_info->dst_rect.width * 1000 /
3287                   scaling_info->src_rect.width;
3288
3289         if (scale_w < 250 || scale_w > 16000)
3290                 return -EINVAL;
3291
3292         scale_h = scaling_info->dst_rect.height * 1000 /
3293                   scaling_info->src_rect.height;
3294
3295         if (scale_h < 250 || scale_h > 16000)
3296                 return -EINVAL;
3297
3298         /*
3299          * The "scaling_quality" can be ignored for now, quality = 0 has DC
3300          * assume reasonable defaults based on the format.
3301          */
3302
3303         return 0;
3304 }
3305
3306 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3307                        uint64_t *tiling_flags)
3308 {
3309         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3310         int r = amdgpu_bo_reserve(rbo, false);
3311
3312         if (unlikely(r)) {
3313                 /* Don't show error message when returning -ERESTARTSYS */
3314                 if (r != -ERESTARTSYS)
3315                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3316                 return r;
3317         }
3318
3319         if (tiling_flags)
3320                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3321
3322         amdgpu_bo_unreserve(rbo);
3323
3324         return r;
3325 }
3326
3327 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3328 {
3329         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3330
3331         return offset ? (address + offset * 256) : 0;
3332 }
3333
3334 static int
3335 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3336                           const struct amdgpu_framebuffer *afb,
3337                           const enum surface_pixel_format format,
3338                           const enum dc_rotation_angle rotation,
3339                           const struct plane_size *plane_size,
3340                           const union dc_tiling_info *tiling_info,
3341                           const uint64_t info,
3342                           struct dc_plane_dcc_param *dcc,
3343                           struct dc_plane_address *address)
3344 {
3345         struct dc *dc = adev->dm.dc;
3346         struct dc_dcc_surface_param input;
3347         struct dc_surface_dcc_cap output;
3348         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3349         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3350         uint64_t dcc_address;
3351
3352         memset(&input, 0, sizeof(input));
3353         memset(&output, 0, sizeof(output));
3354
3355         if (!offset)
3356                 return 0;
3357
3358         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3359                 return 0;
3360
3361         if (!dc->cap_funcs.get_dcc_compression_cap)
3362                 return -EINVAL;
3363
3364         input.format = format;
3365         input.surface_size.width = plane_size->surface_size.width;
3366         input.surface_size.height = plane_size->surface_size.height;
3367         input.swizzle_mode = tiling_info->gfx9.swizzle;
3368
3369         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3370                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3371         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3372                 input.scan = SCAN_DIRECTION_VERTICAL;
3373
3374         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3375                 return -EINVAL;
3376
3377         if (!output.capable)
3378                 return -EINVAL;
3379
3380         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3381                 return -EINVAL;
3382
3383         dcc->enable = 1;
3384         dcc->meta_pitch =
3385                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3386         dcc->independent_64b_blks = i64b;
3387
3388         dcc_address = get_dcc_address(afb->address, info);
3389         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3390         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3391
3392         return 0;
3393 }
3394
3395 static int
3396 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3397                              const struct amdgpu_framebuffer *afb,
3398                              const enum surface_pixel_format format,
3399                              const enum dc_rotation_angle rotation,
3400                              const uint64_t tiling_flags,
3401                              union dc_tiling_info *tiling_info,
3402                              struct plane_size *plane_size,
3403                              struct dc_plane_dcc_param *dcc,
3404                              struct dc_plane_address *address)
3405 {
3406         const struct drm_framebuffer *fb = &afb->base;
3407         int ret;
3408
3409         memset(tiling_info, 0, sizeof(*tiling_info));
3410         memset(plane_size, 0, sizeof(*plane_size));
3411         memset(dcc, 0, sizeof(*dcc));
3412         memset(address, 0, sizeof(*address));
3413
3414         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3415                 plane_size->surface_size.x = 0;
3416                 plane_size->surface_size.y = 0;
3417                 plane_size->surface_size.width = fb->width;
3418                 plane_size->surface_size.height = fb->height;
3419                 plane_size->surface_pitch =
3420                         fb->pitches[0] / fb->format->cpp[0];
3421
3422                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3423                 address->grph.addr.low_part = lower_32_bits(afb->address);
3424                 address->grph.addr.high_part = upper_32_bits(afb->address);
3425         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3426                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3427
3428                 plane_size->surface_size.x = 0;
3429                 plane_size->surface_size.y = 0;
3430                 plane_size->surface_size.width = fb->width;
3431                 plane_size->surface_size.height = fb->height;
3432                 plane_size->surface_pitch =
3433                         fb->pitches[0] / fb->format->cpp[0];
3434
3435                 plane_size->chroma_size.x = 0;
3436                 plane_size->chroma_size.y = 0;
3437                 /* TODO: set these based on surface format */
3438                 plane_size->chroma_size.width = fb->width / 2;
3439                 plane_size->chroma_size.height = fb->height / 2;
3440
3441                 plane_size->chroma_pitch =
3442                         fb->pitches[1] / fb->format->cpp[1];
3443
3444                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3445                 address->video_progressive.luma_addr.low_part =
3446                         lower_32_bits(afb->address);
3447                 address->video_progressive.luma_addr.high_part =
3448                         upper_32_bits(afb->address);
3449                 address->video_progressive.chroma_addr.low_part =
3450                         lower_32_bits(chroma_addr);
3451                 address->video_progressive.chroma_addr.high_part =
3452                         upper_32_bits(chroma_addr);
3453         }
3454
3455         /* Fill GFX8 params */
3456         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3457                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3458
3459                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3460                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3461                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3462                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3463                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3464
3465                 /* XXX fix me for VI */
3466                 tiling_info->gfx8.num_banks = num_banks;
3467                 tiling_info->gfx8.array_mode =
3468                                 DC_ARRAY_2D_TILED_THIN1;
3469                 tiling_info->gfx8.tile_split = tile_split;
3470                 tiling_info->gfx8.bank_width = bankw;
3471                 tiling_info->gfx8.bank_height = bankh;
3472                 tiling_info->gfx8.tile_aspect = mtaspect;
3473                 tiling_info->gfx8.tile_mode =
3474                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3475         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3476                         == DC_ARRAY_1D_TILED_THIN1) {
3477                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3478         }
3479
3480         tiling_info->gfx8.pipe_config =
3481                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3482
3483         if (adev->asic_type == CHIP_VEGA10 ||
3484             adev->asic_type == CHIP_VEGA12 ||
3485             adev->asic_type == CHIP_VEGA20 ||
3486             adev->asic_type == CHIP_NAVI10 ||
3487             adev->asic_type == CHIP_NAVI14 ||
3488             adev->asic_type == CHIP_NAVI12 ||
3489             adev->asic_type == CHIP_RENOIR ||
3490             adev->asic_type == CHIP_RAVEN) {
3491                 /* Fill GFX9 params */
3492                 tiling_info->gfx9.num_pipes =
3493                         adev->gfx.config.gb_addr_config_fields.num_pipes;
3494                 tiling_info->gfx9.num_banks =
3495                         adev->gfx.config.gb_addr_config_fields.num_banks;
3496                 tiling_info->gfx9.pipe_interleave =
3497                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3498                 tiling_info->gfx9.num_shader_engines =
3499                         adev->gfx.config.gb_addr_config_fields.num_se;
3500                 tiling_info->gfx9.max_compressed_frags =
3501                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3502                 tiling_info->gfx9.num_rb_per_se =
3503                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3504                 tiling_info->gfx9.swizzle =
3505                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3506                 tiling_info->gfx9.shaderEnable = 1;
3507
3508                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3509                                                 plane_size, tiling_info,
3510                                                 tiling_flags, dcc, address);
3511                 if (ret)
3512                         return ret;
3513         }
3514
3515         return 0;
3516 }
3517
3518 static void
3519 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3520                                bool *per_pixel_alpha, bool *global_alpha,
3521                                int *global_alpha_value)
3522 {
3523         *per_pixel_alpha = false;
3524         *global_alpha = false;
3525         *global_alpha_value = 0xff;
3526
3527         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3528                 return;
3529
3530         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3531                 static const uint32_t alpha_formats[] = {
3532                         DRM_FORMAT_ARGB8888,
3533                         DRM_FORMAT_RGBA8888,
3534                         DRM_FORMAT_ABGR8888,
3535                 };
3536                 uint32_t format = plane_state->fb->format->format;
3537                 unsigned int i;
3538
3539                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3540                         if (format == alpha_formats[i]) {
3541                                 *per_pixel_alpha = true;
3542                                 break;
3543                         }
3544                 }
3545         }
3546
3547         if (plane_state->alpha < 0xffff) {
3548                 *global_alpha = true;
3549                 *global_alpha_value = plane_state->alpha >> 8;
3550         }
3551 }
3552
3553 static int
3554 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3555                             const enum surface_pixel_format format,
3556                             enum dc_color_space *color_space)
3557 {
3558         bool full_range;
3559
3560         *color_space = COLOR_SPACE_SRGB;
3561
3562         /* DRM color properties only affect non-RGB formats. */
3563         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3564                 return 0;
3565
3566         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3567
3568         switch (plane_state->color_encoding) {
3569         case DRM_COLOR_YCBCR_BT601:
3570                 if (full_range)
3571                         *color_space = COLOR_SPACE_YCBCR601;
3572                 else
3573                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3574                 break;
3575
3576         case DRM_COLOR_YCBCR_BT709:
3577                 if (full_range)
3578                         *color_space = COLOR_SPACE_YCBCR709;
3579                 else
3580                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3581                 break;
3582
3583         case DRM_COLOR_YCBCR_BT2020:
3584                 if (full_range)
3585                         *color_space = COLOR_SPACE_2020_YCBCR;
3586                 else
3587                         return -EINVAL;
3588                 break;
3589
3590         default:
3591                 return -EINVAL;
3592         }
3593
3594         return 0;
3595 }
3596
3597 static int
3598 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3599                             const struct drm_plane_state *plane_state,
3600                             const uint64_t tiling_flags,
3601                             struct dc_plane_info *plane_info,
3602                             struct dc_plane_address *address)
3603 {
3604         const struct drm_framebuffer *fb = plane_state->fb;
3605         const struct amdgpu_framebuffer *afb =
3606                 to_amdgpu_framebuffer(plane_state->fb);
3607         struct drm_format_name_buf format_name;
3608         int ret;
3609
3610         memset(plane_info, 0, sizeof(*plane_info));
3611
3612         switch (fb->format->format) {
3613         case DRM_FORMAT_C8:
3614                 plane_info->format =
3615                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3616                 break;
3617         case DRM_FORMAT_RGB565:
3618                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3619                 break;
3620         case DRM_FORMAT_XRGB8888:
3621         case DRM_FORMAT_ARGB8888:
3622                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3623                 break;
3624         case DRM_FORMAT_XRGB2101010:
3625         case DRM_FORMAT_ARGB2101010:
3626                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3627                 break;
3628         case DRM_FORMAT_XBGR2101010:
3629         case DRM_FORMAT_ABGR2101010:
3630                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3631                 break;
3632         case DRM_FORMAT_XBGR8888:
3633         case DRM_FORMAT_ABGR8888:
3634                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3635                 break;
3636         case DRM_FORMAT_NV21:
3637                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3638                 break;
3639         case DRM_FORMAT_NV12:
3640                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3641                 break;
3642         case DRM_FORMAT_P010:
3643                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3644                 break;
3645         default:
3646                 DRM_ERROR(
3647                         "Unsupported screen format %s\n",
3648                         drm_get_format_name(fb->format->format, &format_name));
3649                 return -EINVAL;
3650         }
3651
3652         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3653         case DRM_MODE_ROTATE_0:
3654                 plane_info->rotation = ROTATION_ANGLE_0;
3655                 break;
3656         case DRM_MODE_ROTATE_90:
3657                 plane_info->rotation = ROTATION_ANGLE_90;
3658                 break;
3659         case DRM_MODE_ROTATE_180:
3660                 plane_info->rotation = ROTATION_ANGLE_180;
3661                 break;
3662         case DRM_MODE_ROTATE_270:
3663                 plane_info->rotation = ROTATION_ANGLE_270;
3664                 break;
3665         default:
3666                 plane_info->rotation = ROTATION_ANGLE_0;
3667                 break;
3668         }
3669
3670         plane_info->visible = true;
3671         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3672
3673         plane_info->layer_index = 0;
3674
3675         ret = fill_plane_color_attributes(plane_state, plane_info->format,
3676                                           &plane_info->color_space);
3677         if (ret)
3678                 return ret;
3679
3680         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3681                                            plane_info->rotation, tiling_flags,
3682                                            &plane_info->tiling_info,
3683                                            &plane_info->plane_size,
3684                                            &plane_info->dcc, address);
3685         if (ret)
3686                 return ret;
3687
3688         fill_blending_from_plane_state(
3689                 plane_state, &plane_info->per_pixel_alpha,
3690                 &plane_info->global_alpha, &plane_info->global_alpha_value);
3691
3692         return 0;
3693 }
3694
3695 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3696                                     struct dc_plane_state *dc_plane_state,
3697                                     struct drm_plane_state *plane_state,
3698                                     struct drm_crtc_state *crtc_state)
3699 {
3700         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3701         const struct amdgpu_framebuffer *amdgpu_fb =
3702                 to_amdgpu_framebuffer(plane_state->fb);
3703         struct dc_scaling_info scaling_info;
3704         struct dc_plane_info plane_info;
3705         uint64_t tiling_flags;
3706         int ret;
3707
3708         ret = fill_dc_scaling_info(plane_state, &scaling_info);
3709         if (ret)
3710                 return ret;
3711
3712         dc_plane_state->src_rect = scaling_info.src_rect;
3713         dc_plane_state->dst_rect = scaling_info.dst_rect;
3714         dc_plane_state->clip_rect = scaling_info.clip_rect;
3715         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3716
3717         ret = get_fb_info(amdgpu_fb, &tiling_flags);
3718         if (ret)
3719                 return ret;
3720
3721         ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3722                                           &plane_info,
3723                                           &dc_plane_state->address);
3724         if (ret)
3725                 return ret;
3726
3727         dc_plane_state->format = plane_info.format;
3728         dc_plane_state->color_space = plane_info.color_space;
3729         dc_plane_state->format = plane_info.format;
3730         dc_plane_state->plane_size = plane_info.plane_size;
3731         dc_plane_state->rotation = plane_info.rotation;
3732         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3733         dc_plane_state->stereo_format = plane_info.stereo_format;
3734         dc_plane_state->tiling_info = plane_info.tiling_info;
3735         dc_plane_state->visible = plane_info.visible;
3736         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3737         dc_plane_state->global_alpha = plane_info.global_alpha;
3738         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3739         dc_plane_state->dcc = plane_info.dcc;
3740         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3741
3742         /*
3743          * Always set input transfer function, since plane state is refreshed
3744          * every time.
3745          */
3746         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3747         if (ret)
3748                 return ret;
3749
3750         return 0;
3751 }
3752
3753 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3754                                            const struct dm_connector_state *dm_state,
3755                                            struct dc_stream_state *stream)
3756 {
3757         enum amdgpu_rmx_type rmx_type;
3758
3759         struct rect src = { 0 }; /* viewport in composition space*/
3760         struct rect dst = { 0 }; /* stream addressable area */
3761
3762         /* no mode. nothing to be done */
3763         if (!mode)
3764                 return;
3765
3766         /* Full screen scaling by default */
3767         src.width = mode->hdisplay;
3768         src.height = mode->vdisplay;
3769         dst.width = stream->timing.h_addressable;
3770         dst.height = stream->timing.v_addressable;
3771
3772         if (dm_state) {
3773                 rmx_type = dm_state->scaling;
3774                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3775                         if (src.width * dst.height <
3776                                         src.height * dst.width) {
3777                                 /* height needs less upscaling/more downscaling */
3778                                 dst.width = src.width *
3779                                                 dst.height / src.height;
3780                         } else {
3781                                 /* width needs less upscaling/more downscaling */
3782                                 dst.height = src.height *
3783                                                 dst.width / src.width;
3784                         }
3785                 } else if (rmx_type == RMX_CENTER) {
3786                         dst = src;
3787                 }
3788
3789                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3790                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3791
3792                 if (dm_state->underscan_enable) {
3793                         dst.x += dm_state->underscan_hborder / 2;
3794                         dst.y += dm_state->underscan_vborder / 2;
3795                         dst.width -= dm_state->underscan_hborder;
3796                         dst.height -= dm_state->underscan_vborder;
3797                 }
3798         }
3799
3800         stream->src = src;
3801         stream->dst = dst;
3802
3803         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3804                         dst.x, dst.y, dst.width, dst.height);
3805
3806 }
3807
3808 static enum dc_color_depth
3809 convert_color_depth_from_display_info(const struct drm_connector *connector,
3810                                       const struct drm_connector_state *state,
3811                                       bool is_y420)
3812 {
3813         uint8_t bpc;
3814
3815         if (is_y420) {
3816                 bpc = 8;
3817
3818                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3819                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3820                         bpc = 16;
3821                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3822                         bpc = 12;
3823                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3824                         bpc = 10;
3825         } else {
3826                 bpc = (uint8_t)connector->display_info.bpc;
3827                 /* Assume 8 bpc by default if no bpc is specified. */
3828                 bpc = bpc ? bpc : 8;
3829         }
3830
3831         if (!state)
3832                 state = connector->state;
3833
3834         if (state) {
3835                 /*
3836                  * Cap display bpc based on the user requested value.
3837                  *
3838                  * The value for state->max_bpc may not correctly updated
3839                  * depending on when the connector gets added to the state
3840                  * or if this was called outside of atomic check, so it
3841                  * can't be used directly.
3842                  */
3843                 bpc = min(bpc, state->max_requested_bpc);
3844
3845                 /* Round down to the nearest even number. */
3846                 bpc = bpc - (bpc & 1);
3847         }
3848
3849         switch (bpc) {
3850         case 0:
3851                 /*
3852                  * Temporary Work around, DRM doesn't parse color depth for
3853                  * EDID revision before 1.4
3854                  * TODO: Fix edid parsing
3855                  */
3856                 return COLOR_DEPTH_888;
3857         case 6:
3858                 return COLOR_DEPTH_666;
3859         case 8:
3860                 return COLOR_DEPTH_888;
3861         case 10:
3862                 return COLOR_DEPTH_101010;
3863         case 12:
3864                 return COLOR_DEPTH_121212;
3865         case 14:
3866                 return COLOR_DEPTH_141414;
3867         case 16:
3868                 return COLOR_DEPTH_161616;
3869         default:
3870                 return COLOR_DEPTH_UNDEFINED;
3871         }
3872 }
3873
3874 static enum dc_aspect_ratio
3875 get_aspect_ratio(const struct drm_display_mode *mode_in)
3876 {
3877         /* 1-1 mapping, since both enums follow the HDMI spec. */
3878         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3879 }
3880
3881 static enum dc_color_space
3882 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3883 {
3884         enum dc_color_space color_space = COLOR_SPACE_SRGB;
3885
3886         switch (dc_crtc_timing->pixel_encoding) {
3887         case PIXEL_ENCODING_YCBCR422:
3888         case PIXEL_ENCODING_YCBCR444:
3889         case PIXEL_ENCODING_YCBCR420:
3890         {
3891                 /*
3892                  * 27030khz is the separation point between HDTV and SDTV
3893                  * according to HDMI spec, we use YCbCr709 and YCbCr601
3894                  * respectively
3895                  */
3896                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3897                         if (dc_crtc_timing->flags.Y_ONLY)
3898                                 color_space =
3899                                         COLOR_SPACE_YCBCR709_LIMITED;
3900                         else
3901                                 color_space = COLOR_SPACE_YCBCR709;
3902                 } else {
3903                         if (dc_crtc_timing->flags.Y_ONLY)
3904                                 color_space =
3905                                         COLOR_SPACE_YCBCR601_LIMITED;
3906                         else
3907                                 color_space = COLOR_SPACE_YCBCR601;
3908                 }
3909
3910         }
3911         break;
3912         case PIXEL_ENCODING_RGB:
3913                 color_space = COLOR_SPACE_SRGB;
3914                 break;
3915
3916         default:
3917                 WARN_ON(1);
3918                 break;
3919         }
3920
3921         return color_space;
3922 }
3923
3924 static bool adjust_colour_depth_from_display_info(
3925         struct dc_crtc_timing *timing_out,
3926         const struct drm_display_info *info)
3927 {
3928         enum dc_color_depth depth = timing_out->display_color_depth;
3929         int normalized_clk;
3930         do {
3931                 normalized_clk = timing_out->pix_clk_100hz / 10;
3932                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3933                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3934                         normalized_clk /= 2;
3935                 /* Adjusting pix clock following on HDMI spec based on colour depth */
3936                 switch (depth) {
3937                 case COLOR_DEPTH_888:
3938                         break;
3939                 case COLOR_DEPTH_101010:
3940                         normalized_clk = (normalized_clk * 30) / 24;
3941                         break;
3942                 case COLOR_DEPTH_121212:
3943                         normalized_clk = (normalized_clk * 36) / 24;
3944                         break;
3945                 case COLOR_DEPTH_161616:
3946                         normalized_clk = (normalized_clk * 48) / 24;
3947                         break;
3948                 default:
3949                         /* The above depths are the only ones valid for HDMI. */
3950                         return false;
3951                 }
3952                 if (normalized_clk <= info->max_tmds_clock) {
3953                         timing_out->display_color_depth = depth;
3954                         return true;
3955                 }
3956         } while (--depth > COLOR_DEPTH_666);
3957         return false;
3958 }
3959
/*
 * Translate a drm_display_mode (plus connector info) into the DC CRTC
 * timing in @stream->timing and derive the stream's output colour space
 * and transfer function.
 *
 * @stream:          stream whose timing/colour properties are filled in.
 * @mode_in:         mode to translate; the crtc_* fields are used.
 * @connector:       source of display_info (colour formats, HDMI caps).
 * @connector_state: forwarded to the colour-depth conversion; may be NULL.
 * @old_stream:      when non-NULL, vic and sync polarities are carried
 *                   over from this previous stream instead of being
 *                   recomputed (used when only scaling changed).
 */
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	/*
	 * Pixel encoding selection: YCbCr 4:2:0 when the mode requires it
	 * (HDMI) or the connector forces it, YCbCr 4:4:4 when the sink
	 * advertises it (HDMI), otherwise RGB.
	 */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector, connector_state,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	/* Carry over vic and sync polarities from the previous stream if given. */
	if(old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	/* For HDMI, take vic/hdmi_vic from the AVI/vendor infoframes instead. */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	/*
	 * For HDMI: if the chosen depth doesn't fit the TMDS clock and the
	 * mode also supports 4:2:0, retry the depth walk at 4:2:0.
	 */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
4047
4048 static void fill_audio_info(struct audio_info *audio_info,
4049                             const struct drm_connector *drm_connector,
4050                             const struct dc_sink *dc_sink)
4051 {
4052         int i = 0;
4053         int cea_revision = 0;
4054         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4055
4056         audio_info->manufacture_id = edid_caps->manufacturer_id;
4057         audio_info->product_id = edid_caps->product_id;
4058
4059         cea_revision = drm_connector->display_info.cea_rev;
4060
4061         strscpy(audio_info->display_name,
4062                 edid_caps->display_name,
4063                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4064
4065         if (cea_revision >= 3) {
4066                 audio_info->mode_count = edid_caps->audio_mode_count;
4067
4068                 for (i = 0; i < audio_info->mode_count; ++i) {
4069                         audio_info->modes[i].format_code =
4070                                         (enum audio_format_code)
4071                                         (edid_caps->audio_modes[i].format_code);
4072                         audio_info->modes[i].channel_count =
4073                                         edid_caps->audio_modes[i].channel_count;
4074                         audio_info->modes[i].sample_rates.all =
4075                                         edid_caps->audio_modes[i].sample_rate;
4076                         audio_info->modes[i].sample_size =
4077                                         edid_caps->audio_modes[i].sample_size;
4078                 }
4079         }
4080
4081         audio_info->flags.all = edid_caps->speaker_flags;
4082
4083         /* TODO: We only check for the progressive mode, check for interlace mode too */
4084         if (drm_connector->latency_present[0]) {
4085                 audio_info->video_latency = drm_connector->video_latency[0];
4086                 audio_info->audio_latency = drm_connector->audio_latency[0];
4087         }
4088
4089         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4090
4091 }
4092
4093 static void
4094 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4095                                       struct drm_display_mode *dst_mode)
4096 {
4097         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4098         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4099         dst_mode->crtc_clock = src_mode->crtc_clock;
4100         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4101         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4102         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4103         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4104         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4105         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4106         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4107         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4108         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4109         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4110         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4111 }
4112
4113 static void
4114 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4115                                         const struct drm_display_mode *native_mode,
4116                                         bool scale_enabled)
4117 {
4118         if (scale_enabled) {
4119                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4120         } else if (native_mode->clock == drm_mode->clock &&
4121                         native_mode->htotal == drm_mode->htotal &&
4122                         native_mode->vtotal == drm_mode->vtotal) {
4123                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4124         } else {
4125                 /* no scaling nor amdgpu inserted, no need to patch */
4126         }
4127 }
4128
4129 static struct dc_sink *
4130 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4131 {
4132         struct dc_sink_init_data sink_init_data = { 0 };
4133         struct dc_sink *sink = NULL;
4134         sink_init_data.link = aconnector->dc_link;
4135         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4136
4137         sink = dc_sink_create(&sink_init_data);
4138         if (!sink) {
4139                 DRM_ERROR("Failed to create sink!\n");
4140                 return NULL;
4141         }
4142         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4143
4144         return sink;
4145 }
4146
4147 static void set_multisync_trigger_params(
4148                 struct dc_stream_state *stream)
4149 {
4150         if (stream->triggered_crtc_reset.enabled) {
4151                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4152                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4153         }
4154 }
4155
4156 static void set_master_stream(struct dc_stream_state *stream_set[],
4157                               int stream_count)
4158 {
4159         int j, highest_rfr = 0, master_stream = 0;
4160
4161         for (j = 0;  j < stream_count; j++) {
4162                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4163                         int refresh_rate = 0;
4164
4165                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4166                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4167                         if (refresh_rate > highest_rfr) {
4168                                 highest_rfr = refresh_rate;
4169                                 master_stream = j;
4170                         }
4171                 }
4172         }
4173         for (j = 0;  j < stream_count; j++) {
4174                 if (stream_set[j])
4175                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4176         }
4177 }
4178
4179 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4180 {
4181         int i = 0;
4182
4183         if (context->stream_count < 2)
4184                 return;
4185         for (i = 0; i < context->stream_count ; i++) {
4186                 if (!context->streams[i])
4187                         continue;
4188                 /*
4189                  * TODO: add a function to read AMD VSDB bits and set
4190                  * crtc_sync_master.multi_sync_enabled flag
4191                  * For now it's set to false
4192                  */
4193                 set_multisync_trigger_params(context->streams[i]);
4194         }
4195         set_master_stream(context->streams, context->stream_count);
4196 }
4197
/*
 * Build and return a new dc_stream_state for @aconnector.
 *
 * @aconnector: connector to create the stream for; when it has no detected
 *              dc_sink a fake (virtual) sink is created so the stream can
 *              still be built.
 * @drm_mode:   requested mode; a local copy is patched against the
 *              preferred (native) mode when scaling applies.
 * @dm_state:   connector state; may be NULL (e.g. during internal restore).
 * @old_stream: previous stream whose vic/polarities are reused when only
 *              scaling changed; may be NULL.
 *
 * Returns a new stream reference (caller owns it) or NULL on failure.
 * The sink reference taken here is dropped before returning.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	/* Local copy: the caller's mode is never modified. */
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	/* Fall back to a virtual sink when nothing was detected. */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	/* No preferred mode: fall back to the first mode in the list, if any. */
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	* If scaling is enabled and refresh rate didn't change
	* we copy the vic and polarities of the old timings
	*/
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream);

	stream->timing.flags.DSC = 0;

	/* DSC is only considered for DP sinks. */
	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Enable DSC only when the sink supports it and a config fits
		 * within the available link bandwidth. */
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	/* PSR set-up: requires an initialized DMCU to read the PSR version. */
	if (stream->link->psr_feature_enabled)  {
		struct dc  *core_dc = stream->link->ctx->dc;

		if (dc_is_dmcu_initialized(core_dc)) {
			struct dmcu *dmcu = core_dc->res_pool->dmcu;

			stream->psr_version = dmcu->dmcu_version.psr_version;

			//
			// should decide stream support vsc sdp colorimetry capability
			// before building vsc info packet
			//
			stream->use_vsc_sdp_for_colorimetry = false;
			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
				stream->use_vsc_sdp_for_colorimetry =
					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
			} else {
				/* SST: DPCD 1.4+ and the DPRX feature bit gate VSC SDP use. */
				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
					stream->use_vsc_sdp_for_colorimetry = true;
				}
			}
			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		}
	}
finish:
	dc_sink_release(sink);

	return stream;
}
4357
/* drm_crtc_funcs.destroy hook: tear down DRM bookkeeping and free the CRTC. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
4363
4364 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4365                                   struct drm_crtc_state *state)
4366 {
4367         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4368
4369         /* TODO Destroy dc_stream objects are stream object is flattened */
4370         if (cur->stream)
4371                 dc_stream_release(cur->stream);
4372
4373
4374         __drm_atomic_helper_crtc_destroy_state(state);
4375
4376
4377         kfree(state);
4378 }
4379
4380 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4381 {
4382         struct dm_crtc_state *state;
4383
4384         if (crtc->state)
4385                 dm_crtc_destroy_state(crtc, crtc->state);
4386
4387         state = kzalloc(sizeof(*state), GFP_KERNEL);
4388         if (WARN_ON(!state))
4389                 return;
4390
4391         crtc->state = &state->base;
4392         crtc->state->crtc = crtc;
4393
4394 }
4395
4396 static struct drm_crtc_state *
4397 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4398 {
4399         struct dm_crtc_state *state, *cur;
4400
4401         cur = to_dm_crtc_state(crtc->state);
4402
4403         if (WARN_ON(!crtc->state))
4404                 return NULL;
4405
4406         state = kzalloc(sizeof(*state), GFP_KERNEL);
4407         if (!state)
4408                 return NULL;
4409
4410         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4411
4412         if (cur->stream) {
4413                 state->stream = cur->stream;
4414                 dc_stream_retain(state->stream);
4415         }
4416
4417         state->active_planes = cur->active_planes;
4418         state->interrupts_enabled = cur->interrupts_enabled;
4419         state->vrr_params = cur->vrr_params;
4420         state->vrr_infopacket = cur->vrr_infopacket;
4421         state->abm_level = cur->abm_level;
4422         state->vrr_supported = cur->vrr_supported;
4423         state->freesync_config = cur->freesync_config;
4424         state->crc_src = cur->crc_src;
4425         state->cm_has_degamma = cur->cm_has_degamma;
4426         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4427
4428         /* TODO Duplicate dc_stream after objects are stream object is flattened */
4429
4430         return &state->base;
4431 }
4432
4433 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4434 {
4435         enum dc_irq_source irq_source;
4436         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4437         struct amdgpu_device *adev = crtc->dev->dev_private;
4438         int rc;
4439
4440         /* Do not set vupdate for DCN hardware */
4441         if (adev->family > AMDGPU_FAMILY_AI)
4442                 return 0;
4443
4444         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4445
4446         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4447
4448         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4449                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4450         return rc;
4451 }
4452
4453 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4454 {
4455         enum dc_irq_source irq_source;
4456         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4457         struct amdgpu_device *adev = crtc->dev->dev_private;
4458         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4459         int rc = 0;
4460
4461         if (enable) {
4462                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4463                 if (amdgpu_dm_vrr_active(acrtc_state))
4464                         rc = dm_set_vupdate_irq(crtc, true);
4465         } else {
4466                 /* vblank irq off -> vupdate irq off */
4467                 rc = dm_set_vupdate_irq(crtc, false);
4468         }
4469
4470         if (rc)
4471                 return rc;
4472
4473         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4474         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4475 }
4476
/* Enable vblank for @crtc; returns 0 or a negative errno from dm_set_vblank(). */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
4481
/* Disable vblank for @crtc; any error from dm_set_vblank() is ignored. */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
4486
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
4504
4505 static enum drm_connector_status
4506 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4507 {
4508         bool connected;
4509         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4510
4511         /*
4512          * Notes:
4513          * 1. This interface is NOT called in context of HPD irq.
4514          * 2. This interface *is called* in context of user-mode ioctl. Which
4515          * makes it a bad place for *any* MST-related activity.
4516          */
4517
4518         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4519             !aconnector->fake_enable)
4520                 connected = (aconnector->dc_sink != NULL);
4521         else
4522                 connected = (aconnector->base.force == DRM_FORCE_ON);
4523
4524         return (connected ? connector_status_connected :
4525                         connector_status_disconnected);
4526 }
4527
4528 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4529                                             struct drm_connector_state *connector_state,
4530                                             struct drm_property *property,
4531                                             uint64_t val)
4532 {
4533         struct drm_device *dev = connector->dev;
4534         struct amdgpu_device *adev = dev->dev_private;
4535         struct dm_connector_state *dm_old_state =
4536                 to_dm_connector_state(connector->state);
4537         struct dm_connector_state *dm_new_state =
4538                 to_dm_connector_state(connector_state);
4539
4540         int ret = -EINVAL;
4541
4542         if (property == dev->mode_config.scaling_mode_property) {
4543                 enum amdgpu_rmx_type rmx_type;
4544
4545                 switch (val) {
4546                 case DRM_MODE_SCALE_CENTER:
4547                         rmx_type = RMX_CENTER;
4548                         break;
4549                 case DRM_MODE_SCALE_ASPECT:
4550                         rmx_type = RMX_ASPECT;
4551                         break;
4552                 case DRM_MODE_SCALE_FULLSCREEN:
4553                         rmx_type = RMX_FULL;
4554                         break;
4555                 case DRM_MODE_SCALE_NONE:
4556                 default:
4557                         rmx_type = RMX_OFF;
4558                         break;
4559                 }
4560
4561                 if (dm_old_state->scaling == rmx_type)
4562                         return 0;
4563
4564                 dm_new_state->scaling = rmx_type;
4565                 ret = 0;
4566         } else if (property == adev->mode_info.underscan_hborder_property) {
4567                 dm_new_state->underscan_hborder = val;
4568                 ret = 0;
4569         } else if (property == adev->mode_info.underscan_vborder_property) {
4570                 dm_new_state->underscan_vborder = val;
4571                 ret = 0;
4572         } else if (property == adev->mode_info.underscan_property) {
4573                 dm_new_state->underscan_enable = val;
4574                 ret = 0;
4575         } else if (property == adev->mode_info.abm_level_property) {
4576                 dm_new_state->abm_level = val;
4577                 ret = 0;
4578         }
4579
4580         return ret;
4581 }
4582
4583 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4584                                             const struct drm_connector_state *state,
4585                                             struct drm_property *property,
4586                                             uint64_t *val)
4587 {
4588         struct drm_device *dev = connector->dev;
4589         struct amdgpu_device *adev = dev->dev_private;
4590         struct dm_connector_state *dm_state =
4591                 to_dm_connector_state(state);
4592         int ret = -EINVAL;
4593
4594         if (property == dev->mode_config.scaling_mode_property) {
4595                 switch (dm_state->scaling) {
4596                 case RMX_CENTER:
4597                         *val = DRM_MODE_SCALE_CENTER;
4598                         break;
4599                 case RMX_ASPECT:
4600                         *val = DRM_MODE_SCALE_ASPECT;
4601                         break;
4602                 case RMX_FULL:
4603                         *val = DRM_MODE_SCALE_FULLSCREEN;
4604                         break;
4605                 case RMX_OFF:
4606                 default:
4607                         *val = DRM_MODE_SCALE_NONE;
4608                         break;
4609                 }
4610                 ret = 0;
4611         } else if (property == adev->mode_info.underscan_hborder_property) {
4612                 *val = dm_state->underscan_hborder;
4613                 ret = 0;
4614         } else if (property == adev->mode_info.underscan_vborder_property) {
4615                 *val = dm_state->underscan_vborder;
4616                 ret = 0;
4617         } else if (property == adev->mode_info.underscan_property) {
4618                 *val = dm_state->underscan_enable;
4619                 ret = 0;
4620         } else if (property == adev->mode_info.abm_level_property) {
4621                 *val = dm_state->abm_level;
4622                 ret = 0;
4623         }
4624
4625         return ret;
4626 }
4627
/* Unregister the connector's DP AUX channel when the connector goes away. */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
4634
/*
 * drm_connector destroy path: release every resource this connector owns —
 * the backlight device (eDP/LVDS links), any dc sink references, the CEC
 * registration, the DDC i2c adapter, the AUX bus name string, and finally
 * the connector memory itself.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* Only a connected eDP/LVDS link owns the registered backlight. */
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	/* Drop the sink references held by this connector. */
	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	/* The AUX bus name was allocated separately; free it with the connector. */
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
4671
/*
 * drm_connector_funcs.reset: throw away any existing atomic state and
 * install a freshly-allocated state with driver defaults.
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	/* Destroy and free the previous state, if one exists. */
	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		/* ABM (adaptive backlight) only applies to internal panels. */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}
4698
4699 struct drm_connector_state *
4700 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4701 {
4702         struct dm_connector_state *state =
4703                 to_dm_connector_state(connector->state);
4704
4705         struct dm_connector_state *new_state =
4706                         kmemdup(state, sizeof(*state), GFP_KERNEL);
4707
4708         if (!new_state)
4709                 return NULL;
4710
4711         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4712
4713         new_state->freesync_capable = state->freesync_capable;
4714         new_state->abm_level = state->abm_level;
4715         new_state->scaling = state->scaling;
4716         new_state->underscan_enable = state->underscan_enable;
4717         new_state->underscan_hborder = state->underscan_hborder;
4718         new_state->underscan_vborder = state->underscan_vborder;
4719         new_state->vcpi_slots = state->vcpi_slots;
4720         new_state->pbn = state->pbn;
4721         return &new_state->base;
4722 }
4723
4724 static int
4725 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4726 {
4727         struct amdgpu_dm_connector *amdgpu_dm_connector =
4728                 to_amdgpu_dm_connector(connector);
4729         int r;
4730
4731         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4732             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4733                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4734                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4735                 if (r)
4736                         return r;
4737         }
4738
4739 #if defined(CONFIG_DEBUG_FS)
4740         connector_debugfs_init(amdgpu_dm_connector);
4741 #endif
4742
4743         return 0;
4744 }
4745
/* DRM connector vtable wiring the DM implementations into the core. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
4758
/* Thin wrapper used as drm_connector_helper_funcs.get_modes. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
4763
/*
 * Create an emulated (virtual) DC sink from the connector's override EDID
 * blob. Used for forced connectors; if no EDID blob is present the
 * connector is forced off instead.
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
				aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	/* Register the EDID-backed emulated sink with DC as a remote sink. */
	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	/*
	 * For a forced-on connector, prefer the link's local sink when one
	 * exists, falling back to the emulated sink; hold a reference either
	 * way.
	 */
	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}
4798
4799 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4800 {
4801         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4802
4803         /*
4804          * In case of headless boot with force on for DP managed connector
4805          * Those settings have to be != 0 to get initial modeset
4806          */
4807         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4808                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4809                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4810         }
4811
4812
4813         aconnector->base.override_edid = true;
4814         create_eml_sink(aconnector);
4815 }
4816
/*
 * drm_connector_helper_funcs.mode_valid: build a temporary DC stream for
 * the candidate mode and let DC validate it. Interlaced and doublescan
 * modes are rejected outright.
 */
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	enum dc_status dc_result = DC_OK;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initilialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
		!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	/* Temporary stream, released below after validation. */
	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	dc_result = dc_validate_stream(adev->dm.dc, stream);

	if (dc_result == DC_OK)
		result = MODE_OK;
	else
		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
			      mode->hdisplay,
			      mode->vdisplay,
			      mode->clock,
			      dc_result);

	dc_stream_release(stream);

fail:
	/* TODO: error handling*/
	return result;
}
4870
/*
 * Pack the connector state's HDR output metadata into a DC info packet.
 * The DRM infoframe is built first, then re-headered for HDMI or DP SDP
 * transport as appropriate. Returns 0 on success (out->valid set) or a
 * negative errno; an absent metadata blob yields an all-zero packet.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	/* Payload starts after the 4-byte header of the packed infoframe. */
	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
4929
4930 static bool
4931 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4932                           const struct drm_connector_state *new_state)
4933 {
4934         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4935         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4936
4937         if (old_blob != new_blob) {
4938                 if (old_blob && new_blob &&
4939                     old_blob->length == new_blob->length)
4940                         return memcmp(old_blob->data, new_blob->data,
4941                                       old_blob->length);
4942
4943                 return true;
4944         }
4945
4946         return false;
4947 }
4948
/*
 * drm_connector_helper_funcs.atomic_check: when the HDR metadata changed,
 * validate the new packet and pull the CRTC into the commit, forcing a
 * modeset only on HDR enter/exit.
 */
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	/* Connector not bound to a CRTC: nothing to validate. */
	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		/* Validate the metadata packs into a well-formed packet. */
		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
4993
/* Connector helper vtable for probing and atomic validation. */
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(),
	 * and those modes are missing after the user starts lightdm. So we
	 * need to renew the modes list in the get_modes callback, not just
	 * return the mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
5006
/* Intentionally empty: CRTC disable is driven through the atomic commit
 * path rather than this legacy helper hook. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
5010
5011 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5012 {
5013         struct drm_device *dev = new_crtc_state->crtc->dev;
5014         struct drm_plane *plane;
5015
5016         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5017                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5018                         return true;
5019         }
5020
5021         return false;
5022 }
5023
5024 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5025 {
5026         struct drm_atomic_state *state = new_crtc_state->state;
5027         struct drm_plane *plane;
5028         int num_active = 0;
5029
5030         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5031                 struct drm_plane_state *new_plane_state;
5032
5033                 /* Cursor planes are "fake". */
5034                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5035                         continue;
5036
5037                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5038
5039                 if (!new_plane_state) {
5040                         /*
5041                          * The plane is enable on the CRTC and hasn't changed
5042                          * state. This means that it previously passed
5043                          * validation and is therefore enabled.
5044                          */
5045                         num_active += 1;
5046                         continue;
5047                 }
5048
5049                 /* We need a framebuffer to be considered enabled. */
5050                 num_active += (new_plane_state->fb != NULL);
5051         }
5052
5053         return num_active;
5054 }
5055
5056 /*
5057  * Sets whether interrupts should be enabled on a specific CRTC.
5058  * We require that the stream be enabled and that there exist active
5059  * DC planes on the stream.
5060  */
5061 static void
5062 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5063                                struct drm_crtc_state *new_crtc_state)
5064 {
5065         struct dm_crtc_state *dm_new_crtc_state =
5066                 to_dm_crtc_state(new_crtc_state);
5067
5068         dm_new_crtc_state->active_planes = 0;
5069         dm_new_crtc_state->interrupts_enabled = false;
5070
5071         if (!dm_new_crtc_state->stream)
5072                 return;
5073
5074         dm_new_crtc_state->active_planes =
5075                 count_crtc_active_planes(new_crtc_state);
5076
5077         dm_new_crtc_state->interrupts_enabled =
5078                 dm_new_crtc_state->active_planes > 0;
5079 }
5080
/*
 * drm_crtc_helper_funcs.atomic_check: refresh the CRTC interrupt state
 * and validate the attached DC stream, rejecting a cursor-only CRTC.
 */
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	/*
	 * Update interrupt state for the CRTC. This needs to happen whenever
	 * the CRTC has changed or whenever any of its planes have changed.
	 * Atomic check satisfies both of these requirements since the CRTC
	 * is added to the state by DRM during drm_atomic_helper_check_planes.
	 */
	dm_update_crtc_interrupt_state(crtc, state);

	/* A modeset that requires a stream but has none is a driver bug. */
	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
	if (state->enable && state->active &&
	    does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}
5121
/* No mode fixup is applied; every mode is accepted here and validated
 * later by DC in atomic_check. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
5128
/* CRTC helper vtable: validation and scanout-position support. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
5135
/* Intentionally empty: encoder disable is handled through the atomic
 * commit path rather than this helper hook. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
5140
5141 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5142 {
5143         switch (display_color_depth) {
5144                 case COLOR_DEPTH_666:
5145                         return 6;
5146                 case COLOR_DEPTH_888:
5147                         return 8;
5148                 case COLOR_DEPTH_101010:
5149                         return 10;
5150                 case COLOR_DEPTH_121212:
5151                         return 12;
5152                 case COLOR_DEPTH_141414:
5153                         return 14;
5154                 case COLOR_DEPTH_161616:
5155                         return 16;
5156                 default:
5157                         break;
5158                 }
5159         return 0;
5160 }
5161
/*
 * drm_encoder_helper_funcs.atomic_check for MST connectors: compute the
 * stream's PBN from the adjusted mode and reserve VCPI slots on the MST
 * topology. Non-MST connectors (no port) are a no-op.
 */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	/* Only MST connectors with a live sink need VCPI accounting. */
	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	/*
	 * Skip recomputing PBN for duplicated (suspend/resume restore)
	 * states; the stored value is reused.
	 */
	if (!state->duplicated) {
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector, conn_state,
								    is_y420);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   0);
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
5206
/* Encoder helper vtable; atomic_check handles MST bandwidth accounting. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
5211
5212 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For every MST connector in the atomic state, enable or disable DSC on
 * its port to match the DC stream's DSC decision, recomputing PBN/VCPI
 * with the DSC bits-per-pixel when DSC is on. Returns 0 or a negative
 * error from the MST atomic helpers.
 */
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		/* Non-MST connectors have no port. */
		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		/* Find the DC stream that belongs to this connector. */
		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		/* DSC off: keep the previously computed PBN as-is. */
		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		/* DSC on: recompute PBN from the compressed bpp. */
		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
5275 #endif
5276
5277 static void dm_drm_plane_reset(struct drm_plane *plane)
5278 {
5279         struct dm_plane_state *amdgpu_state = NULL;
5280
5281         if (plane->state)
5282                 plane->funcs->atomic_destroy_state(plane, plane->state);
5283
5284         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5285         WARN_ON(amdgpu_state == NULL);
5286
5287         if (amdgpu_state)
5288                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5289 }
5290
5291 static struct drm_plane_state *
5292 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5293 {
5294         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5295
5296         old_dm_plane_state = to_dm_plane_state(plane->state);
5297         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5298         if (!dm_plane_state)
5299                 return NULL;
5300
5301         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5302
5303         if (old_dm_plane_state->dc_state) {
5304                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5305                 dc_plane_state_retain(dm_plane_state->dc_state);
5306         }
5307
5308         return &dm_plane_state->base;
5309 }
5310
5311 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5312                                 struct drm_plane_state *state)
5313 {
5314         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5315
5316         if (dm_plane_state->dc_state)
5317                 dc_plane_state_release(dm_plane_state->dc_state);
5318
5319         drm_atomic_helper_plane_destroy_state(plane, state);
5320 }
5321
/* Plane vtable: atomic helpers plus DM-specific state management. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane   = drm_atomic_helper_update_plane,
	.disable_plane  = drm_atomic_helper_disable_plane,
	.destroy        = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
5330
/*
 * drm_plane_helper_funcs.prepare_fb: reserve and pin the framebuffer's
 * BO, bind it to GART, record its GPU address, and (when the DC plane
 * state changed) refresh the DC buffer attributes from the BO's tiling.
 * The pin and the extra BO reference are undone in cleanup_fb.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint64_t tiling_flags;
	uint32_t domain;
	int r;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	/* Reserve the BO (deadlock-avoiding via the ww acquire ticket). */
	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	/* Cursors must live in VRAM; other planes take any supported domain. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		/* -ERESTARTSYS is an ordinary signal interruption; stay quiet. */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	/* Read tiling while still holding the reservation. */
	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	/* Hold a reference for the lifetime of the pin (see cleanup_fb). */
	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			tiling_flags, &plane_state->tiling_info,
			&plane_state->plane_size, &plane_state->dcc,
			&plane_state->address);
	}

	return 0;
}
5412
5413 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5414                                        struct drm_plane_state *old_state)
5415 {
5416         struct amdgpu_bo *rbo;
5417         int r;
5418
5419         if (!old_state->fb)
5420                 return;
5421
5422         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5423         r = amdgpu_bo_reserve(rbo, false);
5424         if (unlikely(r)) {
5425                 DRM_ERROR("failed to reserve rbo before unpin\n");
5426                 return;
5427         }
5428
5429         amdgpu_bo_unpin(rbo);
5430         amdgpu_bo_unreserve(rbo);
5431         amdgpu_bo_unref(&rbo);
5432 }
5433
5434 static int dm_plane_atomic_check(struct drm_plane *plane,
5435                                  struct drm_plane_state *state)
5436 {
5437         struct amdgpu_device *adev = plane->dev->dev_private;
5438         struct dc *dc = adev->dm.dc;
5439         struct dm_plane_state *dm_plane_state;
5440         struct dc_scaling_info scaling_info;
5441         int ret;
5442
5443         dm_plane_state = to_dm_plane_state(state);
5444
5445         if (!dm_plane_state->dc_state)
5446                 return 0;
5447
5448         ret = fill_dc_scaling_info(state, &scaling_info);
5449         if (ret)
5450                 return ret;
5451
5452         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5453                 return 0;
5454
5455         return -EINVAL;
5456 }
5457
5458 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5459                                        struct drm_plane_state *new_plane_state)
5460 {
5461         /* Only support async updates on cursor planes. */
5462         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5463                 return -EINVAL;
5464
5465         return 0;
5466 }
5467
/*
 * drm_plane_helper_funcs.atomic_async_update: fast cursor update — move
 * the new fb and geometry into the current plane state, then program the
 * cursor hardware directly.
 */
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	/* Hand the new fb to the live state; new_state gets the old one back. */
	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}
5487
/* Plane helper vtable: fb pin/unpin plus sync and async (cursor) paths. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
5495
5496 /*
5497  * TODO: these are currently initialized to rgb formats only.
5498  * For future use cases we should either initialize them dynamically based on
5499  * plane capabilities, or initialize this array to all formats, so internal drm
5500  * check will succeed, and let DC implement proper check
5501  */
/* Formats advertised on primary planes (NV12/P010 are appended from caps). */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};
5514
/* Formats advertised on overlay planes. */
static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};
5523
/* The cursor plane advertises only ARGB8888. */
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
5527
5528 static int get_plane_formats(const struct drm_plane *plane,
5529                              const struct dc_plane_cap *plane_cap,
5530                              uint32_t *formats, int max_formats)
5531 {
5532         int i, num_formats = 0;
5533
5534         /*
5535          * TODO: Query support for each group of formats directly from
5536          * DC plane caps. This will require adding more formats to the
5537          * caps list.
5538          */
5539
5540         switch (plane->type) {
5541         case DRM_PLANE_TYPE_PRIMARY:
5542                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5543                         if (num_formats >= max_formats)
5544                                 break;
5545
5546                         formats[num_formats++] = rgb_formats[i];
5547                 }
5548
5549                 if (plane_cap && plane_cap->pixel_format_support.nv12)
5550                         formats[num_formats++] = DRM_FORMAT_NV12;
5551                 if (plane_cap && plane_cap->pixel_format_support.p010)
5552                         formats[num_formats++] = DRM_FORMAT_P010;
5553                 break;
5554
5555         case DRM_PLANE_TYPE_OVERLAY:
5556                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5557                         if (num_formats >= max_formats)
5558                                 break;
5559
5560                         formats[num_formats++] = overlay_formats[i];
5561                 }
5562                 break;
5563
5564         case DRM_PLANE_TYPE_CURSOR:
5565                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5566                         if (num_formats >= max_formats)
5567                                 break;
5568
5569                         formats[num_formats++] = cursor_formats[i];
5570                 }
5571                 break;
5572         }
5573
5574         return num_formats;
5575 }
5576
5577 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5578                                 struct drm_plane *plane,
5579                                 unsigned long possible_crtcs,
5580                                 const struct dc_plane_cap *plane_cap)
5581 {
5582         uint32_t formats[32];
5583         int num_formats;
5584         int res = -EPERM;
5585
5586         num_formats = get_plane_formats(plane, plane_cap, formats,
5587                                         ARRAY_SIZE(formats));
5588
5589         res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5590                                        &dm_plane_funcs, formats, num_formats,
5591                                        NULL, plane->type, NULL);
5592         if (res)
5593                 return res;
5594
5595         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5596             plane_cap && plane_cap->per_pixel_alpha) {
5597                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5598                                           BIT(DRM_MODE_BLEND_PREMULTI);
5599
5600                 drm_plane_create_alpha_property(plane);
5601                 drm_plane_create_blend_mode_property(plane, blend_caps);
5602         }
5603
5604         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5605             plane_cap &&
5606             (plane_cap->pixel_format_support.nv12 ||
5607              plane_cap->pixel_format_support.p010)) {
5608                 /* This only affects YUV formats. */
5609                 drm_plane_create_color_properties(
5610                         plane,
5611                         BIT(DRM_COLOR_YCBCR_BT601) |
5612                         BIT(DRM_COLOR_YCBCR_BT709) |
5613                         BIT(DRM_COLOR_YCBCR_BT2020),
5614                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5615                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5616                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5617         }
5618
5619         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5620
5621         /* Create (reset) the plane state */
5622         if (plane->funcs->reset)
5623                 plane->funcs->reset(plane);
5624
5625         return 0;
5626 }
5627
5628 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5629                                struct drm_plane *plane,
5630                                uint32_t crtc_index)
5631 {
5632         struct amdgpu_crtc *acrtc = NULL;
5633         struct drm_plane *cursor_plane;
5634
5635         int res = -ENOMEM;
5636
5637         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5638         if (!cursor_plane)
5639                 goto fail;
5640
5641         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5642         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5643
5644         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5645         if (!acrtc)
5646                 goto fail;
5647
5648         res = drm_crtc_init_with_planes(
5649                         dm->ddev,
5650                         &acrtc->base,
5651                         plane,
5652                         cursor_plane,
5653                         &amdgpu_dm_crtc_funcs, NULL);
5654
5655         if (res)
5656                 goto fail;
5657
5658         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5659
5660         /* Create (reset) the plane state */
5661         if (acrtc->base.funcs->reset)
5662                 acrtc->base.funcs->reset(&acrtc->base);
5663
5664         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5665         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5666
5667         acrtc->crtc_id = crtc_index;
5668         acrtc->base.enabled = false;
5669         acrtc->otg_inst = -1;
5670
5671         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5672         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5673                                    true, MAX_COLOR_LUT_ENTRIES);
5674         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5675
5676         return 0;
5677
5678 fail:
5679         kfree(acrtc);
5680         kfree(cursor_plane);
5681         return res;
5682 }
5683
5684
5685 static int to_drm_connector_type(enum signal_type st)
5686 {
5687         switch (st) {
5688         case SIGNAL_TYPE_HDMI_TYPE_A:
5689                 return DRM_MODE_CONNECTOR_HDMIA;
5690         case SIGNAL_TYPE_EDP:
5691                 return DRM_MODE_CONNECTOR_eDP;
5692         case SIGNAL_TYPE_LVDS:
5693                 return DRM_MODE_CONNECTOR_LVDS;
5694         case SIGNAL_TYPE_RGB:
5695                 return DRM_MODE_CONNECTOR_VGA;
5696         case SIGNAL_TYPE_DISPLAY_PORT:
5697         case SIGNAL_TYPE_DISPLAY_PORT_MST:
5698                 return DRM_MODE_CONNECTOR_DisplayPort;
5699         case SIGNAL_TYPE_DVI_DUAL_LINK:
5700         case SIGNAL_TYPE_DVI_SINGLE_LINK:
5701                 return DRM_MODE_CONNECTOR_DVID;
5702         case SIGNAL_TYPE_VIRTUAL:
5703                 return DRM_MODE_CONNECTOR_VIRTUAL;
5704
5705         default:
5706                 return DRM_MODE_CONNECTOR_Unknown;
5707         }
5708 }
5709
5710 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5711 {
5712         struct drm_encoder *encoder;
5713
5714         /* There is only one encoder per connector */
5715         drm_connector_for_each_possible_encoder(connector, encoder)
5716                 return encoder;
5717
5718         return NULL;
5719 }
5720
/*
 * Record the connector's native (preferred) mode in its encoder.
 *
 * Only the FIRST entry of probed_modes is examined — the loop body ends in
 * an unconditional break. Callers are expected to have run drm_mode_sort()
 * first so the best candidate is at the head of the list. If that entry is
 * not flagged PREFERRED, native_mode keeps clock == 0 (i.e. "unset").
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	/* clock == 0 marks the native mode as invalid/unset. */
	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			/* Unconditional: only the first list entry is checked. */
			break;
		}

	}
}
5749
5750 static struct drm_display_mode *
5751 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5752                              char *name,
5753                              int hdisplay, int vdisplay)
5754 {
5755         struct drm_device *dev = encoder->dev;
5756         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5757         struct drm_display_mode *mode = NULL;
5758         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5759
5760         mode = drm_mode_duplicate(dev, native_mode);
5761
5762         if (mode == NULL)
5763                 return NULL;
5764
5765         mode->hdisplay = hdisplay;
5766         mode->vdisplay = vdisplay;
5767         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5768         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5769
5770         return mode;
5771
5772 }
5773
5774 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5775                                                  struct drm_connector *connector)
5776 {
5777         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5778         struct drm_display_mode *mode = NULL;
5779         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5780         struct amdgpu_dm_connector *amdgpu_dm_connector =
5781                                 to_amdgpu_dm_connector(connector);
5782         int i;
5783         int n;
5784         struct mode_size {
5785                 char name[DRM_DISPLAY_MODE_LEN];
5786                 int w;
5787                 int h;
5788         } common_modes[] = {
5789                 {  "640x480",  640,  480},
5790                 {  "800x600",  800,  600},
5791                 { "1024x768", 1024,  768},
5792                 { "1280x720", 1280,  720},
5793                 { "1280x800", 1280,  800},
5794                 {"1280x1024", 1280, 1024},
5795                 { "1440x900", 1440,  900},
5796                 {"1680x1050", 1680, 1050},
5797                 {"1600x1200", 1600, 1200},
5798                 {"1920x1080", 1920, 1080},
5799                 {"1920x1200", 1920, 1200}
5800         };
5801
5802         n = ARRAY_SIZE(common_modes);
5803
5804         for (i = 0; i < n; i++) {
5805                 struct drm_display_mode *curmode = NULL;
5806                 bool mode_existed = false;
5807
5808                 if (common_modes[i].w > native_mode->hdisplay ||
5809                     common_modes[i].h > native_mode->vdisplay ||
5810                    (common_modes[i].w == native_mode->hdisplay &&
5811                     common_modes[i].h == native_mode->vdisplay))
5812                         continue;
5813
5814                 list_for_each_entry(curmode, &connector->probed_modes, head) {
5815                         if (common_modes[i].w == curmode->hdisplay &&
5816                             common_modes[i].h == curmode->vdisplay) {
5817                                 mode_existed = true;
5818                                 break;
5819                         }
5820                 }
5821
5822                 if (mode_existed)
5823                         continue;
5824
5825                 mode = amdgpu_dm_create_common_mode(encoder,
5826                                 common_modes[i].name, common_modes[i].w,
5827                                 common_modes[i].h);
5828                 drm_mode_probed_add(connector, mode);
5829                 amdgpu_dm_connector->num_modes++;
5830         }
5831 }
5832
5833 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5834                                               struct edid *edid)
5835 {
5836         struct amdgpu_dm_connector *amdgpu_dm_connector =
5837                         to_amdgpu_dm_connector(connector);
5838
5839         if (edid) {
5840                 /* empty probed_modes */
5841                 INIT_LIST_HEAD(&connector->probed_modes);
5842                 amdgpu_dm_connector->num_modes =
5843                                 drm_add_edid_modes(connector, edid);
5844
5845                 /* sorting the probed modes before calling function
5846                  * amdgpu_dm_get_native_mode() since EDID can have
5847                  * more than one preferred mode. The modes that are
5848                  * later in the probed mode list could be of higher
5849                  * and preferred resolution. For example, 3840x2160
5850                  * resolution in base EDID preferred timing and 4096x2160
5851                  * preferred resolution in DID extension block later.
5852                  */
5853                 drm_mode_sort(&connector->probed_modes);
5854                 amdgpu_dm_get_native_mode(connector);
5855         } else {
5856                 amdgpu_dm_connector->num_modes = 0;
5857         }
5858 }
5859
5860 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5861 {
5862         struct amdgpu_dm_connector *amdgpu_dm_connector =
5863                         to_amdgpu_dm_connector(connector);
5864         struct drm_encoder *encoder;
5865         struct edid *edid = amdgpu_dm_connector->edid;
5866
5867         encoder = amdgpu_dm_connector_to_encoder(connector);
5868
5869         if (!edid || !drm_edid_is_valid(edid)) {
5870                 amdgpu_dm_connector->num_modes =
5871                                 drm_add_modes_noedid(connector, 640, 480);
5872         } else {
5873                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5874                 amdgpu_dm_connector_add_common_modes(encoder, connector);
5875         }
5876         amdgpu_dm_fbc_init(connector);
5877
5878         return amdgpu_dm_connector->num_modes;
5879 }
5880
/*
 * Common connector setup: initial state, polling/YCbCr420 flags per
 * connector type, and the KMS properties (scaling, underscan, max bpc,
 * ABM, HDR metadata, VRR, HDCP) this driver exposes.
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * configure support HPD hot plug connector_>polled default value is 0
	 * which means HPD hot plug not supported
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	/* Standard KMS properties: scaling mode and underscan controls. */
	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	/* NOTE(review): skipped for MST ports — presumably the MST code
	 * attaches max_bpc on the port connectors; confirm. */
	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	/* ABM level is only exposed on eDP links with an initialized DMCU. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    dc_is_dmcu_initialized(adev->dm.dc)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	/* HDR metadata, VRR and (optionally) HDCP on digital sinks. */
	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
5971
5972 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5973                               struct i2c_msg *msgs, int num)
5974 {
5975         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5976         struct ddc_service *ddc_service = i2c->ddc_service;
5977         struct i2c_command cmd;
5978         int i;
5979         int result = -EIO;
5980
5981         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5982
5983         if (!cmd.payloads)
5984                 return result;
5985
5986         cmd.number_of_payloads = num;
5987         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5988         cmd.speed = 100;
5989
5990         for (i = 0; i < num; i++) {
5991                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5992                 cmd.payloads[i].address = msgs[i].addr;
5993                 cmd.payloads[i].length = msgs[i].len;
5994                 cmd.payloads[i].data = msgs[i].buf;
5995         }
5996
5997         if (dc_submit_i2c(
5998                         ddc_service->ctx->dc,
5999                         ddc_service->ddc_pin->hw_info.ddc_channel,
6000                         &cmd))
6001                 result = num;
6002
6003         kfree(cmd.payloads);
6004         return result;
6005 }
6006
/* Adapter capabilities: plain I2C transfers plus SMBus emulation. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
6011
/* Algorithm hooks for the DM-created DDC/i2c adapters. */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
6016
/*
 * Allocate and initialize an i2c adapter wrapping a DC DDC service.
 * Returns NULL on allocation failure; the caller registers the adapter
 * with i2c_add_adapter() and owns the memory.
 *
 * NOTE(review): the 'res' out-parameter is never written here — callers
 * must not rely on it; consider removing it.
 */
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	/* Tie the DDC channel to the link so transfers target the right pin. */
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
6039
6040
6041 /*
6042  * Note: this function assumes that dc_link_detect() was called for the
6043  * dc_link which will be represented by this aconnector.
6044  */
6045 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6046                                     struct amdgpu_dm_connector *aconnector,
6047                                     uint32_t link_index,
6048                                     struct amdgpu_encoder *aencoder)
6049 {
6050         int res = 0;
6051         int connector_type;
6052         struct dc *dc = dm->dc;
6053         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6054         struct amdgpu_i2c_adapter *i2c;
6055
6056         link->priv = aconnector;
6057
6058         DRM_DEBUG_DRIVER("%s()\n", __func__);
6059
6060         i2c = create_i2c(link->ddc, link->link_index, &res);
6061         if (!i2c) {
6062                 DRM_ERROR("Failed to create i2c adapter data\n");
6063                 return -ENOMEM;
6064         }
6065
6066         aconnector->i2c = i2c;
6067         res = i2c_add_adapter(&i2c->base);
6068
6069         if (res) {
6070                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6071                 goto out_free;
6072         }
6073
6074         connector_type = to_drm_connector_type(link->connector_signal);
6075
6076         res = drm_connector_init_with_ddc(
6077                         dm->ddev,
6078                         &aconnector->base,
6079                         &amdgpu_dm_connector_funcs,
6080                         connector_type,
6081                         &i2c->base);
6082
6083         if (res) {
6084                 DRM_ERROR("connector_init failed\n");
6085                 aconnector->connector_id = -1;
6086                 goto out_free;
6087         }
6088
6089         drm_connector_helper_add(
6090                         &aconnector->base,
6091                         &amdgpu_dm_connector_helper_funcs);
6092
6093         amdgpu_dm_connector_init_helper(
6094                 dm,
6095                 aconnector,
6096                 connector_type,
6097                 link,
6098                 link_index);
6099
6100         drm_connector_attach_encoder(
6101                 &aconnector->base, &aencoder->base);
6102
6103         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6104                 || connector_type == DRM_MODE_CONNECTOR_eDP)
6105                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6106
6107 out_free:
6108         if (res) {
6109                 kfree(i2c);
6110                 aconnector->i2c = NULL;
6111         }
6112         return res;
6113 }
6114
6115 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6116 {
6117         switch (adev->mode_info.num_crtc) {
6118         case 1:
6119                 return 0x1;
6120         case 2:
6121                 return 0x3;
6122         case 3:
6123                 return 0x7;
6124         case 4:
6125                 return 0xf;
6126         case 5:
6127                 return 0x1f;
6128         case 6:
6129         default:
6130                 return 0x3f;
6131         }
6132 }
6133
6134 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6135                                   struct amdgpu_encoder *aencoder,
6136                                   uint32_t link_index)
6137 {
6138         struct amdgpu_device *adev = dev->dev_private;
6139
6140         int res = drm_encoder_init(dev,
6141                                    &aencoder->base,
6142                                    &amdgpu_dm_encoder_funcs,
6143                                    DRM_MODE_ENCODER_TMDS,
6144                                    NULL);
6145
6146         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6147
6148         if (!res)
6149                 aencoder->encoder_id = link_index;
6150         else
6151                 aencoder->encoder_id = -1;
6152
6153         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6154
6155         return res;
6156 }
6157
6158 static void manage_dm_interrupts(struct amdgpu_device *adev,
6159                                  struct amdgpu_crtc *acrtc,
6160                                  bool enable)
6161 {
6162         /*
6163          * this is not correct translation but will work as soon as VBLANK
6164          * constant is the same as PFLIP
6165          */
6166         int irq_type =
6167                 amdgpu_display_crtc_idx_to_irq_type(
6168                         adev,
6169                         acrtc->crtc_id);
6170
6171         if (enable) {
6172                 drm_crtc_vblank_on(&acrtc->base);
6173                 amdgpu_irq_get(
6174                         adev,
6175                         &adev->pageflip_irq,
6176                         irq_type);
6177         } else {
6178
6179                 amdgpu_irq_put(
6180                         adev,
6181                         &adev->pageflip_irq,
6182                         irq_type);
6183                 drm_crtc_vblank_off(&acrtc->base);
6184         }
6185 }
6186
/*
 * Return true when the scaling/underscan configuration changed between
 * the old and new connector states.
 *
 * NOTE(review): the border checks below use '&&', so toggling underscan
 * with only ONE non-zero border is reported as unchanged — confirm this
 * is intentional rather than a typo for '||'.
 */
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	/* Underscan turned off: significant only if borders were in use. */
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		/* Underscan turned on: same border test on the new state. */
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}
6204
#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP workqueue needs to (re)start or stop content
 * protection for this connector, normalizing the drm content_protection
 * state in the process. Returns true when hdcp work must be scheduled.
 * Note: may write back into @state (DESIRED/ENABLED adjustments below).
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Content type changed while protection is wanted: force a restart. */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	/* No transition at all: nothing to do. */
	if (old_state->content_protection == state->content_protection)
		return false;

	/* Protection explicitly turned off: hdcp work must tear it down. */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}

#endif
/* Detach the stream from the CRTC bookkeeping (update-mode path). */
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	/* -1 marks "no OTG assigned"; the CRTC no longer scans out. */
	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
6256
6257 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6258                                struct dc_cursor_position *position)
6259 {
6260         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6261         int x, y;
6262         int xorigin = 0, yorigin = 0;
6263
6264         position->enable = false;
6265         position->x = 0;
6266         position->y = 0;
6267
6268         if (!crtc || !plane->state->fb)
6269                 return 0;
6270
6271         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6272             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6273                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6274                           __func__,
6275                           plane->state->crtc_w,
6276                           plane->state->crtc_h);
6277                 return -EINVAL;
6278         }
6279
6280         x = plane->state->crtc_x;
6281         y = plane->state->crtc_y;
6282
6283         if (x <= -amdgpu_crtc->max_cursor_width ||
6284             y <= -amdgpu_crtc->max_cursor_height)
6285                 return 0;
6286
6287         if (x < 0) {
6288                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6289                 x = 0;
6290         }
6291         if (y < 0) {
6292                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6293                 y = 0;
6294         }
6295         position->enable = true;
6296         position->translate_by_source = true;
6297         position->x = x;
6298         position->y = y;
6299         position->x_hotspot = xorigin;
6300         position->y_hotspot = yorigin;
6301
6302         return 0;
6303 }
6304
/*
 * Program the hardware cursor for @plane based on its new plane state.
 * Disables the cursor when it is off-screen or lost its FB; otherwise
 * pushes cursor attributes and position into DC.  All DC calls are
 * made under dm.dc_lock.
 */
static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state)
{
        struct amdgpu_device *adev = plane->dev->dev_private;
        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
        /* If the new state has no FB, fall back to the old CRTC so the
         * cursor can still be turned off on it.
         */
        struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
        struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        uint64_t address = afb ? afb->address : 0;
        struct dc_cursor_position position;
        struct dc_cursor_attributes attributes;
        int ret;

        /* No FB before or after this commit: nothing to program. */
        if (!plane->state->fb && !old_plane_state->fb)
                return;

        DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
                         __func__,
                         amdgpu_crtc->crtc_id,
                         plane->state->crtc_w,
                         plane->state->crtc_h);

        ret = get_cursor_position(plane, crtc, &position);
        if (ret)
                return;

        if (!position.enable) {
                /* turn off cursor */
                if (crtc_state && crtc_state->stream) {
                        mutex_lock(&adev->dm.dc_lock);
                        dc_stream_set_cursor_position(crtc_state->stream,
                                                      &position);
                        mutex_unlock(&adev->dm.dc_lock);
                }
                return;
        }

        amdgpu_crtc->cursor_width = plane->state->crtc_w;
        amdgpu_crtc->cursor_height = plane->state->crtc_h;

        memset(&attributes, 0, sizeof(attributes));
        attributes.address.high_part = upper_32_bits(address);
        attributes.address.low_part  = lower_32_bits(address);
        attributes.width             = plane->state->crtc_w;
        attributes.height            = plane->state->crtc_h;
        attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
        attributes.rotation_angle    = 0;
        attributes.attribute_flags.value = 0;

        /* Cursor pitch equals its width (pixels per scanline). */
        attributes.pitch = attributes.width;

        if (crtc_state->stream) {
                mutex_lock(&adev->dm.dc_lock);
                /* Attributes must be set before the position takes effect. */
                if (!dc_stream_set_cursor_attributes(crtc_state->stream,
                                                         &attributes))
                        DRM_ERROR("DC failed to set cursor attributes\n");

                if (!dc_stream_set_cursor_position(crtc_state->stream,
                                                   &position))
                        DRM_ERROR("DC failed to set cursor position\n");
                mutex_unlock(&adev->dm.dc_lock);
        }
}
6368
/*
 * Hand the pending pageflip event over to the pflip IRQ path.
 * Caller must hold dev->event_lock.  The event is taken out of the
 * CRTC state (marked consumed) and the flip status is set to
 * SUBMITTED so the interrupt handler completes it later.
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

        assert_spin_locked(&acrtc->base.dev->event_lock);
        WARN_ON(acrtc->event);

        acrtc->event = acrtc->base.state->event;

        /* Set the flip status */
        acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

        /* Mark this event as consumed */
        acrtc->base.state->event = NULL;

        DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
                                                 acrtc->crtc_id);
}
6386
/*
 * Refresh the VRR (FreeSync) state of @new_stream around a pageflip:
 * run the freesync preflip handler, rebuild the VRR infopacket, and
 * record whether timing bounds or infopacket contents changed so the
 * commit path knows to push a stream update.  Runs under
 * dev->event_lock to serialize against IRQ handlers that also touch
 * the CRTC's vrr_params.
 */
static void update_freesync_state_on_stream(
        struct amdgpu_display_manager *dm,
        struct dm_crtc_state *new_crtc_state,
        struct dc_stream_state *new_stream,
        struct dc_plane_state *surface,
        u32 flip_timestamp_in_us)
{
        struct mod_vrr_params vrr_params;
        struct dc_info_packet vrr_infopacket = {0};
        struct amdgpu_device *adev = dm->adev;
        unsigned long flags;

        if (!new_stream)
                return;

        /*
         * TODO: Determine why min/max totals and vrefresh can be 0 here.
         * For now it's sufficient to just guard against these conditions.
         */

        if (!new_stream->timing.h_total || !new_stream->timing.v_total)
                return;

        spin_lock_irqsave(&adev->ddev->event_lock, flags);
        /* Work on a local copy; publish it back only at the end. */
        vrr_params = new_crtc_state->vrr_params;

        if (surface) {
                mod_freesync_handle_preflip(
                        dm->freesync_module,
                        surface,
                        new_stream,
                        flip_timestamp_in_us,
                        &vrr_params);

                /* Pre-AI (pre-Vega) ASICs: update v_total bounds here,
                 * from the flip path, while VRR is active.
                 */
                if (adev->family < AMDGPU_FAMILY_AI &&
                    amdgpu_dm_vrr_active(new_crtc_state)) {
                        mod_freesync_handle_v_update(dm->freesync_module,
                                                     new_stream, &vrr_params);

                        /* Need to call this before the frame ends. */
                        dc_stream_adjust_vmin_vmax(dm->dc,
                                                   new_crtc_state->stream,
                                                   &vrr_params.adjust);
                }
        }

        mod_freesync_build_vrr_infopacket(
                dm->freesync_module,
                new_stream,
                &vrr_params,
                PACKET_TYPE_VRR,
                TRANSFER_FUNC_UNKNOWN,
                &vrr_infopacket);

        /* Flag changes before overwriting the cached copies below. */
        new_crtc_state->freesync_timing_changed |=
                (memcmp(&new_crtc_state->vrr_params.adjust,
                        &vrr_params.adjust,
                        sizeof(vrr_params.adjust)) != 0);

        new_crtc_state->freesync_vrr_info_changed |=
                (memcmp(&new_crtc_state->vrr_infopacket,
                        &vrr_infopacket,
                        sizeof(vrr_infopacket)) != 0);

        new_crtc_state->vrr_params = vrr_params;
        new_crtc_state->vrr_infopacket = vrr_infopacket;

        new_stream->adjust = new_crtc_state->vrr_params.adjust;
        new_stream->vrr_infopacket = vrr_infopacket;

        if (new_crtc_state->freesync_vrr_info_changed)
                DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
                              new_crtc_state->base.crtc->base.id,
                              (int)new_crtc_state->base.vrr_enabled,
                              (int)vrr_params.state);

        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
6465
/*
 * Recompute the CRTC's vrr_params from its freesync_config ahead of a
 * commit: decide the VRR state (active/inactive/unsupported) from the
 * connector's capabilities and the user's vrr_enabled property, then
 * rebuild the params and note whether the timing adjustment changed.
 * Runs under dev->event_lock, like update_freesync_state_on_stream().
 */
static void pre_update_freesync_state_on_stream(
        struct amdgpu_display_manager *dm,
        struct dm_crtc_state *new_crtc_state)
{
        struct dc_stream_state *new_stream = new_crtc_state->stream;
        struct mod_vrr_params vrr_params;
        struct mod_freesync_config config = new_crtc_state->freesync_config;
        struct amdgpu_device *adev = dm->adev;
        unsigned long flags;

        if (!new_stream)
                return;

        /*
         * TODO: Determine why min/max totals and vrefresh can be 0 here.
         * For now it's sufficient to just guard against these conditions.
         */
        if (!new_stream->timing.h_total || !new_stream->timing.v_total)
                return;

        spin_lock_irqsave(&adev->ddev->event_lock, flags);
        vrr_params = new_crtc_state->vrr_params;

        /* VRR is usable only when supported and the refresh range is valid. */
        if (new_crtc_state->vrr_supported &&
            config.min_refresh_in_uhz &&
            config.max_refresh_in_uhz) {
                config.state = new_crtc_state->base.vrr_enabled ?
                        VRR_STATE_ACTIVE_VARIABLE :
                        VRR_STATE_INACTIVE;
        } else {
                config.state = VRR_STATE_UNSUPPORTED;
        }

        mod_freesync_build_vrr_params(dm->freesync_module,
                                      new_stream,
                                      &config, &vrr_params);

        /* Compare against the cached params before replacing them. */
        new_crtc_state->freesync_timing_changed |=
                (memcmp(&new_crtc_state->vrr_params.adjust,
                        &vrr_params.adjust,
                        sizeof(vrr_params.adjust)) != 0);

        new_crtc_state->vrr_params = vrr_params;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
6511
6512 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6513                                             struct dm_crtc_state *new_state)
6514 {
6515         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6516         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6517
6518         if (!old_vrr_active && new_vrr_active) {
6519                 /* Transition VRR inactive -> active:
6520                  * While VRR is active, we must not disable vblank irq, as a
6521                  * reenable after disable would compute bogus vblank/pflip
6522                  * timestamps if it likely happened inside display front-porch.
6523                  *
6524                  * We also need vupdate irq for the actual core vblank handling
6525                  * at end of vblank.
6526                  */
6527                 dm_set_vupdate_irq(new_state->base.crtc, true);
6528                 drm_crtc_vblank_get(new_state->base.crtc);
6529                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6530                                  __func__, new_state->base.crtc->base.id);
6531         } else if (old_vrr_active && !new_vrr_active) {
6532                 /* Transition VRR active -> inactive:
6533                  * Allow vblank irq disable again for fixed refresh rate.
6534                  */
6535                 dm_set_vupdate_irq(new_state->base.crtc, false);
6536                 drm_crtc_vblank_put(new_state->base.crtc);
6537                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6538                                  __func__, new_state->base.crtc->base.id);
6539         }
6540 }
6541
6542 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6543 {
6544         struct drm_plane *plane;
6545         struct drm_plane_state *old_plane_state, *new_plane_state;
6546         int i;
6547
6548         /*
6549          * TODO: Make this per-stream so we don't issue redundant updates for
6550          * commits with multiple streams.
6551          */
6552         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6553                                        new_plane_state, i)
6554                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6555                         handle_cursor_update(plane, old_plane_state);
6556 }
6557
/*
 * Program all non-cursor planes of @pcrtc for an atomic commit: build
 * a bundle of DC surface/stream updates, wait on FB fences, throttle
 * flips against the target vblank, and hand everything to DC in a
 * single dc_commit_updates_for_stream() call.  Cursor planes are
 * handled separately via amdgpu_dm_commit_cursors().
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                    struct dc_state *dc_state,
                                    struct drm_device *dev,
                                    struct amdgpu_display_manager *dm,
                                    struct drm_crtc *pcrtc,
                                    bool wait_for_vblank)
{
        uint32_t i;
        uint64_t timestamp_ns;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
        struct drm_crtc_state *new_pcrtc_state =
                        drm_atomic_get_new_crtc_state(state, pcrtc);
        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
        struct dm_crtc_state *dm_old_crtc_state =
                        to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
        int planes_count = 0, vpos, hpos;
        long r;
        unsigned long flags;
        struct amdgpu_bo *abo;
        uint64_t tiling_flags;
        uint32_t target_vblank, last_flip_vblank;
        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
        bool pflip_present = false;
        /* All per-commit DC update payloads; heap-allocated because the
         * MAX_SURFACES-sized arrays are too large for the kernel stack.
         */
        struct {
                struct dc_surface_update surface_updates[MAX_SURFACES];
                struct dc_plane_info plane_infos[MAX_SURFACES];
                struct dc_scaling_info scaling_infos[MAX_SURFACES];
                struct dc_flip_addrs flip_addrs[MAX_SURFACES];
                struct dc_stream_update stream_update;
        } *bundle;

        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

        if (!bundle) {
                dm_error("Failed to allocate update bundle\n");
                goto cleanup;
        }

        /*
         * Disable the cursor first if we're disabling all the planes.
         * It'll remain on the screen after the planes are re-enabled
         * if we don't.
         */
        if (acrtc_state->active_planes == 0)
                amdgpu_dm_commit_cursors(state);

        /* update planes when needed */
        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
                struct drm_crtc *crtc = new_plane_state->crtc;
                struct drm_crtc_state *new_crtc_state;
                struct drm_framebuffer *fb = new_plane_state->fb;
                bool plane_needs_flip;
                struct dc_plane_state *dc_plane;
                struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

                /* Cursor plane is handled after stream updates */
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                /* Only consider planes attached to this CRTC with an FB. */
                if (!fb || !crtc || pcrtc != crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
                if (!new_crtc_state->active)
                        continue;

                dc_plane = dm_new_plane_state->dc_state;

                bundle->surface_updates[planes_count].surface = dc_plane;
                if (new_pcrtc_state->color_mgmt_changed) {
                        bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
                        bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
                }

                fill_dc_scaling_info(new_plane_state,
                                     &bundle->scaling_infos[planes_count]);

                bundle->surface_updates[planes_count].scaling_info =
                        &bundle->scaling_infos[planes_count];

                /* A pageflip only happens when an FB exists before and after. */
                plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

                pflip_present = pflip_present || plane_needs_flip;

                if (!plane_needs_flip) {
                        planes_count += 1;
                        continue;
                }

                abo = gem_to_amdgpu_bo(fb->obj[0]);

                /*
                 * Wait for all fences on this FB. Do limited wait to avoid
                 * deadlock during GPU reset when this fence will not signal
                 * but we hold reservation lock for the BO.
                 */
                r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
                                                        false,
                                                        msecs_to_jiffies(5000));
                if (unlikely(r <= 0))
                        DRM_ERROR("Waiting for fences timed out!");

                /*
                 * TODO This might fail and hence better not used, wait
                 * explicitly on fences instead
                 * and in general should be called for
                 * blocking commit to as per framework helpers
                 */
                r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r != 0))
                        DRM_ERROR("failed to reserve buffer before flip\n");

                amdgpu_bo_get_tiling_flags(abo, &tiling_flags);

                amdgpu_bo_unreserve(abo);

                fill_dc_plane_info_and_addr(
                        dm->adev, new_plane_state, tiling_flags,
                        &bundle->plane_infos[planes_count],
                        &bundle->flip_addrs[planes_count].address);

                bundle->surface_updates[planes_count].plane_info =
                        &bundle->plane_infos[planes_count];

                /*
                 * Only allow immediate flips for fast updates that don't
                 * change FB pitch, DCC state, rotation or mirroing.
                 */
                bundle->flip_addrs[planes_count].flip_immediate =
                        crtc->state->async_flip &&
                        acrtc_state->update_type == UPDATE_TYPE_FAST;

                timestamp_ns = ktime_get_ns();
                bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
                bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
                bundle->surface_updates[planes_count].surface = dc_plane;

                if (!bundle->surface_updates[planes_count].surface) {
                        DRM_ERROR("No surface for CRTC: id=%d\n",
                                        acrtc_attach->crtc_id);
                        continue;
                }

                /* Only the primary plane drives FreeSync state updates. */
                if (plane == pcrtc->primary)
                        update_freesync_state_on_stream(
                                dm,
                                acrtc_state,
                                acrtc_state->stream,
                                dc_plane,
                                bundle->flip_addrs[planes_count].flip_timestamp_in_us);

                DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
                                 __func__,
                                 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
                                 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

                planes_count += 1;

        }

        if (pflip_present) {
                if (!vrr_active) {
                        /* Use old throttling in non-vrr fixed refresh rate mode
                         * to keep flip scheduling based on target vblank counts
                         * working in a backwards compatible way, e.g., for
                         * clients using the GLX_OML_sync_control extension or
                         * DRI3/Present extension with defined target_msc.
                         */
                        last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
                }
                else {
                        /* For variable refresh rate mode only:
                         * Get vblank of last completed flip to avoid > 1 vrr
                         * flips per video frame by use of throttling, but allow
                         * flip programming anywhere in the possibly large
                         * variable vrr vblank interval for fine-grained flip
                         * timing control and more opportunity to avoid stutter
                         * on late submission of flips.
                         */
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        last_flip_vblank = acrtc_attach->last_flip_vblank;
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }

                target_vblank = last_flip_vblank + wait_for_vblank;

                /*
                 * Wait until we're out of the vertical blank period before the one
                 * targeted by the flip
                 */
                while ((acrtc_attach->enabled &&
                        (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
                                                            0, &vpos, &hpos, NULL,
                                                            NULL, &pcrtc->hwmode)
                         & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
                        (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
                        (int)(target_vblank -
                          amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
                        usleep_range(1000, 1100);
                }

                /* Arm the pageflip completion event before programming DC. */
                if (acrtc_attach->base.state->event) {
                        drm_crtc_vblank_get(pcrtc);

                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

                        WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
                        prepare_flip_isr(acrtc_attach);

                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }

                if (acrtc_state->stream) {
                        if (acrtc_state->freesync_vrr_info_changed)
                                bundle->stream_update.vrr_infopacket =
                                        &acrtc_state->stream->vrr_infopacket;
                }
        }

        /* Update the planes if changed or disable if we don't have any. */
        if ((planes_count || acrtc_state->active_planes == 0) &&
                acrtc_state->stream) {
                bundle->stream_update.stream = acrtc_state->stream;
                if (new_pcrtc_state->mode_changed) {
                        bundle->stream_update.src = acrtc_state->stream->src;
                        bundle->stream_update.dst = acrtc_state->stream->dst;
                }

                if (new_pcrtc_state->color_mgmt_changed) {
                        /*
                         * TODO: This isn't fully correct since we've actually
                         * already modified the stream in place.
                         */
                        bundle->stream_update.gamut_remap =
                                &acrtc_state->stream->gamut_remap_matrix;
                        bundle->stream_update.output_csc_transform =
                                &acrtc_state->stream->csc_color_matrix;
                        bundle->stream_update.out_transfer_func =
                                acrtc_state->stream->out_transfer_func;
                }

                acrtc_state->stream->abm_level = acrtc_state->abm_level;
                if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
                        bundle->stream_update.abm_level = &acrtc_state->abm_level;

                /*
                 * If FreeSync state on the stream has changed then we need to
                 * re-adjust the min/max bounds now that DC doesn't handle this
                 * as part of commit.
                 */
                if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
                    amdgpu_dm_vrr_active(acrtc_state)) {
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        dc_stream_adjust_vmin_vmax(
                                dm->dc, acrtc_state->stream,
                                &acrtc_state->vrr_params.adjust);
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }
                mutex_lock(&dm->dc_lock);
                /* PSR must be disabled while a non-fast update is programmed. */
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
                                acrtc_state->stream->link->psr_allow_active)
                        amdgpu_dm_psr_disable(acrtc_state->stream);

                dc_commit_updates_for_stream(dm->dc,
                                                     bundle->surface_updates,
                                                     planes_count,
                                                     acrtc_state->stream,
                                                     &bundle->stream_update,
                                                     dc_state);

                /* Set up PSR once after a full update; re-enable it only
                 * on fast updates where the link already supports it.
                 */
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
                                                acrtc_state->stream->psr_version &&
                                                !acrtc_state->stream->link->psr_feature_enabled)
                        amdgpu_dm_link_setup_psr(acrtc_state->stream);
                else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
                                                acrtc_state->stream->link->psr_feature_enabled &&
                                                !acrtc_state->stream->link->psr_allow_active) {
                        amdgpu_dm_psr_enable(acrtc_state->stream);
                }

                mutex_unlock(&dm->dc_lock);
        }

        /*
         * Update cursor state *after* programming all the planes.
         * This avoids redundant programming in the case where we're going
         * to be disabling a single plane - those pipes are being disabled.
         */
        if (acrtc_state->active_planes)
                amdgpu_dm_commit_cursors(state);

cleanup:
        kfree(bundle);
}
6854
/*
 * Notify the audio side (ELD consumers) about connectors affected by
 * this atomic commit.  Pass 1 invalidates the audio instance of every
 * connector whose CRTC changed or is undergoing a modeset; pass 2
 * re-publishes the instance for connectors whose stream is active
 * after the commit.  audio_inst updates are serialized by
 * dm.audio_lock.
 */
static void amdgpu_dm_commit_audio(struct drm_device *dev,
                                   struct drm_atomic_state *state)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *new_dm_crtc_state;
        const struct dc_stream_status *status;
        int i, inst;

        /* Notify device removals. */
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                if (old_con_state->crtc != new_con_state->crtc) {
                        /* CRTC changes require notification. */
                        goto notify;
                }

                if (!new_con_state->crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(
                        state, new_con_state->crtc);

                if (!new_crtc_state)
                        continue;

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

        notify:
                aconnector = to_amdgpu_dm_connector(connector);

                /* Clear the instance before telling listeners it is gone. */
                mutex_lock(&adev->dm.audio_lock);
                inst = aconnector->audio_inst;
                aconnector->audio_inst = -1;
                mutex_unlock(&adev->dm.audio_lock);

                amdgpu_dm_audio_eld_notify(adev, inst);
        }

        /* Notify audio device additions. */
        for_each_new_connector_in_state(state, connector, new_con_state, i) {
                if (!new_con_state->crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(
                        state, new_con_state->crtc);

                if (!new_crtc_state)
                        continue;

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

                new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
                if (!new_dm_crtc_state->stream)
                        continue;

                /* The stream carries the post-commit audio instance. */
                status = dc_stream_get_status(new_dm_crtc_state->stream);
                if (!status)
                        continue;

                aconnector = to_amdgpu_dm_connector(connector);

                mutex_lock(&adev->dm.audio_lock);
                inst = status->audio_inst;
                aconnector->audio_inst = inst;
                mutex_unlock(&adev->dm.audio_lock);

                amdgpu_dm_audio_eld_notify(adev, inst);
        }
}
6929
6930 /*
6931  * Enable interrupts on CRTCs that are newly active, undergone
6932  * a modeset, or have active planes again.
6933  *
6934  * Done in two passes, based on the for_modeset flag:
6935  * Pass 1: For CRTCs going through modeset
6936  * Pass 2: For CRTCs going from 0 to n active planes
6937  *
6938  * Interrupts can only be enabled after the planes are programmed,
6939  * so this requires a two-pass approach since we don't want to
6940  * just defer the interrupts until after commit planes every time.
6941  */
6942 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6943                                              struct drm_atomic_state *state,
6944                                              bool for_modeset)
6945 {
6946         struct amdgpu_device *adev = dev->dev_private;
6947         struct drm_crtc *crtc;
6948         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6949         int i;
6950 #ifdef CONFIG_DEBUG_FS
6951         enum amdgpu_dm_pipe_crc_source source;
6952 #endif
6953
6954         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6955                                       new_crtc_state, i) {
6956                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6957                 struct dm_crtc_state *dm_new_crtc_state =
6958                         to_dm_crtc_state(new_crtc_state);
6959                 struct dm_crtc_state *dm_old_crtc_state =
6960                         to_dm_crtc_state(old_crtc_state);
6961                 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6962                 bool run_pass;
6963
6964                 run_pass = (for_modeset && modeset) ||
6965                            (!for_modeset && !modeset &&
6966                             !dm_old_crtc_state->interrupts_enabled);
6967
6968                 if (!run_pass)
6969                         continue;
6970
6971                 if (!dm_new_crtc_state->interrupts_enabled)
6972                         continue;
6973
6974                 manage_dm_interrupts(adev, acrtc, true);
6975
6976 #ifdef CONFIG_DEBUG_FS
6977                 /* The stream has changed so CRC capture needs to re-enabled. */
6978                 source = dm_new_crtc_state->crc_src;
6979                 if (amdgpu_dm_is_valid_crc_source(source)) {
6980                         amdgpu_dm_crtc_configure_crc_source(
6981                                 crtc, dm_new_crtc_state,
6982                                 dm_new_crtc_state->crc_src);
6983                 }
6984 #endif
6985         }
6986 }
6987
6988 /*
6989  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6990  * @crtc_state: the DRM CRTC state
6991  * @stream_state: the DC stream state.
6992  *
6993  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6994  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6995  */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
                                                struct dc_stream_state *stream_state)
{
        /* mode_changed is currently the only transient flag mirrored to DC. */
        stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
7001
7002 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7003                                    struct drm_atomic_state *state,
7004                                    bool nonblock)
7005 {
7006         struct drm_crtc *crtc;
7007         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7008         struct amdgpu_device *adev = dev->dev_private;
7009         int i;
7010
7011         /*
7012          * We evade vblank and pflip interrupts on CRTCs that are undergoing
7013          * a modeset, being disabled, or have no active planes.
7014          *
7015          * It's done in atomic commit rather than commit tail for now since
7016          * some of these interrupt handlers access the current CRTC state and
7017          * potentially the stream pointer itself.
7018          *
7019          * Since the atomic state is swapped within atomic commit and not within
7020          * commit tail this would leave to new state (that hasn't been committed yet)
7021          * being accesssed from within the handlers.
7022          *
7023          * TODO: Fix this so we can do this in commit tail and not have to block
7024          * in atomic check.
7025          */
7026         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7027                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7028                 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7029                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7030
7031                 if (dm_old_crtc_state->interrupts_enabled &&
7032                     (!dm_new_crtc_state->interrupts_enabled ||
7033                      drm_atomic_crtc_needs_modeset(new_crtc_state)))
7034                         manage_dm_interrupts(adev, acrtc, false);
7035         }
7036         /*
7037          * Add check here for SoC's that support hardware cursor plane, to
7038          * unset legacy_cursor_update
7039          */
7040
7041         return drm_atomic_helper_commit(dev, state, nonblock);
7042
7043         /*TODO Handle EINTR, reenable IRQ*/
7044 }
7045
7046 /**
7047  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7048  * @state: The atomic state to commit
7049  *
7050  * This will tell DC to commit the constructed DC state from atomic_check,
7051  * programming the hardware. Any failures here implies a hardware failure, since
7052  * atomic check should have filtered anything non-kosher.
7053  */
7054 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7055 {
7056         struct drm_device *dev = state->dev;
7057         struct amdgpu_device *adev = dev->dev_private;
7058         struct amdgpu_display_manager *dm = &adev->dm;
7059         struct dm_atomic_state *dm_state;
7060         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7061         uint32_t i, j;
7062         struct drm_crtc *crtc;
7063         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7064         unsigned long flags;
7065         bool wait_for_vblank = true;
7066         struct drm_connector *connector;
7067         struct drm_connector_state *old_con_state, *new_con_state;
7068         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7069         int crtc_disable_count = 0;
7070
7071         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7072
7073         dm_state = dm_atomic_get_new_state(state);
7074         if (dm_state && dm_state->context) {
7075                 dc_state = dm_state->context;
7076         } else {
7077                 /* No state changes, retain current state. */
7078                 dc_state_temp = dc_create_state(dm->dc);
7079                 ASSERT(dc_state_temp);
7080                 dc_state = dc_state_temp;
7081                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7082         }
7083
7084         /* update changed items */
7085         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7086                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7087
7088                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7089                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7090
7091                 DRM_DEBUG_DRIVER(
7092                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7093                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7094                         "connectors_changed:%d\n",
7095                         acrtc->crtc_id,
7096                         new_crtc_state->enable,
7097                         new_crtc_state->active,
7098                         new_crtc_state->planes_changed,
7099                         new_crtc_state->mode_changed,
7100                         new_crtc_state->active_changed,
7101                         new_crtc_state->connectors_changed);
7102
7103                 /* Copy all transient state flags into dc state */
7104                 if (dm_new_crtc_state->stream) {
7105                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7106                                                             dm_new_crtc_state->stream);
7107                 }
7108
7109                 /* handles headless hotplug case, updating new_state and
7110                  * aconnector as needed
7111                  */
7112
7113                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7114
7115                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7116
7117                         if (!dm_new_crtc_state->stream) {
7118                                 /*
7119                                  * this could happen because of issues with
7120                                  * userspace notifications delivery.
7121                                  * In this case userspace tries to set mode on
7122                                  * display which is disconnected in fact.
7123                                  * dc_sink is NULL in this case on aconnector.
7124                                  * We expect reset mode will come soon.
7125                                  *
7126                                  * This can also happen when unplug is done
7127                                  * during resume sequence ended
7128                                  *
7129                                  * In this case, we want to pretend we still
7130                                  * have a sink to keep the pipe running so that
7131                                  * hw state is consistent with the sw state
7132                                  */
7133                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7134                                                 __func__, acrtc->base.base.id);
7135                                 continue;
7136                         }
7137
7138                         if (dm_old_crtc_state->stream)
7139                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7140
7141                         pm_runtime_get_noresume(dev->dev);
7142
7143                         acrtc->enabled = true;
7144                         acrtc->hw_mode = new_crtc_state->mode;
7145                         crtc->hwmode = new_crtc_state->mode;
7146                 } else if (modereset_required(new_crtc_state)) {
7147                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7148                         /* i.e. reset mode */
7149                         if (dm_old_crtc_state->stream) {
7150                                 if (dm_old_crtc_state->stream->link->psr_allow_active)
7151                                         amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7152
7153                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7154                         }
7155                 }
7156         } /* for_each_crtc_in_state() */
7157
7158         if (dc_state) {
7159                 dm_enable_per_frame_crtc_master_sync(dc_state);
7160                 mutex_lock(&dm->dc_lock);
7161                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7162                 mutex_unlock(&dm->dc_lock);
7163         }
7164
7165         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7166                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7167
7168                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7169
7170                 if (dm_new_crtc_state->stream != NULL) {
7171                         const struct dc_stream_status *status =
7172                                         dc_stream_get_status(dm_new_crtc_state->stream);
7173
7174                         if (!status)
7175                                 status = dc_stream_get_status_from_state(dc_state,
7176                                                                          dm_new_crtc_state->stream);
7177
7178                         if (!status)
7179                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7180                         else
7181                                 acrtc->otg_inst = status->primary_otg_inst;
7182                 }
7183         }
7184 #ifdef CONFIG_DRM_AMD_DC_HDCP
7185         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7186                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7187                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7188                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7189
7190                 new_crtc_state = NULL;
7191
7192                 if (acrtc)
7193                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7194
7195                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7196
7197                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7198                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7199                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7200                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7201                         continue;
7202                 }
7203
7204                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7205                         hdcp_update_display(
7206                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7207                                 new_con_state->hdcp_content_type,
7208                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7209                                                                                                          : false);
7210         }
7211 #endif
7212
7213         /* Handle connector state changes */
7214         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7215                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7216                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7217                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7218                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7219                 struct dc_stream_update stream_update;
7220                 struct dc_info_packet hdr_packet;
7221                 struct dc_stream_status *status = NULL;
7222                 bool abm_changed, hdr_changed, scaling_changed;
7223
7224                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7225                 memset(&stream_update, 0, sizeof(stream_update));
7226
7227                 if (acrtc) {
7228                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7229                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7230                 }
7231
7232                 /* Skip any modesets/resets */
7233                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7234                         continue;
7235
7236                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7237                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7238
7239                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7240                                                              dm_old_con_state);
7241
7242                 abm_changed = dm_new_crtc_state->abm_level !=
7243                               dm_old_crtc_state->abm_level;
7244
7245                 hdr_changed =
7246                         is_hdr_metadata_different(old_con_state, new_con_state);
7247
7248                 if (!scaling_changed && !abm_changed && !hdr_changed)
7249                         continue;
7250
7251                 stream_update.stream = dm_new_crtc_state->stream;
7252                 if (scaling_changed) {
7253                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7254                                         dm_new_con_state, dm_new_crtc_state->stream);
7255
7256                         stream_update.src = dm_new_crtc_state->stream->src;
7257                         stream_update.dst = dm_new_crtc_state->stream->dst;
7258                 }
7259
7260                 if (abm_changed) {
7261                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7262
7263                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7264                 }
7265
7266                 if (hdr_changed) {
7267                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7268                         stream_update.hdr_static_metadata = &hdr_packet;
7269                 }
7270
7271                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7272                 WARN_ON(!status);
7273                 WARN_ON(!status->plane_count);
7274
7275                 /*
7276                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7277                  * Here we create an empty update on each plane.
7278                  * To fix this, DC should permit updating only stream properties.
7279                  */
7280                 for (j = 0; j < status->plane_count; j++)
7281                         dummy_updates[j].surface = status->plane_states[0];
7282
7283
7284                 mutex_lock(&dm->dc_lock);
7285                 dc_commit_updates_for_stream(dm->dc,
7286                                                      dummy_updates,
7287                                                      status->plane_count,
7288                                                      dm_new_crtc_state->stream,
7289                                                      &stream_update,
7290                                                      dc_state);
7291                 mutex_unlock(&dm->dc_lock);
7292         }
7293
7294         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7295         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7296                                       new_crtc_state, i) {
7297                 if (old_crtc_state->active && !new_crtc_state->active)
7298                         crtc_disable_count++;
7299
7300                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7301                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7302
7303                 /* Update freesync active state. */
7304                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7305
7306                 /* Handle vrr on->off / off->on transitions */
7307                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7308                                                 dm_new_crtc_state);
7309         }
7310
7311         /* Enable interrupts for CRTCs going through a modeset. */
7312         amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7313
7314         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7315                 if (new_crtc_state->async_flip)
7316                         wait_for_vblank = false;
7317
7318         /* update planes when needed per crtc*/
7319         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7320                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7321
7322                 if (dm_new_crtc_state->stream)
7323                         amdgpu_dm_commit_planes(state, dc_state, dev,
7324                                                 dm, crtc, wait_for_vblank);
7325         }
7326
7327         /* Enable interrupts for CRTCs going from 0 to n active planes. */
7328         amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7329
7330         /* Update audio instances for each connector. */
7331         amdgpu_dm_commit_audio(dev, state);
7332
7333         /*
7334          * send vblank event on all events not handled in flip and
7335          * mark consumed event for drm_atomic_helper_commit_hw_done
7336          */
7337         spin_lock_irqsave(&adev->ddev->event_lock, flags);
7338         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7339
7340                 if (new_crtc_state->event)
7341                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7342
7343                 new_crtc_state->event = NULL;
7344         }
7345         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7346
7347         /* Signal HW programming completion */
7348         drm_atomic_helper_commit_hw_done(state);
7349
7350         if (wait_for_vblank)
7351                 drm_atomic_helper_wait_for_flip_done(dev, state);
7352
7353         drm_atomic_helper_cleanup_planes(dev, state);
7354
7355         /*
7356          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7357          * so we can put the GPU into runtime suspend if we're not driving any
7358          * displays anymore
7359          */
7360         for (i = 0; i < crtc_disable_count; i++)
7361                 pm_runtime_put_autosuspend(dev->dev);
7362         pm_runtime_mark_last_busy(dev->dev);
7363
7364         if (dc_state_temp)
7365                 dc_release_state(dc_state_temp);
7366 }
7367
7368
7369 static int dm_force_atomic_commit(struct drm_connector *connector)
7370 {
7371         int ret = 0;
7372         struct drm_device *ddev = connector->dev;
7373         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7374         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7375         struct drm_plane *plane = disconnected_acrtc->base.primary;
7376         struct drm_connector_state *conn_state;
7377         struct drm_crtc_state *crtc_state;
7378         struct drm_plane_state *plane_state;
7379
7380         if (!state)
7381                 return -ENOMEM;
7382
7383         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7384
7385         /* Construct an atomic state to restore previous display setting */
7386
7387         /*
7388          * Attach connectors to drm_atomic_state
7389          */
7390         conn_state = drm_atomic_get_connector_state(state, connector);
7391
7392         ret = PTR_ERR_OR_ZERO(conn_state);
7393         if (ret)
7394                 goto err;
7395
7396         /* Attach crtc to drm_atomic_state*/
7397         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7398
7399         ret = PTR_ERR_OR_ZERO(crtc_state);
7400         if (ret)
7401                 goto err;
7402
7403         /* force a restore */
7404         crtc_state->mode_changed = true;
7405
7406         /* Attach plane to drm_atomic_state */
7407         plane_state = drm_atomic_get_plane_state(state, plane);
7408
7409         ret = PTR_ERR_OR_ZERO(plane_state);
7410         if (ret)
7411                 goto err;
7412
7413
7414         /* Call commit internally with the state we just constructed */
7415         ret = drm_atomic_commit(state);
7416         if (!ret)
7417                 return 0;
7418
7419 err:
7420         DRM_ERROR("Restoring old state failed with %i\n", ret);
7421         drm_atomic_state_put(state);
7422
7423         return ret;
7424 }
7425
7426 /*
7427  * This function handles all cases when set mode does not come upon hotplug.
7428  * This includes when a display is unplugged then plugged back into the
7429  * same port and when running without usermode desktop manager supprot
7430  */
7431 void dm_restore_drm_connector_state(struct drm_device *dev,
7432                                     struct drm_connector *connector)
7433 {
7434         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7435         struct amdgpu_crtc *disconnected_acrtc;
7436         struct dm_crtc_state *acrtc_state;
7437
7438         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7439                 return;
7440
7441         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7442         if (!disconnected_acrtc)
7443                 return;
7444
7445         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7446         if (!acrtc_state->stream)
7447                 return;
7448
7449         /*
7450          * If the previous sink is not released and different from the current,
7451          * we deduce we are in a state where we can not rely on usermode call
7452          * to turn on the display, so we do it here
7453          */
7454         if (acrtc_state->stream->sink != aconnector->dc_sink)
7455                 dm_force_atomic_commit(&aconnector->base);
7456 }
7457
7458 /*
7459  * Grabs all modesetting locks to serialize against any blocking commits,
7460  * Waits for completion of all non blocking commits.
7461  */
7462 static int do_aquire_global_lock(struct drm_device *dev,
7463                                  struct drm_atomic_state *state)
7464 {
7465         struct drm_crtc *crtc;
7466         struct drm_crtc_commit *commit;
7467         long ret;
7468
7469         /*
7470          * Adding all modeset locks to aquire_ctx will
7471          * ensure that when the framework release it the
7472          * extra locks we are locking here will get released to
7473          */
7474         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7475         if (ret)
7476                 return ret;
7477
7478         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7479                 spin_lock(&crtc->commit_lock);
7480                 commit = list_first_entry_or_null(&crtc->commit_list,
7481                                 struct drm_crtc_commit, commit_entry);
7482                 if (commit)
7483                         drm_crtc_commit_get(commit);
7484                 spin_unlock(&crtc->commit_lock);
7485
7486                 if (!commit)
7487                         continue;
7488
7489                 /*
7490                  * Make sure all pending HW programming completed and
7491                  * page flips done
7492                  */
7493                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7494
7495                 if (ret > 0)
7496                         ret = wait_for_completion_interruptible_timeout(
7497                                         &commit->flip_done, 10*HZ);
7498
7499                 if (ret == 0)
7500                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7501                                   "timed out\n", crtc->base.id, crtc->name);
7502
7503                 drm_crtc_commit_put(commit);
7504         }
7505
7506         return ret < 0 ? ret : 0;
7507 }
7508
7509 static void get_freesync_config_for_crtc(
7510         struct dm_crtc_state *new_crtc_state,
7511         struct dm_connector_state *new_con_state)
7512 {
7513         struct mod_freesync_config config = {0};
7514         struct amdgpu_dm_connector *aconnector =
7515                         to_amdgpu_dm_connector(new_con_state->base.connector);
7516         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7517         int vrefresh = drm_mode_vrefresh(mode);
7518
7519         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7520                                         vrefresh >= aconnector->min_vfreq &&
7521                                         vrefresh <= aconnector->max_vfreq;
7522
7523         if (new_crtc_state->vrr_supported) {
7524                 new_crtc_state->stream->ignore_msa_timing_param = true;
7525                 config.state = new_crtc_state->base.vrr_enabled ?
7526                                 VRR_STATE_ACTIVE_VARIABLE :
7527                                 VRR_STATE_INACTIVE;
7528                 config.min_refresh_in_uhz =
7529                                 aconnector->min_vfreq * 1000000;
7530                 config.max_refresh_in_uhz =
7531                                 aconnector->max_vfreq * 1000000;
7532                 config.vsif_supported = true;
7533                 config.btr = true;
7534         }
7535
7536         new_crtc_state->freesync_config = config;
7537 }
7538
static void reset_freesync_config_for_crtc(
        struct dm_crtc_state *new_crtc_state)
{
        /*
         * Mark VRR unsupported and wipe any stale freesync parameters and
         * infopacket so a later enable starts from a clean slate.
         */
        new_crtc_state->vrr_supported = false;

        memset(&new_crtc_state->vrr_params, 0,
               sizeof(new_crtc_state->vrr_params));
        memset(&new_crtc_state->vrr_infopacket, 0,
               sizeof(new_crtc_state->vrr_infopacket));
}
7549
7550 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7551                                 struct drm_atomic_state *state,
7552                                 struct drm_crtc *crtc,
7553                                 struct drm_crtc_state *old_crtc_state,
7554                                 struct drm_crtc_state *new_crtc_state,
7555                                 bool enable,
7556                                 bool *lock_and_validation_needed)
7557 {
7558         struct dm_atomic_state *dm_state = NULL;
7559         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7560         struct dc_stream_state *new_stream;
7561         int ret = 0;
7562
7563         /*
7564          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7565          * update changed items
7566          */
7567         struct amdgpu_crtc *acrtc = NULL;
7568         struct amdgpu_dm_connector *aconnector = NULL;
7569         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7570         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7571
7572         new_stream = NULL;
7573
7574         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7575         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7576         acrtc = to_amdgpu_crtc(crtc);
7577         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7578
7579         /* TODO This hack should go away */
7580         if (aconnector && enable) {
7581                 /* Make sure fake sink is created in plug-in scenario */
7582                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7583                                                             &aconnector->base);
7584                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7585                                                             &aconnector->base);
7586
7587                 if (IS_ERR(drm_new_conn_state)) {
7588                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7589                         goto fail;
7590                 }
7591
7592                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7593                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7594
7595                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7596                         goto skip_modeset;
7597
7598                 new_stream = create_stream_for_sink(aconnector,
7599                                                      &new_crtc_state->mode,
7600                                                     dm_new_conn_state,
7601                                                     dm_old_crtc_state->stream);
7602
7603                 /*
7604                  * we can have no stream on ACTION_SET if a display
7605                  * was disconnected during S3, in this case it is not an
7606                  * error, the OS will be updated after detection, and
7607                  * will do the right thing on next atomic commit
7608                  */
7609
7610                 if (!new_stream) {
7611                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7612                                         __func__, acrtc->base.base.id);
7613                         ret = -ENOMEM;
7614                         goto fail;
7615                 }
7616
7617                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7618
7619                 ret = fill_hdr_info_packet(drm_new_conn_state,
7620                                            &new_stream->hdr_static_metadata);
7621                 if (ret)
7622                         goto fail;
7623
7624                 /*
7625                  * If we already removed the old stream from the context
7626                  * (and set the new stream to NULL) then we can't reuse
7627                  * the old stream even if the stream and scaling are unchanged.
7628                  * We'll hit the BUG_ON and black screen.
7629                  *
7630                  * TODO: Refactor this function to allow this check to work
7631                  * in all conditions.
7632                  */
7633                 if (dm_new_crtc_state->stream &&
7634                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7635                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7636                         new_crtc_state->mode_changed = false;
7637                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7638                                          new_crtc_state->mode_changed);
7639                 }
7640         }
7641
7642         /* mode_changed flag may get updated above, need to check again */
7643         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7644                 goto skip_modeset;
7645
7646         DRM_DEBUG_DRIVER(
7647                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7648                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7649                 "connectors_changed:%d\n",
7650                 acrtc->crtc_id,
7651                 new_crtc_state->enable,
7652                 new_crtc_state->active,
7653                 new_crtc_state->planes_changed,
7654                 new_crtc_state->mode_changed,
7655                 new_crtc_state->active_changed,
7656                 new_crtc_state->connectors_changed);
7657
7658         /* Remove stream for any changed/disabled CRTC */
7659         if (!enable) {
7660
7661                 if (!dm_old_crtc_state->stream)
7662                         goto skip_modeset;
7663
7664                 ret = dm_atomic_get_state(state, &dm_state);
7665                 if (ret)
7666                         goto fail;
7667
7668                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7669                                 crtc->base.id);
7670
7671                 /* i.e. reset mode */
7672                 if (dc_remove_stream_from_ctx(
7673                                 dm->dc,
7674                                 dm_state->context,
7675                                 dm_old_crtc_state->stream) != DC_OK) {
7676                         ret = -EINVAL;
7677                         goto fail;
7678                 }
7679
7680                 dc_stream_release(dm_old_crtc_state->stream);
7681                 dm_new_crtc_state->stream = NULL;
7682
7683                 reset_freesync_config_for_crtc(dm_new_crtc_state);
7684
7685                 *lock_and_validation_needed = true;
7686
7687         } else {/* Add stream for any updated/enabled CRTC */
7688                 /*
7689                  * Quick fix to prevent NULL pointer on new_stream when
7690                  * added MST connectors not found in existing crtc_state in the chained mode
7691                  * TODO: need to dig out the root cause of that
7692                  */
7693                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7694                         goto skip_modeset;
7695
7696                 if (modereset_required(new_crtc_state))
7697                         goto skip_modeset;
7698
7699                 if (modeset_required(new_crtc_state, new_stream,
7700                                      dm_old_crtc_state->stream)) {
7701
7702                         WARN_ON(dm_new_crtc_state->stream);
7703
7704                         ret = dm_atomic_get_state(state, &dm_state);
7705                         if (ret)
7706                                 goto fail;
7707
7708                         dm_new_crtc_state->stream = new_stream;
7709
7710                         dc_stream_retain(new_stream);
7711
7712                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7713                                                 crtc->base.id);
7714
7715                         if (dc_add_stream_to_ctx(
7716                                         dm->dc,
7717                                         dm_state->context,
7718                                         dm_new_crtc_state->stream) != DC_OK) {
7719                                 ret = -EINVAL;
7720                                 goto fail;
7721                         }
7722
7723                         *lock_and_validation_needed = true;
7724                 }
7725         }
7726
7727 skip_modeset:
7728         /* Release extra reference */
7729         if (new_stream)
7730                  dc_stream_release(new_stream);
7731
7732         /*
7733          * We want to do dc stream updates that do not require a
7734          * full modeset below.
7735          */
7736         if (!(enable && aconnector && new_crtc_state->enable &&
7737               new_crtc_state->active))
7738                 return 0;
7739         /*
7740          * Given above conditions, the dc state cannot be NULL because:
7741          * 1. We're in the process of enabling CRTCs (just been added
7742          *    to the dc context, or already is on the context)
7743          * 2. Has a valid connector attached, and
7744          * 3. Is currently active and enabled.
7745          * => The dc stream state currently exists.
7746          */
7747         BUG_ON(dm_new_crtc_state->stream == NULL);
7748
7749         /* Scaling or underscan settings */
7750         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7751                 update_stream_scaling_settings(
7752                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7753
7754         /* ABM settings */
7755         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7756
7757         /*
7758          * Color management settings. We also update color properties
7759          * when a modeset is needed, to ensure it gets reprogrammed.
7760          */
7761         if (dm_new_crtc_state->base.color_mgmt_changed ||
7762             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7763                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7764                 if (ret)
7765                         goto fail;
7766         }
7767
7768         /* Update Freesync settings. */
7769         get_freesync_config_for_crtc(dm_new_crtc_state,
7770                                      dm_new_conn_state);
7771
7772         return ret;
7773
7774 fail:
7775         if (new_stream)
7776                 dc_stream_release(new_stream);
7777         return ret;
7778 }
7779
7780 static bool should_reset_plane(struct drm_atomic_state *state,
7781                                struct drm_plane *plane,
7782                                struct drm_plane_state *old_plane_state,
7783                                struct drm_plane_state *new_plane_state)
7784 {
7785         struct drm_plane *other;
7786         struct drm_plane_state *old_other_state, *new_other_state;
7787         struct drm_crtc_state *new_crtc_state;
7788         int i;
7789
7790         /*
7791          * TODO: Remove this hack once the checks below are sufficient
7792          * enough to determine when we need to reset all the planes on
7793          * the stream.
7794          */
7795         if (state->allow_modeset)
7796                 return true;
7797
7798         /* Exit early if we know that we're adding or removing the plane. */
7799         if (old_plane_state->crtc != new_plane_state->crtc)
7800                 return true;
7801
7802         /* old crtc == new_crtc == NULL, plane not in context. */
7803         if (!new_plane_state->crtc)
7804                 return false;
7805
7806         new_crtc_state =
7807                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7808
7809         if (!new_crtc_state)
7810                 return true;
7811
7812         /* CRTC Degamma changes currently require us to recreate planes. */
7813         if (new_crtc_state->color_mgmt_changed)
7814                 return true;
7815
7816         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7817                 return true;
7818
7819         /*
7820          * If there are any new primary or overlay planes being added or
7821          * removed then the z-order can potentially change. To ensure
7822          * correct z-order and pipe acquisition the current DC architecture
7823          * requires us to remove and recreate all existing planes.
7824          *
7825          * TODO: Come up with a more elegant solution for this.
7826          */
7827         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7828                 if (other->type == DRM_PLANE_TYPE_CURSOR)
7829                         continue;
7830
7831                 if (old_other_state->crtc != new_plane_state->crtc &&
7832                     new_other_state->crtc != new_plane_state->crtc)
7833                         continue;
7834
7835                 if (old_other_state->crtc != new_other_state->crtc)
7836                         return true;
7837
7838                 /* TODO: Remove this once we can handle fast format changes. */
7839                 if (old_other_state->fb && new_other_state->fb &&
7840                     old_other_state->fb->format != new_other_state->fb->format)
7841                         return true;
7842         }
7843
7844         return false;
7845 }
7846
7847 static int dm_update_plane_state(struct dc *dc,
7848                                  struct drm_atomic_state *state,
7849                                  struct drm_plane *plane,
7850                                  struct drm_plane_state *old_plane_state,
7851                                  struct drm_plane_state *new_plane_state,
7852                                  bool enable,
7853                                  bool *lock_and_validation_needed)
7854 {
7855
7856         struct dm_atomic_state *dm_state = NULL;
7857         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7858         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7859         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7860         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7861         bool needs_reset;
7862         int ret = 0;
7863
7864
7865         new_plane_crtc = new_plane_state->crtc;
7866         old_plane_crtc = old_plane_state->crtc;
7867         dm_new_plane_state = to_dm_plane_state(new_plane_state);
7868         dm_old_plane_state = to_dm_plane_state(old_plane_state);
7869
7870         /*TODO Implement atomic check for cursor plane */
7871         if (plane->type == DRM_PLANE_TYPE_CURSOR)
7872                 return 0;
7873
7874         needs_reset = should_reset_plane(state, plane, old_plane_state,
7875                                          new_plane_state);
7876
7877         /* Remove any changed/removed planes */
7878         if (!enable) {
7879                 if (!needs_reset)
7880                         return 0;
7881
7882                 if (!old_plane_crtc)
7883                         return 0;
7884
7885                 old_crtc_state = drm_atomic_get_old_crtc_state(
7886                                 state, old_plane_crtc);
7887                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7888
7889                 if (!dm_old_crtc_state->stream)
7890                         return 0;
7891
7892                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7893                                 plane->base.id, old_plane_crtc->base.id);
7894
7895                 ret = dm_atomic_get_state(state, &dm_state);
7896                 if (ret)
7897                         return ret;
7898
7899                 if (!dc_remove_plane_from_context(
7900                                 dc,
7901                                 dm_old_crtc_state->stream,
7902                                 dm_old_plane_state->dc_state,
7903                                 dm_state->context)) {
7904
7905                         ret = EINVAL;
7906                         return ret;
7907                 }
7908
7909
7910                 dc_plane_state_release(dm_old_plane_state->dc_state);
7911                 dm_new_plane_state->dc_state = NULL;
7912
7913                 *lock_and_validation_needed = true;
7914
7915         } else { /* Add new planes */
7916                 struct dc_plane_state *dc_new_plane_state;
7917
7918                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7919                         return 0;
7920
7921                 if (!new_plane_crtc)
7922                         return 0;
7923
7924                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7925                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7926
7927                 if (!dm_new_crtc_state->stream)
7928                         return 0;
7929
7930                 if (!needs_reset)
7931                         return 0;
7932
7933                 WARN_ON(dm_new_plane_state->dc_state);
7934
7935                 dc_new_plane_state = dc_create_plane_state(dc);
7936                 if (!dc_new_plane_state)
7937                         return -ENOMEM;
7938
7939                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7940                                 plane->base.id, new_plane_crtc->base.id);
7941
7942                 ret = fill_dc_plane_attributes(
7943                         new_plane_crtc->dev->dev_private,
7944                         dc_new_plane_state,
7945                         new_plane_state,
7946                         new_crtc_state);
7947                 if (ret) {
7948                         dc_plane_state_release(dc_new_plane_state);
7949                         return ret;
7950                 }
7951
7952                 ret = dm_atomic_get_state(state, &dm_state);
7953                 if (ret) {
7954                         dc_plane_state_release(dc_new_plane_state);
7955                         return ret;
7956                 }
7957
7958                 /*
7959                  * Any atomic check errors that occur after this will
7960                  * not need a release. The plane state will be attached
7961                  * to the stream, and therefore part of the atomic
7962                  * state. It'll be released when the atomic state is
7963                  * cleaned.
7964                  */
7965                 if (!dc_add_plane_to_context(
7966                                 dc,
7967                                 dm_new_crtc_state->stream,
7968                                 dc_new_plane_state,
7969                                 dm_state->context)) {
7970
7971                         dc_plane_state_release(dc_new_plane_state);
7972                         return -EINVAL;
7973                 }
7974
7975                 dm_new_plane_state->dc_state = dc_new_plane_state;
7976
7977                 /* Tell DC to do a full surface update every time there
7978                  * is a plane change. Inefficient, but works for now.
7979                  */
7980                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7981
7982                 *lock_and_validation_needed = true;
7983         }
7984
7985
7986         return ret;
7987 }
7988
/*
 * dm_determine_update_type_for_commit() - Classify an atomic commit as a
 * fast, medium or full update.
 *
 * @dm: The display manager.
 * @state: The atomic state being checked.
 * @out_type: Receives the resulting surface_update_type.
 *
 * Walks every CRTC in @state, collects per-plane surface updates into a
 * heap-allocated scratch bundle, and asks DC (via
 * dc_check_update_surfaces_for_stream()) how invasive the change is.
 * Any stream pointer change or DC plane state change short-circuits to
 * UPDATE_TYPE_FULL. On allocation failure the type is forced to FULL as
 * the safe fallback.
 *
 * Return: 0 on success, negative errno on failure; *out_type is always
 * written, even on the error paths.
 */
static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	/* Scratch space, too large for the stack, reused for each CRTC. */
	struct surface_info_bundle {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC*/
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

		/* Re-zero the scratch bundle for this CRTC. */
		memset(bundle, 0, sizeof(struct surface_info_bundle));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		/* A stream swap always needs a full update. */
		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
			uint64_t tiling_flags;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			/* Cursor planes are handled separately, skip them. */
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			/* A recreated DC plane state also forces a full update. */
			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			/* Only gather planes that sit on this CRTC. */
			if (crtc != new_plane_crtc)
				continue;

			bundle->surface_updates[num_plane].surface =
					new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
						new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
						new_dm_plane_state->dc_state->in_transfer_func;
				bundle->stream_update.gamut_remap =
						&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
						&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
						new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		/* Stream status must come from the *old* (current) context. */
		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc,	bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		/* Anything above MED is promoted to FULL; no need to keep going. */
		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
8148
8149 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8150 {
8151         struct drm_connector *connector;
8152         struct drm_connector_state *conn_state;
8153         struct amdgpu_dm_connector *aconnector = NULL;
8154         int i;
8155         for_each_new_connector_in_state(state, connector, conn_state, i) {
8156                 if (conn_state->crtc != crtc)
8157                         continue;
8158
8159                 aconnector = to_amdgpu_dm_connector(connector);
8160                 if (!aconnector->port || !aconnector->mst_port)
8161                         aconnector = NULL;
8162                 else
8163                         break;
8164         }
8165
8166         if (!aconnector)
8167                 return 0;
8168
8169         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8170 }
8171
8172 /**
8173  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8174  * @dev: The DRM device
8175  * @state: The atomic state to commit
8176  *
8177  * Validate that the given atomic state is programmable by DC into hardware.
8178  * This involves constructing a &struct dc_state reflecting the new hardware
8179  * state we wish to commit, then querying DC to see if it is programmable. It's
8180  * important not to modify the existing DC state. Otherwise, atomic_check
8181  * may unexpectedly commit hardware changes.
8182  *
8183  * When validating the DC state, it's important that the right locks are
8184  * acquired. For full updates case which removes/adds/updates streams on one
8185  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8186  * that any such full update commit will wait for completion of any outstanding
8187  * flip using DRMs synchronization events. See
8188  * dm_determine_update_type_for_commit()
8189  *
8190  * Note that DM adds the affected connectors for all CRTCs in state, when that
8191  * might not seem necessary. This is because DC stream creation requires the
8192  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8193  * be possible but non-trivial - a possible TODO item.
8194  *
 * Return: 0 on success, negative error code if validation failed.
8196  */
8197 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8198                                   struct drm_atomic_state *state)
8199 {
8200         struct amdgpu_device *adev = dev->dev_private;
8201         struct dm_atomic_state *dm_state = NULL;
8202         struct dc *dc = adev->dm.dc;
8203         struct drm_connector *connector;
8204         struct drm_connector_state *old_con_state, *new_con_state;
8205         struct drm_crtc *crtc;
8206         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8207         struct drm_plane *plane;
8208         struct drm_plane_state *old_plane_state, *new_plane_state;
8209         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8210         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8211
8212         int ret, i;
8213
8214         /*
8215          * This bool will be set for true for any modeset/reset
8216          * or plane update which implies non fast surface update.
8217          */
8218         bool lock_and_validation_needed = false;
8219
8220         ret = drm_atomic_helper_check_modeset(dev, state);
8221         if (ret)
8222                 goto fail;
8223
8224         if (adev->asic_type >= CHIP_NAVI10) {
8225                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8226                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8227                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8228                                 if (ret)
8229                                         goto fail;
8230                         }
8231                 }
8232         }
8233
8234         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8235                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8236                     !new_crtc_state->color_mgmt_changed &&
8237                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8238                         continue;
8239
8240                 if (!new_crtc_state->enable)
8241                         continue;
8242
8243                 ret = drm_atomic_add_affected_connectors(state, crtc);
8244                 if (ret)
8245                         return ret;
8246
8247                 ret = drm_atomic_add_affected_planes(state, crtc);
8248                 if (ret)
8249                         goto fail;
8250         }
8251
8252         /*
8253          * Add all primary and overlay planes on the CRTC to the state
8254          * whenever a plane is enabled to maintain correct z-ordering
8255          * and to enable fast surface updates.
8256          */
8257         drm_for_each_crtc(crtc, dev) {
8258                 bool modified = false;
8259
8260                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8261                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8262                                 continue;
8263
8264                         if (new_plane_state->crtc == crtc ||
8265                             old_plane_state->crtc == crtc) {
8266                                 modified = true;
8267                                 break;
8268                         }
8269                 }
8270
8271                 if (!modified)
8272                         continue;
8273
8274                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8275                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8276                                 continue;
8277
8278                         new_plane_state =
8279                                 drm_atomic_get_plane_state(state, plane);
8280
8281                         if (IS_ERR(new_plane_state)) {
8282                                 ret = PTR_ERR(new_plane_state);
8283                                 goto fail;
8284                         }
8285                 }
8286         }
8287
        /* Remove existing planes if they are modified */
8289         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8290                 ret = dm_update_plane_state(dc, state, plane,
8291                                             old_plane_state,
8292                                             new_plane_state,
8293                                             false,
8294                                             &lock_and_validation_needed);
8295                 if (ret)
8296                         goto fail;
8297         }
8298
8299         /* Disable all crtcs which require disable */
8300         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8301                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8302                                            old_crtc_state,
8303                                            new_crtc_state,
8304                                            false,
8305                                            &lock_and_validation_needed);
8306                 if (ret)
8307                         goto fail;
8308         }
8309
8310         /* Enable all crtcs which require enable */
8311         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8312                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8313                                            old_crtc_state,
8314                                            new_crtc_state,
8315                                            true,
8316                                            &lock_and_validation_needed);
8317                 if (ret)
8318                         goto fail;
8319         }
8320
8321         /* Add new/modified planes */
8322         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8323                 ret = dm_update_plane_state(dc, state, plane,
8324                                             old_plane_state,
8325                                             new_plane_state,
8326                                             true,
8327                                             &lock_and_validation_needed);
8328                 if (ret)
8329                         goto fail;
8330         }
8331
8332         /* Run this here since we want to validate the streams we created */
8333         ret = drm_atomic_helper_check_planes(dev, state);
8334         if (ret)
8335                 goto fail;
8336
8337         if (state->legacy_cursor_update) {
8338                 /*
8339                  * This is a fast cursor update coming from the plane update
8340                  * helper, check if it can be done asynchronously for better
8341                  * performance.
8342                  */
8343                 state->async_update =
8344                         !drm_atomic_helper_async_check(dev, state);
8345
8346                 /*
8347                  * Skip the remaining global validation if this is an async
8348                  * update. Cursor updates can be done without affecting
8349                  * state or bandwidth calcs and this avoids the performance
8350                  * penalty of locking the private state object and
8351                  * allocating a new dc_state.
8352                  */
8353                 if (state->async_update)
8354                         return 0;
8355         }
8356
8357         /* Check scaling and underscan changes*/
8358         /* TODO Removed scaling changes validation due to inability to commit
8359          * new stream into context w\o causing full reset. Need to
8360          * decide how to handle.
8361          */
8362         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8363                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8364                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8365                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8366
8367                 /* Skip any modesets/resets */
8368                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8369                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8370                         continue;
8371
		/* Skip anything that is not a scaling or underscan change */
8373                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8374                         continue;
8375
8376                 overall_update_type = UPDATE_TYPE_FULL;
8377                 lock_and_validation_needed = true;
8378         }
8379
8380         ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8381         if (ret)
8382                 goto fail;
8383
8384         if (overall_update_type < update_type)
8385                 overall_update_type = update_type;
8386
8387         /*
8388          * lock_and_validation_needed was an old way to determine if we need to set
8389          * the global lock. Leaving it in to check if we broke any corner cases
8390          * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8391          * lock_and_validation_needed false = UPDATE_TYPE_FAST
8392          */
8393         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8394                 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8395
8396         if (overall_update_type > UPDATE_TYPE_FAST) {
8397                 ret = dm_atomic_get_state(state, &dm_state);
8398                 if (ret)
8399                         goto fail;
8400
8401                 ret = do_aquire_global_lock(dev, state);
8402                 if (ret)
8403                         goto fail;
8404
8405 #if defined(CONFIG_DRM_AMD_DC_DCN)
8406                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8407                         goto fail;
8408
8409                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8410                 if (ret)
8411                         goto fail;
8412 #endif
8413
8414                 /*
8415                  * Perform validation of MST topology in the state:
8416                  * We need to perform MST atomic check before calling
8417                  * dc_validate_global_state(), or there is a chance
8418                  * to get stuck in an infinite loop and hang eventually.
8419                  */
8420                 ret = drm_dp_mst_atomic_check(state);
8421                 if (ret)
8422                         goto fail;
8423
8424                 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8425                         ret = -EINVAL;
8426                         goto fail;
8427                 }
8428         } else {
8429                 /*
8430                  * The commit is a fast update. Fast updates shouldn't change
8431                  * the DC context, affect global validation, and can have their
8432                  * commit work done in parallel with other commits not touching
8433                  * the same resource. If we have a new DC context as part of
8434                  * the DM atomic state from validation we need to free it and
8435                  * retain the existing one instead.
8436                  */
8437                 struct dm_atomic_state *new_dm_state, *old_dm_state;
8438
8439                 new_dm_state = dm_atomic_get_new_state(state);
8440                 old_dm_state = dm_atomic_get_old_state(state);
8441
8442                 if (new_dm_state && old_dm_state) {
8443                         if (new_dm_state->context)
8444                                 dc_release_state(new_dm_state->context);
8445
8446                         new_dm_state->context = old_dm_state->context;
8447
8448                         if (old_dm_state->context)
8449                                 dc_retain_state(old_dm_state->context);
8450                 }
8451         }
8452
8453         /* Store the overall update type for use later in atomic check. */
8454         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8455                 struct dm_crtc_state *dm_new_crtc_state =
8456                         to_dm_crtc_state(new_crtc_state);
8457
8458                 dm_new_crtc_state->update_type = (int)overall_update_type;
8459         }
8460
8461         /* Must be success */
8462         WARN_ON(ret);
8463         return ret;
8464
8465 fail:
8466         if (ret == -EDEADLK)
8467                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8468         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8469                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8470         else
8471                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8472
8473         return ret;
8474 }
8475
8476 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8477                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8478 {
8479         uint8_t dpcd_data;
8480         bool capable = false;
8481
8482         if (amdgpu_dm_connector->dc_link &&
8483                 dm_helpers_dp_read_dpcd(
8484                                 NULL,
8485                                 amdgpu_dm_connector->dc_link,
8486                                 DP_DOWN_STREAM_PORT_COUNT,
8487                                 &dpcd_data,
8488                                 sizeof(dpcd_data))) {
8489                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8490         }
8491
8492         return capable;
8493 }
8494 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8495                                         struct edid *edid)
8496 {
8497         int i;
8498         bool edid_check_required;
8499         struct detailed_timing *timing;
8500         struct detailed_non_pixel *data;
8501         struct detailed_data_monitor_range *range;
8502         struct amdgpu_dm_connector *amdgpu_dm_connector =
8503                         to_amdgpu_dm_connector(connector);
8504         struct dm_connector_state *dm_con_state = NULL;
8505
8506         struct drm_device *dev = connector->dev;
8507         struct amdgpu_device *adev = dev->dev_private;
8508         bool freesync_capable = false;
8509
8510         if (!connector->state) {
8511                 DRM_ERROR("%s - Connector has no state", __func__);
8512                 goto update;
8513         }
8514
8515         if (!edid) {
8516                 dm_con_state = to_dm_connector_state(connector->state);
8517
8518                 amdgpu_dm_connector->min_vfreq = 0;
8519                 amdgpu_dm_connector->max_vfreq = 0;
8520                 amdgpu_dm_connector->pixel_clock_mhz = 0;
8521
8522                 goto update;
8523         }
8524
8525         dm_con_state = to_dm_connector_state(connector->state);
8526
8527         edid_check_required = false;
8528         if (!amdgpu_dm_connector->dc_sink) {
8529                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8530                 goto update;
8531         }
8532         if (!adev->dm.freesync_module)
8533                 goto update;
8534         /*
8535          * if edid non zero restrict freesync only for dp and edp
8536          */
8537         if (edid) {
8538                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8539                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8540                         edid_check_required = is_dp_capable_without_timing_msa(
8541                                                 adev->dm.dc,
8542                                                 amdgpu_dm_connector);
8543                 }
8544         }
8545         if (edid_check_required == true && (edid->version > 1 ||
8546            (edid->version == 1 && edid->revision > 1))) {
8547                 for (i = 0; i < 4; i++) {
8548
8549                         timing  = &edid->detailed_timings[i];
8550                         data    = &timing->data.other_data;
8551                         range   = &data->data.range;
8552                         /*
8553                          * Check if monitor has continuous frequency mode
8554                          */
8555                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
8556                                 continue;
8557                         /*
8558                          * Check for flag range limits only. If flag == 1 then
8559                          * no additional timing information provided.
8560                          * Default GTF, GTF Secondary curve and CVT are not
8561                          * supported
8562                          */
8563                         if (range->flags != 1)
8564                                 continue;
8565
8566                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8567                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8568                         amdgpu_dm_connector->pixel_clock_mhz =
8569                                 range->pixel_clock_mhz * 10;
8570                         break;
8571                 }
8572
8573                 if (amdgpu_dm_connector->max_vfreq -
8574                     amdgpu_dm_connector->min_vfreq > 10) {
8575
8576                         freesync_capable = true;
8577                 }
8578         }
8579
8580 update:
8581         if (dm_con_state)
8582                 dm_con_state->freesync_capable = freesync_capable;
8583
8584         if (connector->vrr_capable_property)
8585                 drm_connector_set_vrr_capable_property(connector,
8586                                                        freesync_capable);
8587 }
8588
8589 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8590 {
8591         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8592
8593         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8594                 return;
8595         if (link->type == dc_connection_none)
8596                 return;
8597         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8598                                         dpcd_data, sizeof(dpcd_data))) {
8599                 link->psr_feature_enabled = dpcd_data[0] ? true:false;
8600                 DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8601         }
8602 }
8603
8604 /*
8605  * amdgpu_dm_link_setup_psr() - configure psr link
8606  * @stream: stream state
8607  *
8608  * Return: true if success
8609  */
8610 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8611 {
8612         struct dc_link *link = NULL;
8613         struct psr_config psr_config = {0};
8614         struct psr_context psr_context = {0};
8615         struct dc *dc = NULL;
8616         bool ret = false;
8617
8618         if (stream == NULL)
8619                 return false;
8620
8621         link = stream->link;
8622         dc = link->ctx->dc;
8623
8624         psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8625
8626         if (psr_config.psr_version > 0) {
8627                 psr_config.psr_exit_link_training_required = 0x1;
8628                 psr_config.psr_frame_capture_indication_req = 0;
8629                 psr_config.psr_rfb_setup_time = 0x37;
8630                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8631                 psr_config.allow_smu_optimizations = 0x0;
8632
8633                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8634
8635         }
8636         DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_feature_enabled);
8637
8638         return ret;
8639 }
8640
8641 /*
8642  * amdgpu_dm_psr_enable() - enable psr f/w
8643  * @stream: stream state
8644  *
8645  * Return: true if success
8646  */
8647 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8648 {
8649         struct dc_link *link = stream->link;
8650         unsigned int vsync_rate_hz = 0;
8651         struct dc_static_screen_params params = {0};
8652         /* Calculate number of static frames before generating interrupt to
8653          * enter PSR.
8654          */
8655         // Init fail safe of 2 frames static
8656         unsigned int num_frames_static = 2;
8657
8658         DRM_DEBUG_DRIVER("Enabling psr...\n");
8659
8660         vsync_rate_hz = div64_u64(div64_u64((
8661                         stream->timing.pix_clk_100hz * 100),
8662                         stream->timing.v_total),
8663                         stream->timing.h_total);
8664
8665         /* Round up
8666          * Calculate number of frames such that at least 30 ms of time has
8667          * passed.
8668          */
8669         if (vsync_rate_hz != 0) {
8670                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8671                 num_frames_static = (30000 / frame_time_microsec) + 1;
8672         }
8673
8674         params.triggers.cursor_update = true;
8675         params.triggers.overlay_update = true;
8676         params.triggers.surface_update = true;
8677         params.num_frames = num_frames_static;
8678
8679         dc_stream_set_static_screen_params(link->ctx->dc,
8680                                            &stream, 1,
8681                                            &params);
8682
8683         return dc_link_set_psr_allow_active(link, true, false);
8684 }
8685
8686 /*
8687  * amdgpu_dm_psr_disable() - disable psr f/w
8688  * @stream:  stream state
8689  *
8690  * Return: true if success
8691  */
8692 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8693 {
8694
8695         DRM_DEBUG_DRIVER("Disabling psr...\n");
8696
8697         return dc_link_set_psr_allow_active(stream->link, false, true);
8698 }