1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57
58 #include "ivsrcid/ivsrcid_vislands30.h"
59
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97
98 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
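/* A rough sketch of the DMUB firmware image layout, as implied by the
 * parsing in dm_dmub_sw_init() and dm_dmub_hw_init() below (illustrative,
 * not taken from a firmware spec):
 *
 *   ucode_array_offset_bytes
 *   +-> [ PSP header   (PSP_HEADER_BYTES)  ]
 *       [ inst_const: code and constants   ]  -> DMUB_WINDOW_0_INST_CONST
 *       [ PSP footer   (PSP_FOOTER_BYTES)  ]
 *       [ bss/data     (bss_data_bytes)    ]  -> DMUB_WINDOW_2_BSS_DATA
 */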
109
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
113  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
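/* As a rough sketch of that conversion (illustrative, not a literal call
 * chain): a DRM atomic commit enters through amdgpu_dm_atomic_commit(), DM
 * translates the new drm_crtc/drm_plane state into dc_stream_state and
 * dc_plane_state objects, and hands those to DC, which programs the
 * hardware.
 */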
119
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123
124 /*
125  * Initializes drm_device display related structures, based on the information
126  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
127  * drm_encoder, drm_mode_config
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136                                 struct drm_plane *plane,
137                                 unsigned long possible_crtcs,
138                                 const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140                                struct drm_plane *plane,
141                                uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
144                                     uint32_t link_index,
145                                     struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147                                   struct amdgpu_encoder *aencoder,
148                                   uint32_t link_index);
149
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153                                    struct drm_atomic_state *state,
154                                    bool nonblock);
155
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159                                   struct drm_atomic_state *state);
160
161 static void handle_cursor_update(struct drm_plane *plane,
162                                  struct drm_plane_state *old_plane_state);
163
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168
169
170 /*
171  * dm_vblank_get_counter
172  *
173  * @brief
174  * Get counter for number of vertical blanks
175  *
176  * @param
177  * struct amdgpu_device *adev - [in] desired amdgpu device
178  * int crtc - [in] which CRTC to get the counter from
179  *
180  * @return
181  * Counter for vertical blanks
182  */
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
184 {
185         if (crtc >= adev->mode_info.num_crtc)
186                 return 0;
187         else {
188                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
190                                 acrtc->base.state);
191
192
193                 if (acrtc_state->stream == NULL) {
194                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
195                                   crtc);
196                         return 0;
197                 }
198
199                 return dc_stream_get_vblank_counter(acrtc_state->stream);
200         }
201 }
202
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204                                   u32 *vbl, u32 *position)
205 {
206         uint32_t v_blank_start, v_blank_end, h_position, v_position;
207
208         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209                 return -EINVAL;
210         else {
211                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213                                                 acrtc->base.state);
214
215                 if (acrtc_state->stream == NULL) {
216                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217                                   crtc);
218                         return 0;
219                 }
220
221                 /*
222                  * TODO rework base driver to use values directly.
223                  * for now parse it back into reg-format
224                  */
225                 dc_stream_get_scanoutpos(acrtc_state->stream,
226                                          &v_blank_start,
227                                          &v_blank_end,
228                                          &h_position,
229                                          &v_position);
230
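                /* Worked example of the packed format produced below (values
                 * hypothetical): h_position = 0x50 and v_position = 0x20
                 * yield *position = 0x00500020.
                 */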
231                 *position = v_position | (h_position << 16);
232                 *vbl = v_blank_start | (v_blank_end << 16);
233         }
234
235         return 0;
236 }
237
238 static bool dm_is_idle(void *handle)
239 {
240         /* XXX todo */
241         return true;
242 }
243
244 static int dm_wait_for_idle(void *handle)
245 {
246         /* XXX todo */
247         return 0;
248 }
249
250 static bool dm_check_soft_reset(void *handle)
251 {
252         return false;
253 }
254
255 static int dm_soft_reset(void *handle)
256 {
257         /* XXX todo */
258         return 0;
259 }
260
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263                      int otg_inst)
264 {
265         struct drm_device *dev = adev->ddev;
266         struct drm_crtc *crtc;
267         struct amdgpu_crtc *amdgpu_crtc;
268
269         if (otg_inst == -1) {
270                 WARN_ON(1);
271                 return adev->mode_info.crtcs[0];
272         }
273
274         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275                 amdgpu_crtc = to_amdgpu_crtc(crtc);
276
277                 if (amdgpu_crtc->otg_inst == otg_inst)
278                         return amdgpu_crtc;
279         }
280
281         return NULL;
282 }
283
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289
290 /**
291  * dm_pflip_high_irq() - Handle pageflip interrupt
292  * @interrupt_params: interrupt parameters
293  *
294  * Handles the pageflip interrupt by notifying all interested parties
295  * that the pageflip has been completed.
296  */
297 static void dm_pflip_high_irq(void *interrupt_params)
298 {
299         struct amdgpu_crtc *amdgpu_crtc;
300         struct common_irq_params *irq_params = interrupt_params;
301         struct amdgpu_device *adev = irq_params->adev;
302         unsigned long flags;
303         struct drm_pending_vblank_event *e;
304         struct dm_crtc_state *acrtc_state;
305         uint32_t vpos, hpos, v_blank_start, v_blank_end;
306         bool vrr_active;
307
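        /* The pageflip IRQ sources appear to be laid out contiguously per
         * OTG, so (irq_src - IRQ_TYPE_PFLIP) recovers the OTG instance this
         * interrupt fired for (an assumption based on the lookup below).
         */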
308         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
309
310         /* IRQ could occur when in initial stage */
311         /* TODO work and BO cleanup */
312         if (amdgpu_crtc == NULL) {
313                 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
314                 return;
315         }
316
317         spin_lock_irqsave(&adev->ddev->event_lock, flags);
318
319         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
320                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
321                                                  amdgpu_crtc->pflip_status,
322                                                  AMDGPU_FLIP_SUBMITTED,
323                                                  amdgpu_crtc->crtc_id,
324                                                  amdgpu_crtc);
325                 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
326                 return;
327         }
328
329         /* page flip completed. */
330         e = amdgpu_crtc->event;
331         amdgpu_crtc->event = NULL;
332
333         if (!e)
334                 WARN_ON(1);
335
336         acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337         vrr_active = amdgpu_dm_vrr_active(acrtc_state);
338
339         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
340         if (!vrr_active ||
341             !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342                                       &v_blank_end, &hpos, &vpos) ||
343             (vpos < v_blank_start)) {
344                 /* Update to correct count and vblank timestamp if racing with
345                  * vblank irq. This also updates to the correct vblank timestamp
346                  * even in VRR mode, as scanout is past the front-porch atm.
347                  */
348                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
349
350                 /* Wake up userspace by sending the pageflip event with proper
351                  * count and timestamp of vblank of flip completion.
352                  */
353                 if (e) {
354                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
355
356                         /* Event sent, so done with vblank for this flip */
357                         drm_crtc_vblank_put(&amdgpu_crtc->base);
358                 }
359         } else if (e) {
360                 /* VRR active and inside front-porch: vblank count and
361                  * timestamp for pageflip event will only be up to date after
362                  * drm_crtc_handle_vblank() has been executed from late vblank
363                  * irq handler after start of back-porch (vline 0). We queue the
364                  * pageflip event for send-out by drm_crtc_handle_vblank() with
365                  * updated timestamp and count, once it runs after us.
366                  *
367                  * We need to open-code this instead of using the helper
368                  * drm_crtc_arm_vblank_event(), as that helper would
369                  * call drm_crtc_accurate_vblank_count(), which we must
370                  * not call in VRR mode while we are in front-porch!
371                  */
372
373                 /* sequence will be replaced by real count during send-out. */
374                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375                 e->pipe = amdgpu_crtc->crtc_id;
376
377                 list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
378                 e = NULL;
379         }
380
381         /* Keep track of the vblank of this flip for flip throttling. We use
382          * the cooked hw counter, as it is incremented at the start of the
383          * vblank in which the pageflip completed, so last_flip_vblank is the
384          * forbidden count for queueing new pageflips if vsync + VRR is enabled.
385          */
386         amdgpu_crtc->last_flip_vblank =
387                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
388
389         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
391
392         DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393                          amdgpu_crtc->crtc_id, amdgpu_crtc,
394                          vrr_active, (int) !e);
395 }
396
397 static void dm_vupdate_high_irq(void *interrupt_params)
398 {
399         struct common_irq_params *irq_params = interrupt_params;
400         struct amdgpu_device *adev = irq_params->adev;
401         struct amdgpu_crtc *acrtc;
402         struct dm_crtc_state *acrtc_state;
403         unsigned long flags;
404
405         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
406
407         if (acrtc) {
408                 acrtc_state = to_dm_crtc_state(acrtc->base.state);
409
410                 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
411                               acrtc->crtc_id,
412                               amdgpu_dm_vrr_active(acrtc_state));
413
414                 /* Core vblank handling is done here after the end of front-porch
415                  * in vrr mode, as vblank timestamping only gives valid results
416                  * once we are past the front-porch. This will also deliver any
417                  * page-flip completion events that were queued to us because a
418                  * pageflip happened inside the front-porch.
419                  */
420                 if (amdgpu_dm_vrr_active(acrtc_state)) {
421                         drm_crtc_handle_vblank(&acrtc->base);
422
423                         /* BTR processing for pre-DCE12 ASICs */
424                         if (acrtc_state->stream &&
425                             adev->family < AMDGPU_FAMILY_AI) {
426                                 spin_lock_irqsave(&adev->ddev->event_lock, flags);
427                                 mod_freesync_handle_v_update(
428                                     adev->dm.freesync_module,
429                                     acrtc_state->stream,
430                                     &acrtc_state->vrr_params);
431
432                                 dc_stream_adjust_vmin_vmax(
433                                     adev->dm.dc,
434                                     acrtc_state->stream,
435                                     &acrtc_state->vrr_params.adjust);
436                                 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
437                         }
438                 }
439         }
440 }
441
442 /**
443  * dm_crtc_high_irq() - Handles CRTC interrupt
444  * @interrupt_params: interrupt parameters
445  *
446  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
447  * event handler.
448  */
449 static void dm_crtc_high_irq(void *interrupt_params)
450 {
451         struct common_irq_params *irq_params = interrupt_params;
452         struct amdgpu_device *adev = irq_params->adev;
453         struct amdgpu_crtc *acrtc;
454         struct dm_crtc_state *acrtc_state;
455         unsigned long flags;
456
457         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
458
459         if (acrtc) {
460                 acrtc_state = to_dm_crtc_state(acrtc->base.state);
461
462                 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
463                               acrtc->crtc_id,
464                               amdgpu_dm_vrr_active(acrtc_state));
465
466                 /* Core vblank handling at the start of front-porch is only
467                  * possible in non-vrr mode, as only then does vblank timestamping
468                  * give valid results while inside the front-porch. Otherwise defer
469                  * it to dm_vupdate_high_irq after the end of front-porch.
470                  */
471                 if (!amdgpu_dm_vrr_active(acrtc_state))
472                         drm_crtc_handle_vblank(&acrtc->base);
473
474                 /* The following must happen at the start of vblank, for crc
475                  * computation and below-the-range btr support in vrr mode.
476                  */
477                 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
478
479                 if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
480                     acrtc_state->vrr_params.supported &&
481                     acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
482                         spin_lock_irqsave(&adev->ddev->event_lock, flags);
483                         mod_freesync_handle_v_update(
484                                 adev->dm.freesync_module,
485                                 acrtc_state->stream,
486                                 &acrtc_state->vrr_params);
487
488                         dc_stream_adjust_vmin_vmax(
489                                 adev->dm.dc,
490                                 acrtc_state->stream,
491                                 &acrtc_state->vrr_params.adjust);
492                         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
493                 }
494         }
495 }
496
497 #if defined(CONFIG_DRM_AMD_DC_DCN)
498 /**
499  * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
500  * @interrupt_params: interrupt parameters
501  *
502  * Notify DRM's vblank event handler at VSTARTUP
503  *
504  * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which point:
505  * * We are close enough to VUPDATE - the point of no return for hw
506  * * We are in the fixed portion of variable front porch when vrr is enabled
507  * * We are before VUPDATE, where double-buffered vrr registers are swapped
508  *
509  * It is therefore the correct place to signal vblank, send user flip events,
510  * and update VRR.
511  */
512 static void dm_dcn_crtc_high_irq(void *interrupt_params)
513 {
514         struct common_irq_params *irq_params = interrupt_params;
515         struct amdgpu_device *adev = irq_params->adev;
516         struct amdgpu_crtc *acrtc;
517         struct dm_crtc_state *acrtc_state;
518         unsigned long flags;
519
520         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
521
522         if (!acrtc)
523                 return;
524
525         acrtc_state = to_dm_crtc_state(acrtc->base.state);
526
527         DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
528                          amdgpu_dm_vrr_active(acrtc_state),
529                          acrtc_state->active_planes);
530
531         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
532         drm_crtc_handle_vblank(&acrtc->base);
533
534         spin_lock_irqsave(&adev->ddev->event_lock, flags);
535
536         if (acrtc_state->vrr_params.supported &&
537             acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
538                 mod_freesync_handle_v_update(
539                 adev->dm.freesync_module,
540                 acrtc_state->stream,
541                 &acrtc_state->vrr_params);
542
543                 dc_stream_adjust_vmin_vmax(
544                         adev->dm.dc,
545                         acrtc_state->stream,
546                         &acrtc_state->vrr_params.adjust);
547         }
548
549         /*
550          * If there aren't any active_planes then DCN HUBP may be clock-gated.
551          * In that case, pageflip completion interrupts won't fire and pageflip
552          * completion events won't get delivered. Prevent this by sending
553          * pending pageflip events from here if a flip is still pending.
554          *
555          * If any planes are enabled, use dm_pflip_high_irq() instead, to
556          * avoid race conditions between flip programming and completion,
557          * which could cause too early flip completion events.
558          */
559         if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
560             acrtc_state->active_planes == 0) {
561                 if (acrtc->event) {
562                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
563                         acrtc->event = NULL;
564                         drm_crtc_vblank_put(&acrtc->base);
565                 }
566                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
567         }
568
569         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
570 }
571 #endif
572
573 static int dm_set_clockgating_state(void *handle,
574                   enum amd_clockgating_state state)
575 {
576         return 0;
577 }
578
579 static int dm_set_powergating_state(void *handle,
580                   enum amd_powergating_state state)
581 {
582         return 0;
583 }
584
585 /* Prototypes of private functions */
586 static int dm_early_init(void* handle);
587
588 /* Allocate memory for FBC compressed data */
589 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
590 {
591         struct drm_device *dev = connector->dev;
592         struct amdgpu_device *adev = dev->dev_private;
593         struct dm_comressor_info *compressor = &adev->dm.compressor;
594         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
595         struct drm_display_mode *mode;
596         unsigned long max_size = 0;
597
598         if (adev->dm.dc->fbc_compressor == NULL)
599                 return;
600
601         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
602                 return;
603
604         if (compressor->bo_ptr)
605                 return;
606
607
608         list_for_each_entry(mode, &connector->modes, head) {
609                 if (max_size < mode->htotal * mode->vtotal)
610                         max_size = mode->htotal * mode->vtotal;
611         }
612
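        /* max_size is in pixels; the allocation below is max_size * 4 bytes,
         * presumably one 32-bit word per pixel for the compressed surface.
         */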
613         if (max_size) {
614                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
615                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
616                             &compressor->gpu_addr, &compressor->cpu_addr);
617
618                 if (r)
619                         DRM_ERROR("DM: Failed to initialize FBC\n");
620                 else {
621                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
622                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
623                 }
624
625         }
626
627 }
628
629 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
630                                           int pipe, bool *enabled,
631                                           unsigned char *buf, int max_bytes)
632 {
633         struct drm_device *dev = dev_get_drvdata(kdev);
634         struct amdgpu_device *adev = dev->dev_private;
635         struct drm_connector *connector;
636         struct drm_connector_list_iter conn_iter;
637         struct amdgpu_dm_connector *aconnector;
638         int ret = 0;
639
640         *enabled = false;
641
642         mutex_lock(&adev->dm.audio_lock);
643
644         drm_connector_list_iter_begin(dev, &conn_iter);
645         drm_for_each_connector_iter(connector, &conn_iter) {
646                 aconnector = to_amdgpu_dm_connector(connector);
647                 if (aconnector->audio_inst != port)
648                         continue;
649
650                 *enabled = true;
651                 ret = drm_eld_size(connector->eld);
652                 memcpy(buf, connector->eld, min(max_bytes, ret));
653
654                 break;
655         }
656         drm_connector_list_iter_end(&conn_iter);
657
658         mutex_unlock(&adev->dm.audio_lock);
659
660         DRM_DEBUG_KMS("Get ELD: idx=%d ret=%d en=%d\n", port, ret, *enabled);
661
662         return ret;
663 }
664
665 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
666         .get_eld = amdgpu_dm_audio_component_get_eld,
667 };
668
669 static int amdgpu_dm_audio_component_bind(struct device *kdev,
670                                        struct device *hda_kdev, void *data)
671 {
672         struct drm_device *dev = dev_get_drvdata(kdev);
673         struct amdgpu_device *adev = dev->dev_private;
674         struct drm_audio_component *acomp = data;
675
676         acomp->ops = &amdgpu_dm_audio_component_ops;
677         acomp->dev = kdev;
678         adev->dm.audio_component = acomp;
679
680         return 0;
681 }
682
683 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
684                                           struct device *hda_kdev, void *data)
685 {
686         struct drm_device *dev = dev_get_drvdata(kdev);
687         struct amdgpu_device *adev = dev->dev_private;
688         struct drm_audio_component *acomp = data;
689
690         acomp->ops = NULL;
691         acomp->dev = NULL;
692         adev->dm.audio_component = NULL;
693 }
694
695 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
696         .bind   = amdgpu_dm_audio_component_bind,
697         .unbind = amdgpu_dm_audio_component_unbind,
698 };
699
700 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
701 {
702         int i, ret;
703
704         if (!amdgpu_audio)
705                 return 0;
706
707         adev->mode_info.audio.enabled = true;
708
709         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
710
711         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
712                 adev->mode_info.audio.pin[i].channels = -1;
713                 adev->mode_info.audio.pin[i].rate = -1;
714                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
715                 adev->mode_info.audio.pin[i].status_bits = 0;
716                 adev->mode_info.audio.pin[i].category_code = 0;
717                 adev->mode_info.audio.pin[i].connected = false;
718                 adev->mode_info.audio.pin[i].id =
719                         adev->dm.dc->res_pool->audios[i]->inst;
720                 adev->mode_info.audio.pin[i].offset = 0;
721         }
722
723         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
724         if (ret < 0)
725                 return ret;
726
727         adev->dm.audio_registered = true;
728
729         return 0;
730 }
731
732 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 {
734         if (!amdgpu_audio)
735                 return;
736
737         if (!adev->mode_info.audio.enabled)
738                 return;
739
740         if (adev->dm.audio_registered) {
741                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
742                 adev->dm.audio_registered = false;
743         }
744
745         /* TODO: Disable audio? */
746
747         adev->mode_info.audio.enabled = false;
748 }
749
750 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
751 {
752         struct drm_audio_component *acomp = adev->dm.audio_component;
753
754         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
755                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
756
757                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
758                                                  pin, -1);
759         }
760 }
761
762 static int dm_dmub_hw_init(struct amdgpu_device *adev)
763 {
764         const struct dmcub_firmware_header_v1_0 *hdr;
765         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
766         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
767         const struct firmware *dmub_fw = adev->dm.dmub_fw;
768         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
769         struct abm *abm = adev->dm.dc->res_pool->abm;
770         struct dmub_srv_hw_params hw_params;
771         enum dmub_status status;
772         const unsigned char *fw_inst_const, *fw_bss_data;
773         uint32_t i, fw_inst_const_size, fw_bss_data_size;
774         bool has_hw_support;
775
776         if (!dmub_srv)
777                 /* DMUB isn't supported on the ASIC. */
778                 return 0;
779
780         if (!fb_info) {
781                 DRM_ERROR("No framebuffer info for DMUB service.\n");
782                 return -EINVAL;
783         }
784
785         if (!dmub_fw) {
786                 /* Firmware required for DMUB support. */
787                 DRM_ERROR("No firmware provided for DMUB.\n");
788                 return -EINVAL;
789         }
790
791         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
792         if (status != DMUB_STATUS_OK) {
793                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
794                 return -EINVAL;
795         }
796
797         if (!has_hw_support) {
798                 DRM_INFO("DMUB unsupported on ASIC\n");
799                 return 0;
800         }
801
802         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
803
804         fw_inst_const = dmub_fw->data +
805                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806                         PSP_HEADER_BYTES;
807
808         fw_bss_data = dmub_fw->data +
809                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
810                       le32_to_cpu(hdr->inst_const_bytes);
811
812         /* Copy firmware and bios info into FB memory. */
813         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
814                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
815
816         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
817
818         /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
819          * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
820          * DMUB firmware to CW0; otherwise, the firmware backdoor load is
821          * done here by dm_dmub_hw_init.
822          */
823         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
824                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
825                                 fw_inst_const_size);
826         }
827
828         if (fw_bss_data_size)
829                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
830                        fw_bss_data, fw_bss_data_size);
831
832         /* Copy the vbios into FB memory. */
833         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
834                adev->bios_size);
835
836         /* Reset regions that need to be reset. */
837         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
838         fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
839
840         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
841                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
842
843         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
844                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
845
846         /* Initialize hardware. */
847         memset(&hw_params, 0, sizeof(hw_params));
848         hw_params.fb_base = adev->gmc.fb_start;
849         hw_params.fb_offset = adev->gmc.aper_base;
850
851         /* backdoor load firmware and trigger dmub running */
852         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
853                 hw_params.load_inst_const = true;
854
855         if (dmcu)
856                 hw_params.psp_version = dmcu->psp_version;
857
858         for (i = 0; i < fb_info->num_fb; ++i)
859                 hw_params.fb[i] = &fb_info->fb[i];
860
861         status = dmub_srv_hw_init(dmub_srv, &hw_params);
862         if (status != DMUB_STATUS_OK) {
863                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
864                 return -EINVAL;
865         }
866
867         /* Wait for firmware load to finish. */
868         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
869         if (status != DMUB_STATUS_OK)
870                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
871
872         /* Init DMCU and ABM if available. */
873         if (dmcu && abm) {
874                 dmcu->funcs->dmcu_init(dmcu);
875                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
876         }
877
878         adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
879         if (!adev->dm.dc->ctx->dmub_srv) {
880                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
881                 return -ENOMEM;
882         }
883
884         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
885                  adev->dm.dmcub_fw_version);
886
887         return 0;
888 }
889
890 static int amdgpu_dm_init(struct amdgpu_device *adev)
891 {
892         struct dc_init_data init_data;
893 #ifdef CONFIG_DRM_AMD_DC_HDCP
894         struct dc_callback_init init_params;
895 #endif
896         int r;
897
898         adev->dm.ddev = adev->ddev;
899         adev->dm.adev = adev;
900
901         /* Zero all the fields */
902         memset(&init_data, 0, sizeof(init_data));
903 #ifdef CONFIG_DRM_AMD_DC_HDCP
904         memset(&init_params, 0, sizeof(init_params));
905 #endif
906
907         mutex_init(&adev->dm.dc_lock);
908         mutex_init(&adev->dm.audio_lock);
909
910         if (amdgpu_dm_irq_init(adev)) {
911                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
912                 goto error;
913         }
914
915         init_data.asic_id.chip_family = adev->family;
916
917         init_data.asic_id.pci_revision_id = adev->pdev->revision;
918         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
919
920         init_data.asic_id.vram_width = adev->gmc.vram_width;
921         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
922         init_data.asic_id.atombios_base_address =
923                 adev->mode_info.atom_context->bios;
924
925         init_data.driver = adev;
926
927         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
928
929         if (!adev->dm.cgs_device) {
930                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
931                 goto error;
932         }
933
934         init_data.cgs_device = adev->dm.cgs_device;
935
936         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
937
938         switch (adev->asic_type) {
939         case CHIP_CARRIZO:
940         case CHIP_STONEY:
941         case CHIP_RAVEN:
942         case CHIP_RENOIR:
943                 init_data.flags.gpu_vm_support = true;
944                 break;
945         default:
946                 break;
947         }
948
949         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
950                 init_data.flags.fbc_support = true;
951
952         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
953                 init_data.flags.multi_mon_pp_mclk_switch = true;
954
955         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
956                 init_data.flags.disable_fractional_pwm = true;
957
958         init_data.flags.power_down_display_on_boot = true;
959
960         init_data.soc_bounding_box = adev->dm.soc_bounding_box;
961
962         /* Display Core create. */
963         adev->dm.dc = dc_create(&init_data);
964
965         if (adev->dm.dc) {
966                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
967         } else {
968                 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
969                 goto error;
970         }
971
972         r = dm_dmub_hw_init(adev);
973         if (r) {
974                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
975                 goto error;
976         }
977
978         dc_hardware_init(adev->dm.dc);
979
980         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
981         if (!adev->dm.freesync_module) {
982                 DRM_ERROR(
983                 "amdgpu: failed to initialize freesync_module.\n");
984         } else
985                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
986                                 adev->dm.freesync_module);
987
988         amdgpu_dm_init_color_mod();
989
990 #ifdef CONFIG_DRM_AMD_DC_HDCP
991         if (adev->asic_type >= CHIP_RAVEN) {
992                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
993
994                 if (!adev->dm.hdcp_workqueue)
995                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
996                 else
997                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
998
999                 dc_init_callbacks(adev->dm.dc, &init_params);
1000         }
1001 #endif
1002         if (amdgpu_dm_initialize_drm_device(adev)) {
1003                 DRM_ERROR(
1004                 "amdgpu: failed to initialize sw for display support.\n");
1005                 goto error;
1006         }
1007
1008         /* Update the actual used number of crtc */
1009         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1010
1011         /* TODO: Add_display_info? */
1012
1013         /* TODO use dynamic cursor width */
1014         adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1015         adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1016
1017         if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
1018                 DRM_ERROR(
1019                 "amdgpu: failed to initialize sw for display support.\n");
1020                 goto error;
1021         }
1022
1023         DRM_DEBUG_DRIVER("KMS initialized.\n");
1024
1025         return 0;
1026 error:
1027         amdgpu_dm_fini(adev);
1028
1029         return -EINVAL;
1030 }
1031
1032 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1033 {
1034         amdgpu_dm_audio_fini(adev);
1035
1036         amdgpu_dm_destroy_drm_device(&adev->dm);
1037
1038 #ifdef CONFIG_DRM_AMD_DC_HDCP
1039         if (adev->dm.hdcp_workqueue) {
1040                 hdcp_destroy(adev->dm.hdcp_workqueue);
1041                 adev->dm.hdcp_workqueue = NULL;
1042         }
1043
1044         if (adev->dm.dc)
1045                 dc_deinit_callbacks(adev->dm.dc);
1046 #endif
1047         if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1048                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1049                 adev->dm.dc->ctx->dmub_srv = NULL;
1050         }
1051
1052         if (adev->dm.dmub_bo)
1053                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1054                                       &adev->dm.dmub_bo_gpu_addr,
1055                                       &adev->dm.dmub_bo_cpu_addr);
1056
1057         /* DC Destroy TODO: Replace destroy DAL */
1058         if (adev->dm.dc)
1059                 dc_destroy(&adev->dm.dc);
1060         /*
1061          * TODO: pageflip, vblank interrupt
1062          *
1063          * amdgpu_dm_irq_fini(adev);
1064          */
1065
1066         if (adev->dm.cgs_device) {
1067                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1068                 adev->dm.cgs_device = NULL;
1069         }
1070         if (adev->dm.freesync_module) {
1071                 mod_freesync_destroy(adev->dm.freesync_module);
1072                 adev->dm.freesync_module = NULL;
1073         }
1074
1075         mutex_destroy(&adev->dm.audio_lock);
1076         mutex_destroy(&adev->dm.dc_lock);
1077
1078         return;
1079 }
1080
1081 static int load_dmcu_fw(struct amdgpu_device *adev)
1082 {
1083         const char *fw_name_dmcu = NULL;
1084         int r;
1085         const struct dmcu_firmware_header_v1_0 *hdr;
1086
1087         switch (adev->asic_type) {
1088         case CHIP_BONAIRE:
1089         case CHIP_HAWAII:
1090         case CHIP_KAVERI:
1091         case CHIP_KABINI:
1092         case CHIP_MULLINS:
1093         case CHIP_TONGA:
1094         case CHIP_FIJI:
1095         case CHIP_CARRIZO:
1096         case CHIP_STONEY:
1097         case CHIP_POLARIS11:
1098         case CHIP_POLARIS10:
1099         case CHIP_POLARIS12:
1100         case CHIP_VEGAM:
1101         case CHIP_VEGA10:
1102         case CHIP_VEGA12:
1103         case CHIP_VEGA20:
1104         case CHIP_NAVI10:
1105         case CHIP_NAVI14:
1106         case CHIP_RENOIR:
1107                 return 0;
1108         case CHIP_NAVI12:
1109                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1110                 break;
1111         case CHIP_RAVEN:
1112                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1113                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1114                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1115                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1116                 else
1117                         return 0;
1118                 break;
1119         default:
1120                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1121                 return -EINVAL;
1122         }
1123
1124         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1125                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1126                 return 0;
1127         }
1128
1129         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1130         if (r == -ENOENT) {
1131                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1132                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1133                 adev->dm.fw_dmcu = NULL;
1134                 return 0;
1135         }
1136         if (r) {
1137                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1138                         fw_name_dmcu);
1139                 return r;
1140         }
1141
1142         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1143         if (r) {
1144                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1145                         fw_name_dmcu);
1146                 release_firmware(adev->dm.fw_dmcu);
1147                 adev->dm.fw_dmcu = NULL;
1148                 return r;
1149         }
1150
1151         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
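        /* The DMCU image carries both the ERAM program and its interrupt
         * vectors, so it is registered as two ucode entries below: the ERAM
         * part (total ucode size minus intv_size_bytes) and the INTV part.
         */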
1152         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1153         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1154         adev->firmware.fw_size +=
1155                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1156
1157         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1158         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1159         adev->firmware.fw_size +=
1160                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1161
1162         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1163
1164         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1165
1166         return 0;
1167 }
1168
1169 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1170 {
1171         struct amdgpu_device *adev = ctx;
1172
1173         return dm_read_reg(adev->dm.dc->ctx, address);
1174 }
1175
1176 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1177                                      uint32_t value)
1178 {
1179         struct amdgpu_device *adev = ctx;
1180
1181         return dm_write_reg(adev->dm.dc->ctx, address, value);
1182 }
1183
1184 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1185 {
1186         struct dmub_srv_create_params create_params;
1187         struct dmub_srv_region_params region_params;
1188         struct dmub_srv_region_info region_info;
1189         struct dmub_srv_fb_params fb_params;
1190         struct dmub_srv_fb_info *fb_info;
1191         struct dmub_srv *dmub_srv;
1192         const struct dmcub_firmware_header_v1_0 *hdr;
1193         const char *fw_name_dmub;
1194         enum dmub_asic dmub_asic;
1195         enum dmub_status status;
1196         int r;
1197
1198         switch (adev->asic_type) {
1199         case CHIP_RENOIR:
1200                 dmub_asic = DMUB_ASIC_DCN21;
1201                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1202                 break;
1203
1204         default:
1205                 /* ASIC doesn't support DMUB. */
1206                 return 0;
1207         }
1208
1209         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1210         if (r) {
1211                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1212                 return 0;
1213         }
1214
1215         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1216         if (r) {
1217                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1218                 return 0;
1219         }
1220
1221         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1222
1223         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1224
1225         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1226                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1227                         AMDGPU_UCODE_ID_DMCUB;
1228                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1229                         adev->dm.dmub_fw;
1230                 adev->firmware.fw_size +=
1231                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1232
1233                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1234                          adev->dm.dmcub_fw_version);
1235         }
1236
1237         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1238         dmub_srv = adev->dm.dmub_srv;
1239
1240         if (!dmub_srv) {
1241                 DRM_ERROR("Failed to allocate DMUB service!\n");
1242                 return -ENOMEM;
1243         }
1244
1245         memset(&create_params, 0, sizeof(create_params));
1246         create_params.user_ctx = adev;
1247         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1248         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1249         create_params.asic = dmub_asic;
1250
1251         /* Create the DMUB service. */
1252         status = dmub_srv_create(dmub_srv, &create_params);
1253         if (status != DMUB_STATUS_OK) {
1254                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1255                 return -EINVAL;
1256         }
1257
1258         /* Calculate the size of all the regions for the DMUB service. */
1259         memset(&region_params, 0, sizeof(region_params));
1260
1261         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1262                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1263         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1264         region_params.vbios_size = adev->bios_size;
1265         region_params.fw_bss_data =
1266                 adev->dm.dmub_fw->data +
1267                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1268                 le32_to_cpu(hdr->inst_const_bytes);
1269         region_params.fw_inst_const =
1270                 adev->dm.dmub_fw->data +
1271                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1272                 PSP_HEADER_BYTES;
1273
1274         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1275                                            &region_info);
1276
1277         if (status != DMUB_STATUS_OK) {
1278                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1279                 return -EINVAL;
1280         }
1281
1282         /*
1283          * Allocate a framebuffer based on the total size of all the regions.
1284          * TODO: Move this into GART.
1285          */
1286         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1287                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1288                                     &adev->dm.dmub_bo_gpu_addr,
1289                                     &adev->dm.dmub_bo_cpu_addr);
1290         if (r)
1291                 return r;
1292
1293         /* Rebase the regions on the framebuffer address. */
1294         memset(&fb_params, 0, sizeof(fb_params));
1295         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1296         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1297         fb_params.region_info = &region_info;
1298
1299         adev->dm.dmub_fb_info =
1300                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1301         fb_info = adev->dm.dmub_fb_info;
1302
1303         if (!fb_info) {
1304                 DRM_ERROR(
1305                         "Failed to allocate framebuffer info for DMUB service!\n");
1306                 return -ENOMEM;
1307         }
1308
1309         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1310         if (status != DMUB_STATUS_OK) {
1311                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1312                 return -EINVAL;
1313         }
1314
1315         return 0;
1316 }
1317
1318 static int dm_sw_init(void *handle)
1319 {
1320         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1321         int r;
1322
1323         r = dm_dmub_sw_init(adev);
1324         if (r)
1325                 return r;
1326
1327         return load_dmcu_fw(adev);
1328 }
1329
1330 static int dm_sw_fini(void *handle)
1331 {
1332         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1333
1334         kfree(adev->dm.dmub_fb_info);
1335         adev->dm.dmub_fb_info = NULL;
1336
1337         if (adev->dm.dmub_srv) {
1338                 dmub_srv_destroy(adev->dm.dmub_srv);
1339                 adev->dm.dmub_srv = NULL;
1340         }
1341
1342         if (adev->dm.dmub_fw) {
1343                 release_firmware(adev->dm.dmub_fw);
1344                 adev->dm.dmub_fw = NULL;
1345         }
1346
1347         if (adev->dm.fw_dmcu) {
1348                 release_firmware(adev->dm.fw_dmcu);
1349                 adev->dm.fw_dmcu = NULL;
1350         }
1351
1352         return 0;
1353 }
1354
1355 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1356 {
1357         struct amdgpu_dm_connector *aconnector;
1358         struct drm_connector *connector;
1359         struct drm_connector_list_iter iter;
1360         int ret = 0;
1361
1362         drm_connector_list_iter_begin(dev, &iter);
1363         drm_for_each_connector_iter(connector, &iter) {
1364                 aconnector = to_amdgpu_dm_connector(connector);
1365                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1366                     aconnector->mst_mgr.aux) {
1367                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1368                                          aconnector,
1369                                          aconnector->base.base.id);
1370
1371                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1372                         if (ret < 0) {
1373                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1374                                 aconnector->dc_link->type =
1375                                         dc_connection_single;
1376                                 break;
1377                         }
1378                 }
1379         }
1380         drm_connector_list_iter_end(&iter);
1381
1382         return ret;
1383 }
1384
1385 static int dm_late_init(void *handle)
1386 {
1387         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1388
1389         struct dmcu_iram_parameters params;
1390         unsigned int linear_lut[16];
1391         int i;
1392         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1393         bool ret = false;
1394
1395         for (i = 0; i < 16; i++)
1396                 linear_lut[i] = 0xFFFF * i / 15;
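        /* 0xFFFF == 15 * 0x1111, so this produces the evenly spaced ramp
         * 0x0000, 0x1111, 0x2222, ..., 0xFFFF.
         */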
1397
1398         params.set = 0;
1399         params.backlight_ramping_start = 0xCCCC;
1400         params.backlight_ramping_reduction = 0xCCCCCCCC;
1401         params.backlight_lut_array_size = 16;
1402         params.backlight_lut_array = linear_lut;
1403
1404         /* Min backlight level after ABM reduction; don't allow below 1%:
1405          * 0xFFFF * 0.01 = 0x28F
1406          */
1407         params.min_abm_backlight = 0x28F;
1408
1409         /* TODO: will enable for navi10 */
1410         if (adev->asic_type <= CHIP_RAVEN) {
1411                 ret = dmcu_load_iram(dmcu, params);
1412
1413                 if (!ret)
1414                         return -EINVAL;
1415         }
1416
1417         return detect_mst_link_for_all_connectors(adev->ddev);
1418 }
1419
1420 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1421 {
1422         struct amdgpu_dm_connector *aconnector;
1423         struct drm_connector *connector;
1424         struct drm_connector_list_iter iter;
1425         struct drm_dp_mst_topology_mgr *mgr;
1426         int ret;
1427         bool need_hotplug = false;
1428
1429         drm_connector_list_iter_begin(dev, &iter);
1430         drm_for_each_connector_iter(connector, &iter) {
1431                 aconnector = to_amdgpu_dm_connector(connector);
1432                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1433                     aconnector->mst_port)
1434                         continue;
1435
1436                 mgr = &aconnector->mst_mgr;
1437
1438                 if (suspend) {
1439                         drm_dp_mst_topology_mgr_suspend(mgr);
1440                 } else {
1441                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1442                         if (ret < 0) {
1443                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1444                                 need_hotplug = true;
1445                         }
1446                 }
1447         }
1448         drm_connector_list_iter_end(&iter);
1449
1450         if (need_hotplug)
1451                 drm_kms_helper_hotplug_event(dev);
1452 }
1453
1454 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1455 {
1456         struct smu_context *smu = &adev->smu;
1457         int ret = 0;
1458
1459         if (!is_support_sw_smu(adev))
1460                 return 0;
1461
1462         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1463          * depends on the Windows driver dc implementation.
1464          * For Navi1x, the clock settings of the dcn watermarks are fixed. The
1465          * settings should be passed to smu during boot up and on resume from s3.
1466          * boot up: dc calculates the dcn watermark clock settings within
1467          * dc_create, dcn20_resource_construct
1468          * then calls the pplib functions below to pass the settings to smu:
1469          * smu_set_watermarks_for_clock_ranges
1470          * smu_set_watermarks_table
1471          * navi10_set_watermarks_table
1472          * smu_write_watermarks_table
1473          *
1474          * For Renoir, the clock settings of the dcn watermarks are also fixed
1475          * values. dc has implemented a different flow for the Windows driver:
1476          * dc_hardware_init / dc_set_power_state
1477          * dcn10_init_hw
1478          * notify_wm_ranges
1479          * set_wm_ranges
1480          * -- Linux
1481          * smu_set_watermarks_for_clock_ranges
1482          * renoir_set_watermarks_table
1483          * smu_write_watermarks_table
1484          *
1485          * For Linux,
1486          * dc_hardware_init -> amdgpu_dm_init
1487          * dc_set_power_state --> dm_resume
1488          *
1489          * therefore, this function applies to navi10/12/14 but not to
1490          * Renoir.
1491          */
1492         switch (adev->asic_type) {
1493         case CHIP_NAVI10:
1494         case CHIP_NAVI14:
1495         case CHIP_NAVI12:
1496                 break;
1497         default:
1498                 return 0;
1499         }
1500
1501         mutex_lock(&smu->mutex);
1502
1503         /* pass data to smu controller */
1504         if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1505                         !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1506                 ret = smu_write_watermarks_table(smu);
1507
1508                 if (ret) {
1509                         mutex_unlock(&smu->mutex);
1510                         DRM_ERROR("Failed to update WMTABLE!\n");
1511                         return ret;
1512                 }
1513                 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1514         }
1515
1516         mutex_unlock(&smu->mutex);
1517
1518         return 0;
1519 }
1520
1521 /**
1522  * dm_hw_init() - Initialize DC device
1523  * @handle: The base driver device containing the amdgpu_dm device.
1524  *
1525  * Initialize the &struct amdgpu_display_manager device. This involves calling
1526  * the initializers of each DM component, then populating the struct with them.
1527  *
1528  * Although the function implies hardware initialization, both hardware and
1529  * software are initialized here. Splitting them out to their relevant init
1530  * hooks is a future TODO item.
1531  *
1532  * Some notable things that are initialized here:
1533  *
1534  * - Display Core, both software and hardware
1535  * - DC modules that we need (freesync and color management)
1536  * - DRM software states
1537  * - Interrupt sources and handlers
1538  * - Vblank support
1539  * - Debug FS entries, if enabled
1540  */
1541 static int dm_hw_init(void *handle)
1542 {
1543         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1544         /* Create DAL display manager */
1545         amdgpu_dm_init(adev);
1546         amdgpu_dm_hpd_init(adev);
1547
1548         return 0;
1549 }
1550
1551 /**
1552  * dm_hw_fini() - Teardown DC device
1553  * @handle: The base driver device containing the amdgpu_dm device.
1554  *
1555  * Teardown components within &struct amdgpu_display_manager that require
1556  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1557  * were loaded. Also flush IRQ workqueues and disable them.
1558  */
1559 static int dm_hw_fini(void *handle)
1560 {
1561         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1562
1563         amdgpu_dm_hpd_fini(adev);
1564
1565         amdgpu_dm_irq_fini(adev);
1566         amdgpu_dm_fini(adev);
1567         return 0;
1568 }
1569
1570 static int dm_suspend(void *handle)
1571 {
1572         struct amdgpu_device *adev = handle;
1573         struct amdgpu_display_manager *dm = &adev->dm;
1574         int ret = 0;
1575
1576         WARN_ON(adev->dm.cached_state);
1577         adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1578
1579         s3_handle_mst(adev->ddev, true);
1580
1581         amdgpu_dm_irq_suspend(adev);
1582
1583
1584         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1585
1586         return ret;
1587 }
1588
1589 static struct amdgpu_dm_connector *
1590 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1591                                              struct drm_crtc *crtc)
1592 {
1593         uint32_t i;
1594         struct drm_connector_state *new_con_state;
1595         struct drm_connector *connector;
1596         struct drm_crtc *crtc_from_state;
1597
1598         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1599                 crtc_from_state = new_con_state->crtc;
1600
1601                 if (crtc_from_state == crtc)
1602                         return to_amdgpu_dm_connector(connector);
1603         }
1604
1605         return NULL;
1606 }
1607
1608 static void emulated_link_detect(struct dc_link *link)
1609 {
1610         struct dc_sink_init_data sink_init_data = { 0 };
1611         struct display_sink_capability sink_caps = { 0 };
1612         enum dc_edid_status edid_status;
1613         struct dc_context *dc_ctx = link->ctx;
1614         struct dc_sink *sink = NULL;
1615         struct dc_sink *prev_sink = NULL;
1616
1617         link->type = dc_connection_none;
1618         prev_sink = link->local_sink;
1619
1620         if (prev_sink != NULL)
1621                 dc_sink_retain(prev_sink);
1622
1623         switch (link->connector_signal) {
1624         case SIGNAL_TYPE_HDMI_TYPE_A: {
1625                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1626                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1627                 break;
1628         }
1629
1630         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1631                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1632                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1633                 break;
1634         }
1635
1636         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1637                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1638                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1639                 break;
1640         }
1641
1642         case SIGNAL_TYPE_LVDS: {
1643                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1644                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1645                 break;
1646         }
1647
1648         case SIGNAL_TYPE_EDP: {
1649                 sink_caps.transaction_type =
1650                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1651                 sink_caps.signal = SIGNAL_TYPE_EDP;
1652                 break;
1653         }
1654
1655         case SIGNAL_TYPE_DISPLAY_PORT: {
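                /*
                 * Note that DP is reported as a virtual signal on this
                 * emulated-detection path, presumably because no real DP
                 * link training happens here.
                 */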
1656                 sink_caps.transaction_type =
1657                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1658                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1659                 break;
1660         }
1661
1662         default:
1663                 DC_ERROR("Invalid connector type! signal:%d\n",
1664                         link->connector_signal);
1665                 return;
1666         }
1667
1668         sink_init_data.link = link;
1669         sink_init_data.sink_signal = sink_caps.signal;
1670
1671         sink = dc_sink_create(&sink_init_data);
1672         if (!sink) {
1673                 DC_ERROR("Failed to create sink!\n");
1674                 return;
1675         }
1676
1677         /* dc_sink_create returns a new reference */
1678         link->local_sink = sink;
1679
1680         edid_status = dm_helpers_read_local_edid(
1681                         link->ctx,
1682                         link,
1683                         sink);
1684
1685         if (edid_status != EDID_OK)
1686                 DC_ERROR("Failed to read EDID\n");
1687
1688 }
1689
1690 static int dm_resume(void *handle)
1691 {
1692         struct amdgpu_device *adev = handle;
1693         struct drm_device *ddev = adev->ddev;
1694         struct amdgpu_display_manager *dm = &adev->dm;
1695         struct amdgpu_dm_connector *aconnector;
1696         struct drm_connector *connector;
1697         struct drm_connector_list_iter iter;
1698         struct drm_crtc *crtc;
1699         struct drm_crtc_state *new_crtc_state;
1700         struct dm_crtc_state *dm_new_crtc_state;
1701         struct drm_plane *plane;
1702         struct drm_plane_state *new_plane_state;
1703         struct dm_plane_state *dm_new_plane_state;
1704         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1705         enum dc_connection_type new_connection_type = dc_connection_none;
1706         int i, r;
1707
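        /*
         * Resume sequence, in order: recreate the dc_state, re-init DMUB,
         * power DC back on, resume DC, re-enable HPD Rx IRQs early, restore
         * the MSTM bits, redo sink detection, force a mode set, release the
         * dc states that were duplicated at suspend, then restore the cached
         * atomic state and the remaining IRQs.
         */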
1708         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1709         dc_release_state(dm_state->context);
1710         dm_state->context = dc_create_state(dm->dc);
1711         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1712         dc_resource_state_construct(dm->dc, dm_state->context);
1713
1714         /* Before powering on DC we need to re-initialize DMUB. */
1715         r = dm_dmub_hw_init(adev);
1716         if (r)
1717                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1718
1719         /* power on hardware */
1720         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1721
1722         /* program HPD filter */
1723         dc_resume(dm->dc);
1724
1725         /*
1726          * early enable HPD Rx IRQ; this should be done before the mode set, as
1727          * short-pulse interrupts are used for MST
1728          */
1729         amdgpu_dm_irq_resume_early(adev);
1730
1731         /* On resume we need to rewrite the MSTM control bits to enable MST*/
1732         s3_handle_mst(ddev, false);
1733
1734         /* Do detection*/
1735         drm_connector_list_iter_begin(ddev, &iter);
1736         drm_for_each_connector_iter(connector, &iter) {
1737                 aconnector = to_amdgpu_dm_connector(connector);
1738
1739                 /*
1740                  * This is the case when traversing through already created
1741                  * MST connectors; they should be skipped.
1742                  */
1743                 if (aconnector->mst_port)
1744                         continue;
1745
1746                 mutex_lock(&aconnector->hpd_lock);
1747                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1748                         DRM_ERROR("KMS: Failed to detect connector\n");
1749
1750                 if (aconnector->base.force && new_connection_type == dc_connection_none)
1751                         emulated_link_detect(aconnector->dc_link);
1752                 else
1753                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1754
1755                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1756                         aconnector->fake_enable = false;
1757
1758                 if (aconnector->dc_sink)
1759                         dc_sink_release(aconnector->dc_sink);
1760                 aconnector->dc_sink = NULL;
1761                 amdgpu_dm_update_connector_after_detect(aconnector);
1762                 mutex_unlock(&aconnector->hpd_lock);
1763         }
1764         drm_connector_list_iter_end(&iter);
1765
1766         /* Force mode set in atomic commit */
1767         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1768                 new_crtc_state->active_changed = true;
1769
1770         /*
1771          * atomic_check is expected to create the dc states. We need to release
1772          * them here, since they were duplicated as part of the suspend
1773          * procedure.
1774          */
1775         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1776                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1777                 if (dm_new_crtc_state->stream) {
1778                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1779                         dc_stream_release(dm_new_crtc_state->stream);
1780                         dm_new_crtc_state->stream = NULL;
1781                 }
1782         }
1783
1784         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1785                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1786                 if (dm_new_plane_state->dc_state) {
1787                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1788                         dc_plane_state_release(dm_new_plane_state->dc_state);
1789                         dm_new_plane_state->dc_state = NULL;
1790                 }
1791         }
1792
1793         drm_atomic_helper_resume(ddev, dm->cached_state);
1794
1795         dm->cached_state = NULL;
1796
1797         amdgpu_dm_irq_resume_late(adev);
1798
1799         amdgpu_dm_smu_write_watermarks_table(adev);
1800
1801         return 0;
1802 }
1803
1804 /**
1805  * DOC: DM Lifecycle
1806  *
1807  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1808  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1809  * the base driver's device list to be initialized and torn down accordingly.
1810  *
1811  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1812  */
1813
1814 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1815         .name = "dm",
1816         .early_init = dm_early_init,
1817         .late_init = dm_late_init,
1818         .sw_init = dm_sw_init,
1819         .sw_fini = dm_sw_fini,
1820         .hw_init = dm_hw_init,
1821         .hw_fini = dm_hw_fini,
1822         .suspend = dm_suspend,
1823         .resume = dm_resume,
1824         .is_idle = dm_is_idle,
1825         .wait_for_idle = dm_wait_for_idle,
1826         .check_soft_reset = dm_check_soft_reset,
1827         .soft_reset = dm_soft_reset,
1828         .set_clockgating_state = dm_set_clockgating_state,
1829         .set_powergating_state = dm_set_powergating_state,
1830 };
1831
1832 const struct amdgpu_ip_block_version dm_ip_block =
1833 {
1834         .type = AMD_IP_BLOCK_TYPE_DCE,
1835         .major = 1,
1836         .minor = 0,
1837         .rev = 0,
1838         .funcs = &amdgpu_dm_funcs,
1839 };
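
/*
 * For reference (a sketch, not part of this file): the per-ASIC setup code in
 * the base driver registers this block during SoC bring-up with a call like
 *
 *	amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * which is what causes the amd_ip_funcs hooks above to be invoked.
 */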
1840
1841
1842 /**
1843  * DOC: atomic
1844  *
1845  * *WIP*
1846  */
1847
1848 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1849         .fb_create = amdgpu_display_user_framebuffer_create,
1850         .output_poll_changed = drm_fb_helper_output_poll_changed,
1851         .atomic_check = amdgpu_dm_atomic_check,
1852         .atomic_commit = amdgpu_dm_atomic_commit,
1853 };
1854
1855 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1856         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1857 };
1858
1859 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1860 {
1861         u32 max_cll, min_cll, max, min, q, r;
1862         struct amdgpu_dm_backlight_caps *caps;
1863         struct amdgpu_display_manager *dm;
1864         struct drm_connector *conn_base;
1865         struct amdgpu_device *adev;
1866         static const u8 pre_computed_values[] = {
1867                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1868                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1869
1870         if (!aconnector || !aconnector->dc_link)
1871                 return;
1872
1873         conn_base = &aconnector->base;
1874         adev = conn_base->dev->dev_private;
1875         dm = &adev->dm;
1876         caps = &dm->backlight_caps;
1877         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1878         caps->aux_support = false;
1879         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1880         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1881
1882         if (caps->ext_caps->bits.oled == 1 ||
1883             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1884             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1885                 caps->aux_support = true;
1886
1887         /* From the specification (CTA-861-G), for calculating the maximum
1888          * luminance we need to use:
1889          *      Luminance = 50*2**(CV/32)
1890          * Where CV is a one-byte value.
1891          * Calculating this expression directly would need floating point
1892          * precision; to avoid that complexity, we take advantage of the fact
1893          * that CV is divided by a constant. From Euclid's division algorithm,
1894          * we know that CV can be written as: CV = 32*q + r. Substituting CV in
1895          * the Luminance expression gives 50*(2**q)*(2**(r/32)), so we only
1896          * need to pre-compute the values of 50*2**(r/32). For pre-computing
1897          * them we used the following Ruby line:
1898          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1899          * The results of the above expression can be verified against
1900          * pre_computed_values.
1901          */
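        /*
         * Worked example (hypothetical sink): max_cll = 100 gives q = 3, r = 4,
         * so max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440, close to
         * the exact 50 * 2**(100/32) ~= 436.
         */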
1902         q = max_cll >> 5;
1903         r = max_cll % 32;
1904         max = (1 << q) * pre_computed_values[r];
1905
1906         /* min luminance: maxLum * (CV/255)^2 / 100, computed in integer
1907          * math (CV is a one-byte value per CTA-861-G, so this cannot overflow) */
1908         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
1909
1910         caps->aux_max_input_signal = max;
1911         caps->aux_min_input_signal = min;
1912 }
1913
1914 void amdgpu_dm_update_connector_after_detect(
1915                 struct amdgpu_dm_connector *aconnector)
1916 {
1917         struct drm_connector *connector = &aconnector->base;
1918         struct drm_device *dev = connector->dev;
1919         struct dc_sink *sink;
1920
1921         /* MST handled by drm_mst framework */
1922         if (aconnector->mst_mgr.mst_state)
1923                 return;
1924
1925
1926         sink = aconnector->dc_link->local_sink;
1927         if (sink)
1928                 dc_sink_retain(sink);
1929
1930         /*
1931          * An EDID-managed (forced) connector gets its first update only in the
1932          * mode_valid hook; the connector sink is then set to either the fake or
1933          * the physical sink, depending on link status. Skip if already done during boot.
1934          */
1935         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1936                         && aconnector->dc_em_sink) {
1937
1938                 /*
1939                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
1940                  * fake a stream, because on resume connector->sink is set to NULL
1941                  */
1942                 mutex_lock(&dev->mode_config.mutex);
1943
1944                 if (sink) {
1945                         if (aconnector->dc_sink) {
1946                                 amdgpu_dm_update_freesync_caps(connector, NULL);
1947                                 /*
1948                                  * The retain and release below bump up the
1949                                  * sink refcount, because the link no longer points
1950                                  * to it after disconnect; otherwise the next crtc-to-connector
1951                                  * reshuffle by the UMD would trigger an unwanted dc_sink release
1952                                  */
1953                                 dc_sink_release(aconnector->dc_sink);
1954                         }
1955                         aconnector->dc_sink = sink;
1956                         dc_sink_retain(aconnector->dc_sink);
1957                         amdgpu_dm_update_freesync_caps(connector,
1958                                         aconnector->edid);
1959                 } else {
1960                         amdgpu_dm_update_freesync_caps(connector, NULL);
1961                         if (!aconnector->dc_sink) {
1962                                 aconnector->dc_sink = aconnector->dc_em_sink;
1963                                 dc_sink_retain(aconnector->dc_sink);
1964                         }
1965                 }
1966
1967                 mutex_unlock(&dev->mode_config.mutex);
1968
1969                 if (sink)
1970                         dc_sink_release(sink);
1971                 return;
1972         }
1973
1974         /*
1975          * TODO: temporary guard while looking for a proper fix;
1976          * if this sink is an MST sink, we should not do anything
1977          */
1978         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1979                 dc_sink_release(sink);
1980                 return;
1981         }
1982
1983         if (aconnector->dc_sink == sink) {
1984                 /*
1985                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
1986                  * Do nothing!!
1987                  */
1988                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1989                                 aconnector->connector_id);
1990                 if (sink)
1991                         dc_sink_release(sink);
1992                 return;
1993         }
1994
1995         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1996                 aconnector->connector_id, aconnector->dc_sink, sink);
1997
1998         mutex_lock(&dev->mode_config.mutex);
1999
2000         /*
2001          * 1. Update status of the drm connector
2002          * 2. Send an event and let userspace tell us what to do
2003          */
2004         if (sink) {
2005                 /*
2006                  * TODO: check if we still need the S3 mode update workaround.
2007                  * If yes, put it here.
2008                  */
2009                 if (aconnector->dc_sink)
2010                         amdgpu_dm_update_freesync_caps(connector, NULL);
2011
2012                 aconnector->dc_sink = sink;
2013                 dc_sink_retain(aconnector->dc_sink);
2014                 if (sink->dc_edid.length == 0) {
2015                         aconnector->edid = NULL;
2016                         drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2017                 } else {
2018                         aconnector->edid =
2019                                 (struct edid *) sink->dc_edid.raw_edid;
2020
2021
2022                         drm_connector_update_edid_property(connector,
2023                                         aconnector->edid);
2024                         drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2025                                             aconnector->edid);
2026                 }
2027                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2028                 update_connector_ext_caps(aconnector);
2029         } else {
2030                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2031                 amdgpu_dm_update_freesync_caps(connector, NULL);
2032                 drm_connector_update_edid_property(connector, NULL);
2033                 aconnector->num_modes = 0;
2034                 dc_sink_release(aconnector->dc_sink);
2035                 aconnector->dc_sink = NULL;
2036                 aconnector->edid = NULL;
2037 #ifdef CONFIG_DRM_AMD_DC_HDCP
2038                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2039                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2040                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2041 #endif
2042         }
2043
2044         mutex_unlock(&dev->mode_config.mutex);
2045
2046         if (sink)
2047                 dc_sink_release(sink);
2048 }
2049
2050 static void handle_hpd_irq(void *param)
2051 {
2052         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2053         struct drm_connector *connector = &aconnector->base;
2054         struct drm_device *dev = connector->dev;
2055         enum dc_connection_type new_connection_type = dc_connection_none;
2056 #ifdef CONFIG_DRM_AMD_DC_HDCP
2057         struct amdgpu_device *adev = dev->dev_private;
2058 #endif
2059
2060         /*
2061          * In case of failure, or for MST, there is no need to update the connector
2062          * status or notify the OS, since in the MST case MST does this in its own context.
2063          */
2064         mutex_lock(&aconnector->hpd_lock);
2065
2066 #ifdef CONFIG_DRM_AMD_DC_HDCP
2067         if (adev->dm.hdcp_workqueue)
2068                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2069 #endif
2070         if (aconnector->fake_enable)
2071                 aconnector->fake_enable = false;
2072
2073         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2074                 DRM_ERROR("KMS: Failed to detect connector\n");
2075
2076         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2077                 emulated_link_detect(aconnector->dc_link);
2078
2079
2080                 drm_modeset_lock_all(dev);
2081                 dm_restore_drm_connector_state(dev, connector);
2082                 drm_modeset_unlock_all(dev);
2083
2084                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2085                         drm_kms_helper_hotplug_event(dev);
2086
2087         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2088                 amdgpu_dm_update_connector_after_detect(aconnector);
2089
2090
2091                 drm_modeset_lock_all(dev);
2092                 dm_restore_drm_connector_state(dev, connector);
2093                 drm_modeset_unlock_all(dev);
2094
2095                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2096                         drm_kms_helper_hotplug_event(dev);
2097         }
2098         mutex_unlock(&aconnector->hpd_lock);
2099
2100 }
2101
2102 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2103 {
2104         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2105         uint8_t dret;
2106         bool new_irq_handled = false;
2107         int dpcd_addr;
2108         int dpcd_bytes_to_read;
2109
2110         const int max_process_count = 30;
2111         int process_count = 0;
2112
2113         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2114
2115         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2116                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2117                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2118                 dpcd_addr = DP_SINK_COUNT;
2119         } else {
2120                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2121                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2122                 dpcd_addr = DP_SINK_COUNT_ESI;
2123         }
2124
2125         dret = drm_dp_dpcd_read(
2126                 &aconnector->dm_dp_aux.aux,
2127                 dpcd_addr,
2128                 esi,
2129                 dpcd_bytes_to_read);
2130
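        /*
         * Drain queued ESI events: each pass lets the MST manager handle the
         * IRQ, ACKs it back to the sink, and re-reads the status until no new
         * IRQ is reported (or the iteration cap is hit).
         */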
2131         while (dret == dpcd_bytes_to_read &&
2132                 process_count < max_process_count) {
2133                 uint8_t retry;
2134                 dret = 0;
2135
2136                 process_count++;
2137
2138                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2139                 /* handle HPD short pulse irq */
2140                 if (aconnector->mst_mgr.mst_state)
2141                         drm_dp_mst_hpd_irq(
2142                                 &aconnector->mst_mgr,
2143                                 esi,
2144                                 &new_irq_handled);
2145
2146                 if (new_irq_handled) {
2147                         /* ACK at DPCD to notify downstream */
2148                         const int ack_dpcd_bytes_to_write =
2149                                 dpcd_bytes_to_read - 1;
2150
2151                         for (retry = 0; retry < 3; retry++) {
2152                                 uint8_t wret;
2153
2154                                 wret = drm_dp_dpcd_write(
2155                                         &aconnector->dm_dp_aux.aux,
2156                                         dpcd_addr + 1,
2157                                         &esi[1],
2158                                         ack_dpcd_bytes_to_write);
2159                                 if (wret == ack_dpcd_bytes_to_write)
2160                                         break;
2161                         }
2162
2163                         /* check if there is new irq to be handled */
2164                         dret = drm_dp_dpcd_read(
2165                                 &aconnector->dm_dp_aux.aux,
2166                                 dpcd_addr,
2167                                 esi,
2168                                 dpcd_bytes_to_read);
2169
2170                         new_irq_handled = false;
2171                 } else {
2172                         break;
2173                 }
2174         }
2175
2176         if (process_count == max_process_count)
2177                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2178 }
2179
2180 static void handle_hpd_rx_irq(void *param)
2181 {
2182         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2183         struct drm_connector *connector = &aconnector->base;
2184         struct drm_device *dev = connector->dev;
2185         struct dc_link *dc_link = aconnector->dc_link;
2186         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2187         enum dc_connection_type new_connection_type = dc_connection_none;
2188 #ifdef CONFIG_DRM_AMD_DC_HDCP
2189         union hpd_irq_data hpd_irq_data;
2190         struct amdgpu_device *adev = dev->dev_private;
2191
2192         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2193 #endif
2194
2195         /*
2196          * TODO: Temporarily take a mutex so the hpd interrupt does not race on
2197          * the gpio; once an i2c helper is implemented, this mutex should be
2198          * retired.
2199          */
2200         if (dc_link->type != dc_connection_mst_branch)
2201                 mutex_lock(&aconnector->hpd_lock);
2202
2203
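        /*
         * Both branches of the #ifdef below make the same
         * dc_link_handle_hpd_rx_irq() call; they differ only in whether
         * hpd_irq_data is passed along for HDCP.
         */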
2204 #ifdef CONFIG_DRM_AMD_DC_HDCP
2205         if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2206 #else
2207         if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2208 #endif
2209                         !is_mst_root_connector) {
2210                 /* Downstream Port status changed. */
2211                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2212                         DRM_ERROR("KMS: Failed to detect connector\n");
2213
2214                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2215                         emulated_link_detect(dc_link);
2216
2217                         if (aconnector->fake_enable)
2218                                 aconnector->fake_enable = false;
2219
2220                         amdgpu_dm_update_connector_after_detect(aconnector);
2221
2222
2223                         drm_modeset_lock_all(dev);
2224                         dm_restore_drm_connector_state(dev, connector);
2225                         drm_modeset_unlock_all(dev);
2226
2227                         drm_kms_helper_hotplug_event(dev);
2228                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2229
2230                         if (aconnector->fake_enable)
2231                                 aconnector->fake_enable = false;
2232
2233                         amdgpu_dm_update_connector_after_detect(aconnector);
2234
2235
2236                         drm_modeset_lock_all(dev);
2237                         dm_restore_drm_connector_state(dev, connector);
2238                         drm_modeset_unlock_all(dev);
2239
2240                         drm_kms_helper_hotplug_event(dev);
2241                 }
2242         }
2243 #ifdef CONFIG_DRM_AMD_DC_HDCP
2244         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2245                 if (adev->dm.hdcp_workqueue)
2246                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2247         }
2248 #endif
2249         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2250             (dc_link->type == dc_connection_mst_branch))
2251                 dm_handle_hpd_rx_irq(aconnector);
2252
2253         if (dc_link->type != dc_connection_mst_branch) {
2254                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2255                 mutex_unlock(&aconnector->hpd_lock);
2256         }
2257 }
2258
2259 static void register_hpd_handlers(struct amdgpu_device *adev)
2260 {
2261         struct drm_device *dev = adev->ddev;
2262         struct drm_connector *connector;
2263         struct amdgpu_dm_connector *aconnector;
2264         const struct dc_link *dc_link;
2265         struct dc_interrupt_params int_params = {0};
2266
2267         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2268         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2269
2270         list_for_each_entry(connector,
2271                         &dev->mode_config.connector_list, head) {
2272
2273                 aconnector = to_amdgpu_dm_connector(connector);
2274                 dc_link = aconnector->dc_link;
2275
2276                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2277                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2278                         int_params.irq_source = dc_link->irq_source_hpd;
2279
2280                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2281                                         handle_hpd_irq,
2282                                         (void *) aconnector);
2283                 }
2284
2285                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2286
2287                         /* Also register for DP short pulse (hpd_rx). */
2288                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2289                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2290
2291                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2292                                         handle_hpd_rx_irq,
2293                                         (void *) aconnector);
2294                 }
2295         }
2296 }
2297
2298 /* Register IRQ sources and initialize IRQ callbacks */
2299 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2300 {
2301         struct dc *dc = adev->dm.dc;
2302         struct common_irq_params *c_irq_params;
2303         struct dc_interrupt_params int_params = {0};
2304         int r;
2305         int i;
2306         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2307
2308         if (adev->asic_type >= CHIP_VEGA10)
2309                 client_id = SOC15_IH_CLIENTID_DCE;
2310
2311         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2312         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2313
2314         /*
2315          * Actions of amdgpu_irq_add_id():
2316          * 1. Register a set() function with base driver.
2317          *    Base driver will call set() function to enable/disable an
2318          *    interrupt in DC hardware.
2319          * 2. Register amdgpu_dm_irq_handler().
2320          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2321          *    coming from DC hardware.
2322          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2323          *    for acknowledging and handling. */
2324
2325         /* Use VBLANK interrupt */
2326         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2327                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2328                 if (r) {
2329                         DRM_ERROR("Failed to add crtc irq id!\n");
2330                         return r;
2331                 }
2332
2333                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2334                 int_params.irq_source =
2335                         dc_interrupt_to_irq_source(dc, i, 0);
2336
2337                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2338
2339                 c_irq_params->adev = adev;
2340                 c_irq_params->irq_src = int_params.irq_source;
2341
2342                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2343                                 dm_crtc_high_irq, c_irq_params);
2344         }
2345
2346         /* Use VUPDATE interrupt */
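        /*
         * The VI VUPDATE and PFLIP interrupt source ids interleave, hence the
         * stride of 2 in this loop and in the pageflip loop below.
         */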
2347         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2348                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2349                 if (r) {
2350                         DRM_ERROR("Failed to add vupdate irq id!\n");
2351                         return r;
2352                 }
2353
2354                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2355                 int_params.irq_source =
2356                         dc_interrupt_to_irq_source(dc, i, 0);
2357
2358                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2359
2360                 c_irq_params->adev = adev;
2361                 c_irq_params->irq_src = int_params.irq_source;
2362
2363                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2364                                 dm_vupdate_high_irq, c_irq_params);
2365         }
2366
2367         /* Use GRPH_PFLIP interrupt */
2368         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2369                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2370                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2371                 if (r) {
2372                         DRM_ERROR("Failed to add page flip irq id!\n");
2373                         return r;
2374                 }
2375
2376                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2377                 int_params.irq_source =
2378                         dc_interrupt_to_irq_source(dc, i, 0);
2379
2380                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2381
2382                 c_irq_params->adev = adev;
2383                 c_irq_params->irq_src = int_params.irq_source;
2384
2385                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2386                                 dm_pflip_high_irq, c_irq_params);
2387
2388         }
2389
2390         /* HPD */
2391         r = amdgpu_irq_add_id(adev, client_id,
2392                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2393         if (r) {
2394                 DRM_ERROR("Failed to add hpd irq id!\n");
2395                 return r;
2396         }
2397
2398         register_hpd_handlers(adev);
2399
2400         return 0;
2401 }
2402
2403 #if defined(CONFIG_DRM_AMD_DC_DCN)
2404 /* Register IRQ sources and initialize IRQ callbacks */
2405 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2406 {
2407         struct dc *dc = adev->dm.dc;
2408         struct common_irq_params *c_irq_params;
2409         struct dc_interrupt_params int_params = {0};
2410         int r;
2411         int i;
2412
2413         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2414         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2415
2416         /*
2417          * Actions of amdgpu_irq_add_id():
2418          * 1. Register a set() function with base driver.
2419          *    Base driver will call set() function to enable/disable an
2420          *    interrupt in DC hardware.
2421          * 2. Register amdgpu_dm_irq_handler().
2422          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2423          *    coming from DC hardware.
2424          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2425          *    for acknowledging and handling.
2426          */
2427
2428         /* Use VSTARTUP interrupt */
2429         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2430                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2431                         i++) {
2432                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2433
2434                 if (r) {
2435                         DRM_ERROR("Failed to add crtc irq id!\n");
2436                         return r;
2437                 }
2438
2439                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2440                 int_params.irq_source =
2441                         dc_interrupt_to_irq_source(dc, i, 0);
2442
2443                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2444
2445                 c_irq_params->adev = adev;
2446                 c_irq_params->irq_src = int_params.irq_source;
2447
2448                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2449                                 dm_dcn_crtc_high_irq, c_irq_params);
2450         }
2451
2452         /* Use GRPH_PFLIP interrupt */
2453         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2454                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2455                         i++) {
2456                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2457                 if (r) {
2458                         DRM_ERROR("Failed to add page flip irq id!\n");
2459                         return r;
2460                 }
2461
2462                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2463                 int_params.irq_source =
2464                         dc_interrupt_to_irq_source(dc, i, 0);
2465
2466                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2467
2468                 c_irq_params->adev = adev;
2469                 c_irq_params->irq_src = int_params.irq_source;
2470
2471                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2472                                 dm_pflip_high_irq, c_irq_params);
2473
2474         }
2475
2476         /* HPD */
2477         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2478                         &adev->hpd_irq);
2479         if (r) {
2480                 DRM_ERROR("Failed to add hpd irq id!\n");
2481                 return r;
2482         }
2483
2484         register_hpd_handlers(adev);
2485
2486         return 0;
2487 }
2488 #endif
2489
2490 /*
2491  * Acquires the lock for the atomic state object and returns
2492  * the new atomic state.
2493  *
2494  * This should only be called during atomic check.
2495  */
2496 static int dm_atomic_get_state(struct drm_atomic_state *state,
2497                                struct dm_atomic_state **dm_state)
2498 {
2499         struct drm_device *dev = state->dev;
2500         struct amdgpu_device *adev = dev->dev_private;
2501         struct amdgpu_display_manager *dm = &adev->dm;
2502         struct drm_private_state *priv_state;
2503
2504         if (*dm_state)
2505                 return 0;
2506
2507         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2508         if (IS_ERR(priv_state))
2509                 return PTR_ERR(priv_state);
2510
2511         *dm_state = to_dm_atomic_state(priv_state);
2512
2513         return 0;
2514 }
2515
2516 struct dm_atomic_state *
2517 dm_atomic_get_new_state(struct drm_atomic_state *state)
2518 {
2519         struct drm_device *dev = state->dev;
2520         struct amdgpu_device *adev = dev->dev_private;
2521         struct amdgpu_display_manager *dm = &adev->dm;
2522         struct drm_private_obj *obj;
2523         struct drm_private_state *new_obj_state;
2524         int i;
2525
2526         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2527                 if (obj->funcs == dm->atomic_obj.funcs)
2528                         return to_dm_atomic_state(new_obj_state);
2529         }
2530
2531         return NULL;
2532 }
2533
2534 struct dm_atomic_state *
2535 dm_atomic_get_old_state(struct drm_atomic_state *state)
2536 {
2537         struct drm_device *dev = state->dev;
2538         struct amdgpu_device *adev = dev->dev_private;
2539         struct amdgpu_display_manager *dm = &adev->dm;
2540         struct drm_private_obj *obj;
2541         struct drm_private_state *old_obj_state;
2542         int i;
2543
2544         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2545                 if (obj->funcs == dm->atomic_obj.funcs)
2546                         return to_dm_atomic_state(old_obj_state);
2547         }
2548
2549         return NULL;
2550 }
2551
2552 static struct drm_private_state *
2553 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2554 {
2555         struct dm_atomic_state *old_state, *new_state;
2556
2557         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2558         if (!new_state)
2559                 return NULL;
2560
2561         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2562
2563         old_state = to_dm_atomic_state(obj->state);
2564
2565         if (old_state && old_state->context)
2566                 new_state->context = dc_copy_state(old_state->context);
2567
2568         if (!new_state->context) {
2569                 kfree(new_state);
2570                 return NULL;
2571         }
2572
2573         return &new_state->base;
2574 }
2575
2576 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2577                                     struct drm_private_state *state)
2578 {
2579         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2580
2581         if (dm_state && dm_state->context)
2582                 dc_release_state(dm_state->context);
2583
2584         kfree(dm_state);
2585 }
2586
2587 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2588         .atomic_duplicate_state = dm_atomic_duplicate_state,
2589         .atomic_destroy_state = dm_atomic_destroy_state,
2590 };
2591
2592 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2593 {
2594         struct dm_atomic_state *state;
2595         int r;
2596
2597         adev->mode_info.mode_config_initialized = true;
2598
2599         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2600         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2601
2602         adev->ddev->mode_config.max_width = 16384;
2603         adev->ddev->mode_config.max_height = 16384;
2604
2605         adev->ddev->mode_config.preferred_depth = 24;
2606         adev->ddev->mode_config.prefer_shadow = 1;
2607         /* indicates support for immediate flip */
2608         adev->ddev->mode_config.async_page_flip = true;
2609
2610         adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2611
2612         state = kzalloc(sizeof(*state), GFP_KERNEL);
2613         if (!state)
2614                 return -ENOMEM;
2615
2616         state->context = dc_create_state(adev->dm.dc);
2617         if (!state->context) {
2618                 kfree(state);
2619                 return -ENOMEM;
2620         }
2621
2622         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2623
2624         drm_atomic_private_obj_init(adev->ddev,
2625                                     &adev->dm.atomic_obj,
2626                                     &state->base,
2627                                     &dm_atomic_state_funcs);
2628
2629         r = amdgpu_display_modeset_create_props(adev);
2630         if (r)
2631                 return r;
2632
2633         r = amdgpu_dm_audio_init(adev);
2634         if (r)
2635                 return r;
2636
2637         return 0;
2638 }
2639
2640 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2641 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2642 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2643
2644 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2645         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2646
2647 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2648 {
2649 #if defined(CONFIG_ACPI)
2650         struct amdgpu_dm_backlight_caps caps;
2651
2652         if (dm->backlight_caps.caps_valid)
2653                 return;
2654
2655         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2656         if (caps.caps_valid) {
2657                 dm->backlight_caps.caps_valid = true;
2658                 if (caps.aux_support)
2659                         return;
2660                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2661                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2662         } else {
2663                 dm->backlight_caps.min_input_signal =
2664                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2665                 dm->backlight_caps.max_input_signal =
2666                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2667         }
2668 #else
2669         if (dm->backlight_caps.aux_support)
2670                 return;
2671
2672         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2673         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2674 #endif
2675 }
2676
2677 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2678 {
2679         bool rc;
2680
2681         if (!link)
2682                 return 1;
2683
2684         rc = dc_link_set_backlight_level_nits(link, true, brightness,
2685                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2686
2687         return rc ? 0 : 1;
2688 }
2689
2690 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2691                               const uint32_t user_brightness)
2692 {
2693         u32 min, max, conversion_pace;
2694         u32 brightness = user_brightness;
2695
2696         if (!caps)
2697                 goto out;
2698
2699         if (!caps->aux_support) {
2700                 max = caps->max_input_signal;
2701                 min = caps->min_input_signal;
2702                 /*
2703                  * The brightness input is in the range 0-255.
2704                  * It needs to be rescaled to be between the
2705                  * requested min and max input signal.
2706                  * It also needs to be scaled up by 0x101 to
2707                  * match the DC interface, which has a range of
2708                  * 0 to 0xffff.
2709                  */
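                /*
                 * e.g. (hypothetical values) with min = 12 and max = 255:
                 * user_brightness = 255 maps to 255*0x101*243/255 + 12*0x101
                 * = 62451 + 3084 = 0xFFFF, while user_brightness = 0 maps to
                 * min * 0x101 = 0xC0C.
                 */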
2710                 conversion_pace = 0x101;
2711                 brightness =
2712                         user_brightness
2713                         * conversion_pace
2714                         * (max - min)
2715                         / AMDGPU_MAX_BL_LEVEL
2716                         + min * conversion_pace;
2717         } else {
2718                 /* TODO
2719                  * We are doing a linear interpolation here, which is OK but
2720                  * does not provide the optimal result. We probably want
2721                  * something close to the Perceptual Quantizer (PQ) curve.
2722                  */
2723                 max = caps->aux_max_input_signal;
2724                 min = caps->aux_min_input_signal;
2725
2726                 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2727                                + user_brightness * max;
2728                 // Multiply the value by 1000 since we use millinits
2729                 brightness *= 1000;
2730                 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2731         }
2732
2733 out:
2734         return brightness;
2735 }
2736
2737 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2738 {
2739         struct amdgpu_display_manager *dm = bl_get_data(bd);
2740         struct amdgpu_dm_backlight_caps caps;
2741         struct dc_link *link = NULL;
2742         u32 brightness;
2743         bool rc;
2744
2745         amdgpu_dm_update_backlight_caps(dm);
2746         caps = dm->backlight_caps;
2747
2748         link = (struct dc_link *)dm->backlight_link;
2749
2750         brightness = convert_brightness(&caps, bd->props.brightness);
2751         // Change brightness based on AUX property
2752         if (caps.aux_support)
2753                 return set_backlight_via_aux(link, brightness);
2754
2755         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2756
2757         return rc ? 0 : 1;
2758 }
2759
2760 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2761 {
2762         struct amdgpu_display_manager *dm = bl_get_data(bd);
2763         int ret = dc_link_get_backlight_level(dm->backlight_link);
2764
2765         if (ret == DC_ERROR_UNEXPECTED)
2766                 return bd->props.brightness;
2767         return ret;
2768 }
2769
2770 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2771         .options = BL_CORE_SUSPENDRESUME,
2772         .get_brightness = amdgpu_dm_backlight_get_brightness,
2773         .update_status  = amdgpu_dm_backlight_update_status,
2774 };
2775
2776 static void
2777 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2778 {
2779         char bl_name[16];
2780         struct backlight_properties props = { 0 };
2781
2782         amdgpu_dm_update_backlight_caps(dm);
2783
2784         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2785         props.brightness = AMDGPU_MAX_BL_LEVEL;
2786         props.type = BACKLIGHT_RAW;
2787
2788         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2789                         dm->adev->ddev->primary->index);
2790
2791         dm->backlight_dev = backlight_device_register(bl_name,
2792                         dm->adev->ddev->dev,
2793                         dm,
2794                         &amdgpu_dm_backlight_ops,
2795                         &props);
2796
2797         if (IS_ERR(dm->backlight_dev))
2798                 DRM_ERROR("DM: Backlight registration failed!\n");
2799         else
2800                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2801 }
2802
2803 #endif
2804
2805 static int initialize_plane(struct amdgpu_display_manager *dm,
2806                             struct amdgpu_mode_info *mode_info, int plane_id,
2807                             enum drm_plane_type plane_type,
2808                             const struct dc_plane_cap *plane_cap)
2809 {
2810         struct drm_plane *plane;
2811         unsigned long possible_crtcs;
2812         int ret = 0;
2813
2814         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2815         if (!plane) {
2816                 DRM_ERROR("KMS: Failed to allocate plane\n");
2817                 return -ENOMEM;
2818         }
2819         plane->type = plane_type;
2820
2821         /*
2822          * HACK: IGT tests expect that the primary plane for a CRTC
2823          * can only have one possible CRTC. Only expose support for
2824          * any CRTC for planes that will not be used as a primary plane
2825          * for a CRTC - like overlay or underlay planes.
2826          */
2827         possible_crtcs = 1 << plane_id;
2828         if (plane_id >= dm->dc->caps.max_streams)
2829                 possible_crtcs = 0xff;
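        /*
         * e.g. with max_streams = 4: planes 0-3 each bind to exactly one CRTC
         * (masks 0x1, 0x2, 0x4, 0x8), while any later overlay/underlay plane
         * advertises all CRTCs (0xff).
         */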
2830
2831         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2832
2833         if (ret) {
2834                 DRM_ERROR("KMS: Failed to initialize plane\n");
2835                 kfree(plane);
2836                 return ret;
2837         }
2838
2839         if (mode_info)
2840                 mode_info->planes[plane_id] = plane;
2841
2842         return ret;
2843 }
2844
2845
2846 static void register_backlight_device(struct amdgpu_display_manager *dm,
2847                                       struct dc_link *link)
2848 {
2849 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2850         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2851
2852         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2853             link->type != dc_connection_none) {
2854                 /*
2855                  * Even if registration fails, we should continue with
2856                  * DM initialization because not having backlight control
2857                  * is better than a black screen.
2858                  */
2859                 amdgpu_dm_register_backlight_device(dm);
2860
2861                 if (dm->backlight_dev)
2862                         dm->backlight_link = link;
2863         }
2864 #endif
2865 }
2866
2867
2868 /*
2869  * In this architecture, the association
2870  * connector -> encoder -> crtc
2871  * is not really required. The crtc and connector will hold the
2872  * display_index as an abstraction to use with the DAL component.
2873  *
2874  * Returns 0 on success
2875  */
2876 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2877 {
2878         struct amdgpu_display_manager *dm = &adev->dm;
2879         int32_t i;
2880         struct amdgpu_dm_connector *aconnector = NULL;
2881         struct amdgpu_encoder *aencoder = NULL;
2882         struct amdgpu_mode_info *mode_info = &adev->mode_info;
2883         uint32_t link_cnt;
2884         int32_t primary_planes;
2885         enum dc_connection_type new_connection_type = dc_connection_none;
2886         const struct dc_plane_cap *plane;
2887
2888         link_cnt = dm->dc->caps.max_links;
2889         if (amdgpu_dm_mode_config_init(dm->adev)) {
2890                 DRM_ERROR("DM: Failed to initialize mode config\n");
2891                 return -EINVAL;
2892         }
2893
2894         /* There is one primary plane per CRTC */
2895         primary_planes = dm->dc->caps.max_streams;
2896         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2897
2898         /*
2899          * Initialize primary planes, implicit planes for legacy IOCTLs.
2900          * Order is reversed to match iteration order in atomic check.
2901          */
2902         for (i = (primary_planes - 1); i >= 0; i--) {
2903                 plane = &dm->dc->caps.planes[i];
2904
2905                 if (initialize_plane(dm, mode_info, i,
2906                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
2907                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
2908                         goto fail;
2909                 }
2910         }
2911
2912         /*
2913          * Initialize overlay planes, index starting after primary planes.
2914          * These planes have a higher DRM index than the primary planes since
2915          * they should be considered as having a higher z-order.
2916          * Order is reversed to match iteration order in atomic check.
2917          *
2918          * Only support DCN for now, and only expose one so we don't encourage
2919          * userspace to use up all the pipes.
2920          */
2921         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2922                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2923
2924                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2925                         continue;
2926
2927                 if (!plane->blends_with_above || !plane->blends_with_below)
2928                         continue;
2929
2930                 if (!plane->pixel_format_support.argb8888)
2931                         continue;
2932
2933                 if (initialize_plane(dm, NULL, primary_planes + i,
2934                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
2935                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2936                         goto fail;
2937                 }
2938
2939                 /* Only create one overlay plane. */
2940                 break;
2941         }
2942
2943         for (i = 0; i < dm->dc->caps.max_streams; i++)
2944                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2945                         DRM_ERROR("KMS: Failed to initialize crtc\n");
2946                         goto fail;
2947                 }
2948
2949         dm->display_indexes_num = dm->dc->caps.max_streams;
2950
2951         /* Loop over all connectors on the board. */
2952         for (i = 0; i < link_cnt; i++) {
2953                 struct dc_link *link = NULL;
2954
2955                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2956                         DRM_ERROR(
2957                                 "KMS: Cannot support more than %d display indexes\n",
2958                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
2959                         continue;
2960                 }
2961
2962                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2963                 if (!aconnector)
2964                         goto fail;
2965
2966                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2967                 if (!aencoder)
2968                         goto fail;
2969
2970                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2971                         DRM_ERROR("KMS: Failed to initialize encoder\n");
2972                         goto fail;
2973                 }
2974
2975                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2976                         DRM_ERROR("KMS: Failed to initialize connector\n");
2977                         goto fail;
2978                 }
2979
2980                 link = dc_get_link_at_index(dm->dc, i);
2981
2982                 if (!dc_link_detect_sink(link, &new_connection_type))
2983                         DRM_ERROR("KMS: Failed to detect connector\n");
2984
2985                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2986                         emulated_link_detect(link);
2987                         amdgpu_dm_update_connector_after_detect(aconnector);
2988
2989                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2990                         amdgpu_dm_update_connector_after_detect(aconnector);
2991                         register_backlight_device(dm, link);
2992                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2993                                 amdgpu_dm_set_psr_caps(link);
2994                 }
2995
2997         }
2998
2999         /* Software is initialized. Now we can register interrupt handlers. */
3000         switch (adev->asic_type) {
3001         case CHIP_BONAIRE:
3002         case CHIP_HAWAII:
3003         case CHIP_KAVERI:
3004         case CHIP_KABINI:
3005         case CHIP_MULLINS:
3006         case CHIP_TONGA:
3007         case CHIP_FIJI:
3008         case CHIP_CARRIZO:
3009         case CHIP_STONEY:
3010         case CHIP_POLARIS11:
3011         case CHIP_POLARIS10:
3012         case CHIP_POLARIS12:
3013         case CHIP_VEGAM:
3014         case CHIP_VEGA10:
3015         case CHIP_VEGA12:
3016         case CHIP_VEGA20:
3017                 if (dce110_register_irq_handlers(dm->adev)) {
3018                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3019                         goto fail;
3020                 }
3021                 break;
3022 #if defined(CONFIG_DRM_AMD_DC_DCN)
3023         case CHIP_RAVEN:
3024         case CHIP_NAVI12:
3025         case CHIP_NAVI10:
3026         case CHIP_NAVI14:
3027         case CHIP_RENOIR:
3028                 if (dcn10_register_irq_handlers(dm->adev)) {
3029                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3030                         goto fail;
3031                 }
3032                 break;
3033 #endif
3034         default:
3035                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3036                 goto fail;
3037         }
3038
3039         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3040                 dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
3041
3042         /* No userspace support. */
3043         dm->dc->debug.disable_tri_buf = true;
3044
3045         return 0;
3046 fail:
3047         kfree(aencoder);
3048         kfree(aconnector);
3049
3050         return -EINVAL;
3051 }
3052
3053 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3054 {
3055         drm_mode_config_cleanup(dm->ddev);
3056         drm_atomic_private_obj_fini(&dm->atomic_obj);
3058 }
3059
3060 /******************************************************************************
3061  * amdgpu_display_funcs functions
3062  *****************************************************************************/
3063
3064 /*
3065  * dm_bandwidth_update - program display watermarks
3066  *
3067  * @adev: amdgpu_device pointer
3068  *
3069  * Calculate and program the display watermarks and line buffer allocation.
3070  */
3071 static void dm_bandwidth_update(struct amdgpu_device *adev)
3072 {
3073         /* TODO: implement later */
3074 }
3075
3076 static const struct amdgpu_display_funcs dm_display_funcs = {
3077         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3078         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3079         .backlight_set_level = NULL, /* never called for DC */
3080         .backlight_get_level = NULL, /* never called for DC */
3081         .hpd_sense = NULL,/* called unconditionally */
3082         .hpd_set_polarity = NULL, /* called unconditionally */
3083         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3084         .page_flip_get_scanoutpos =
3085                 dm_crtc_get_scanoutpos,/* called unconditionally */
3086         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3087         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3088 };
3089
3090 #if defined(CONFIG_DEBUG_KERNEL_DC)
3091
3092 static ssize_t s3_debug_store(struct device *device,
3093                               struct device_attribute *attr,
3094                               const char *buf,
3095                               size_t count)
3096 {
3097         int ret;
3098         int s3_state;
3099         struct drm_device *drm_dev = dev_get_drvdata(device);
3100         struct amdgpu_device *adev = drm_dev->dev_private;
3101
3102         ret = kstrtoint(buf, 0, &s3_state);
3103
3104         if (ret == 0) {
3105                 if (s3_state) {
3106                         dm_resume(adev);
3107                         drm_kms_helper_hotplug_event(adev->ddev);
3108                 } else
3109                         dm_suspend(adev);
3110         }
3111
3112         return ret == 0 ? count : 0;
3113 }
3114
3115 DEVICE_ATTR_WO(s3_debug);
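
/*
 * Debug hook (CONFIG_DEBUG_KERNEL_DC only): writing a non-zero value to
 * the s3_debug attribute on the GPU's PCI device node fakes a resume
 * cycle, zero fakes a suspend, e.g.:
 *
 *   echo 1 > /sys/bus/pci/devices/<bdf>/s3_debug
 */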
3116
3117 #endif
3118
3119 static int dm_early_init(void *handle)
3120 {
3121         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3122
3123         switch (adev->asic_type) {
3124         case CHIP_BONAIRE:
3125         case CHIP_HAWAII:
3126                 adev->mode_info.num_crtc = 6;
3127                 adev->mode_info.num_hpd = 6;
3128                 adev->mode_info.num_dig = 6;
3129                 break;
3130         case CHIP_KAVERI:
3131                 adev->mode_info.num_crtc = 4;
3132                 adev->mode_info.num_hpd = 6;
3133                 adev->mode_info.num_dig = 7;
3134                 break;
3135         case CHIP_KABINI:
3136         case CHIP_MULLINS:
3137                 adev->mode_info.num_crtc = 2;
3138                 adev->mode_info.num_hpd = 6;
3139                 adev->mode_info.num_dig = 6;
3140                 break;
3141         case CHIP_FIJI:
3142         case CHIP_TONGA:
3143                 adev->mode_info.num_crtc = 6;
3144                 adev->mode_info.num_hpd = 6;
3145                 adev->mode_info.num_dig = 7;
3146                 break;
3147         case CHIP_CARRIZO:
3148                 adev->mode_info.num_crtc = 3;
3149                 adev->mode_info.num_hpd = 6;
3150                 adev->mode_info.num_dig = 9;
3151                 break;
3152         case CHIP_STONEY:
3153                 adev->mode_info.num_crtc = 2;
3154                 adev->mode_info.num_hpd = 6;
3155                 adev->mode_info.num_dig = 9;
3156                 break;
3157         case CHIP_POLARIS11:
3158         case CHIP_POLARIS12:
3159                 adev->mode_info.num_crtc = 5;
3160                 adev->mode_info.num_hpd = 5;
3161                 adev->mode_info.num_dig = 5;
3162                 break;
3163         case CHIP_POLARIS10:
3164         case CHIP_VEGAM:
3165                 adev->mode_info.num_crtc = 6;
3166                 adev->mode_info.num_hpd = 6;
3167                 adev->mode_info.num_dig = 6;
3168                 break;
3169         case CHIP_VEGA10:
3170         case CHIP_VEGA12:
3171         case CHIP_VEGA20:
3172                 adev->mode_info.num_crtc = 6;
3173                 adev->mode_info.num_hpd = 6;
3174                 adev->mode_info.num_dig = 6;
3175                 break;
3176 #if defined(CONFIG_DRM_AMD_DC_DCN)
3177         case CHIP_RAVEN:
3178                 adev->mode_info.num_crtc = 4;
3179                 adev->mode_info.num_hpd = 4;
3180                 adev->mode_info.num_dig = 4;
3181                 break;
3182 #endif
3183         case CHIP_NAVI10:
3184         case CHIP_NAVI12:
3185                 adev->mode_info.num_crtc = 6;
3186                 adev->mode_info.num_hpd = 6;
3187                 adev->mode_info.num_dig = 6;
3188                 break;
3189         case CHIP_NAVI14:
3190                 adev->mode_info.num_crtc = 5;
3191                 adev->mode_info.num_hpd = 5;
3192                 adev->mode_info.num_dig = 5;
3193                 break;
3194         case CHIP_RENOIR:
3195                 adev->mode_info.num_crtc = 4;
3196                 adev->mode_info.num_hpd = 4;
3197                 adev->mode_info.num_dig = 4;
3198                 break;
3199         default:
3200                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3201                 return -EINVAL;
3202         }
3203
3204         amdgpu_dm_set_irq_funcs(adev);
3205
3206         if (adev->mode_info.funcs == NULL)
3207                 adev->mode_info.funcs = &dm_display_funcs;
3208
3209         /*
3210          * Note: Do NOT change adev->audio_endpt_rreg and
3211          * adev->audio_endpt_wreg because they are initialised in
3212          * amdgpu_device_init()
3213          */
3214 #if defined(CONFIG_DEBUG_KERNEL_DC)
3215         device_create_file(
3216                 adev->ddev->dev,
3217                 &dev_attr_s3_debug);
3218 #endif
3219
3220         return 0;
3221 }
3222
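/*
 * A (re)modeset is required only when the atomic core flagged one on the
 * CRTC and the CRTC will end up enabled and active; the disable path is
 * covered by modereset_required() below.
 */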
3223 static bool modeset_required(struct drm_crtc_state *crtc_state,
3224                              struct dc_stream_state *new_stream,
3225                              struct dc_stream_state *old_stream)
3226 {
3227         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3228                 return false;
3229
3230         if (!crtc_state->enable)
3231                 return false;
3232
3233         return crtc_state->active;
3234 }
3235
3236 static bool modereset_required(struct drm_crtc_state *crtc_state)
3237 {
3238         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3239                 return false;
3240
3241         return !crtc_state->enable || !crtc_state->active;
3242 }
3243
3244 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3245 {
3246         drm_encoder_cleanup(encoder);
3247         kfree(encoder);
3248 }
3249
3250 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3251         .destroy = amdgpu_dm_encoder_destroy,
3252 };
3253
3254
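/*
 * Convert DRM plane src/dst rectangles into DC scaling info. DRM source
 * coordinates are 16.16 fixed point, e.g. src_w == (1920 << 16) for a
 * 1920 pixel wide source. The 250/16000 per-mille bounds below restrict
 * scaling to roughly 0.25x..16x per axis.
 */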
3255 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3256                                 struct dc_scaling_info *scaling_info)
3257 {
3258         int scale_w, scale_h;
3259
3260         memset(scaling_info, 0, sizeof(*scaling_info));
3261
3262         /* Source is 16.16 fixed point; ignore the fractional part for now. */
3263         scaling_info->src_rect.x = state->src_x >> 16;
3264         scaling_info->src_rect.y = state->src_y >> 16;
3265
3266         scaling_info->src_rect.width = state->src_w >> 16;
3267         if (scaling_info->src_rect.width == 0)
3268                 return -EINVAL;
3269
3270         scaling_info->src_rect.height = state->src_h >> 16;
3271         if (scaling_info->src_rect.height == 0)
3272                 return -EINVAL;
3273
3274         scaling_info->dst_rect.x = state->crtc_x;
3275         scaling_info->dst_rect.y = state->crtc_y;
3276
3277         if (state->crtc_w == 0)
3278                 return -EINVAL;
3279
3280         scaling_info->dst_rect.width = state->crtc_w;
3281
3282         if (state->crtc_h == 0)
3283                 return -EINVAL;
3284
3285         scaling_info->dst_rect.height = state->crtc_h;
3286
3287         /* DRM doesn't specify clipping on destination output. */
3288         scaling_info->clip_rect = scaling_info->dst_rect;
3289
3290         /* TODO: Validate scaling per-format with DC plane caps */
3291         scale_w = scaling_info->dst_rect.width * 1000 /
3292                   scaling_info->src_rect.width;
3293
3294         if (scale_w < 250 || scale_w > 16000)
3295                 return -EINVAL;
3296
3297         scale_h = scaling_info->dst_rect.height * 1000 /
3298                   scaling_info->src_rect.height;
3299
3300         if (scale_h < 250 || scale_h > 16000)
3301                 return -EINVAL;
3302
3303         /*
3304          * "scaling_quality" can be ignored for now; a quality of 0 lets DC
3305          * assume reasonable defaults based on the format.
3306          */
3307
3308         return 0;
3309 }
3310
3311 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3312                        uint64_t *tiling_flags)
3313 {
3314         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3315         int r = amdgpu_bo_reserve(rbo, false);
3316
3317         if (unlikely(r)) {
3318                 /* Don't show error message when returning -ERESTARTSYS */
3319                 if (r != -ERESTARTSYS)
3320                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3321                 return r;
3322         }
3323
3324         if (tiling_flags)
3325                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3326
3327         amdgpu_bo_unreserve(rbo);
3328
3329         return r;
3330 }
3331
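/*
 * The DCC_OFFSET_256B tiling field is stored in units of 256 bytes, so an
 * offset field of e.g. 0x400 places the DCC metadata at
 * address + 0x400 * 256. A zero offset means no DCC metadata.
 */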
3332 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3333 {
3334         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3335
3336         return offset ? (address + offset * 256) : 0;
3337 }
3338
3339 static int
3340 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3341                           const struct amdgpu_framebuffer *afb,
3342                           const enum surface_pixel_format format,
3343                           const enum dc_rotation_angle rotation,
3344                           const struct plane_size *plane_size,
3345                           const union dc_tiling_info *tiling_info,
3346                           const uint64_t info,
3347                           struct dc_plane_dcc_param *dcc,
3348                           struct dc_plane_address *address,
3349                           bool force_disable_dcc)
3350 {
3351         struct dc *dc = adev->dm.dc;
3352         struct dc_dcc_surface_param input;
3353         struct dc_surface_dcc_cap output;
3354         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3355         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3356         uint64_t dcc_address;
3357
3358         memset(&input, 0, sizeof(input));
3359         memset(&output, 0, sizeof(output));
3360
3361         if (force_disable_dcc)
3362                 return 0;
3363
3364         if (!offset)
3365                 return 0;
3366
3367         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3368                 return 0;
3369
3370         if (!dc->cap_funcs.get_dcc_compression_cap)
3371                 return -EINVAL;
3372
3373         input.format = format;
3374         input.surface_size.width = plane_size->surface_size.width;
3375         input.surface_size.height = plane_size->surface_size.height;
3376         input.swizzle_mode = tiling_info->gfx9.swizzle;
3377
3378         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3379                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3380         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3381                 input.scan = SCAN_DIRECTION_VERTICAL;
3382
3383         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3384                 return -EINVAL;
3385
3386         if (!output.capable)
3387                 return -EINVAL;
3388
3389         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3390                 return -EINVAL;
3391
3392         dcc->enable = 1;
3393         dcc->meta_pitch =
3394                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3395         dcc->independent_64b_blks = i64b;
3396
3397         dcc_address = get_dcc_address(afb->address, info);
3398         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3399         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3400
3401         return 0;
3402 }
3403
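/*
 * Fill DC tiling, size, DCC and address state from an amdgpu framebuffer.
 * Pre-Vega ASICs describe tiling via the legacy ARRAY_MODE/BANK_* fields,
 * while Vega and newer (the gfx9 fields, which Navi/Renoir also reuse)
 * only need the swizzle mode plus the pipe/bank topology read from
 * gb_addr_config.
 */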
3404 static int
3405 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3406                              const struct amdgpu_framebuffer *afb,
3407                              const enum surface_pixel_format format,
3408                              const enum dc_rotation_angle rotation,
3409                              const uint64_t tiling_flags,
3410                              union dc_tiling_info *tiling_info,
3411                              struct plane_size *plane_size,
3412                              struct dc_plane_dcc_param *dcc,
3413                              struct dc_plane_address *address,
3414                              bool force_disable_dcc)
3415 {
3416         const struct drm_framebuffer *fb = &afb->base;
3417         int ret;
3418
3419         memset(tiling_info, 0, sizeof(*tiling_info));
3420         memset(plane_size, 0, sizeof(*plane_size));
3421         memset(dcc, 0, sizeof(*dcc));
3422         memset(address, 0, sizeof(*address));
3423
3424         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3425                 plane_size->surface_size.x = 0;
3426                 plane_size->surface_size.y = 0;
3427                 plane_size->surface_size.width = fb->width;
3428                 plane_size->surface_size.height = fb->height;
3429                 plane_size->surface_pitch =
3430                         fb->pitches[0] / fb->format->cpp[0];
3431
3432                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3433                 address->grph.addr.low_part = lower_32_bits(afb->address);
3434                 address->grph.addr.high_part = upper_32_bits(afb->address);
3435         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3436                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3437
3438                 plane_size->surface_size.x = 0;
3439                 plane_size->surface_size.y = 0;
3440                 plane_size->surface_size.width = fb->width;
3441                 plane_size->surface_size.height = fb->height;
3442                 plane_size->surface_pitch =
3443                         fb->pitches[0] / fb->format->cpp[0];
3444
3445                 plane_size->chroma_size.x = 0;
3446                 plane_size->chroma_size.y = 0;
3447                 /* TODO: set these based on surface format */
3448                 plane_size->chroma_size.width = fb->width / 2;
3449                 plane_size->chroma_size.height = fb->height / 2;
3450
3451                 plane_size->chroma_pitch =
3452                         fb->pitches[1] / fb->format->cpp[1];
3453
3454                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3455                 address->video_progressive.luma_addr.low_part =
3456                         lower_32_bits(afb->address);
3457                 address->video_progressive.luma_addr.high_part =
3458                         upper_32_bits(afb->address);
3459                 address->video_progressive.chroma_addr.low_part =
3460                         lower_32_bits(chroma_addr);
3461                 address->video_progressive.chroma_addr.high_part =
3462                         upper_32_bits(chroma_addr);
3463         }
3464
3465         /* Fill GFX8 params */
3466         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3467                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3468
3469                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3470                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3471                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3472                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3473                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3474
3475                 /* XXX fix me for VI */
3476                 tiling_info->gfx8.num_banks = num_banks;
3477                 tiling_info->gfx8.array_mode =
3478                                 DC_ARRAY_2D_TILED_THIN1;
3479                 tiling_info->gfx8.tile_split = tile_split;
3480                 tiling_info->gfx8.bank_width = bankw;
3481                 tiling_info->gfx8.bank_height = bankh;
3482                 tiling_info->gfx8.tile_aspect = mtaspect;
3483                 tiling_info->gfx8.tile_mode =
3484                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3485         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3486                         == DC_ARRAY_1D_TILED_THIN1) {
3487                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3488         }
3489
3490         tiling_info->gfx8.pipe_config =
3491                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3492
3493         if (adev->asic_type == CHIP_VEGA10 ||
3494             adev->asic_type == CHIP_VEGA12 ||
3495             adev->asic_type == CHIP_VEGA20 ||
3496             adev->asic_type == CHIP_NAVI10 ||
3497             adev->asic_type == CHIP_NAVI14 ||
3498             adev->asic_type == CHIP_NAVI12 ||
3499             adev->asic_type == CHIP_RENOIR ||
3500             adev->asic_type == CHIP_RAVEN) {
3501                 /* Fill GFX9 params */
3502                 tiling_info->gfx9.num_pipes =
3503                         adev->gfx.config.gb_addr_config_fields.num_pipes;
3504                 tiling_info->gfx9.num_banks =
3505                         adev->gfx.config.gb_addr_config_fields.num_banks;
3506                 tiling_info->gfx9.pipe_interleave =
3507                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3508                 tiling_info->gfx9.num_shader_engines =
3509                         adev->gfx.config.gb_addr_config_fields.num_se;
3510                 tiling_info->gfx9.max_compressed_frags =
3511                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3512                 tiling_info->gfx9.num_rb_per_se =
3513                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3514                 tiling_info->gfx9.swizzle =
3515                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3516                 tiling_info->gfx9.shaderEnable = 1;
3517
3518                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3519                                                 plane_size, tiling_info,
3520                                                 tiling_flags, dcc, address,
3521                                                 force_disable_dcc);
3522                 if (ret)
3523                         return ret;
3524         }
3525
3526         return 0;
3527 }
3528
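/*
 * Derive blending state for an overlay plane. DRM's plane "alpha"
 * property is 16 bit while DC takes an 8 bit global alpha, so e.g. 50%
 * opacity (0x7fff) becomes a global_alpha_value of 0x7f.
 */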
3529 static void
3530 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3531                                bool *per_pixel_alpha, bool *global_alpha,
3532                                int *global_alpha_value)
3533 {
3534         *per_pixel_alpha = false;
3535         *global_alpha = false;
3536         *global_alpha_value = 0xff;
3537
3538         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3539                 return;
3540
3541         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3542                 static const uint32_t alpha_formats[] = {
3543                         DRM_FORMAT_ARGB8888,
3544                         DRM_FORMAT_RGBA8888,
3545                         DRM_FORMAT_ABGR8888,
3546                 };
3547                 uint32_t format = plane_state->fb->format->format;
3548                 unsigned int i;
3549
3550                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3551                         if (format == alpha_formats[i]) {
3552                                 *per_pixel_alpha = true;
3553                                 break;
3554                         }
3555                 }
3556         }
3557
3558         if (plane_state->alpha < 0xffff) {
3559                 *global_alpha = true;
3560                 *global_alpha_value = plane_state->alpha >> 8;
3561         }
3562 }
3563
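/*
 * Map the DRM COLOR_ENCODING/COLOR_RANGE plane properties onto a DC color
 * space. Only YCbCr surface formats are affected, and BT.2020 is
 * currently accepted in full range only.
 */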
3564 static int
3565 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3566                             const enum surface_pixel_format format,
3567                             enum dc_color_space *color_space)
3568 {
3569         bool full_range;
3570
3571         *color_space = COLOR_SPACE_SRGB;
3572
3573         /* DRM color properties only affect non-RGB formats. */
3574         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3575                 return 0;
3576
3577         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3578
3579         switch (plane_state->color_encoding) {
3580         case DRM_COLOR_YCBCR_BT601:
3581                 if (full_range)
3582                         *color_space = COLOR_SPACE_YCBCR601;
3583                 else
3584                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3585                 break;
3586
3587         case DRM_COLOR_YCBCR_BT709:
3588                 if (full_range)
3589                         *color_space = COLOR_SPACE_YCBCR709;
3590                 else
3591                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3592                 break;
3593
3594         case DRM_COLOR_YCBCR_BT2020:
3595                 if (full_range)
3596                         *color_space = COLOR_SPACE_2020_YCBCR;
3597                 else
3598                         return -EINVAL;
3599                 break;
3600
3601         default:
3602                 return -EINVAL;
3603         }
3604
3605         return 0;
3606 }
3607
3608 static int
3609 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3610                             const struct drm_plane_state *plane_state,
3611                             const uint64_t tiling_flags,
3612                             struct dc_plane_info *plane_info,
3613                             struct dc_plane_address *address,
3614                             bool force_disable_dcc)
3615 {
3616         const struct drm_framebuffer *fb = plane_state->fb;
3617         const struct amdgpu_framebuffer *afb =
3618                 to_amdgpu_framebuffer(plane_state->fb);
3619         struct drm_format_name_buf format_name;
3620         int ret;
3621
3622         memset(plane_info, 0, sizeof(*plane_info));
3623
3624         switch (fb->format->format) {
3625         case DRM_FORMAT_C8:
3626                 plane_info->format =
3627                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3628                 break;
3629         case DRM_FORMAT_RGB565:
3630                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3631                 break;
3632         case DRM_FORMAT_XRGB8888:
3633         case DRM_FORMAT_ARGB8888:
3634                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3635                 break;
3636         case DRM_FORMAT_XRGB2101010:
3637         case DRM_FORMAT_ARGB2101010:
3638                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3639                 break;
3640         case DRM_FORMAT_XBGR2101010:
3641         case DRM_FORMAT_ABGR2101010:
3642                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3643                 break;
3644         case DRM_FORMAT_XBGR8888:
3645         case DRM_FORMAT_ABGR8888:
3646                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3647                 break;
3648         case DRM_FORMAT_NV21:
3649                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3650                 break;
3651         case DRM_FORMAT_NV12:
3652                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3653                 break;
3654         case DRM_FORMAT_P010:
3655                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3656                 break;
3657         default:
3658                 DRM_ERROR(
3659                         "Unsupported screen format %s\n",
3660                         drm_get_format_name(fb->format->format, &format_name));
3661                 return -EINVAL;
3662         }
3663
3664         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3665         case DRM_MODE_ROTATE_0:
3666                 plane_info->rotation = ROTATION_ANGLE_0;
3667                 break;
3668         case DRM_MODE_ROTATE_90:
3669                 plane_info->rotation = ROTATION_ANGLE_90;
3670                 break;
3671         case DRM_MODE_ROTATE_180:
3672                 plane_info->rotation = ROTATION_ANGLE_180;
3673                 break;
3674         case DRM_MODE_ROTATE_270:
3675                 plane_info->rotation = ROTATION_ANGLE_270;
3676                 break;
3677         default:
3678                 plane_info->rotation = ROTATION_ANGLE_0;
3679                 break;
3680         }
3681
3682         plane_info->visible = true;
3683         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3684
3685         plane_info->layer_index = 0;
3686
3687         ret = fill_plane_color_attributes(plane_state, plane_info->format,
3688                                           &plane_info->color_space);
3689         if (ret)
3690                 return ret;
3691
3692         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3693                                            plane_info->rotation, tiling_flags,
3694                                            &plane_info->tiling_info,
3695                                            &plane_info->plane_size,
3696                                            &plane_info->dcc, address,
3697                                            force_disable_dcc);
3698         if (ret)
3699                 return ret;
3700
3701         fill_blending_from_plane_state(
3702                 plane_state, &plane_info->per_pixel_alpha,
3703                 &plane_info->global_alpha, &plane_info->global_alpha_value);
3704
3705         return 0;
3706 }
3707
3708 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3709                                     struct dc_plane_state *dc_plane_state,
3710                                     struct drm_plane_state *plane_state,
3711                                     struct drm_crtc_state *crtc_state)
3712 {
3713         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3714         const struct amdgpu_framebuffer *amdgpu_fb =
3715                 to_amdgpu_framebuffer(plane_state->fb);
3716         struct dc_scaling_info scaling_info;
3717         struct dc_plane_info plane_info;
3718         uint64_t tiling_flags;
3719         int ret;
3720         bool force_disable_dcc = false;
3721
3722         ret = fill_dc_scaling_info(plane_state, &scaling_info);
3723         if (ret)
3724                 return ret;
3725
3726         dc_plane_state->src_rect = scaling_info.src_rect;
3727         dc_plane_state->dst_rect = scaling_info.dst_rect;
3728         dc_plane_state->clip_rect = scaling_info.clip_rect;
3729         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3730
3731         ret = get_fb_info(amdgpu_fb, &tiling_flags);
3732         if (ret)
3733                 return ret;
3734
3735         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3736         ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3737                                           &plane_info,
3738                                           &dc_plane_state->address,
3739                                           force_disable_dcc);
3740         if (ret)
3741                 return ret;
3742
3743         dc_plane_state->format = plane_info.format;
3744         dc_plane_state->color_space = plane_info.color_space;
3746         dc_plane_state->plane_size = plane_info.plane_size;
3747         dc_plane_state->rotation = plane_info.rotation;
3748         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3749         dc_plane_state->stereo_format = plane_info.stereo_format;
3750         dc_plane_state->tiling_info = plane_info.tiling_info;
3751         dc_plane_state->visible = plane_info.visible;
3752         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3753         dc_plane_state->global_alpha = plane_info.global_alpha;
3754         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3755         dc_plane_state->dcc = plane_info.dcc;
3756         dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
3757
3758         /*
3759          * Always set input transfer function, since plane state is refreshed
3760          * every time.
3761          */
3762         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3763         if (ret)
3764                 return ret;
3765
3766         return 0;
3767 }
3768
3769 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3770                                            const struct dm_connector_state *dm_state,
3771                                            struct dc_stream_state *stream)
3772 {
3773         enum amdgpu_rmx_type rmx_type;
3774
3775         struct rect src = { 0 }; /* viewport in composition space */
3776         struct rect dst = { 0 }; /* stream addressable area */
3777
3778         /* no mode. nothing to be done */
3779         if (!mode)
3780                 return;
3781
3782         /* Full screen scaling by default */
3783         src.width = mode->hdisplay;
3784         src.height = mode->vdisplay;
3785         dst.width = stream->timing.h_addressable;
3786         dst.height = stream->timing.v_addressable;
3787
3788         if (dm_state) {
3789                 rmx_type = dm_state->scaling;
3790                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3791                         if (src.width * dst.height <
3792                                         src.height * dst.width) {
3793                                 /* height needs less upscaling/more downscaling */
3794                                 dst.width = src.width *
3795                                                 dst.height / src.height;
3796                         } else {
3797                                 /* width needs less upscaling/more downscaling */
3798                                 dst.height = src.height *
3799                                                 dst.width / src.width;
3800                         }
3801                 } else if (rmx_type == RMX_CENTER) {
3802                         dst = src;
3803                 }
3804
3805                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3806                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3807
3808                 if (dm_state->underscan_enable) {
3809                         dst.x += dm_state->underscan_hborder / 2;
3810                         dst.y += dm_state->underscan_vborder / 2;
3811                         dst.width -= dm_state->underscan_hborder;
3812                         dst.height -= dm_state->underscan_vborder;
3813                 }
3814         }
3815
3816         stream->src = src;
3817         stream->dst = dst;
3818
3819         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3820                         dst.x, dst.y, dst.width, dst.height);
3822 }
3823
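/*
 * Pick an effective bpc from the sink's capabilities, capped by the
 * connector's "max bpc" property: e.g. a 10 bpc panel combined with
 * max_requested_bpc == 8 yields COLOR_DEPTH_888. Odd values are rounded
 * down since DC has no odd color depths.
 */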
3824 static enum dc_color_depth
3825 convert_color_depth_from_display_info(const struct drm_connector *connector,
3826                                       const struct drm_connector_state *state,
3827                                       bool is_y420)
3828 {
3829         uint8_t bpc;
3830
3831         if (is_y420) {
3832                 bpc = 8;
3833
3834                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3835                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3836                         bpc = 16;
3837                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3838                         bpc = 12;
3839                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3840                         bpc = 10;
3841         } else {
3842                 bpc = (uint8_t)connector->display_info.bpc;
3843                 /* Assume 8 bpc by default if no bpc is specified. */
3844                 bpc = bpc ? bpc : 8;
3845         }
3846
3847         if (!state)
3848                 state = connector->state;
3849
3850         if (state) {
3851                 /*
3852                  * Cap display bpc based on the user requested value.
3853                  *
3854                  * The value for state->max_bpc may not be correctly updated
3855                  * depending on when the connector gets added to the state
3856                  * or if this was called outside of atomic check, so it
3857                  * can't be used directly.
3858                  */
3859                 bpc = min(bpc, state->max_requested_bpc);
3860
3861                 /* Round down to the nearest even number. */
3862                 bpc = bpc - (bpc & 1);
3863         }
3864
3865         switch (bpc) {
3866         case 0:
3867                 /*
3868                  * Temporary workaround: DRM doesn't parse color depth for
3869                  * EDID revisions before 1.4.
3870                  * TODO: Fix EDID parsing
3871                  */
3872                 return COLOR_DEPTH_888;
3873         case 6:
3874                 return COLOR_DEPTH_666;
3875         case 8:
3876                 return COLOR_DEPTH_888;
3877         case 10:
3878                 return COLOR_DEPTH_101010;
3879         case 12:
3880                 return COLOR_DEPTH_121212;
3881         case 14:
3882                 return COLOR_DEPTH_141414;
3883         case 16:
3884                 return COLOR_DEPTH_161616;
3885         default:
3886                 return COLOR_DEPTH_UNDEFINED;
3887         }
3888 }
3889
3890 static enum dc_aspect_ratio
3891 get_aspect_ratio(const struct drm_display_mode *mode_in)
3892 {
3893         /* 1-1 mapping, since both enums follow the HDMI spec. */
3894         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3895 }
3896
3897 static enum dc_color_space
3898 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3899 {
3900         enum dc_color_space color_space = COLOR_SPACE_SRGB;
3901
3902         switch (dc_crtc_timing->pixel_encoding) {
3903         case PIXEL_ENCODING_YCBCR422:
3904         case PIXEL_ENCODING_YCBCR444:
3905         case PIXEL_ENCODING_YCBCR420:
3906         {
3907                 /*
3908                  * 27.03 MHz is the separation point between HDTV and SDTV
3909                  * according to the HDMI spec; use YCbCr709 above it and
3910                  * YCbCr601 below it.
3911                  */
3912                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3913                         if (dc_crtc_timing->flags.Y_ONLY)
3914                                 color_space =
3915                                         COLOR_SPACE_YCBCR709_LIMITED;
3916                         else
3917                                 color_space = COLOR_SPACE_YCBCR709;
3918                 } else {
3919                         if (dc_crtc_timing->flags.Y_ONLY)
3920                                 color_space =
3921                                         COLOR_SPACE_YCBCR601_LIMITED;
3922                         else
3923                                 color_space = COLOR_SPACE_YCBCR601;
3924                 }
3925
3926         }
3927         break;
3928         case PIXEL_ENCODING_RGB:
3929                 color_space = COLOR_SPACE_SRGB;
3930                 break;
3931
3932         default:
3933                 WARN_ON(1);
3934                 break;
3935         }
3936
3937         return color_space;
3938 }
3939
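/*
 * Walk the colour depth down until the depth-adjusted pixel clock fits
 * the sink's max_tmds_clock (kHz). Worked example: a 594 MHz mode forced
 * to YCbCr 4:2:0 at 12 bpc gives 594000 / 2 * 36 / 24 = 445500 kHz;
 * against a 340000 kHz sink, 10 bpc (371250) still fails and the loop
 * settles on 8 bpc (297000).
 */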
3940 static bool adjust_colour_depth_from_display_info(
3941         struct dc_crtc_timing *timing_out,
3942         const struct drm_display_info *info)
3943 {
3944         enum dc_color_depth depth = timing_out->display_color_depth;
3945         int normalized_clk;
3946         do {
3947                 normalized_clk = timing_out->pix_clk_100hz / 10;
3948                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3949                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3950                         normalized_clk /= 2;
3951                 /* Adjust the pixel clock per the HDMI spec based on colour depth. */
3952                 switch (depth) {
3953                 case COLOR_DEPTH_888:
3954                         break;
3955                 case COLOR_DEPTH_101010:
3956                         normalized_clk = (normalized_clk * 30) / 24;
3957                         break;
3958                 case COLOR_DEPTH_121212:
3959                         normalized_clk = (normalized_clk * 36) / 24;
3960                         break;
3961                 case COLOR_DEPTH_161616:
3962                         normalized_clk = (normalized_clk * 48) / 24;
3963                         break;
3964                 default:
3965                         /* The above depths are the only ones valid for HDMI. */
3966                         return false;
3967                 }
3968                 if (normalized_clk <= info->max_tmds_clock) {
3969                         timing_out->display_color_depth = depth;
3970                         return true;
3971                 }
3972         } while (--depth > COLOR_DEPTH_666);
3973         return false;
3974 }
3975
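/*
 * Translate a DRM display mode into DC stream timing. The pixel encoding
 * is picked in priority order: YCbCr 4:2:0 when the mode (or the
 * connector's force_yuv420_output flag) requires it, YCbCr 4:4:4 when the
 * HDMI sink supports it, and RGB otherwise.
 */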
3976 static void fill_stream_properties_from_drm_display_mode(
3977         struct dc_stream_state *stream,
3978         const struct drm_display_mode *mode_in,
3979         const struct drm_connector *connector,
3980         const struct drm_connector_state *connector_state,
3981         const struct dc_stream_state *old_stream)
3982 {
3983         struct dc_crtc_timing *timing_out = &stream->timing;
3984         const struct drm_display_info *info = &connector->display_info;
3985         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3986         struct hdmi_vendor_infoframe hv_frame;
3987         struct hdmi_avi_infoframe avi_frame;
3988
3989         memset(&hv_frame, 0, sizeof(hv_frame));
3990         memset(&avi_frame, 0, sizeof(avi_frame));
3991
3992         timing_out->h_border_left = 0;
3993         timing_out->h_border_right = 0;
3994         timing_out->v_border_top = 0;
3995         timing_out->v_border_bottom = 0;
3996         /* TODO: un-hardcode */
3997         if (drm_mode_is_420_only(info, mode_in)
3998                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3999                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4000         else if (drm_mode_is_420_also(info, mode_in)
4001                         && aconnector->force_yuv420_output)
4002                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4003         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4004                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4005                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4006         else
4007                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4008
4009         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4010         timing_out->display_color_depth = convert_color_depth_from_display_info(
4011                 connector, connector_state,
4012                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
4013         timing_out->scan_type = SCANNING_TYPE_NODATA;
4014         timing_out->hdmi_vic = 0;
4015
4016         if (old_stream) {
4017                 timing_out->vic = old_stream->timing.vic;
4018                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4019                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4020         } else {
4021                 timing_out->vic = drm_match_cea_mode(mode_in);
4022                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4023                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4024                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4025                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4026         }
4027
4028         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4029                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4030                 timing_out->vic = avi_frame.video_code;
4031                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4032                 timing_out->hdmi_vic = hv_frame.vic;
4033         }
4034
4035         timing_out->h_addressable = mode_in->crtc_hdisplay;
4036         timing_out->h_total = mode_in->crtc_htotal;
4037         timing_out->h_sync_width =
4038                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4039         timing_out->h_front_porch =
4040                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4041         timing_out->v_total = mode_in->crtc_vtotal;
4042         timing_out->v_addressable = mode_in->crtc_vdisplay;
4043         timing_out->v_front_porch =
4044                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4045         timing_out->v_sync_width =
4046                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4047         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4048         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4049
4050         stream->output_color_space = get_output_color_space(timing_out);
4051
4052         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4053         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4054         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4055                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4056                     drm_mode_is_420_also(info, mode_in) &&
4057                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4058                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4059                         adjust_colour_depth_from_display_info(timing_out, info);
4060                 }
4061         }
4062 }
4063
4064 static void fill_audio_info(struct audio_info *audio_info,
4065                             const struct drm_connector *drm_connector,
4066                             const struct dc_sink *dc_sink)
4067 {
4068         int i = 0;
4069         int cea_revision = 0;
4070         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4071
4072         audio_info->manufacture_id = edid_caps->manufacturer_id;
4073         audio_info->product_id = edid_caps->product_id;
4074
4075         cea_revision = drm_connector->display_info.cea_rev;
4076
4077         strscpy(audio_info->display_name,
4078                 edid_caps->display_name,
4079                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4080
4081         if (cea_revision >= 3) {
4082                 audio_info->mode_count = edid_caps->audio_mode_count;
4083
4084                 for (i = 0; i < audio_info->mode_count; ++i) {
4085                         audio_info->modes[i].format_code =
4086                                         (enum audio_format_code)
4087                                         (edid_caps->audio_modes[i].format_code);
4088                         audio_info->modes[i].channel_count =
4089                                         edid_caps->audio_modes[i].channel_count;
4090                         audio_info->modes[i].sample_rates.all =
4091                                         edid_caps->audio_modes[i].sample_rate;
4092                         audio_info->modes[i].sample_size =
4093                                         edid_caps->audio_modes[i].sample_size;
4094                 }
4095         }
4096
4097         audio_info->flags.all = edid_caps->speaker_flags;
4098
4099         /* TODO: We only check progressive mode; check interlaced mode too. */
4100         if (drm_connector->latency_present[0]) {
4101                 audio_info->video_latency = drm_connector->video_latency[0];
4102                 audio_info->audio_latency = drm_connector->audio_latency[0];
4103         }
4104
4105         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4106
4107 }
4108
4109 static void
4110 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4111                                       struct drm_display_mode *dst_mode)
4112 {
4113         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4114         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4115         dst_mode->crtc_clock = src_mode->crtc_clock;
4116         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4117         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4118         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4119         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4120         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4121         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4122         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4123         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4124         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4125         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4126         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4127 }
4128
4129 static void
4130 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4131                                         const struct drm_display_mode *native_mode,
4132                                         bool scale_enabled)
4133 {
4134         if (scale_enabled) {
4135                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4136         } else if (native_mode->clock == drm_mode->clock &&
4137                         native_mode->htotal == drm_mode->htotal &&
4138                         native_mode->vtotal == drm_mode->vtotal) {
4139                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4140         } else {
4141                 /* no scaling and no amdgpu-inserted mode: nothing to patch */
4142         }
4143 }
4144
4145 static struct dc_sink *
4146 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4147 {
4148         struct dc_sink_init_data sink_init_data = { 0 };
4149         struct dc_sink *sink = NULL;

4150         sink_init_data.link = aconnector->dc_link;
4151         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4152
4153         sink = dc_sink_create(&sink_init_data);
4154         if (!sink) {
4155                 DRM_ERROR("Failed to create sink!\n");
4156                 return NULL;
4157         }
4158         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4159
4160         return sink;
4161 }
4162
4163 static void set_multisync_trigger_params(
4164                 struct dc_stream_state *stream)
4165 {
4166         if (stream->triggered_crtc_reset.enabled) {
4167                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4168                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4169         }
4170 }
4171
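/*
 * Elect the stream with the highest refresh rate among those that have a
 * triggered CRTC reset enabled, and make it the reset event source for every
 * stream in the set. Illustrative arithmetic for the refresh-rate formula
 * below: a CEA 1920x1080@60 timing has pix_clk_100hz = 1485000 (148.5 MHz),
 * h_total = 2200 and v_total = 1125, so
 * (1485000 * 100) / (2200 * 1125) = 60.
 */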
4172 static void set_master_stream(struct dc_stream_state *stream_set[],
4173                               int stream_count)
4174 {
4175         int j, highest_rfr = 0, master_stream = 0;
4176
4177         for (j = 0;  j < stream_count; j++) {
4178                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4179                         int refresh_rate;
4180
4181                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
4182                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
4183                         if (refresh_rate > highest_rfr) {
4184                                 highest_rfr = refresh_rate;
4185                                 master_stream = j;
4186                         }
4187                 }
4188         }
4189         for (j = 0;  j < stream_count; j++) {
4190                 if (stream_set[j])
4191                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4192         }
4193 }
4194
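/*
 * Configure per-frame CRTC synchronization for a DC state: only meaningful
 * with two or more streams; programs the per-stream trigger parameters and
 * then elects a master stream.
 */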
4195 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4196 {
4197         int i = 0;
4198
4199         if (context->stream_count < 2)
4200                 return;
4201         for (i = 0; i < context->stream_count ; i++) {
4202                 if (!context->streams[i])
4203                         continue;
4204                 /*
4205                  * TODO: add a function to read AMD VSDB bits and set
4206                  * crtc_sync_master.multi_sync_enabled flag
4207                  * For now it's set to false
4208                  */
4209                 set_multisync_trigger_params(context->streams[i]);
4210         }
4211         set_master_stream(context->streams, context->stream_count);
4212 }
4213
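/*
 * Build a dc_stream_state for the connector's current sink (or a fake sink
 * when none is attached): picks the preferred/native CRTC timing, applies
 * scaling and audio settings, decides DSC support against the link bandwidth,
 * and prepares the VSIF/VSC infopackets where applicable. The caller is
 * responsible for releasing the returned stream.
 */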
4214 static struct dc_stream_state *
4215 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4216                        const struct drm_display_mode *drm_mode,
4217                        const struct dm_connector_state *dm_state,
4218                        const struct dc_stream_state *old_stream)
4219 {
4220         struct drm_display_mode *preferred_mode = NULL;
4221         struct drm_connector *drm_connector;
4222         const struct drm_connector_state *con_state =
4223                 dm_state ? &dm_state->base : NULL;
4224         struct dc_stream_state *stream = NULL;
4225         struct drm_display_mode mode = *drm_mode;
4226         bool native_mode_found = false;
4227         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4228         int mode_refresh;
4229         int preferred_refresh = 0;
4230 #if defined(CONFIG_DRM_AMD_DC_DCN)
4231         struct dsc_dec_dpcd_caps dsc_caps;
4232 #endif
4233         uint32_t link_bandwidth_kbps;
4234
4235         struct dc_sink *sink = NULL;
4236         if (aconnector == NULL) {
4237                 DRM_ERROR("aconnector is NULL!\n");
4238                 return stream;
4239         }
4240
4241         drm_connector = &aconnector->base;
4242
4243         if (!aconnector->dc_sink) {
4244                 sink = create_fake_sink(aconnector);
4245                 if (!sink)
4246                         return stream;
4247         } else {
4248                 sink = aconnector->dc_sink;
4249                 dc_sink_retain(sink);
4250         }
4251
4252         stream = dc_create_stream_for_sink(sink);
4253
4254         if (stream == NULL) {
4255                 DRM_ERROR("Failed to create stream for sink!\n");
4256                 goto finish;
4257         }
4258
4259         stream->dm_stream_context = aconnector;
4260
4261         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4262                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4263
4264         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4265                 /* Search for preferred mode */
4266                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4267                         native_mode_found = true;
4268                         break;
4269                 }
4270         }
4271         if (!native_mode_found)
4272                 preferred_mode = list_first_entry_or_null(
4273                                 &aconnector->base.modes,
4274                                 struct drm_display_mode,
4275                                 head);
4276
4277         mode_refresh = drm_mode_vrefresh(&mode);
4278
4279         if (preferred_mode == NULL) {
4280                 /*
4281                  * This may not be an error: one use case is when there are no
4282                  * usermode calls to reset and set the mode upon hotplug. In that
4283                  * case we call set mode ourselves to restore the previous mode,
4284                  * and the mode list may not be populated in time.
4285                  */
4286                 DRM_DEBUG_DRIVER("No preferred mode found\n");
4287         } else {
4288                 decide_crtc_timing_for_drm_display_mode(
4289                                 &mode, preferred_mode,
4290                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4291                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4292         }
4293
4294         if (!dm_state)
4295                 drm_mode_set_crtcinfo(&mode, 0);
4296
4297         /*
4298          * If scaling is enabled and the refresh rate didn't change,
4299          * we copy the VIC and polarities of the old timings.
4300          */
4301         if (!scale || mode_refresh != preferred_refresh)
4302                 fill_stream_properties_from_drm_display_mode(stream,
4303                         &mode, &aconnector->base, con_state, NULL);
4304         else
4305                 fill_stream_properties_from_drm_display_mode(stream,
4306                         &mode, &aconnector->base, con_state, old_stream);
4307
4308         stream->timing.flags.DSC = 0;
4309
4310         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4311 #if defined(CONFIG_DRM_AMD_DC_DCN)
4312                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4313                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4314                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4315                                       &dsc_caps);
4316 #endif
4317                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4318                                                              dc_link_get_link_cap(aconnector->dc_link));
4319
4320 #if defined(CONFIG_DRM_AMD_DC_DCN)
4321                 if (dsc_caps.is_dsc_supported)
4322                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4323                                                   &dsc_caps,
4324                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4325                                                   link_bandwidth_kbps,
4326                                                   &stream->timing,
4327                                                   &stream->timing.dsc_cfg))
4328                                 stream->timing.flags.DSC = 1;
4329 #endif
4330         }
4331
4332         update_stream_scaling_settings(&mode, dm_state, stream);
4333
4334         fill_audio_info(
4335                 &stream->audio_info,
4336                 drm_connector,
4337                 sink);
4338
4339         update_stream_signal(stream, sink);
4340
4341         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4342                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4343         if (stream->link->psr_settings.psr_feature_enabled) {
4344                 struct dc *core_dc = stream->link->ctx->dc;
4345
4346                 if (dc_is_dmcu_initialized(core_dc)) {
4347                         /*
4348                          * Decide whether the stream supports VSC SDP colorimetry
4349                          * before building the VSC info packet.
4350                          */
4351                         stream->use_vsc_sdp_for_colorimetry = false;
4352                         if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4353                                 stream->use_vsc_sdp_for_colorimetry =
4354                                         aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4355                         } else {
4356                                 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4357                                         stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4358                                         stream->use_vsc_sdp_for_colorimetry = true;
4359                                 }
4360                         }
4361                         mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4362                 }
4363         }
4364 finish:
4365         dc_sink_release(sink);
4366
4367         return stream;
4368 }
4369
4370 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4371 {
4372         drm_crtc_cleanup(crtc);
4373         kfree(crtc);
4374 }
4375
4376 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4377                                   struct drm_crtc_state *state)
4378 {
4379         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4380
4381         /* TODO: Destroy dc_stream objects once the stream object is flattened */
4382         if (cur->stream)
4383                 dc_stream_release(cur->stream);
4384
4386         __drm_atomic_helper_crtc_destroy_state(state);
4387
4389         kfree(state);
4390 }
4391
4392 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4393 {
4394         struct dm_crtc_state *state;
4395
4396         if (crtc->state)
4397                 dm_crtc_destroy_state(crtc, crtc->state);
4398
4399         state = kzalloc(sizeof(*state), GFP_KERNEL);
4400         if (WARN_ON(!state))
4401                 return;
4402
4403         crtc->state = &state->base;
4404         crtc->state->crtc = crtc;
4406 }
4407
4408 static struct drm_crtc_state *
4409 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4410 {
4411         struct dm_crtc_state *state, *cur;
4412
4413         if (WARN_ON(!crtc->state))
4414                 return NULL;
4415
4416         cur = to_dm_crtc_state(crtc->state);
4417
4418         state = kzalloc(sizeof(*state), GFP_KERNEL);
4419         if (!state)
4420                 return NULL;
4421
4422         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4423
4424         if (cur->stream) {
4425                 state->stream = cur->stream;
4426                 dc_stream_retain(state->stream);
4427         }
4428
4429         state->active_planes = cur->active_planes;
4430         state->interrupts_enabled = cur->interrupts_enabled;
4431         state->vrr_params = cur->vrr_params;
4432         state->vrr_infopacket = cur->vrr_infopacket;
4433         state->abm_level = cur->abm_level;
4434         state->vrr_supported = cur->vrr_supported;
4435         state->freesync_config = cur->freesync_config;
4436         state->crc_src = cur->crc_src;
4437         state->cm_has_degamma = cur->cm_has_degamma;
4438         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4439
4440         /* TODO: Duplicate the dc_stream once the stream object is flattened */
4441
4442         return &state->base;
4443 }
4444
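/*
 * Arm or disarm the VUPDATE interrupt for the CRTC's OTG instance. This is
 * only needed on pre-DCN hardware; on anything newer than AMDGPU_FAMILY_AI
 * it is deliberately a no-op.
 */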
4445 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4446 {
4447         enum dc_irq_source irq_source;
4448         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4449         struct amdgpu_device *adev = crtc->dev->dev_private;
4450         int rc;
4451
4452         /* Do not set vupdate for DCN hardware */
4453         if (adev->family > AMDGPU_FAMILY_AI)
4454                 return 0;
4455
4456         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4457
4458         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4459
4460         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4461                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4462         return rc;
4463 }
4464
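/*
 * Toggle the VBLANK interrupt for a CRTC. When enabling, VUPDATE is
 * additionally enabled only while VRR is active; when disabling, VUPDATE is
 * unconditionally disabled first.
 */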
4465 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4466 {
4467         enum dc_irq_source irq_source;
4468         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4469         struct amdgpu_device *adev = crtc->dev->dev_private;
4470         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4471         int rc = 0;
4472
4473         if (enable) {
4474                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4475                 if (amdgpu_dm_vrr_active(acrtc_state))
4476                         rc = dm_set_vupdate_irq(crtc, true);
4477         } else {
4478                 /* vblank irq off -> vupdate irq off */
4479                 rc = dm_set_vupdate_irq(crtc, false);
4480         }
4481
4482         if (rc)
4483                 return rc;
4484
4485         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4486         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4487 }
4488
4489 static int dm_enable_vblank(struct drm_crtc *crtc)
4490 {
4491         return dm_set_vblank(crtc, true);
4492 }
4493
4494 static void dm_disable_vblank(struct drm_crtc *crtc)
4495 {
4496         dm_set_vblank(crtc, false);
4497 }
4498
4499 /* Only the options currently available for the driver are implemented */
4500 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4501         .reset = dm_crtc_reset_state,
4502         .destroy = amdgpu_dm_crtc_destroy,
4503         .gamma_set = drm_atomic_helper_legacy_gamma_set,
4504         .set_config = drm_atomic_helper_set_config,
4505         .page_flip = drm_atomic_helper_page_flip,
4506         .atomic_duplicate_state = dm_crtc_duplicate_state,
4507         .atomic_destroy_state = dm_crtc_destroy_state,
4508         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4509         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4510         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4511         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4512         .enable_vblank = dm_enable_vblank,
4513         .disable_vblank = dm_disable_vblank,
4514         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4515 };
4516
4517 static enum drm_connector_status
4518 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4519 {
4520         bool connected;
4521         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4522
4523         /*
4524          * Notes:
4525          * 1. This interface is NOT called in context of HPD irq.
4526          * 2. This interface *is called* in the context of a user-mode ioctl,
4527          * which makes it a bad place for *any* MST-related activity.
4528          */
4529
4530         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4531             !aconnector->fake_enable)
4532                 connected = (aconnector->dc_sink != NULL);
4533         else
4534                 connected = (aconnector->base.force == DRM_FORCE_ON);
4535
4536         return (connected ? connector_status_connected :
4537                         connector_status_disconnected);
4538 }
4539
4540 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4541                                             struct drm_connector_state *connector_state,
4542                                             struct drm_property *property,
4543                                             uint64_t val)
4544 {
4545         struct drm_device *dev = connector->dev;
4546         struct amdgpu_device *adev = dev->dev_private;
4547         struct dm_connector_state *dm_old_state =
4548                 to_dm_connector_state(connector->state);
4549         struct dm_connector_state *dm_new_state =
4550                 to_dm_connector_state(connector_state);
4551
4552         int ret = -EINVAL;
4553
4554         if (property == dev->mode_config.scaling_mode_property) {
4555                 enum amdgpu_rmx_type rmx_type;
4556
4557                 switch (val) {
4558                 case DRM_MODE_SCALE_CENTER:
4559                         rmx_type = RMX_CENTER;
4560                         break;
4561                 case DRM_MODE_SCALE_ASPECT:
4562                         rmx_type = RMX_ASPECT;
4563                         break;
4564                 case DRM_MODE_SCALE_FULLSCREEN:
4565                         rmx_type = RMX_FULL;
4566                         break;
4567                 case DRM_MODE_SCALE_NONE:
4568                 default:
4569                         rmx_type = RMX_OFF;
4570                         break;
4571                 }
4572
4573                 if (dm_old_state->scaling == rmx_type)
4574                         return 0;
4575
4576                 dm_new_state->scaling = rmx_type;
4577                 ret = 0;
4578         } else if (property == adev->mode_info.underscan_hborder_property) {
4579                 dm_new_state->underscan_hborder = val;
4580                 ret = 0;
4581         } else if (property == adev->mode_info.underscan_vborder_property) {
4582                 dm_new_state->underscan_vborder = val;
4583                 ret = 0;
4584         } else if (property == adev->mode_info.underscan_property) {
4585                 dm_new_state->underscan_enable = val;
4586                 ret = 0;
4587         } else if (property == adev->mode_info.abm_level_property) {
4588                 dm_new_state->abm_level = val;
4589                 ret = 0;
4590         }
4591
4592         return ret;
4593 }
4594
4595 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4596                                             const struct drm_connector_state *state,
4597                                             struct drm_property *property,
4598                                             uint64_t *val)
4599 {
4600         struct drm_device *dev = connector->dev;
4601         struct amdgpu_device *adev = dev->dev_private;
4602         struct dm_connector_state *dm_state =
4603                 to_dm_connector_state(state);
4604         int ret = -EINVAL;
4605
4606         if (property == dev->mode_config.scaling_mode_property) {
4607                 switch (dm_state->scaling) {
4608                 case RMX_CENTER:
4609                         *val = DRM_MODE_SCALE_CENTER;
4610                         break;
4611                 case RMX_ASPECT:
4612                         *val = DRM_MODE_SCALE_ASPECT;
4613                         break;
4614                 case RMX_FULL:
4615                         *val = DRM_MODE_SCALE_FULLSCREEN;
4616                         break;
4617                 case RMX_OFF:
4618                 default:
4619                         *val = DRM_MODE_SCALE_NONE;
4620                         break;
4621                 }
4622                 ret = 0;
4623         } else if (property == adev->mode_info.underscan_hborder_property) {
4624                 *val = dm_state->underscan_hborder;
4625                 ret = 0;
4626         } else if (property == adev->mode_info.underscan_vborder_property) {
4627                 *val = dm_state->underscan_vborder;
4628                 ret = 0;
4629         } else if (property == adev->mode_info.underscan_property) {
4630                 *val = dm_state->underscan_enable;
4631                 ret = 0;
4632         } else if (property == adev->mode_info.abm_level_property) {
4633                 *val = dm_state->abm_level;
4634                 ret = 0;
4635         }
4636
4637         return ret;
4638 }
4639
4640 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4641 {
4642         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4643
4644         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4645 }
4646
4647 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4648 {
4649         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4650         const struct dc_link *link = aconnector->dc_link;
4651         struct amdgpu_device *adev = connector->dev->dev_private;
4652         struct amdgpu_display_manager *dm = &adev->dm;
4653
4654 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4655         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4656
4657         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4658             link->type != dc_connection_none &&
4659             dm->backlight_dev) {
4660                 backlight_device_unregister(dm->backlight_dev);
4661                 dm->backlight_dev = NULL;
4662         }
4663 #endif
4664
4665         if (aconnector->dc_em_sink)
4666                 dc_sink_release(aconnector->dc_em_sink);
4667         aconnector->dc_em_sink = NULL;
4668         if (aconnector->dc_sink)
4669                 dc_sink_release(aconnector->dc_sink);
4670         aconnector->dc_sink = NULL;
4671
4672         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4673         drm_connector_unregister(connector);
4674         drm_connector_cleanup(connector);
4675         if (aconnector->i2c) {
4676                 i2c_del_adapter(&aconnector->i2c->base);
4677                 kfree(aconnector->i2c);
4678         }
4679         kfree(aconnector->dm_dp_aux.aux.name);
4680
4681         kfree(connector);
4682 }
4683
4684 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4685 {
4686         struct dm_connector_state *state =
4687                 to_dm_connector_state(connector->state);
4688
4689         if (connector->state)
4690                 __drm_atomic_helper_connector_destroy_state(connector->state);
4691
4692         kfree(state);
4693
4694         state = kzalloc(sizeof(*state), GFP_KERNEL);
4695
4696         if (state) {
4697                 state->scaling = RMX_OFF;
4698                 state->underscan_enable = false;
4699                 state->underscan_hborder = 0;
4700                 state->underscan_vborder = 0;
4701                 state->base.max_requested_bpc = 8;
4702                 state->vcpi_slots = 0;
4703                 state->pbn = 0;
4704                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4705                         state->abm_level = amdgpu_dm_abm_level;
4706
4707                 __drm_atomic_helper_connector_reset(connector, &state->base);
4708         }
4709 }
4710
4711 struct drm_connector_state *
4712 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4713 {
4714         struct dm_connector_state *state =
4715                 to_dm_connector_state(connector->state);
4716
4717         struct dm_connector_state *new_state =
4718                         kmemdup(state, sizeof(*state), GFP_KERNEL);
4719
4720         if (!new_state)
4721                 return NULL;
4722
4723         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4724
4725         new_state->freesync_capable = state->freesync_capable;
4726         new_state->abm_level = state->abm_level;
4727         new_state->scaling = state->scaling;
4728         new_state->underscan_enable = state->underscan_enable;
4729         new_state->underscan_hborder = state->underscan_hborder;
4730         new_state->underscan_vborder = state->underscan_vborder;
4731         new_state->vcpi_slots = state->vcpi_slots;
4732         new_state->pbn = state->pbn;
4733         return &new_state->base;
4734 }
4735
4736 static int
4737 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4738 {
4739         struct amdgpu_dm_connector *amdgpu_dm_connector =
4740                 to_amdgpu_dm_connector(connector);
4741         int r;
4742
4743         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4744             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4745                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4746                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4747                 if (r)
4748                         return r;
4749         }
4750
4751 #if defined(CONFIG_DEBUG_FS)
4752         connector_debugfs_init(amdgpu_dm_connector);
4753 #endif
4754
4755         return 0;
4756 }
4757
4758 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4759         .reset = amdgpu_dm_connector_funcs_reset,
4760         .detect = amdgpu_dm_connector_detect,
4761         .fill_modes = drm_helper_probe_single_connector_modes,
4762         .destroy = amdgpu_dm_connector_destroy,
4763         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4764         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4765         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4766         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4767         .late_register = amdgpu_dm_connector_late_register,
4768         .early_unregister = amdgpu_dm_connector_unregister
4769 };
4770
4771 static int get_modes(struct drm_connector *connector)
4772 {
4773         return amdgpu_dm_connector_get_modes(connector);
4774 }
4775
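/*
 * Build an emulated (remote) sink from the connector's EDID property blob so
 * that forced connectors can operate without a physical display. If no EDID
 * override is present, the connector is forced off instead.
 */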
4776 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4777 {
4778         struct dc_sink_init_data init_params = {
4779                         .link = aconnector->dc_link,
4780                         .sink_signal = SIGNAL_TYPE_VIRTUAL
4781         };
4782         struct edid *edid;
4783
4784         if (!aconnector->base.edid_blob_ptr) {
4785                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4786                                 aconnector->base.name);
4787
4788                 aconnector->base.force = DRM_FORCE_OFF;
4789                 aconnector->base.override_edid = false;
4790                 return;
4791         }
4792
4793         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4794
4795         aconnector->edid = edid;
4796
4797         aconnector->dc_em_sink = dc_link_add_remote_sink(
4798                 aconnector->dc_link,
4799                 (uint8_t *)edid,
4800                 (edid->extensions + 1) * EDID_LENGTH,
4801                 &init_params);
4802
4803         if (aconnector->base.force == DRM_FORCE_ON) {
4804                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4805                 aconnector->dc_link->local_sink :
4806                 aconnector->dc_em_sink;
4807                 dc_sink_retain(aconnector->dc_sink);
4808         }
4809 }
4810
4811 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4812 {
4813         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4814
4815         /*
4816          * In case of a headless boot with force-on for a DP managed connector,
4817          * these settings have to be != 0 to get an initial modeset.
4818          */
4819         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4820                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4821                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4822         }
4823
4825         aconnector->base.override_edid = true;
4826         create_eml_sink(aconnector);
4827 }
4828
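/*
 * Validate a mode by constructing a throwaway stream for the connector's
 * sink and running it through dc_validate_stream(). Interlaced and
 * double-scan modes are rejected up front.
 */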
4829 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4830                                    struct drm_display_mode *mode)
4831 {
4832         int result = MODE_ERROR;
4833         struct dc_sink *dc_sink;
4834         struct amdgpu_device *adev = connector->dev->dev_private;
4835         /* TODO: Unhardcode stream count */
4836         struct dc_stream_state *stream;
4837         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4838         enum dc_status dc_result = DC_OK;
4839
4840         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4841                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4842                 return result;
4843
4844         /*
4845          * Only run this the first time mode_valid is called, to initialize
4846          * EDID management.
4847          */
4848         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4849                 !aconnector->dc_em_sink)
4850                 handle_edid_mgmt(aconnector);
4851
4852         dc_sink = aconnector->dc_sink;
4853
4854         if (dc_sink == NULL) {
4855                 DRM_ERROR("dc_sink is NULL!\n");
4856                 goto fail;
4857         }
4858
4859         stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4860         if (stream == NULL) {
4861                 DRM_ERROR("Failed to create stream for sink!\n");
4862                 goto fail;
4863         }
4864
4865         dc_result = dc_validate_stream(adev->dm.dc, stream);
4866
4867         if (dc_result == DC_OK)
4868                 result = MODE_OK;
4869         else
4870                 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4871                               mode->hdisplay,
4872                               mode->vdisplay,
4873                               mode->clock,
4874                               dc_result);
4875
4876         dc_stream_release(stream);
4877
4878 fail:
4879         /* TODO: error handling */
4880         return result;
4881 }
4882
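/*
 * Pack the connector state's HDR static metadata into a DC info packet.
 * The packed HDMI DRM infoframe must be exactly 30 bytes (a 4-byte header
 * plus a fixed 26-byte payload); the header is then rewritten for the target
 * signal: a plain infoframe header for HDMI, or an SDP header for DP/eDP.
 */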
4883 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4884                                 struct dc_info_packet *out)
4885 {
4886         struct hdmi_drm_infoframe frame;
4887         unsigned char buf[30]; /* 26 + 4 */
4888         ssize_t len;
4889         int ret, i;
4890
4891         memset(out, 0, sizeof(*out));
4892
4893         if (!state->hdr_output_metadata)
4894                 return 0;
4895
4896         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4897         if (ret)
4898                 return ret;
4899
4900         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4901         if (len < 0)
4902                 return (int)len;
4903
4904         /* Static metadata is a fixed 26 bytes + 4 byte header. */
4905         if (len != 30)
4906                 return -EINVAL;
4907
4908         /* Prepare the infopacket for DC. */
4909         switch (state->connector->connector_type) {
4910         case DRM_MODE_CONNECTOR_HDMIA:
4911                 out->hb0 = 0x87; /* type */
4912                 out->hb1 = 0x01; /* version */
4913                 out->hb2 = 0x1A; /* length */
4914                 out->sb[0] = buf[3]; /* checksum */
4915                 i = 1;
4916                 break;
4917
4918         case DRM_MODE_CONNECTOR_DisplayPort:
4919         case DRM_MODE_CONNECTOR_eDP:
4920                 out->hb0 = 0x00; /* sdp id, zero */
4921                 out->hb1 = 0x87; /* type */
4922                 out->hb2 = 0x1D; /* payload len - 1 */
4923                 out->hb3 = (0x13 << 2); /* sdp version */
4924                 out->sb[0] = 0x01; /* version */
4925                 out->sb[1] = 0x1A; /* length */
4926                 i = 2;
4927                 break;
4928
4929         default:
4930                 return -EINVAL;
4931         }
4932
4933         memcpy(&out->sb[i], &buf[4], 26);
4934         out->valid = true;
4935
4936         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4937                        sizeof(out->sb), false);
4938
4939         return 0;
4940 }
4941
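/*
 * Report whether HDR output metadata changed between two connector states:
 * true when the blob appears or disappears, changes size, or (for equal-size
 * blobs) differs in content.
 */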
4942 static bool
4943 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4944                           const struct drm_connector_state *new_state)
4945 {
4946         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4947         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4948
4949         if (old_blob != new_blob) {
4950                 if (old_blob && new_blob &&
4951                     old_blob->length == new_blob->length)
4952                         return memcmp(old_blob->data, new_blob->data,
4953                                       old_blob->length);
4954
4955                 return true;
4956         }
4957
4958         return false;
4959 }
4960
4961 static int
4962 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4963                                  struct drm_atomic_state *state)
4964 {
4965         struct drm_connector_state *new_con_state =
4966                 drm_atomic_get_new_connector_state(state, conn);
4967         struct drm_connector_state *old_con_state =
4968                 drm_atomic_get_old_connector_state(state, conn);
4969         struct drm_crtc *crtc = new_con_state->crtc;
4970         struct drm_crtc_state *new_crtc_state;
4971         int ret;
4972
4973         if (!crtc)
4974                 return 0;
4975
4976         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4977                 struct dc_info_packet hdr_infopacket;
4978
4979                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4980                 if (ret)
4981                         return ret;
4982
4983                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4984                 if (IS_ERR(new_crtc_state))
4985                         return PTR_ERR(new_crtc_state);
4986
4987                 /*
4988                  * DC considers the stream backends changed if the
4989                  * static metadata changes. Forcing the modeset also
4990                  * gives a simple way for userspace to switch from
4991                  * 8bpc to 10bpc when setting the metadata to enter
4992                  * or exit HDR.
4993                  *
4994                  * Changing the static metadata after it's been
4995                  * set is permissible, however. So only force a
4996                  * modeset if we're entering or exiting HDR.
4997                  */
4998                 new_crtc_state->mode_changed =
4999                         !old_con_state->hdr_output_metadata ||
5000                         !new_con_state->hdr_output_metadata;
5001         }
5002
5003         return 0;
5004 }
5005
5006 static const struct drm_connector_helper_funcs
5007 amdgpu_dm_connector_helper_funcs = {
5008         /*
5009          * When hotplugging a second, larger display in fbcon mode, the larger
5010          * resolution modes are filtered out by drm_mode_validate_size() and go
5011          * missing once the user starts lightdm. We therefore need to rebuild the
5012          * mode list in the get_modes callback instead of just returning a count.
5013          */
5014         .get_modes = get_modes,
5015         .mode_valid = amdgpu_dm_connector_mode_valid,
5016         .atomic_check = amdgpu_dm_connector_atomic_check,
5017 };
5018
5019 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5020 {
5021 }
5022
5023 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5024 {
5025         struct drm_device *dev = new_crtc_state->crtc->dev;
5026         struct drm_plane *plane;
5027
5028         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5029                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5030                         return true;
5031         }
5032
5033         return false;
5034 }
5035
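/*
 * Count the hardware planes enabled on a CRTC. Cursor planes are skipped,
 * planes without a new state in this commit are assumed to remain enabled,
 * and updated planes count only if they carry a framebuffer.
 */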
5036 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5037 {
5038         struct drm_atomic_state *state = new_crtc_state->state;
5039         struct drm_plane *plane;
5040         int num_active = 0;
5041
5042         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5043                 struct drm_plane_state *new_plane_state;
5044
5045                 /* Cursor planes are "fake". */
5046                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5047                         continue;
5048
5049                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5050
5051                 if (!new_plane_state) {
5052                         /*
5053                          * The plane is enabled on the CRTC and hasn't changed
5054                          * state. This means that it previously passed
5055                          * validation and is therefore enabled.
5056                          */
5057                         num_active += 1;
5058                         continue;
5059                 }
5060
5061                 /* We need a framebuffer to be considered enabled. */
5062                 num_active += (new_plane_state->fb != NULL);
5063         }
5064
5065         return num_active;
5066 }
5067
5068 /*
5069  * Sets whether interrupts should be enabled on a specific CRTC.
5070  * We require that the stream be enabled and that there exist active
5071  * DC planes on the stream.
5072  */
5073 static void
5074 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5075                                struct drm_crtc_state *new_crtc_state)
5076 {
5077         struct dm_crtc_state *dm_new_crtc_state =
5078                 to_dm_crtc_state(new_crtc_state);
5079
5080         dm_new_crtc_state->active_planes = 0;
5081         dm_new_crtc_state->interrupts_enabled = false;
5082
5083         if (!dm_new_crtc_state->stream)
5084                 return;
5085
5086         dm_new_crtc_state->active_planes =
5087                 count_crtc_active_planes(new_crtc_state);
5088
5089         dm_new_crtc_state->interrupts_enabled =
5090                 dm_new_crtc_state->active_planes > 0;
5091 }
5092
5093 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5094                                        struct drm_crtc_state *state)
5095 {
5096         struct amdgpu_device *adev = crtc->dev->dev_private;
5097         struct dc *dc = adev->dm.dc;
5098         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5099         int ret = -EINVAL;
5100
5101         /*
5102          * Update interrupt state for the CRTC. This needs to happen whenever
5103          * the CRTC has changed or whenever any of its planes have changed.
5104          * Atomic check satisfies both of these requirements since the CRTC
5105          * is added to the state by DRM during drm_atomic_helper_check_planes.
5106          */
5107         dm_update_crtc_interrupt_state(crtc, state);
5108
5109         if (unlikely(!dm_crtc_state->stream &&
5110                      modeset_required(state, NULL, dm_crtc_state->stream))) {
5111                 WARN_ON(1);
5112                 return ret;
5113         }
5114
5115         /* In some use cases, like reset, no stream is attached */
5116         if (!dm_crtc_state->stream)
5117                 return 0;
5118
5119         /*
5120          * We require at least one hardware plane to be enabled
5121          * whenever the stream has a cursor enabled.
5122          */
5123         if (state->enable && state->active &&
5124             does_crtc_have_active_cursor(state) &&
5125             dm_crtc_state->active_planes == 0)
5126                 return -EINVAL;
5127
5128         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5129                 return 0;
5130
5131         return ret;
5132 }
5133
5134 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5135                                       const struct drm_display_mode *mode,
5136                                       struct drm_display_mode *adjusted_mode)
5137 {
5138         return true;
5139 }
5140
5141 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5142         .disable = dm_crtc_helper_disable,
5143         .atomic_check = dm_crtc_helper_atomic_check,
5144         .mode_fixup = dm_crtc_helper_mode_fixup,
5145         .get_scanout_position = amdgpu_crtc_get_scanout_position,
5146 };
5147
5148 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5149 {
5151 }
5152
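/*
 * Map a DC color depth to bits per component, or 0 when unknown. Callers
 * typically multiply by 3 to get bits per pixel; e.g. COLOR_DEPTH_101010
 * yields 10 bpc, i.e. 30 bpp for a three-component pixel.
 */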
5153 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5154 {
5155         switch (display_color_depth) {
5156         case COLOR_DEPTH_666:
5157                 return 6;
5158         case COLOR_DEPTH_888:
5159                 return 8;
5160         case COLOR_DEPTH_101010:
5161                 return 10;
5162         case COLOR_DEPTH_121212:
5163                 return 12;
5164         case COLOR_DEPTH_141414:
5165                 return 14;
5166         case COLOR_DEPTH_161616:
5167                 return 16;
5168         default:
5169                 break;
5170         }
5171         return 0;
5172 }
5173
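/*
 * MST-only atomic check: derive bpp from the negotiated color depth, compute
 * the payload bandwidth number (PBN) for the adjusted mode clock with
 * drm_dp_calc_pbn_mode(), and reserve VCPI slots in the atomic state.
 * Connectors without an MST port or DC sink are ignored.
 */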
5174 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5175                                           struct drm_crtc_state *crtc_state,
5176                                           struct drm_connector_state *conn_state)
5177 {
5178         struct drm_atomic_state *state = crtc_state->state;
5179         struct drm_connector *connector = conn_state->connector;
5180         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5181         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5182         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5183         struct drm_dp_mst_topology_mgr *mst_mgr;
5184         struct drm_dp_mst_port *mst_port;
5185         enum dc_color_depth color_depth;
5186         int clock, bpp = 0;
5187         bool is_y420 = false;
5188
5189         if (!aconnector->port || !aconnector->dc_sink)
5190                 return 0;
5191
5192         mst_port = aconnector->port;
5193         mst_mgr = &aconnector->mst_port->mst_mgr;
5194
5195         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5196                 return 0;
5197
5198         if (!state->duplicated) {
5199                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5200                                 aconnector->force_yuv420_output;
5201                 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5202                                                                     is_y420);
5203                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5204                 clock = adjusted_mode->clock;
5205                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5206         }
5207         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5208                                                                            mst_mgr,
5209                                                                            mst_port,
5210                                                                            dm_new_connector_state->pbn,
5211                                                                            0);
5212         if (dm_new_connector_state->vcpi_slots < 0) {
5213                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5214                 return dm_new_connector_state->vcpi_slots;
5215         }
5216         return 0;
5217 }
5218
5219 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5220         .disable = dm_encoder_helper_disable,
5221         .atomic_check = dm_encoder_helper_atomic_check
5222 };
5223
5224 #if defined(CONFIG_DRM_AMD_DC_DCN)
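/*
 * Reconcile MST VCPI allocations with DSC decisions: for streams with DSC
 * enabled, the PBN is recomputed from the DSC target bits_per_pixel (in
 * 1/16-bpp units, which drm_dp_calc_pbn_mode() expects when its dsc flag is
 * set) and the pixel clock in kHz; other streams keep their existing PBN
 * with DSC disabled on the port.
 */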
5225 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5226                                             struct dc_state *dc_state)
5227 {
5228         struct dc_stream_state *stream = NULL;
5229         struct drm_connector *connector;
5230         struct drm_connector_state *new_con_state, *old_con_state;
5231         struct amdgpu_dm_connector *aconnector;
5232         struct dm_connector_state *dm_conn_state;
5233         int i, j, clock, bpp;
5234         int vcpi, pbn_div, pbn = 0;
5235
5236         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5237
5238                 aconnector = to_amdgpu_dm_connector(connector);
5239
5240                 if (!aconnector->port)
5241                         continue;
5242
5243                 if (!new_con_state || !new_con_state->crtc)
5244                         continue;
5245
5246                 dm_conn_state = to_dm_connector_state(new_con_state);
5247
5248                 for (j = 0; j < dc_state->stream_count; j++) {
5249                         stream = dc_state->streams[j];
5250                         if (!stream)
5251                                 continue;
5252
5253                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5254                                 break;
5255
5256                         stream = NULL;
5257                 }
5258
5259                 if (!stream)
5260                         continue;
5261
5262                 if (stream->timing.flags.DSC != 1) {
5263                         drm_dp_mst_atomic_enable_dsc(state,
5264                                                      aconnector->port,
5265                                                      dm_conn_state->pbn,
5266                                                      0,
5267                                                      false);
5268                         continue;
5269                 }
5270
5271                 pbn_div = dm_mst_get_pbn_divider(stream->link);
5272                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5273                 clock = stream->timing.pix_clk_100hz / 10;
5274                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5275                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5276                                                     aconnector->port,
5277                                                     pbn, pbn_div,
5278                                                     true);
5279                 if (vcpi < 0)
5280                         return vcpi;
5281
5282                 dm_conn_state->pbn = pbn;
5283                 dm_conn_state->vcpi_slots = vcpi;
5284         }
5285         return 0;
5286 }
5287 #endif
5288
5289 static void dm_drm_plane_reset(struct drm_plane *plane)
5290 {
5291         struct dm_plane_state *amdgpu_state = NULL;
5292
5293         if (plane->state)
5294                 plane->funcs->atomic_destroy_state(plane, plane->state);
5295
5296         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5297         WARN_ON(amdgpu_state == NULL);
5298
5299         if (amdgpu_state)
5300                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5301 }
5302
5303 static struct drm_plane_state *
5304 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5305 {
5306         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5307
5308         old_dm_plane_state = to_dm_plane_state(plane->state);
5309         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5310         if (!dm_plane_state)
5311                 return NULL;
5312
5313         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5314
5315         if (old_dm_plane_state->dc_state) {
5316                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5317                 dc_plane_state_retain(dm_plane_state->dc_state);
5318         }
5319
5320         return &dm_plane_state->base;
5321 }
5322
5323 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5324                                 struct drm_plane_state *state)
5325 {
5326         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5327
5328         if (dm_plane_state->dc_state)
5329                 dc_plane_state_release(dm_plane_state->dc_state);
5330
5331         drm_atomic_helper_plane_destroy_state(plane, state);
5332 }
5333
5334 static const struct drm_plane_funcs dm_plane_funcs = {
5335         .update_plane   = drm_atomic_helper_update_plane,
5336         .disable_plane  = drm_atomic_helper_disable_plane,
5337         .destroy        = drm_primary_helper_destroy,
5338         .reset = dm_drm_plane_reset,
5339         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5340         .atomic_destroy_state = dm_drm_plane_destroy_state,
5341 };
5342
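/*
 * Prepare a framebuffer for scanout: reserve the backing BO, pin it (VRAM
 * for cursors, any display-supported domain otherwise), bind it in GART,
 * record the GPU address and tiling flags, and refresh the DC plane buffer
 * attributes if this commit attaches a different dc_plane_state. DCC is
 * force-disabled on Raven while suspending.
 */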
5343 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5344                                       struct drm_plane_state *new_state)
5345 {
5346         struct amdgpu_framebuffer *afb;
5347         struct drm_gem_object *obj;
5348         struct amdgpu_device *adev;
5349         struct amdgpu_bo *rbo;
5350         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5351         struct list_head list;
5352         struct ttm_validate_buffer tv;
5353         struct ww_acquire_ctx ticket;
5354         uint64_t tiling_flags;
5355         uint32_t domain;
5356         int r;
5357         bool force_disable_dcc = false;
5358
5359         dm_plane_state_old = to_dm_plane_state(plane->state);
5360         dm_plane_state_new = to_dm_plane_state(new_state);
5361
5362         if (!new_state->fb) {
5363                 DRM_DEBUG_DRIVER("No FB bound\n");
5364                 return 0;
5365         }
5366
5367         afb = to_amdgpu_framebuffer(new_state->fb);
5368         obj = new_state->fb->obj[0];
5369         rbo = gem_to_amdgpu_bo(obj);
5370         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5371         INIT_LIST_HEAD(&list);
5372
5373         tv.bo = &rbo->tbo;
5374         tv.num_shared = 1;
5375         list_add(&tv.head, &list);
5376
5377         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5378         if (r) {
5379                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5380                 return r;
5381         }
5382
5383         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5384                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5385         else
5386                 domain = AMDGPU_GEM_DOMAIN_VRAM;
5387
5388         r = amdgpu_bo_pin(rbo, domain);
5389         if (unlikely(r != 0)) {
5390                 if (r != -ERESTARTSYS)
5391                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5392                 ttm_eu_backoff_reservation(&ticket, &list);
5393                 return r;
5394         }
5395
5396         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5397         if (unlikely(r != 0)) {
5398                 amdgpu_bo_unpin(rbo);
5399                 ttm_eu_backoff_reservation(&ticket, &list);
5400                 DRM_ERROR("%p bind failed\n", rbo);
5401                 return r;
5402         }
5403
5404         amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5405
5406         ttm_eu_backoff_reservation(&ticket, &list);
5407
5408         afb->address = amdgpu_bo_gpu_offset(rbo);
5409
5410         amdgpu_bo_ref(rbo);
5411
5412         if (dm_plane_state_new->dc_state &&
5413                         dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5414                 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5415
5416                 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5417                 fill_plane_buffer_attributes(
5418                         adev, afb, plane_state->format, plane_state->rotation,
5419                         tiling_flags, &plane_state->tiling_info,
5420                         &plane_state->plane_size, &plane_state->dcc,
5421                         &plane_state->address,
5422                         force_disable_dcc);
5423         }
5424
5425         return 0;
5426 }
5427
5428 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5429                                        struct drm_plane_state *old_state)
5430 {
5431         struct amdgpu_bo *rbo;
5432         int r;
5433
5434         if (!old_state->fb)
5435                 return;
5436
5437         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5438         r = amdgpu_bo_reserve(rbo, false);
5439         if (unlikely(r)) {
5440                 DRM_ERROR("failed to reserve rbo before unpin\n");
5441                 return;
5442         }
5443
5444         amdgpu_bo_unpin(rbo);
5445         amdgpu_bo_unreserve(rbo);
5446         amdgpu_bo_unref(&rbo);
5447 }
5448
5449 static int dm_plane_atomic_check(struct drm_plane *plane,
5450                                  struct drm_plane_state *state)
5451 {
5452         struct amdgpu_device *adev = plane->dev->dev_private;
5453         struct dc *dc = adev->dm.dc;
5454         struct dm_plane_state *dm_plane_state;
5455         struct dc_scaling_info scaling_info;
5456         int ret;
5457
5458         dm_plane_state = to_dm_plane_state(state);
5459
5460         if (!dm_plane_state->dc_state)
5461                 return 0;
5462
5463         ret = fill_dc_scaling_info(state, &scaling_info);
5464         if (ret)
5465                 return ret;
5466
5467         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5468                 return 0;
5469
5470         return -EINVAL;
5471 }
5472
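/*
 * Async (non-blocking) plane updates are restricted to the cursor plane;
 * the update path swaps in the new framebuffer, copies the source and CRTC
 * rectangles into the current state, and programs the cursor directly via
 * handle_cursor_update().
 */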
5473 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5474                                        struct drm_plane_state *new_plane_state)
5475 {
5476         /* Only support async updates on cursor planes. */
5477         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5478                 return -EINVAL;
5479
5480         return 0;
5481 }
5482
5483 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5484                                          struct drm_plane_state *new_state)
5485 {
5486         struct drm_plane_state *old_state =
5487                 drm_atomic_get_old_plane_state(new_state->state, plane);
5488
5489         swap(plane->state->fb, new_state->fb);
5490
5491         plane->state->src_x = new_state->src_x;
5492         plane->state->src_y = new_state->src_y;
5493         plane->state->src_w = new_state->src_w;
5494         plane->state->src_h = new_state->src_h;
5495         plane->state->crtc_x = new_state->crtc_x;
5496         plane->state->crtc_y = new_state->crtc_y;
5497         plane->state->crtc_w = new_state->crtc_w;
5498         plane->state->crtc_h = new_state->crtc_h;
5499
5500         handle_cursor_update(plane, old_state);
5501 }
5502
5503 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5504         .prepare_fb = dm_plane_helper_prepare_fb,
5505         .cleanup_fb = dm_plane_helper_cleanup_fb,
5506         .atomic_check = dm_plane_atomic_check,
5507         .atomic_async_check = dm_plane_atomic_async_check,
5508         .atomic_async_update = dm_plane_atomic_async_update
5509 };
5510
5511 /*
5512  * TODO: these are currently initialized to rgb formats only.
5513  * For future use cases we should either initialize them dynamically based on
5514  * plane capabilities, or initialize this array to all formats, so internal drm
5515  * check will succeed, and let DC implement proper check
5516  */
5517 static const uint32_t rgb_formats[] = {
5518         DRM_FORMAT_XRGB8888,
5519         DRM_FORMAT_ARGB8888,
5520         DRM_FORMAT_RGBA8888,
5521         DRM_FORMAT_XRGB2101010,
5522         DRM_FORMAT_XBGR2101010,
5523         DRM_FORMAT_ARGB2101010,
5524         DRM_FORMAT_ABGR2101010,
5525         DRM_FORMAT_XBGR8888,
5526         DRM_FORMAT_ABGR8888,
5527         DRM_FORMAT_RGB565,
5528 };
5529
5530 static const uint32_t overlay_formats[] = {
5531         DRM_FORMAT_XRGB8888,
5532         DRM_FORMAT_ARGB8888,
5533         DRM_FORMAT_RGBA8888,
5534         DRM_FORMAT_XBGR8888,
5535         DRM_FORMAT_ABGR8888,
5536         DRM_FORMAT_RGB565
5537 };
5538
5539 static const u32 cursor_formats[] = {
5540         DRM_FORMAT_ARGB8888
5541 };
5542
5543 static int get_plane_formats(const struct drm_plane *plane,
5544                              const struct dc_plane_cap *plane_cap,
5545                              uint32_t *formats, int max_formats)
5546 {
5547         int i, num_formats = 0;
5548
5549         /*
5550          * TODO: Query support for each group of formats directly from
5551          * DC plane caps. This will require adding more formats to the
5552          * caps list.
5553          */
5554
5555         switch (plane->type) {
5556         case DRM_PLANE_TYPE_PRIMARY:
5557                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5558                         if (num_formats >= max_formats)
5559                                 break;
5560
5561                         formats[num_formats++] = rgb_formats[i];
5562                 }
5563
5564                 if (plane_cap && plane_cap->pixel_format_support.nv12)
5565                         formats[num_formats++] = DRM_FORMAT_NV12;
5566                 if (plane_cap && plane_cap->pixel_format_support.p010)
5567                         formats[num_formats++] = DRM_FORMAT_P010;
5568                 break;
5569
5570         case DRM_PLANE_TYPE_OVERLAY:
5571                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5572                         if (num_formats >= max_formats)
5573                                 break;
5574
5575                         formats[num_formats++] = overlay_formats[i];
5576                 }
5577                 break;
5578
5579         case DRM_PLANE_TYPE_CURSOR:
5580                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5581                         if (num_formats >= max_formats)
5582                                 break;
5583
5584                         formats[num_formats++] = cursor_formats[i];
5585                 }
5586                 break;
5587         }
5588
5589         return num_formats;
5590 }
5591
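     /*
      * Register a DRM plane: build the format list from the DC plane caps,
      * then attach the blending and color-space properties those caps allow.
      */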
5592 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5593                                 struct drm_plane *plane,
5594                                 unsigned long possible_crtcs,
5595                                 const struct dc_plane_cap *plane_cap)
5596 {
5597         uint32_t formats[32];
5598         int num_formats;
5599         int res = -EPERM;
5600
5601         num_formats = get_plane_formats(plane, plane_cap, formats,
5602                                         ARRAY_SIZE(formats));
5603
5604         res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5605                                        &dm_plane_funcs, formats, num_formats,
5606                                        NULL, plane->type, NULL);
5607         if (res)
5608                 return res;
5609
5610         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5611             plane_cap && plane_cap->per_pixel_alpha) {
5612                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5613                                           BIT(DRM_MODE_BLEND_PREMULTI);
5614
5615                 drm_plane_create_alpha_property(plane);
5616                 drm_plane_create_blend_mode_property(plane, blend_caps);
5617         }
5618
5619         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5620             plane_cap &&
5621             (plane_cap->pixel_format_support.nv12 ||
5622              plane_cap->pixel_format_support.p010)) {
5623                 /* This only affects YUV formats. */
5624                 drm_plane_create_color_properties(
5625                         plane,
5626                         BIT(DRM_COLOR_YCBCR_BT601) |
5627                         BIT(DRM_COLOR_YCBCR_BT709) |
5628                         BIT(DRM_COLOR_YCBCR_BT2020),
5629                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5630                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5631                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5632         }
5633
5634         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5635
5636         /* Create (reset) the plane state */
5637         if (plane->funcs->reset)
5638                 plane->funcs->reset(plane);
5639
5640         return 0;
5641 }
5642
5643 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5644                                struct drm_plane *plane,
5645                                uint32_t crtc_index)
5646 {
5647         struct amdgpu_crtc *acrtc = NULL;
5648         struct drm_plane *cursor_plane;
5649
5650         int res = -ENOMEM;
5651
5652         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5653         if (!cursor_plane)
5654                 goto fail;
5655
5656         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5657         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;

5659         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5660         if (!acrtc)
5661                 goto fail;
5662
5663         res = drm_crtc_init_with_planes(
5664                         dm->ddev,
5665                         &acrtc->base,
5666                         plane,
5667                         cursor_plane,
5668                         &amdgpu_dm_crtc_funcs, NULL);
5669
5670         if (res)
5671                 goto fail;
5672
5673         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5674
5675         /* Create (reset) the crtc state */
5676         if (acrtc->base.funcs->reset)
5677                 acrtc->base.funcs->reset(&acrtc->base);
5678
5679         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5680         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5681
5682         acrtc->crtc_id = crtc_index;
5683         acrtc->base.enabled = false;
5684         acrtc->otg_inst = -1;
5685
5686         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5687         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5688                                    true, MAX_COLOR_LUT_ENTRIES);
5689         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5690
5691         return 0;
5692
5693 fail:
5694         kfree(acrtc);
5695         kfree(cursor_plane);
5696         return res;
5697 }
5698
5699
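     /* Map a DC signal type onto the DRM connector type exposed to userspace. */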
5700 static int to_drm_connector_type(enum signal_type st)
5701 {
5702         switch (st) {
5703         case SIGNAL_TYPE_HDMI_TYPE_A:
5704                 return DRM_MODE_CONNECTOR_HDMIA;
5705         case SIGNAL_TYPE_EDP:
5706                 return DRM_MODE_CONNECTOR_eDP;
5707         case SIGNAL_TYPE_LVDS:
5708                 return DRM_MODE_CONNECTOR_LVDS;
5709         case SIGNAL_TYPE_RGB:
5710                 return DRM_MODE_CONNECTOR_VGA;
5711         case SIGNAL_TYPE_DISPLAY_PORT:
5712         case SIGNAL_TYPE_DISPLAY_PORT_MST:
5713                 return DRM_MODE_CONNECTOR_DisplayPort;
5714         case SIGNAL_TYPE_DVI_DUAL_LINK:
5715         case SIGNAL_TYPE_DVI_SINGLE_LINK:
5716                 return DRM_MODE_CONNECTOR_DVID;
5717         case SIGNAL_TYPE_VIRTUAL:
5718                 return DRM_MODE_CONNECTOR_VIRTUAL;
5719
5720         default:
5721                 return DRM_MODE_CONNECTOR_Unknown;
5722         }
5723 }
5724
5725 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5726 {
5727         struct drm_encoder *encoder;
5728
5729         /* There is only one encoder per connector */
5730         drm_connector_for_each_possible_encoder(connector, encoder)
5731                 return encoder;
5732
5733         return NULL;
5734 }
5735
5736 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5737 {
5738         struct drm_encoder *encoder;
5739         struct amdgpu_encoder *amdgpu_encoder;
5740
5741         encoder = amdgpu_dm_connector_to_encoder(connector);
5742
5743         if (encoder == NULL)
5744                 return;
5745
5746         amdgpu_encoder = to_amdgpu_encoder(encoder);
5747
5748         amdgpu_encoder->native_mode.clock = 0;
5749
5750         if (!list_empty(&connector->probed_modes)) {
5751                 struct drm_display_mode *preferred_mode = NULL;
5752
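                     /*
                      * Only the first entry is examined: the probed list is
                      * sorted by the caller (drm_mode_sort() in
                      * amdgpu_dm_connector_ddc_get_modes()), so the highest
                      * preferred mode should come first.
                      */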
5753                 list_for_each_entry(preferred_mode,
5754                                     &connector->probed_modes,
5755                                     head) {
5756                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5757                                 amdgpu_encoder->native_mode = *preferred_mode;
5758
5759                         break;
5760                 }
5761
5762         }
5763 }
5764
5765 static struct drm_display_mode *
5766 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5767                              char *name,
5768                              int hdisplay, int vdisplay)
5769 {
5770         struct drm_device *dev = encoder->dev;
5771         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5772         struct drm_display_mode *mode = NULL;
5773         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5774
5775         mode = drm_mode_duplicate(dev, native_mode);
5776
5777         if (mode == NULL)
5778                 return NULL;
5779
5780         mode->hdisplay = hdisplay;
5781         mode->vdisplay = vdisplay;
5782         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5783         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5784
5785         return mode;
5787 }
5788
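     /*
      * Add a set of common fixed modes no larger than the native mode, giving
      * userspace additional resolutions to choose from on top of the EDID.
      */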
5789 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5790                                                  struct drm_connector *connector)
5791 {
5792         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5793         struct drm_display_mode *mode = NULL;
5794         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5795         struct amdgpu_dm_connector *amdgpu_dm_connector =
5796                                 to_amdgpu_dm_connector(connector);
5797         int i;
5798         int n;
5799         struct mode_size {
5800                 char name[DRM_DISPLAY_MODE_LEN];
5801                 int w;
5802                 int h;
5803         } common_modes[] = {
5804                 {  "640x480",  640,  480},
5805                 {  "800x600",  800,  600},
5806                 { "1024x768", 1024,  768},
5807                 { "1280x720", 1280,  720},
5808                 { "1280x800", 1280,  800},
5809                 {"1280x1024", 1280, 1024},
5810                 { "1440x900", 1440,  900},
5811                 {"1680x1050", 1680, 1050},
5812                 {"1600x1200", 1600, 1200},
5813                 {"1920x1080", 1920, 1080},
5814                 {"1920x1200", 1920, 1200}
5815         };
5816
5817         n = ARRAY_SIZE(common_modes);
5818
5819         for (i = 0; i < n; i++) {
5820                 struct drm_display_mode *curmode = NULL;
5821                 bool mode_existed = false;
5822
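                     /*
                      * Skip modes larger than the native mode, and skip the
                      * native resolution itself since it is already in the
                      * probed list.
                      */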
5823                 if (common_modes[i].w > native_mode->hdisplay ||
5824                     common_modes[i].h > native_mode->vdisplay ||
5825                    (common_modes[i].w == native_mode->hdisplay &&
5826                     common_modes[i].h == native_mode->vdisplay))
5827                         continue;
5828
5829                 list_for_each_entry(curmode, &connector->probed_modes, head) {
5830                         if (common_modes[i].w == curmode->hdisplay &&
5831                             common_modes[i].h == curmode->vdisplay) {
5832                                 mode_existed = true;
5833                                 break;
5834                         }
5835                 }
5836
5837                 if (mode_existed)
5838                         continue;
5839
5840                 mode = amdgpu_dm_create_common_mode(encoder,
5841                                 common_modes[i].name, common_modes[i].w,
5842                                 common_modes[i].h);
                     if (!mode)
                             continue;
5843                 drm_mode_probed_add(connector, mode);
5844                 amdgpu_dm_connector->num_modes++;
5845         }
5846 }
5847
5848 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5849                                               struct edid *edid)
5850 {
5851         struct amdgpu_dm_connector *amdgpu_dm_connector =
5852                         to_amdgpu_dm_connector(connector);
5853
5854         if (edid) {
5855                 /* empty probed_modes */
5856                 INIT_LIST_HEAD(&connector->probed_modes);
5857                 amdgpu_dm_connector->num_modes =
5858                                 drm_add_edid_modes(connector, edid);
5859
5860                 /* Sort the probed modes before calling
5861                  * amdgpu_dm_get_native_mode(), since an EDID can
5862                  * contain more than one preferred mode. Modes later
5863                  * in the probed list may have a higher preferred
5864                  * resolution: for example, 3840x2160 in the base
5865                  * EDID preferred timing and 4096x2160 in a later
5866                  * DisplayID extension block.
5867                  */
5868                 drm_mode_sort(&connector->probed_modes);
5869                 amdgpu_dm_get_native_mode(connector);
5870         } else {
5871                 amdgpu_dm_connector->num_modes = 0;
5872         }
5873 }
5874
5875 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5876 {
5877         struct amdgpu_dm_connector *amdgpu_dm_connector =
5878                         to_amdgpu_dm_connector(connector);
5879         struct drm_encoder *encoder;
5880         struct edid *edid = amdgpu_dm_connector->edid;
5881
5882         encoder = amdgpu_dm_connector_to_encoder(connector);
5883
5884         if (!edid || !drm_edid_is_valid(edid)) {
5885                 amdgpu_dm_connector->num_modes =
5886                                 drm_add_modes_noedid(connector, 640, 480);
5887         } else {
5888                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5889                 amdgpu_dm_connector_add_common_modes(encoder, connector);
5890         }
5891         amdgpu_dm_fbc_init(connector);
5892
5893         return amdgpu_dm_connector->num_modes;
5894 }
5895
5896 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5897                                      struct amdgpu_dm_connector *aconnector,
5898                                      int connector_type,
5899                                      struct dc_link *link,
5900                                      int link_index)
5901 {
5902         struct amdgpu_device *adev = dm->ddev->dev_private;
5903
5904         /*
5905          * Some of the properties below require access to state, like bpc.
5906          * Allocate some default initial connector state with our reset helper.
5907          */
5908         if (aconnector->base.funcs->reset)
5909                 aconnector->base.funcs->reset(&aconnector->base);
5910
5911         aconnector->connector_id = link_index;
5912         aconnector->dc_link = link;
5913         aconnector->base.interlace_allowed = false;
5914         aconnector->base.doublescan_allowed = false;
5915         aconnector->base.stereo_allowed = false;
5916         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5917         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5918         aconnector->audio_inst = -1;
5919         mutex_init(&aconnector->hpd_lock);
5920
5921         /*
5922          * Configure HPD hot-plug support. connector->polled defaults to 0,
5923          * which means HPD hot plug is not supported.
5924          */
5925         switch (connector_type) {
5926         case DRM_MODE_CONNECTOR_HDMIA:
5927                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5928                 aconnector->base.ycbcr_420_allowed =
5929                         link->link_enc->features.hdmi_ycbcr420_supported;
5930                 break;
5931         case DRM_MODE_CONNECTOR_DisplayPort:
5932                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5933                 aconnector->base.ycbcr_420_allowed =
5934                         link->link_enc->features.dp_ycbcr420_supported;
5935                 break;
5936         case DRM_MODE_CONNECTOR_DVID:
5937                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5938                 break;
5939         default:
5940                 break;
5941         }
5942
5943         drm_object_attach_property(&aconnector->base.base,
5944                                 dm->ddev->mode_config.scaling_mode_property,
5945                                 DRM_MODE_SCALE_NONE);
5946
5947         drm_object_attach_property(&aconnector->base.base,
5948                                 adev->mode_info.underscan_property,
5949                                 UNDERSCAN_OFF);
5950         drm_object_attach_property(&aconnector->base.base,
5951                                 adev->mode_info.underscan_hborder_property,
5952                                 0);
5953         drm_object_attach_property(&aconnector->base.base,
5954                                 adev->mode_info.underscan_vborder_property,
5955                                 0);
5956
5957         if (!aconnector->mst_port)
5958                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5959
5960         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5961         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5962         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5963
5964         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5965             dc_is_dmcu_initialized(adev->dm.dc)) {
5966                 drm_object_attach_property(&aconnector->base.base,
5967                                 adev->mode_info.abm_level_property, 0);
5968         }
5969
5970         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5971             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5972             connector_type == DRM_MODE_CONNECTOR_eDP) {
5973                 drm_object_attach_property(
5974                         &aconnector->base.base,
5975                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
5976
5977                 if (!aconnector->mst_port)
5978                         drm_connector_attach_vrr_capable_property(&aconnector->base);
5979
5980 #ifdef CONFIG_DRM_AMD_DC_HDCP
5981                 if (adev->dm.hdcp_workqueue)
5982                         drm_connector_attach_content_protection_property(&aconnector->base, true);
5983 #endif
5984         }
5985 }
5986
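     /*
      * I2C master transfer over a DC DDC channel: repackage the i2c_msg array
      * as DC i2c_payloads and submit the whole command via dc_submit_i2c().
      */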
5987 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5988                               struct i2c_msg *msgs, int num)
5989 {
5990         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5991         struct ddc_service *ddc_service = i2c->ddc_service;
5992         struct i2c_command cmd;
5993         int i;
5994         int result = -EIO;
5995
5996         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5997
5998         if (!cmd.payloads)
5999                 return result;
6000
6001         cmd.number_of_payloads = num;
6002         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6003         cmd.speed = 100;
6004
6005         for (i = 0; i < num; i++) {
6006                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6007                 cmd.payloads[i].address = msgs[i].addr;
6008                 cmd.payloads[i].length = msgs[i].len;
6009                 cmd.payloads[i].data = msgs[i].buf;
6010         }
6011
6012         if (dc_submit_i2c(
6013                         ddc_service->ctx->dc,
6014                         ddc_service->ddc_pin->hw_info.ddc_channel,
6015                         &cmd))
6016                 result = num;
6017
6018         kfree(cmd.payloads);
6019         return result;
6020 }
6021
6022 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6023 {
6024         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6025 }
6026
6027 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6028         .master_xfer = amdgpu_dm_i2c_xfer,
6029         .functionality = amdgpu_dm_i2c_func,
6030 };
6031
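     /*
      * Allocate and populate an i2c adapter bound to a DC DDC service; the
      * caller still has to register it with i2c_add_adapter(). Note that the
      * res out-parameter is currently never written.
      */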
6032 static struct amdgpu_i2c_adapter *
6033 create_i2c(struct ddc_service *ddc_service,
6034            int link_index,
6035            int *res)
6036 {
6037         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6038         struct amdgpu_i2c_adapter *i2c;
6039
6040         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6041         if (!i2c)
6042                 return NULL;
6043         i2c->base.owner = THIS_MODULE;
6044         i2c->base.class = I2C_CLASS_DDC;
6045         i2c->base.dev.parent = &adev->pdev->dev;
6046         i2c->base.algo = &amdgpu_dm_i2c_algo;
6047         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6048         i2c_set_adapdata(&i2c->base, i2c);
6049         i2c->ddc_service = ddc_service;
6050         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6051
6052         return i2c;
6053 }
6054
6055
6056 /*
6057  * Note: this function assumes that dc_link_detect() was called for the
6058  * dc_link which will be represented by this aconnector.
6059  */
6060 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6061                                     struct amdgpu_dm_connector *aconnector,
6062                                     uint32_t link_index,
6063                                     struct amdgpu_encoder *aencoder)
6064 {
6065         int res = 0;
6066         int connector_type;
6067         struct dc *dc = dm->dc;
6068         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6069         struct amdgpu_i2c_adapter *i2c;
6070
6071         link->priv = aconnector;
6072
6073         DRM_DEBUG_DRIVER("%s()\n", __func__);
6074
6075         i2c = create_i2c(link->ddc, link->link_index, &res);
6076         if (!i2c) {
6077                 DRM_ERROR("Failed to create i2c adapter data\n");
6078                 return -ENOMEM;
6079         }
6080
6081         aconnector->i2c = i2c;
6082         res = i2c_add_adapter(&i2c->base);
6083
6084         if (res) {
6085                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6086                 goto out_free;
6087         }
6088
6089         connector_type = to_drm_connector_type(link->connector_signal);
6090
6091         res = drm_connector_init_with_ddc(
6092                         dm->ddev,
6093                         &aconnector->base,
6094                         &amdgpu_dm_connector_funcs,
6095                         connector_type,
6096                         &i2c->base);
6097
6098         if (res) {
6099                 DRM_ERROR("connector_init failed\n");
6100                 aconnector->connector_id = -1;
6101                 goto out_free;
6102         }
6103
6104         drm_connector_helper_add(
6105                         &aconnector->base,
6106                         &amdgpu_dm_connector_helper_funcs);
6107
6108         amdgpu_dm_connector_init_helper(
6109                 dm,
6110                 aconnector,
6111                 connector_type,
6112                 link,
6113                 link_index);
6114
6115         drm_connector_attach_encoder(
6116                 &aconnector->base, &aencoder->base);
6117
6118         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6119             connector_type == DRM_MODE_CONNECTOR_eDP)
6120                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6121
6122 out_free:
6123         if (res) {
6124                 kfree(i2c);
6125                 aconnector->i2c = NULL;
6126         }
6127         return res;
6128 }
6129
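     /*
      * Encoders can be routed to any CRTC, so the possible_crtcs mask is
      * simply one bit per CRTC, i.e. (1 << num_crtc) - 1, capped at six.
      */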
6130 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6131 {
6132         switch (adev->mode_info.num_crtc) {
6133         case 1:
6134                 return 0x1;
6135         case 2:
6136                 return 0x3;
6137         case 3:
6138                 return 0x7;
6139         case 4:
6140                 return 0xf;
6141         case 5:
6142                 return 0x1f;
6143         case 6:
6144         default:
6145                 return 0x3f;
6146         }
6147 }
6148
6149 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6150                                   struct amdgpu_encoder *aencoder,
6151                                   uint32_t link_index)
6152 {
6153         struct amdgpu_device *adev = dev->dev_private;
6154
6155         int res = drm_encoder_init(dev,
6156                                    &aencoder->base,
6157                                    &amdgpu_dm_encoder_funcs,
6158                                    DRM_MODE_ENCODER_TMDS,
6159                                    NULL);
6160
6161         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6162
6163         if (!res)
6164                 aencoder->encoder_id = link_index;
6165         else
6166                 aencoder->encoder_id = -1;
6167
6168         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6169
6170         return res;
6171 }
6172
6173 static void manage_dm_interrupts(struct amdgpu_device *adev,
6174                                  struct amdgpu_crtc *acrtc,
6175                                  bool enable)
6176 {
6177         /*
6178          * This is not a correct translation, but it works as long as the
6179          * VBLANK constant is the same as the PFLIP one.
6180          */
6181         int irq_type =
6182                 amdgpu_display_crtc_idx_to_irq_type(
6183                         adev,
6184                         acrtc->crtc_id);
6185
6186         if (enable) {
6187                 drm_crtc_vblank_on(&acrtc->base);
6188                 amdgpu_irq_get(
6189                         adev,
6190                         &adev->pageflip_irq,
6191                         irq_type);
6192         } else {
6194                 amdgpu_irq_put(
6195                         adev,
6196                         &adev->pageflip_irq,
6197                         irq_type);
6198                 drm_crtc_vblank_off(&acrtc->base);
6199         }
6200 }
6201
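     /*
      * Return true if the scaling mode or the underscan borders changed in a
      * way that requires the stream scaling to be reprogrammed.
      */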
6202 static bool
6203 is_scaling_state_different(const struct dm_connector_state *dm_state,
6204                            const struct dm_connector_state *old_dm_state)
6205 {
6206         if (dm_state->scaling != old_dm_state->scaling)
6207                 return true;
6208         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6209                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6210                         return true;
6211         } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6212                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6213                         return true;
6214         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6215                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6216                 return true;
6217         return false;
6218 }
6219
6220 #ifdef CONFIG_DRM_AMD_DC_HDCP
6221 static bool is_content_protection_different(struct drm_connector_state *state,
6222                                             const struct drm_connector_state *old_state,
6223                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6224 {
6225         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6226
6227         if (old_state->hdcp_content_type != state->hdcp_content_type &&
6228             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6229                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6230                 return true;
6231         }
6232
6233         /* CP is being re-enabled, ignore this */
6234         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6235             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6236                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6237                 return false;
6238         }
6239
6240         /* S3 resume case: the old state will always be 0 (UNDESIRED) while the restored state will be ENABLED */
6241         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6242             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6243                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6244
6245         /* Check if something is connected and enabled; otherwise we would
6246          * start HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6247          */
6248         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6249             aconnector->dc_sink != NULL)
6250                 return true;
6251
6252         if (old_state->content_protection == state->content_protection)
6253                 return false;
6254
6255         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6256                 return true;
6257
6258         return false;
6259 }
6260
6261 #endif
6262 static void remove_stream(struct amdgpu_device *adev,
6263                           struct amdgpu_crtc *acrtc,
6264                           struct dc_stream_state *stream)
6265 {
6266         /* This is the update-mode case: the CRTC no longer drives a stream. */
6267
6268         acrtc->otg_inst = -1;
6269         acrtc->enabled = false;
6270 }
6271
6272 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6273                                struct dc_cursor_position *position)
6274 {
6275         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6276         int x, y;
6277         int xorigin = 0, yorigin = 0;
6278
6279         position->enable = false;
6280         position->x = 0;
6281         position->y = 0;
6282
6283         if (!crtc || !plane->state->fb)
6284                 return 0;
6285
6286         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6287             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6288                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6289                           __func__,
6290                           plane->state->crtc_w,
6291                           plane->state->crtc_h);
6292                 return -EINVAL;
6293         }
6294
6295         x = plane->state->crtc_x;
6296         y = plane->state->crtc_y;
6297
6298         if (x <= -amdgpu_crtc->max_cursor_width ||
6299             y <= -amdgpu_crtc->max_cursor_height)
6300                 return 0;
6301
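             /*
              * DC cannot take a negative cursor position: when the cursor
              * hangs off the top/left edge, clamp the position to zero and
              * shift the hotspot by the clamped amount instead, so the
              * visible part of the cursor does not move.
              */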
6302         if (x < 0) {
6303                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6304                 x = 0;
6305         }
6306         if (y < 0) {
6307                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6308                 y = 0;
6309         }
6310         position->enable = true;
6311         position->translate_by_source = true;
6312         position->x = x;
6313         position->y = y;
6314         position->x_hotspot = xorigin;
6315         position->y_hotspot = yorigin;
6316
6317         return 0;
6318 }
6319
6320 static void handle_cursor_update(struct drm_plane *plane,
6321                                  struct drm_plane_state *old_plane_state)
6322 {
6323         struct amdgpu_device *adev = plane->dev->dev_private;
6324         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6325         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6326         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6327         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6328         uint64_t address = afb ? afb->address : 0;
6329         struct dc_cursor_position position;
6330         struct dc_cursor_attributes attributes;
6331         int ret;
6332
6333         if (!plane->state->fb && !old_plane_state->fb)
6334                 return;
6335
6336         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6337                          __func__,
6338                          amdgpu_crtc->crtc_id,
6339                          plane->state->crtc_w,
6340                          plane->state->crtc_h);
6341
6342         ret = get_cursor_position(plane, crtc, &position);
6343         if (ret)
6344                 return;
6345
6346         if (!position.enable) {
6347                 /* turn off cursor */
6348                 if (crtc_state && crtc_state->stream) {
6349                         mutex_lock(&adev->dm.dc_lock);
6350                         dc_stream_set_cursor_position(crtc_state->stream,
6351                                                       &position);
6352                         mutex_unlock(&adev->dm.dc_lock);
6353                 }
6354                 return;
6355         }
6356
6357         amdgpu_crtc->cursor_width = plane->state->crtc_w;
6358         amdgpu_crtc->cursor_height = plane->state->crtc_h;
6359
6360         memset(&attributes, 0, sizeof(attributes));
6361         attributes.address.high_part = upper_32_bits(address);
6362         attributes.address.low_part  = lower_32_bits(address);
6363         attributes.width             = plane->state->crtc_w;
6364         attributes.height            = plane->state->crtc_h;
6365         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6366         attributes.rotation_angle    = 0;
6367         attributes.attribute_flags.value = 0;
6368
6369         attributes.pitch = attributes.width;
6370
6371         if (crtc_state->stream) {
6372                 mutex_lock(&adev->dm.dc_lock);
6373                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6374                                                          &attributes))
6375                         DRM_ERROR("DC failed to set cursor attributes\n");
6376
6377                 if (!dc_stream_set_cursor_position(crtc_state->stream,
6378                                                    &position))
6379                         DRM_ERROR("DC failed to set cursor position\n");
6380                 mutex_unlock(&adev->dm.dc_lock);
6381         }
6382 }
6383
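     /*
      * Hand the pending DRM event over to the pageflip interrupt handler.
      * Must be called with dev->event_lock held, as asserted below.
      */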
6384 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6385 {
6387         assert_spin_locked(&acrtc->base.dev->event_lock);
6388         WARN_ON(acrtc->event);
6389
6390         acrtc->event = acrtc->base.state->event;
6391
6392         /* Set the flip status */
6393         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6394
6395         /* Mark this event as consumed */
6396         acrtc->base.state->event = NULL;
6397
6398         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6399                                                  acrtc->crtc_id);
6400 }
6401
6402 static void update_freesync_state_on_stream(
6403         struct amdgpu_display_manager *dm,
6404         struct dm_crtc_state *new_crtc_state,
6405         struct dc_stream_state *new_stream,
6406         struct dc_plane_state *surface,
6407         u32 flip_timestamp_in_us)
6408 {
6409         struct mod_vrr_params vrr_params;
6410         struct dc_info_packet vrr_infopacket = {0};
6411         struct amdgpu_device *adev = dm->adev;
6412         unsigned long flags;
6413
6414         if (!new_stream)
6415                 return;
6416
6417         /*
6418          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6419          * For now it's sufficient to just guard against these conditions.
6420          */
6421
6422         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6423                 return;
6424
6425         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6426         vrr_params = new_crtc_state->vrr_params;
6427
6428         if (surface) {
6429                 mod_freesync_handle_preflip(
6430                         dm->freesync_module,
6431                         surface,
6432                         new_stream,
6433                         flip_timestamp_in_us,
6434                         &vrr_params);
6435
6436                 if (adev->family < AMDGPU_FAMILY_AI &&
6437                     amdgpu_dm_vrr_active(new_crtc_state)) {
6438                         mod_freesync_handle_v_update(dm->freesync_module,
6439                                                      new_stream, &vrr_params);
6440
6441                         /* Need to call this before the frame ends. */
6442                         dc_stream_adjust_vmin_vmax(dm->dc,
6443                                                    new_crtc_state->stream,
6444                                                    &vrr_params.adjust);
6445                 }
6446         }
6447
6448         mod_freesync_build_vrr_infopacket(
6449                 dm->freesync_module,
6450                 new_stream,
6451                 &vrr_params,
6452                 PACKET_TYPE_VRR,
6453                 TRANSFER_FUNC_UNKNOWN,
6454                 &vrr_infopacket);
6455
6456         new_crtc_state->freesync_timing_changed |=
6457                 (memcmp(&new_crtc_state->vrr_params.adjust,
6458                         &vrr_params.adjust,
6459                         sizeof(vrr_params.adjust)) != 0);
6460
6461         new_crtc_state->freesync_vrr_info_changed |=
6462                 (memcmp(&new_crtc_state->vrr_infopacket,
6463                         &vrr_infopacket,
6464                         sizeof(vrr_infopacket)) != 0);
6465
6466         new_crtc_state->vrr_params = vrr_params;
6467         new_crtc_state->vrr_infopacket = vrr_infopacket;
6468
6469         new_stream->adjust = new_crtc_state->vrr_params.adjust;
6470         new_stream->vrr_infopacket = vrr_infopacket;
6471
6472         if (new_crtc_state->freesync_vrr_info_changed)
6473                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6474                               new_crtc_state->base.crtc->base.id,
6475                               (int)new_crtc_state->base.vrr_enabled,
6476                               (int)vrr_params.state);
6477
6478         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6479 }
6480
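     /*
      * Recompute the VRR parameters from the freesync config before the
      * planes are committed; update_freesync_state_on_stream() later builds
      * the matching infopacket around the actual flip.
      */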
6481 static void pre_update_freesync_state_on_stream(
6482         struct amdgpu_display_manager *dm,
6483         struct dm_crtc_state *new_crtc_state)
6484 {
6485         struct dc_stream_state *new_stream = new_crtc_state->stream;
6486         struct mod_vrr_params vrr_params;
6487         struct mod_freesync_config config = new_crtc_state->freesync_config;
6488         struct amdgpu_device *adev = dm->adev;
6489         unsigned long flags;
6490
6491         if (!new_stream)
6492                 return;
6493
6494         /*
6495          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6496          * For now it's sufficient to just guard against these conditions.
6497          */
6498         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6499                 return;
6500
6501         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6502         vrr_params = new_crtc_state->vrr_params;
6503
6504         if (new_crtc_state->vrr_supported &&
6505             config.min_refresh_in_uhz &&
6506             config.max_refresh_in_uhz) {
6507                 config.state = new_crtc_state->base.vrr_enabled ?
6508                         VRR_STATE_ACTIVE_VARIABLE :
6509                         VRR_STATE_INACTIVE;
6510         } else {
6511                 config.state = VRR_STATE_UNSUPPORTED;
6512         }
6513
6514         mod_freesync_build_vrr_params(dm->freesync_module,
6515                                       new_stream,
6516                                       &config, &vrr_params);
6517
6518         new_crtc_state->freesync_timing_changed |=
6519                 (memcmp(&new_crtc_state->vrr_params.adjust,
6520                         &vrr_params.adjust,
6521                         sizeof(vrr_params.adjust)) != 0);
6522
6523         new_crtc_state->vrr_params = vrr_params;
6524         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6525 }
6526
6527 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6528                                             struct dm_crtc_state *new_state)
6529 {
6530         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6531         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6532
6533         if (!old_vrr_active && new_vrr_active) {
6534                 /* Transition VRR inactive -> active:
6535                  * While VRR is active, we must not disable vblank irq, as a
6536                  * reenable after disable would compute bogus vblank/pflip
6537                  * timestamps if it likely happened inside display front-porch.
6538                  *
6539                  * We also need vupdate irq for the actual core vblank handling
6540                  * at end of vblank.
6541                  */
6542                 dm_set_vupdate_irq(new_state->base.crtc, true);
6543                 drm_crtc_vblank_get(new_state->base.crtc);
6544                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6545                                  __func__, new_state->base.crtc->base.id);
6546         } else if (old_vrr_active && !new_vrr_active) {
6547                 /* Transition VRR active -> inactive:
6548                  * Allow vblank irq disable again for fixed refresh rate.
6549                  */
6550                 dm_set_vupdate_irq(new_state->base.crtc, false);
6551                 drm_crtc_vblank_put(new_state->base.crtc);
6552                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6553                                  __func__, new_state->base.crtc->base.id);
6554         }
6555 }
6556
6557 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6558 {
6559         struct drm_plane *plane;
6560         struct drm_plane_state *old_plane_state, *new_plane_state;
6561         int i;
6562
6563         /*
6564          * TODO: Make this per-stream so we don't issue redundant updates for
6565          * commits with multiple streams.
6566          */
6567         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6568                                        new_plane_state, i)
6569                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6570                         handle_cursor_update(plane, old_plane_state);
6571 }
6572
6573 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6574                                     struct dc_state *dc_state,
6575                                     struct drm_device *dev,
6576                                     struct amdgpu_display_manager *dm,
6577                                     struct drm_crtc *pcrtc,
6578                                     bool wait_for_vblank)
6579 {
6580         uint32_t i;
6581         uint64_t timestamp_ns;
6582         struct drm_plane *plane;
6583         struct drm_plane_state *old_plane_state, *new_plane_state;
6584         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6585         struct drm_crtc_state *new_pcrtc_state =
6586                         drm_atomic_get_new_crtc_state(state, pcrtc);
6587         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6588         struct dm_crtc_state *dm_old_crtc_state =
6589                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6590         int planes_count = 0, vpos, hpos;
6591         long r;
6592         unsigned long flags;
6593         struct amdgpu_bo *abo;
6594         uint64_t tiling_flags;
6595         uint32_t target_vblank, last_flip_vblank;
6596         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6597         bool pflip_present = false;
6598         struct {
6599                 struct dc_surface_update surface_updates[MAX_SURFACES];
6600                 struct dc_plane_info plane_infos[MAX_SURFACES];
6601                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6602                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6603                 struct dc_stream_update stream_update;
6604         } *bundle;
6605
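             /*
              * The update bundle holds several MAX_SURFACES-sized arrays and
              * is too large for the stack, hence the heap allocation.
              */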
6606         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6607
6608         if (!bundle) {
6609                 dm_error("Failed to allocate update bundle\n");
6610                 goto cleanup;
6611         }
6612
6613         /*
6614          * Disable the cursor first if we're disabling all the planes.
6615          * It'll remain on the screen after the planes are re-enabled
6616          * if we don't.
6617          */
6618         if (acrtc_state->active_planes == 0)
6619                 amdgpu_dm_commit_cursors(state);
6620
6621         /* update planes when needed */
6622         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6623                 struct drm_crtc *crtc = new_plane_state->crtc;
6624                 struct drm_crtc_state *new_crtc_state;
6625                 struct drm_framebuffer *fb = new_plane_state->fb;
6626                 bool plane_needs_flip;
6627                 struct dc_plane_state *dc_plane;
6628                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6629
6630                 /* Cursor plane is handled after stream updates */
6631                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6632                         continue;
6633
6634                 if (!fb || !crtc || pcrtc != crtc)
6635                         continue;
6636
6637                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6638                 if (!new_crtc_state->active)
6639                         continue;
6640
6641                 dc_plane = dm_new_plane_state->dc_state;
6642
6643                 bundle->surface_updates[planes_count].surface = dc_plane;
6644                 if (new_pcrtc_state->color_mgmt_changed) {
6645                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6646                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6647                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6648                 }
6649
6650                 fill_dc_scaling_info(new_plane_state,
6651                                      &bundle->scaling_infos[planes_count]);
6652
6653                 bundle->surface_updates[planes_count].scaling_info =
6654                         &bundle->scaling_infos[planes_count];
6655
6656                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6657
6658                 pflip_present = pflip_present || plane_needs_flip;
6659
6660                 if (!plane_needs_flip) {
6661                         planes_count += 1;
6662                         continue;
6663                 }
6664
6665                 abo = gem_to_amdgpu_bo(fb->obj[0]);
6666
6667                 /*
6668                  * Wait for all fences on this FB. Do limited wait to avoid
6669                  * deadlock during GPU reset when this fence will not signal
6670                  * but we hold reservation lock for the BO.
6671                  */
6672                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6673                                                         false,
6674                                                         msecs_to_jiffies(5000));
6675                 if (unlikely(r <= 0))
6676                         DRM_ERROR("Waiting for fences timed out!\n");
6677
6678                 /*
6679                  * TODO: This might fail and hence is better not used; wait
6680                  * explicitly on the fences instead. In general this should
6681                  * only be called from a blocking commit, as per the
6682                  * framework helpers.
6683                  */
6684                 r = amdgpu_bo_reserve(abo, true);
6685                 if (unlikely(r != 0))
6686                         DRM_ERROR("failed to reserve buffer before flip\n");
6687
6688                 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6689
6690                 amdgpu_bo_unreserve(abo);
6691
6692                 fill_dc_plane_info_and_addr(
6693                         dm->adev, new_plane_state, tiling_flags,
6694                         &bundle->plane_infos[planes_count],
6695                         &bundle->flip_addrs[planes_count].address,
6696                         false);
6697
6698                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6699                                  new_plane_state->plane->index,
6700                                  bundle->plane_infos[planes_count].dcc.enable);
6701
6702                 bundle->surface_updates[planes_count].plane_info =
6703                         &bundle->plane_infos[planes_count];
6704
6705                 /*
6706                  * Only allow immediate flips for fast updates that don't
6707                  * change FB pitch, DCC state, rotation or mirroring.
6708                  */
6709                 bundle->flip_addrs[planes_count].flip_immediate =
6710                         crtc->state->async_flip &&
6711                         acrtc_state->update_type == UPDATE_TYPE_FAST;
6712
6713                 timestamp_ns = ktime_get_ns();
6714                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6715                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6716                 bundle->surface_updates[planes_count].surface = dc_plane;
6717
6718                 if (!bundle->surface_updates[planes_count].surface) {
6719                         DRM_ERROR("No surface for CRTC: id=%d\n",
6720                                         acrtc_attach->crtc_id);
6721                         continue;
6722                 }
6723
6724                 if (plane == pcrtc->primary)
6725                         update_freesync_state_on_stream(
6726                                 dm,
6727                                 acrtc_state,
6728                                 acrtc_state->stream,
6729                                 dc_plane,
6730                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6731
6732                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6733                                  __func__,
6734                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6735                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6736
6737                 planes_count += 1;
6738
6739         }
6740
6741         if (pflip_present) {
6742                 if (!vrr_active) {
6743                         /* Use old throttling in non-vrr fixed refresh rate mode
6744                          * to keep flip scheduling based on target vblank counts
6745                          * working in a backwards compatible way, e.g., for
6746                          * clients using the GLX_OML_sync_control extension or
6747                          * DRI3/Present extension with defined target_msc.
6748                          */
6749                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6750                 } else {
6752                         /* For variable refresh rate mode only:
6753                          * Get vblank of last completed flip to avoid > 1 vrr
6754                          * flips per video frame by use of throttling, but allow
6755                          * flip programming anywhere in the possibly large
6756                          * variable vrr vblank interval for fine-grained flip
6757                          * timing control and more opportunity to avoid stutter
6758                          * on late submission of flips.
6759                          */
6760                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6761                         last_flip_vblank = acrtc_attach->last_flip_vblank;
6762                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6763                 }
6764
6765                 target_vblank = last_flip_vblank + wait_for_vblank;
6766
6767                 /*
6768                  * Wait until we're out of the vertical blank period before the one
6769                  * targeted by the flip
6770                  */
6771                 while ((acrtc_attach->enabled &&
6772                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6773                                                             0, &vpos, &hpos, NULL,
6774                                                             NULL, &pcrtc->hwmode)
6775                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6776                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6777                         (int)(target_vblank -
6778                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6779                         usleep_range(1000, 1100);
6780                 }
6781
6782                 if (acrtc_attach->base.state->event) {
6783                         drm_crtc_vblank_get(pcrtc);
6784
6785                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6786
6787                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6788                         prepare_flip_isr(acrtc_attach);
6789
6790                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6791                 }
6792
6793                 if (acrtc_state->stream) {
6794                         if (acrtc_state->freesync_vrr_info_changed)
6795                                 bundle->stream_update.vrr_infopacket =
6796                                         &acrtc_state->stream->vrr_infopacket;
6797                 }
6798         }
6799
6800         /* Update the planes if changed or disable if we don't have any. */
6801         if ((planes_count || acrtc_state->active_planes == 0) &&
6802                 acrtc_state->stream) {
6803                 bundle->stream_update.stream = acrtc_state->stream;
6804                 if (new_pcrtc_state->mode_changed) {
6805                         bundle->stream_update.src = acrtc_state->stream->src;
6806                         bundle->stream_update.dst = acrtc_state->stream->dst;
6807                 }
6808
6809                 if (new_pcrtc_state->color_mgmt_changed) {
6810                         /*
6811                          * TODO: This isn't fully correct since we've actually
6812                          * already modified the stream in place.
6813                          */
6814                         bundle->stream_update.gamut_remap =
6815                                 &acrtc_state->stream->gamut_remap_matrix;
6816                         bundle->stream_update.output_csc_transform =
6817                                 &acrtc_state->stream->csc_color_matrix;
6818                         bundle->stream_update.out_transfer_func =
6819                                 acrtc_state->stream->out_transfer_func;
6820                 }
6821
6822                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6823                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6824                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
6825
6826                 /*
6827                  * If FreeSync state on the stream has changed then we need to
6828                  * re-adjust the min/max bounds now that DC doesn't handle this
6829                  * as part of commit.
6830                  */
6831                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6832                     amdgpu_dm_vrr_active(acrtc_state)) {
6833                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6834                         dc_stream_adjust_vmin_vmax(
6835                                 dm->dc, acrtc_state->stream,
6836                                 &acrtc_state->vrr_params.adjust);
6837                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6838                 }
6839                 mutex_lock(&dm->dc_lock);
6840                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6841                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
6842                         amdgpu_dm_psr_disable(acrtc_state->stream);
6843
6844                 dc_commit_updates_for_stream(dm->dc,
6845                                                      bundle->surface_updates,
6846                                                      planes_count,
6847                                                      acrtc_state->stream,
6848                                                      &bundle->stream_update,
6849                                                      dc_state);
6850
6851                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6852                                 acrtc_state->stream->link->psr_settings.psr_version != PSR_VERSION_UNSUPPORTED &&
6853                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
6854                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
6855                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6856                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
6857                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
6858                         amdgpu_dm_psr_enable(acrtc_state->stream);
6859                 }
6860
6861                 mutex_unlock(&dm->dc_lock);
6862         }
6863
6864         /*
6865          * Update cursor state *after* programming all the planes.
6866          * This avoids redundant programming in the case where we're going
6867          * to be disabling a single plane - those pipes are being disabled.
6868          */
6869         if (acrtc_state->active_planes)
6870                 amdgpu_dm_commit_cursors(state);
6871
6872 cleanup:
6873         kfree(bundle);
6874 }
6875
6876 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6877                                    struct drm_atomic_state *state)
6878 {
6879         struct amdgpu_device *adev = dev->dev_private;
6880         struct amdgpu_dm_connector *aconnector;
6881         struct drm_connector *connector;
6882         struct drm_connector_state *old_con_state, *new_con_state;
6883         struct drm_crtc_state *new_crtc_state;
6884         struct dm_crtc_state *new_dm_crtc_state;
6885         const struct dc_stream_status *status;
6886         int i, inst;
6887
6888         /* Notify audio device removals. */
6889         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6890                 if (old_con_state->crtc != new_con_state->crtc) {
6891                         /* CRTC changes require notification. */
6892                         goto notify;
6893                 }
6894
6895                 if (!new_con_state->crtc)
6896                         continue;
6897
6898                 new_crtc_state = drm_atomic_get_new_crtc_state(
6899                         state, new_con_state->crtc);
6900
6901                 if (!new_crtc_state)
6902                         continue;
6903
6904                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6905                         continue;
6906
6907         notify:
6908                 aconnector = to_amdgpu_dm_connector(connector);
6909
6910                 mutex_lock(&adev->dm.audio_lock);
6911                 inst = aconnector->audio_inst;
6912                 aconnector->audio_inst = -1;
6913                 mutex_unlock(&adev->dm.audio_lock);
6914
6915                 amdgpu_dm_audio_eld_notify(adev, inst);
6916         }
6917
6918         /* Notify audio device additions. */
6919         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6920                 if (!new_con_state->crtc)
6921                         continue;
6922
6923                 new_crtc_state = drm_atomic_get_new_crtc_state(
6924                         state, new_con_state->crtc);
6925
6926                 if (!new_crtc_state)
6927                         continue;
6928
6929                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6930                         continue;
6931
6932                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6933                 if (!new_dm_crtc_state->stream)
6934                         continue;
6935
6936                 status = dc_stream_get_status(new_dm_crtc_state->stream);
6937                 if (!status)
6938                         continue;
6939
6940                 aconnector = to_amdgpu_dm_connector(connector);
6941
6942                 mutex_lock(&adev->dm.audio_lock);
6943                 inst = status->audio_inst;
6944                 aconnector->audio_inst = inst;
6945                 mutex_unlock(&adev->dm.audio_lock);
6946
6947                 amdgpu_dm_audio_eld_notify(adev, inst);
6948         }
6949 }
6950
6951 /*
6952  * Enable interrupts on CRTCs that are newly active, have undergone
6953  * a modeset, or have active planes again.
6954  *
6955  * Done in two passes, based on the for_modeset flag:
6956  * Pass 1: For CRTCs going through modeset
6957  * Pass 2: For CRTCs going from 0 to n active planes
6958  *
6959  * Interrupts can only be enabled after the planes are programmed,
6960  * so this requires a two-pass approach since we don't want to
6961  * just defer the interrupts until after the planes are committed every time.
6962  */
6963 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6964                                              struct drm_atomic_state *state,
6965                                              bool for_modeset)
6966 {
6967         struct amdgpu_device *adev = dev->dev_private;
6968         struct drm_crtc *crtc;
6969         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6970         int i;
6971 #ifdef CONFIG_DEBUG_FS
6972         enum amdgpu_dm_pipe_crc_source source;
6973 #endif
6974
6975         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6976                                       new_crtc_state, i) {
6977                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6978                 struct dm_crtc_state *dm_new_crtc_state =
6979                         to_dm_crtc_state(new_crtc_state);
6980                 struct dm_crtc_state *dm_old_crtc_state =
6981                         to_dm_crtc_state(old_crtc_state);
6982                 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6983                 bool run_pass;
6984
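                     /*
                      * Pass 1 (for_modeset) covers CRTCs undergoing a modeset;
                      * pass 2 covers CRTCs that kept their mode but previously
                      * had interrupts disabled (0 -> n active planes).
                      */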
6985                 run_pass = (for_modeset && modeset) ||
6986                            (!for_modeset && !modeset &&
6987                             !dm_old_crtc_state->interrupts_enabled);
6988
6989                 if (!run_pass)
6990                         continue;
6991
6992                 if (!dm_new_crtc_state->interrupts_enabled)
6993                         continue;
6994
6995                 manage_dm_interrupts(adev, acrtc, true);
6996
6997 #ifdef CONFIG_DEBUG_FS
6998                 /* The stream has changed so CRC capture needs to be re-enabled. */
6999                 source = dm_new_crtc_state->crc_src;
7000                 if (amdgpu_dm_is_valid_crc_source(source)) {
7001                         amdgpu_dm_crtc_configure_crc_source(
7002                                 crtc, dm_new_crtc_state,
7003                                 dm_new_crtc_state->crc_src);
7004                 }
7005 #endif
7006         }
7007 }
7008
7009 /*
7010  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7011  * @crtc_state: the DRM CRTC state
7012  * @stream_state: the DC stream state.
7013  *
7014  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7015  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7016  */
7017 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7018                                                 struct dc_stream_state *stream_state)
7019 {
7020         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7021 }
7022
7023 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7024                                    struct drm_atomic_state *state,
7025                                    bool nonblock)
7026 {
7027         struct drm_crtc *crtc;
7028         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7029         struct amdgpu_device *adev = dev->dev_private;
7030         int i;
7031
7032         /*
7033          * We evade vblank and pflip interrupts on CRTCs that are undergoing
7034          * a modeset, are being disabled, or have no active planes.
7035          *
7036          * It's done in atomic commit rather than commit tail for now since
7037          * some of these interrupt handlers access the current CRTC state and
7038          * potentially the stream pointer itself.
7039          *
7040          * Since the atomic state is swapped within atomic commit and not within
7041          * commit tail, this would lead to the new state (that hasn't been
7042          * committed yet) being accessed from within the handlers.
7043          *
7044          * TODO: Fix this so we can do this in commit tail and not have to block
7045          * in atomic check.
7046          */
7047         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7048                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7049                 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7050                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7051
7052                 if (dm_old_crtc_state->interrupts_enabled &&
7053                     (!dm_new_crtc_state->interrupts_enabled ||
7054                      drm_atomic_crtc_needs_modeset(new_crtc_state)))
7055                         manage_dm_interrupts(adev, acrtc, false);
7056         }
7057         /*
7058          * Add a check here for SoCs that support a hardware cursor plane, to
7059          * unset legacy_cursor_update
7060          */
7061
7062         return drm_atomic_helper_commit(dev, state, nonblock);
7063
7064         /* TODO: Handle EINTR, re-enable IRQ */
7065 }
7066
7067 /**
7068  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7069  * @state: The atomic state to commit
7070  *
7071  * This will tell DC to commit the constructed DC state from atomic_check,
7072  * programming the hardware. Any failure here implies a hardware failure, since
7073  * atomic check should have filtered anything non-kosher.
7074  */
7075 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7076 {
7077         struct drm_device *dev = state->dev;
7078         struct amdgpu_device *adev = dev->dev_private;
7079         struct amdgpu_display_manager *dm = &adev->dm;
7080         struct dm_atomic_state *dm_state;
7081         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7082         uint32_t i, j;
7083         struct drm_crtc *crtc;
7084         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7085         unsigned long flags;
7086         bool wait_for_vblank = true;
7087         struct drm_connector *connector;
7088         struct drm_connector_state *old_con_state, *new_con_state;
7089         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7090         int crtc_disable_count = 0;
7091
7092         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7093
7094         dm_state = dm_atomic_get_new_state(state);
7095         if (dm_state && dm_state->context) {
7096                 dc_state = dm_state->context;
7097         } else {
7098                 /* No state changes, retain current state. */
7099                 dc_state_temp = dc_create_state(dm->dc);
7100                 ASSERT(dc_state_temp);
7101                 dc_state = dc_state_temp;
7102                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7103         }
7104
7105         /* update changed items */
7106         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7107                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7108
7109                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7110                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7111
7112                 DRM_DEBUG_DRIVER(
7113                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7114                         "planes_changed:%d, mode_changed:%d, active_changed:%d,"
7115                         "connectors_changed:%d\n",
7116                         acrtc->crtc_id,
7117                         new_crtc_state->enable,
7118                         new_crtc_state->active,
7119                         new_crtc_state->planes_changed,
7120                         new_crtc_state->mode_changed,
7121                         new_crtc_state->active_changed,
7122                         new_crtc_state->connectors_changed);
7123
7124                 /* Copy all transient state flags into dc state */
7125                 if (dm_new_crtc_state->stream) {
7126                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7127                                                             dm_new_crtc_state->stream);
7128                 }
7129
7130                 /* handles headless hotplug case, updating new_state and
7131                  * aconnector as needed
7132                  */
7133
7134                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7135
7136                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7137
7138                         if (!dm_new_crtc_state->stream) {
7139                                  * This could happen because of issues with
7140                                  * delivery of userspace notifications.
7141                                  * In this case userspace tries to set a mode
7142                                  * on a display which is in fact disconnected,
7143                                  * so dc_sink is NULL on the aconnector.
7144                                  * We expect a mode reset to come soon.
7145                                  *
7146                                  * This can also happen when an unplug occurs
7147                                  * during the resume sequence.
7148                                  *
7149                                  * In either case, we want to pretend we
7150                                  * still have a sink to keep the pipe running
7151                                  * so that the hw state is consistent with
7152                                  * the sw state.
7153                                  */
7154                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7155                                                 __func__, acrtc->base.base.id);
7156                                 continue;
7157                         }
7158
7159                         if (dm_old_crtc_state->stream)
7160                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7161
7162                         pm_runtime_get_noresume(dev->dev);
7163
7164                         acrtc->enabled = true;
7165                         acrtc->hw_mode = new_crtc_state->mode;
7166                         crtc->hwmode = new_crtc_state->mode;
7167                 } else if (modereset_required(new_crtc_state)) {
7168                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7169                         /* i.e. reset mode */
7170                         if (dm_old_crtc_state->stream) {
7171                                 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7172                                         amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7173
7174                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7175                         }
7176                 }
7177         } /* for_each_crtc_in_state() */
7178
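             /*
              * Program the new DC state: enable per-frame CRTC master
              * sync first, then hand the constructed state to DC under
              * the DC lock.
              */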
7179         if (dc_state) {
7180                 dm_enable_per_frame_crtc_master_sync(dc_state);
7181                 mutex_lock(&dm->dc_lock);
7182                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7183                 mutex_unlock(&dm->dc_lock);
7184         }
7185
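             /* Record the OTG instance DC assigned to each active stream. */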
7186         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7187                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7188
7189                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7190
7191                 if (dm_new_crtc_state->stream != NULL) {
7192                         const struct dc_stream_status *status =
7193                                         dc_stream_get_status(dm_new_crtc_state->stream);
7194
7195                         if (!status)
7196                                 status = dc_stream_get_status_from_state(dc_state,
7197                                                                          dm_new_crtc_state->stream);
7198
7199                         if (!status)
7200                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7201                         else
7202                                 acrtc->otg_inst = status->primary_otg_inst;
7203                 }
7204         }
7205 #ifdef CONFIG_DRM_AMD_DC_HDCP
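             /*
              * Update HDCP state: if a stream was removed while content
              * protection was enabled, reset the display to DESIRED;
              * otherwise propagate content protection changes to the
              * HDCP workqueue.
              */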
7206         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7207                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7208                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7209                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7210
7211                 new_crtc_state = NULL;
7212
7213                 if (acrtc)
7214                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7215
7216                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7217
7218                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7219                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7220                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7221                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7222                         continue;
7223                 }
7224
7225                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7226                         hdcp_update_display(
7227                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7228                                 new_con_state->hdcp_content_type,
7229                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7230                                                                                                          : false);
7231         }
7232 #endif
7233
7234         /* Handle connector state changes */
7235         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7236                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7237                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7238                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7239                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7240                 struct dc_stream_update stream_update;
7241                 struct dc_info_packet hdr_packet;
7242                 struct dc_stream_status *status = NULL;
7243                 bool abm_changed, hdr_changed, scaling_changed;
7244
7245                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7246                 memset(&stream_update, 0, sizeof(stream_update));
7247
7248                 if (acrtc) {
7249                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7250                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7251                 }
7252
7253                 /* Skip any modesets/resets */
7254                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7255                         continue;
7256
7257                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7258                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7259
7260                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7261                                                              dm_old_con_state);
7262
7263                 abm_changed = dm_new_crtc_state->abm_level !=
7264                               dm_old_crtc_state->abm_level;
7265
7266                 hdr_changed =
7267                         is_hdr_metadata_different(old_con_state, new_con_state);
7268
7269                 if (!scaling_changed && !abm_changed && !hdr_changed)
7270                         continue;
7271
7272                 stream_update.stream = dm_new_crtc_state->stream;
7273                 if (scaling_changed) {
7274                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7275                                         dm_new_con_state, dm_new_crtc_state->stream);
7276
7277                         stream_update.src = dm_new_crtc_state->stream->src;
7278                         stream_update.dst = dm_new_crtc_state->stream->dst;
7279                 }
7280
7281                 if (abm_changed) {
7282                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7283
7284                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7285                 }
7286
7287                 if (hdr_changed) {
7288                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7289                         stream_update.hdr_static_metadata = &hdr_packet;
7290                 }
7291
7292                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7293                 WARN_ON(!status);
7294                 WARN_ON(!status->plane_count);
7295
7296                 /*
7297                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7298                  * Here we create an empty update on each plane.
7299                  * To fix this, DC should permit updating only stream properties.
7300                  */
7301                 for (j = 0; j < status->plane_count; j++)
7302                         dummy_updates[j].surface = status->plane_states[0];
7303
7304
7305                 mutex_lock(&dm->dc_lock);
7306                 dc_commit_updates_for_stream(dm->dc,
7307                                                      dummy_updates,
7308                                                      status->plane_count,
7309                                                      dm_new_crtc_state->stream,
7310                                                      &stream_update,
7311                                                      dc_state);
7312                 mutex_unlock(&dm->dc_lock);
7313         }
7314
7315         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7316         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7317                                       new_crtc_state, i) {
7318                 if (old_crtc_state->active && !new_crtc_state->active)
7319                         crtc_disable_count++;
7320
7321                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7322                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7323
7324                 /* Update freesync active state. */
7325                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7326
7327                 /* Handle vrr on->off / off->on transitions */
7328                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7329                                                 dm_new_crtc_state);
7330         }
7331
7332         /* Enable interrupts for CRTCs going through a modeset. */
7333         amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7334
7335         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7336                 if (new_crtc_state->async_flip)
7337                         wait_for_vblank = false;
7338
7339         /* Update planes when needed per CRTC */
7340         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7341                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7342
7343                 if (dm_new_crtc_state->stream)
7344                         amdgpu_dm_commit_planes(state, dc_state, dev,
7345                                                 dm, crtc, wait_for_vblank);
7346         }
7347
7348         /* Enable interrupts for CRTCs going from 0 to n active planes. */
7349         amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7350
7351         /* Update audio instances for each connector. */
7352         amdgpu_dm_commit_audio(dev, state);
7353
7354         /*
7355          * Send a vblank event for all events not handled in the flip path,
7356          * and mark the events consumed for drm_atomic_helper_commit_hw_done()
7357          */
7358         spin_lock_irqsave(&adev->ddev->event_lock, flags);
7359         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7360
7361                 if (new_crtc_state->event)
7362                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7363
7364                 new_crtc_state->event = NULL;
7365         }
7366         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7367
7368         /* Signal HW programming completion */
7369         drm_atomic_helper_commit_hw_done(state);
7370
7371         if (wait_for_vblank)
7372                 drm_atomic_helper_wait_for_flip_done(dev, state);
7373
7374         drm_atomic_helper_cleanup_planes(dev, state);
7375
7376         /*
7377          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7378          * so we can put the GPU into runtime suspend if we're not driving any
7379          * displays anymore
7380          */
7381         for (i = 0; i < crtc_disable_count; i++)
7382                 pm_runtime_put_autosuspend(dev->dev);
7383         pm_runtime_mark_last_busy(dev->dev);
7384
7385         if (dc_state_temp)
7386                 dc_release_state(dc_state_temp);
7387 }
7388
7389
7390 static int dm_force_atomic_commit(struct drm_connector *connector)
7391 {
7392         int ret = 0;
7393         struct drm_device *ddev = connector->dev;
7394         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7395         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7396         struct drm_plane *plane = disconnected_acrtc->base.primary;
7397         struct drm_connector_state *conn_state;
7398         struct drm_crtc_state *crtc_state;
7399         struct drm_plane_state *plane_state;
7400
7401         if (!state)
7402                 return -ENOMEM;
7403
7404         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7405
7406         /* Construct an atomic state to restore previous display settings */
7407
7408         /*
7409          * Attach connectors to drm_atomic_state
7410          */
7411         conn_state = drm_atomic_get_connector_state(state, connector);
7412
7413         ret = PTR_ERR_OR_ZERO(conn_state);
7414         if (ret)
7415                 goto err;
7416
7417         /* Attach crtc to drm_atomic_state */
7418         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7419
7420         ret = PTR_ERR_OR_ZERO(crtc_state);
7421         if (ret)
7422                 goto err;
7423
7424         /* force a restore */
7425         crtc_state->mode_changed = true;
7426
7427         /* Attach plane to drm_atomic_state */
7428         plane_state = drm_atomic_get_plane_state(state, plane);
7429
7430         ret = PTR_ERR_OR_ZERO(plane_state);
7431         if (ret)
7432                 goto err;
7433
7434
7435         /* Call commit internally with the state we just constructed */
7436         ret = drm_atomic_commit(state);
7437         if (!ret)
7438                 return 0;
7439
7440 err:
7441         DRM_ERROR("Restoring old state failed with %i\n", ret);
7442         drm_atomic_state_put(state);
7443
7444         return ret;
7445 }
7446
7447 /*
7448  * This function handles all cases when a set mode does not come upon hotplug.
7449  * This includes when a display is unplugged and then plugged back into the
7450  * same port, and when running without usermode desktop manager support.
7451  */
7452 void dm_restore_drm_connector_state(struct drm_device *dev,
7453                                     struct drm_connector *connector)
7454 {
7455         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7456         struct amdgpu_crtc *disconnected_acrtc;
7457         struct dm_crtc_state *acrtc_state;
7458
7459         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7460                 return;
7461
7462         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7463         if (!disconnected_acrtc)
7464                 return;
7465
7466         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7467         if (!acrtc_state->stream)
7468                 return;
7469
7470         /*
7471          * If the previous sink is not released and is different from the current
7472          * one, we deduce we are in a state where we cannot rely on a usermode
7473          * call to turn on the display, so we do it here.
7474          */
7475         if (acrtc_state->stream->sink != aconnector->dc_sink)
7476                 dm_force_atomic_commit(&aconnector->base);
7477 }
7478
7479 /*
7480  * Grabs all modesetting locks to serialize against any blocking commits,
7481  * and waits for completion of all non-blocking commits.
7482  */
7483 static int do_aquire_global_lock(struct drm_device *dev,
7484                                  struct drm_atomic_state *state)
7485 {
7486         struct drm_crtc *crtc;
7487         struct drm_crtc_commit *commit;
7488         long ret;
7489
7490         /*
7491          * Adding all modeset locks to acquire_ctx will
7492          * ensure that when the framework releases it, the
7493          * extra locks we are taking here will get released too.
7494          */
7495         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7496         if (ret)
7497                 return ret;
7498
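             /*
              * For each CRTC, wait for the commit at the head of its
              * commit list (if any) to finish both HW programming and
              * the page flip before returning.
              */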
7499         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7500                 spin_lock(&crtc->commit_lock);
7501                 commit = list_first_entry_or_null(&crtc->commit_list,
7502                                 struct drm_crtc_commit, commit_entry);
7503                 if (commit)
7504                         drm_crtc_commit_get(commit);
7505                 spin_unlock(&crtc->commit_lock);
7506
7507                 if (!commit)
7508                         continue;
7509
7510                 /*
7511          * Make sure all pending HW programming has completed and
7512          * all page flips are done.
7513                  */
7514                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7515
7516                 if (ret > 0)
7517                         ret = wait_for_completion_interruptible_timeout(
7518                                         &commit->flip_done, 10*HZ);
7519
7520                 if (ret == 0)
7521                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7522                                   "timed out\n", crtc->base.id, crtc->name);
7523
7524                 drm_crtc_commit_put(commit);
7525         }
7526
7527         return ret < 0 ? ret : 0;
7528 }
7529
7530 static void get_freesync_config_for_crtc(
7531         struct dm_crtc_state *new_crtc_state,
7532         struct dm_connector_state *new_con_state)
7533 {
7534         struct mod_freesync_config config = {0};
7535         struct amdgpu_dm_connector *aconnector =
7536                         to_amdgpu_dm_connector(new_con_state->base.connector);
7537         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7538         int vrefresh = drm_mode_vrefresh(mode);
7539
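             /*
              * VRR is supported only when the connector reports FreeSync
              * capability and the nominal refresh rate of the mode lies
              * within the range the panel advertises.
              */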
7540         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7541                                         vrefresh >= aconnector->min_vfreq &&
7542                                         vrefresh <= aconnector->max_vfreq;
7543
7544         if (new_crtc_state->vrr_supported) {
7545                 new_crtc_state->stream->ignore_msa_timing_param = true;
7546                 config.state = new_crtc_state->base.vrr_enabled ?
7547                                 VRR_STATE_ACTIVE_VARIABLE :
7548                                 VRR_STATE_INACTIVE;
7549                 config.min_refresh_in_uhz =
7550                                 aconnector->min_vfreq * 1000000;
7551                 config.max_refresh_in_uhz =
7552                                 aconnector->max_vfreq * 1000000;
7553                 config.vsif_supported = true;
7554                 config.btr = true;
7555         }
7556
7557         new_crtc_state->freesync_config = config;
7558 }
7559
7560 static void reset_freesync_config_for_crtc(
7561         struct dm_crtc_state *new_crtc_state)
7562 {
7563         new_crtc_state->vrr_supported = false;
7564
7565         memset(&new_crtc_state->vrr_params, 0,
7566                sizeof(new_crtc_state->vrr_params));
7567         memset(&new_crtc_state->vrr_infopacket, 0,
7568                sizeof(new_crtc_state->vrr_infopacket));
7569 }
7570
7571 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7572                                 struct drm_atomic_state *state,
7573                                 struct drm_crtc *crtc,
7574                                 struct drm_crtc_state *old_crtc_state,
7575                                 struct drm_crtc_state *new_crtc_state,
7576                                 bool enable,
7577                                 bool *lock_and_validation_needed)
7578 {
7579         struct dm_atomic_state *dm_state = NULL;
7580         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7581         struct dc_stream_state *new_stream;
7582         int ret = 0;
7583
7584         /*
7585          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7586          * update changed items
7587          */
7588         struct amdgpu_crtc *acrtc = NULL;
7589         struct amdgpu_dm_connector *aconnector = NULL;
7590         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7591         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7592
7593         new_stream = NULL;
7594
7595         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7596         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7597         acrtc = to_amdgpu_crtc(crtc);
7598         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7599
7600         /* TODO This hack should go away */
7601         if (aconnector && enable) {
7602                 /* Make sure fake sink is created in plug-in scenario */
7603                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7604                                                             &aconnector->base);
7605                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7606                                                             &aconnector->base);
7607
7608                 if (IS_ERR(drm_new_conn_state)) {
7609                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7610                         goto fail;
7611                 }
7612
7613                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7614                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7615
7616                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7617                         goto skip_modeset;
7618
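                     /*
                      * Build a new DC stream for the requested mode from the
                      * new connector state, passing the old stream in for
                      * reference.
                      */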
7619                 new_stream = create_stream_for_sink(aconnector,
7620                                                      &new_crtc_state->mode,
7621                                                     dm_new_conn_state,
7622                                                     dm_old_crtc_state->stream);
7623
7624                 /*
7625                  * We can have no stream on ACTION_SET if a display
7626                  * was disconnected during S3. In this case it is not an
7627                  * error: the OS will be updated after detection, and
7628                  * will do the right thing on the next atomic commit.
7629                  */
7630
7631                 if (!new_stream) {
7632                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7633                                         __func__, acrtc->base.base.id);
7634                         ret = -ENOMEM;
7635                         goto fail;
7636                 }
7637
7638                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7639
7640                 ret = fill_hdr_info_packet(drm_new_conn_state,
7641                                            &new_stream->hdr_static_metadata);
7642                 if (ret)
7643                         goto fail;
7644
7645                 /*
7646                  * If we already removed the old stream from the context
7647                  * (and set the new stream to NULL) then we can't reuse
7648                  * the old stream even if the stream and scaling are unchanged.
7649                  * We would hit the BUG_ON and get a black screen.
7650                  *
7651                  * TODO: Refactor this function to allow this check to work
7652                  * in all conditions.
7653                  */
7654                 if (dm_new_crtc_state->stream &&
7655                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7656                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7657                         new_crtc_state->mode_changed = false;
7658                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7659                                          new_crtc_state->mode_changed);
7660                 }
7661         }
7662
7663         /* mode_changed flag may get updated above, need to check again */
7664         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7665                 goto skip_modeset;
7666
7667         DRM_DEBUG_DRIVER(
7668                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7669                 "planes_changed:%d, mode_changed:%d, active_changed:%d,"
7670                 "connectors_changed:%d\n",
7671                 acrtc->crtc_id,
7672                 new_crtc_state->enable,
7673                 new_crtc_state->active,
7674                 new_crtc_state->planes_changed,
7675                 new_crtc_state->mode_changed,
7676                 new_crtc_state->active_changed,
7677                 new_crtc_state->connectors_changed);
7678
7679         /* Remove stream for any changed/disabled CRTC */
7680         if (!enable) {
7681
7682                 if (!dm_old_crtc_state->stream)
7683                         goto skip_modeset;
7684
7685                 ret = dm_atomic_get_state(state, &dm_state);
7686                 if (ret)
7687                         goto fail;
7688
7689                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7690                                 crtc->base.id);
7691
7692                 /* i.e. reset mode */
7693                 if (dc_remove_stream_from_ctx(
7694                                 dm->dc,
7695                                 dm_state->context,
7696                                 dm_old_crtc_state->stream) != DC_OK) {
7697                         ret = -EINVAL;
7698                         goto fail;
7699                 }
7700
7701                 dc_stream_release(dm_old_crtc_state->stream);
7702                 dm_new_crtc_state->stream = NULL;
7703
7704                 reset_freesync_config_for_crtc(dm_new_crtc_state);
7705
7706                 *lock_and_validation_needed = true;
7707
7708         } else {/* Add stream for any updated/enabled CRTC */
7709                 /*
7710                  * Quick fix to prevent a NULL pointer dereference on new_stream
7711                  * when newly added MST connectors are not found in the existing
7712                  * crtc_state in chained mode. TODO: dig out the root cause of this.
7713                  */
7714                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7715                         goto skip_modeset;
7716
7717                 if (modereset_required(new_crtc_state))
7718                         goto skip_modeset;
7719
7720                 if (modeset_required(new_crtc_state, new_stream,
7721                                      dm_old_crtc_state->stream)) {
7722
7723                         WARN_ON(dm_new_crtc_state->stream);
7724
7725                         ret = dm_atomic_get_state(state, &dm_state);
7726                         if (ret)
7727                                 goto fail;
7728
7729                         dm_new_crtc_state->stream = new_stream;
7730
7731                         dc_stream_retain(new_stream);
7732
7733                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7734                                                 crtc->base.id);
7735
7736                         if (dc_add_stream_to_ctx(
7737                                         dm->dc,
7738                                         dm_state->context,
7739                                         dm_new_crtc_state->stream) != DC_OK) {
7740                                 ret = -EINVAL;
7741                                 goto fail;
7742                         }
7743
7744                         *lock_and_validation_needed = true;
7745                 }
7746         }
7747
7748 skip_modeset:
7749         /* Release extra reference */
7750         if (new_stream)
7751                 dc_stream_release(new_stream);
7752
7753         /*
7754          * We want to do dc stream updates that do not require a
7755          * full modeset below.
7756          */
7757         if (!(enable && aconnector && new_crtc_state->enable &&
7758               new_crtc_state->active))
7759                 return 0;
7760         /*
7761          * Given the above conditions, the dc stream cannot be NULL because:
7762          * 1. We're in the process of enabling CRTCs (it has just been added
7763          *    to the dc context, or is already on the context),
7764          * 2. It has a valid connector attached, and
7765          * 3. It is currently active and enabled.
7766          * => The dc stream state currently exists.
7767          */
7768         BUG_ON(dm_new_crtc_state->stream == NULL);
7769
7770         /* Scaling or underscan settings */
7771         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7772                 update_stream_scaling_settings(
7773                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7774
7775         /* ABM settings */
7776         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7777
7778         /*
7779          * Color management settings. We also update color properties
7780          * when a modeset is needed, to ensure it gets reprogrammed.
7781          */
7782         if (dm_new_crtc_state->base.color_mgmt_changed ||
7783             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7784                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7785                 if (ret)
7786                         goto fail;
7787         }
7788
7789         /* Update Freesync settings. */
7790         get_freesync_config_for_crtc(dm_new_crtc_state,
7791                                      dm_new_conn_state);
7792
7793         return ret;
7794
7795 fail:
7796         if (new_stream)
7797                 dc_stream_release(new_stream);
7798         return ret;
7799 }
7800
7801 static bool should_reset_plane(struct drm_atomic_state *state,
7802                                struct drm_plane *plane,
7803                                struct drm_plane_state *old_plane_state,
7804                                struct drm_plane_state *new_plane_state)
7805 {
7806         struct drm_plane *other;
7807         struct drm_plane_state *old_other_state, *new_other_state;
7808         struct drm_crtc_state *new_crtc_state;
7809         int i;
7810
7811         /*
7812          * TODO: Remove this hack once the checks below are sufficient
7813          * to determine when we need to reset all the planes on
7814          * the stream.
7815          */
7816         if (state->allow_modeset)
7817                 return true;
7818
7819         /* Exit early if we know that we're adding or removing the plane. */
7820         if (old_plane_state->crtc != new_plane_state->crtc)
7821                 return true;
7822
7823         /* old crtc == new_crtc == NULL, plane not in context. */
7824         if (!new_plane_state->crtc)
7825                 return false;
7826
7827         new_crtc_state =
7828                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7829
7830         if (!new_crtc_state)
7831                 return true;
7832
7833         /* CRTC Degamma changes currently require us to recreate planes. */
7834         if (new_crtc_state->color_mgmt_changed)
7835                 return true;
7836
7837         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7838                 return true;
7839
7840         /*
7841          * If there are any new primary or overlay planes being added or
7842          * removed then the z-order can potentially change. To ensure
7843          * correct z-order and pipe acquisition the current DC architecture
7844          * requires us to remove and recreate all existing planes.
7845          *
7846          * TODO: Come up with a more elegant solution for this.
7847          */
7848         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7849                 if (other->type == DRM_PLANE_TYPE_CURSOR)
7850                         continue;
7851
7852                 if (old_other_state->crtc != new_plane_state->crtc &&
7853                     new_other_state->crtc != new_plane_state->crtc)
7854                         continue;
7855
7856                 if (old_other_state->crtc != new_other_state->crtc)
7857                         return true;
7858
7859                 /* TODO: Remove this once we can handle fast format changes. */
7860                 if (old_other_state->fb && new_other_state->fb &&
7861                     old_other_state->fb->format != new_other_state->fb->format)
7862                         return true;
7863         }
7864
7865         return false;
7866 }
7867
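     /*
      * Add or remove a plane in the DC context for this atomic state.
      * On the disable pass (!enable), changed planes are removed from
      * their stream's context; on the enable pass, a new dc_plane_state
      * is created, filled from the DRM plane state and attached to the
      * stream. Sets *lock_and_validation_needed whenever the DC context
      * was modified.
      */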
7868 static int dm_update_plane_state(struct dc *dc,
7869                                  struct drm_atomic_state *state,
7870                                  struct drm_plane *plane,
7871                                  struct drm_plane_state *old_plane_state,
7872                                  struct drm_plane_state *new_plane_state,
7873                                  bool enable,
7874                                  bool *lock_and_validation_needed)
7875 {
7876
7877         struct dm_atomic_state *dm_state = NULL;
7878         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7879         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7880         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7881         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7882         bool needs_reset;
7883         int ret = 0;
7884
7885
7886         new_plane_crtc = new_plane_state->crtc;
7887         old_plane_crtc = old_plane_state->crtc;
7888         dm_new_plane_state = to_dm_plane_state(new_plane_state);
7889         dm_old_plane_state = to_dm_plane_state(old_plane_state);
7890
7891         /* TODO: Implement atomic check for cursor plane */
7892         if (plane->type == DRM_PLANE_TYPE_CURSOR)
7893                 return 0;
7894
7895         needs_reset = should_reset_plane(state, plane, old_plane_state,
7896                                          new_plane_state);
7897
7898         /* Remove any changed/removed planes */
7899         if (!enable) {
7900                 if (!needs_reset)
7901                         return 0;
7902
7903                 if (!old_plane_crtc)
7904                         return 0;
7905
7906                 old_crtc_state = drm_atomic_get_old_crtc_state(
7907                                 state, old_plane_crtc);
7908                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7909
7910                 if (!dm_old_crtc_state->stream)
7911                         return 0;
7912
7913                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7914                                 plane->base.id, old_plane_crtc->base.id);
7915
7916                 ret = dm_atomic_get_state(state, &dm_state);
7917                 if (ret)
7918                         return ret;
7919
7920                 if (!dc_remove_plane_from_context(
7921                                 dc,
7922                                 dm_old_crtc_state->stream,
7923                                 dm_old_plane_state->dc_state,
7924                                 dm_state->context)) {
7925
7926                         ret = -EINVAL;
7927                         return ret;
7928                 }
7929
7930
7931                 dc_plane_state_release(dm_old_plane_state->dc_state);
7932                 dm_new_plane_state->dc_state = NULL;
7933
7934                 *lock_and_validation_needed = true;
7935
7936         } else { /* Add new planes */
7937                 struct dc_plane_state *dc_new_plane_state;
7938
7939                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7940                         return 0;
7941
7942                 if (!new_plane_crtc)
7943                         return 0;
7944
7945                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7946                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7947
7948                 if (!dm_new_crtc_state->stream)
7949                         return 0;
7950
7951                 if (!needs_reset)
7952                         return 0;
7953
7954                 WARN_ON(dm_new_plane_state->dc_state);
7955
7956                 dc_new_plane_state = dc_create_plane_state(dc);
7957                 if (!dc_new_plane_state)
7958                         return -ENOMEM;
7959
7960                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7961                                 plane->base.id, new_plane_crtc->base.id);
7962
7963                 ret = fill_dc_plane_attributes(
7964                         new_plane_crtc->dev->dev_private,
7965                         dc_new_plane_state,
7966                         new_plane_state,
7967                         new_crtc_state);
7968                 if (ret) {
7969                         dc_plane_state_release(dc_new_plane_state);
7970                         return ret;
7971                 }
7972
7973                 ret = dm_atomic_get_state(state, &dm_state);
7974                 if (ret) {
7975                         dc_plane_state_release(dc_new_plane_state);
7976                         return ret;
7977                 }
7978
7979                 /*
7980                  * Any atomic check errors that occur after this will
7981                  * not need a release. The plane state will be attached
7982                  * to the stream, and therefore part of the atomic
7983                  * state. It'll be released when the atomic state is
7984                  * cleaned.
7985                  */
7986                 if (!dc_add_plane_to_context(
7987                                 dc,
7988                                 dm_new_crtc_state->stream,
7989                                 dc_new_plane_state,
7990                                 dm_state->context)) {
7991
7992                         dc_plane_state_release(dc_new_plane_state);
7993                         return -EINVAL;
7994                 }
7995
7996                 dm_new_plane_state->dc_state = dc_new_plane_state;
7997
7998                 /* Tell DC to do a full surface update every time there
7999                  * is a plane change. Inefficient, but works for now.
8000                  */
8001                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8002
8003                 *lock_and_validation_needed = true;
8004         }
8005
8006
8007         return ret;
8008 }
8009
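     /*
      * Walk the CRTCs and planes in the atomic state, building per-stream
      * surface/stream update bundles so DC can classify the commit. The
      * type starts at UPDATE_TYPE_FAST and is escalated to UPDATE_TYPE_FULL
      * whenever a stream or plane state changed, or on allocation failure
      * as the safe fallback.
      */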
8010 static int
8011 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8012                                     struct drm_atomic_state *state,
8013                                     enum surface_update_type *out_type)
8014 {
8015         struct dc *dc = dm->dc;
8016         struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8017         int i, j, num_plane, ret = 0;
8018         struct drm_plane_state *old_plane_state, *new_plane_state;
8019         struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8020         struct drm_crtc *new_plane_crtc;
8021         struct drm_plane *plane;
8022
8023         struct drm_crtc *crtc;
8024         struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8025         struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8026         struct dc_stream_status *status = NULL;
8027         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8028         struct surface_info_bundle {
8029                 struct dc_surface_update surface_updates[MAX_SURFACES];
8030                 struct dc_plane_info plane_infos[MAX_SURFACES];
8031                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8032                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8033                 struct dc_stream_update stream_update;
8034         } *bundle;
8035
8036         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8037
8038         if (!bundle) {
8039                 DRM_ERROR("Failed to allocate update bundle\n");
8040                 /* Set type to FULL to avoid crashing in DC */
8041                 update_type = UPDATE_TYPE_FULL;
8042                 goto cleanup;
8043         }
8044
8045         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8046
8047                 memset(bundle, 0, sizeof(struct surface_info_bundle));
8048
8049                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8050                 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8051                 num_plane = 0;
8052
8053                 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8054                         update_type = UPDATE_TYPE_FULL;
8055                         goto cleanup;
8056                 }
8057
8058                 if (!new_dm_crtc_state->stream)
8059                         continue;
8060
8061                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8062                         const struct amdgpu_framebuffer *amdgpu_fb =
8063                                 to_amdgpu_framebuffer(new_plane_state->fb);
8064                         struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8065                         struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8066                         struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8067                         uint64_t tiling_flags;
8068
8069                         new_plane_crtc = new_plane_state->crtc;
8070                         new_dm_plane_state = to_dm_plane_state(new_plane_state);
8071                         old_dm_plane_state = to_dm_plane_state(old_plane_state);
8072
8073                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8074                                 continue;
8075
8076                         if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8077                                 update_type = UPDATE_TYPE_FULL;
8078                                 goto cleanup;
8079                         }
8080
8081                         if (crtc != new_plane_crtc)
8082                                 continue;
8083
8084                         bundle->surface_updates[num_plane].surface =
8085                                         new_dm_plane_state->dc_state;
8086
8087                         if (new_crtc_state->mode_changed) {
8088                                 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8089                                 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8090                         }
8091
8092                         if (new_crtc_state->color_mgmt_changed) {
8093                                 bundle->surface_updates[num_plane].gamma =
8094                                                 new_dm_plane_state->dc_state->gamma_correction;
8095                                 bundle->surface_updates[num_plane].in_transfer_func =
8096                                                 new_dm_plane_state->dc_state->in_transfer_func;
8097                                 bundle->surface_updates[num_plane].gamut_remap_matrix =
8098                                                 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8099                                 bundle->stream_update.gamut_remap =
8100                                                 &new_dm_crtc_state->stream->gamut_remap_matrix;
8101                                 bundle->stream_update.output_csc_transform =
8102                                                 &new_dm_crtc_state->stream->csc_color_matrix;
8103                                 bundle->stream_update.out_transfer_func =
8104                                                 new_dm_crtc_state->stream->out_transfer_func;
8105                         }
8106
8107                         ret = fill_dc_scaling_info(new_plane_state,
8108                                                    scaling_info);
8109                         if (ret)
8110                                 goto cleanup;
8111
8112                         bundle->surface_updates[num_plane].scaling_info = scaling_info;
8113
8114                         if (amdgpu_fb) {
8115                                 ret = get_fb_info(amdgpu_fb, &tiling_flags);
8116                                 if (ret)
8117                                         goto cleanup;
8118
8119                                 ret = fill_dc_plane_info_and_addr(
8120                                         dm->adev, new_plane_state, tiling_flags,
8121                                         plane_info,
8122                                         &flip_addr->address,
8123                                         false);
8124                                 if (ret)
8125                                         goto cleanup;
8126
8127                                 bundle->surface_updates[num_plane].plane_info = plane_info;
8128                                 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8129                         }
8130
8131                         num_plane++;
8132                 }
8133
8134                 if (num_plane == 0)
8135                         continue;
8136
8137                 ret = dm_atomic_get_state(state, &dm_state);
8138                 if (ret)
8139                         goto cleanup;
8140
8141                 old_dm_state = dm_atomic_get_old_state(state);
8142                 if (!old_dm_state) {
8143                         ret = -EINVAL;
8144                         goto cleanup;
8145                 }
8146
8147                 status = dc_stream_get_status_from_state(old_dm_state->context,
8148                                                          new_dm_crtc_state->stream);
8149                 bundle->stream_update.stream = new_dm_crtc_state->stream;
8150                 /*
8151                  * TODO: DC modifies the surface during this call so we need
8152                  * to lock here - find a way to do this without locking.
8153                  */
8154                 mutex_lock(&dm->dc_lock);
8155                 update_type = dc_check_update_surfaces_for_stream(
8156                                 dc,     bundle->surface_updates, num_plane,
8157                                 &bundle->stream_update, status);
8158                 mutex_unlock(&dm->dc_lock);
8159
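                     /*
                      * A full update has been found; no need to check the
                      * remaining CRTCs.
                      */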
8160                 if (update_type > UPDATE_TYPE_MED) {
8161                         update_type = UPDATE_TYPE_FULL;
8162                         goto cleanup;
8163                 }
8164         }
8165
8166 cleanup:
8167         kfree(bundle);
8168
8169         *out_type = update_type;
8170         return ret;
8171 }
8172
8173 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8174 {
8175         struct drm_connector *connector;
8176         struct drm_connector_state *conn_state;
8177         struct amdgpu_dm_connector *aconnector = NULL;
8178         int i;
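
             /* Find the MST connector assigned to this CRTC, if any. */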
8179         for_each_new_connector_in_state(state, connector, conn_state, i) {
8180                 if (conn_state->crtc != crtc)
8181                         continue;
8182
8183                 aconnector = to_amdgpu_dm_connector(connector);
8184                 if (!aconnector->port || !aconnector->mst_port)
8185                         aconnector = NULL;
8186                 else
8187                         break;
8188         }
8189
8190         if (!aconnector)
8191                 return 0;
8192
8193         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8194 }
8195
8196 /**
8197  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8198  * @dev: The DRM device
8199  * @state: The atomic state to commit
8200  *
8201  * Validate that the given atomic state is programmable by DC into hardware.
8202  * This involves constructing a &struct dc_state reflecting the new hardware
8203  * state we wish to commit, then querying DC to see if it is programmable. It's
8204  * important not to modify the existing DC state. Otherwise, atomic_check
8205  * may unexpectedly commit hardware changes.
8206  *
8207  * When validating the DC state, it's important that the right locks are
8208  * acquired. For a full update, which removes/adds/updates streams on one
8209  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
8210  * that any such full update commit will wait for completion of any outstanding
8211  * flips using DRM's synchronization events. See
8212  * dm_determine_update_type_for_commit().
8213  *
8214  * Note that DM adds the affected connectors for all CRTCs in the state, even
8215  * when that might not seem necessary. This is because DC stream creation requires the
8216  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8217  * be possible but non-trivial - a possible TODO item.
8218  *
8219  * Return: 0 on success, negative error code if validation failed.
8220  */
8221 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8222                                   struct drm_atomic_state *state)
8223 {
8224         struct amdgpu_device *adev = dev->dev_private;
8225         struct dm_atomic_state *dm_state = NULL;
8226         struct dc *dc = adev->dm.dc;
8227         struct drm_connector *connector;
8228         struct drm_connector_state *old_con_state, *new_con_state;
8229         struct drm_crtc *crtc;
8230         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8231         struct drm_plane *plane;
8232         struct drm_plane_state *old_plane_state, *new_plane_state;
8233         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8234         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8235
8236         int ret, i;
8237
8238         /*
8239          * This bool will be set to true for any modeset/reset
8240          * or plane update which implies a non-fast surface update.
8241          */
8242         bool lock_and_validation_needed = false;
8243
8244         ret = drm_atomic_helper_check_modeset(dev, state);
8245         if (ret)
8246                 goto fail;
8247
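             /*
              * On DSC-capable ASICs (Navi and newer), a modeset on one MST
              * display can change the bandwidth available to the others on the
              * same topology, so add their CRTCs to the state as well.
              */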
8248         if (adev->asic_type >= CHIP_NAVI10) {
8249                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8250                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8251                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8252                                 if (ret)
8253                                         goto fail;
8254                         }
8255                 }
8256         }
8257
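             /*
              * Pull the connectors and planes of any CRTC whose mode, color
              * management or VRR state changed into the atomic state.
              */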
8258         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8259                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8260                     !new_crtc_state->color_mgmt_changed &&
8261                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8262                         continue;
8263
8264                 if (!new_crtc_state->enable)
8265                         continue;
8266
8267                 ret = drm_atomic_add_affected_connectors(state, crtc);
8268                 if (ret)
8269                         goto fail;
8270
8271                 ret = drm_atomic_add_affected_planes(state, crtc);
8272                 if (ret)
8273                         goto fail;
8274         }
8275
8276         /*
8277          * Add all primary and overlay planes on the CRTC to the state
8278          * whenever a plane is enabled to maintain correct z-ordering
8279          * and to enable fast surface updates.
8280          */
8281         drm_for_each_crtc(crtc, dev) {
8282                 bool modified = false;
8283
8284                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8285                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8286                                 continue;
8287
8288                         if (new_plane_state->crtc == crtc ||
8289                             old_plane_state->crtc == crtc) {
8290                                 modified = true;
8291                                 break;
8292                         }
8293                 }
8294
8295                 if (!modified)
8296                         continue;
8297
8298                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8299                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8300                                 continue;
8301
8302                         new_plane_state =
8303                                 drm_atomic_get_plane_state(state, plane);
8304
8305                         if (IS_ERR(new_plane_state)) {
8306                                 ret = PTR_ERR(new_plane_state);
8307                                 goto fail;
8308                         }
8309                 }
8310         }
8311
8312         /* Remove existing planes if they are modified */
8313         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8314                 ret = dm_update_plane_state(dc, state, plane,
8315                                             old_plane_state,
8316                                             new_plane_state,
8317                                             false,
8318                                             &lock_and_validation_needed);
8319                 if (ret)
8320                         goto fail;
8321         }
8322
8323         /* Disable all crtcs which require disable */
8324         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8325                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8326                                            old_crtc_state,
8327                                            new_crtc_state,
8328                                            false,
8329                                            &lock_and_validation_needed);
8330                 if (ret)
8331                         goto fail;
8332         }
8333
8334         /* Enable all crtcs which require enable */
8335         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8336                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8337                                            old_crtc_state,
8338                                            new_crtc_state,
8339                                            true,
8340                                            &lock_and_validation_needed);
8341                 if (ret)
8342                         goto fail;
8343         }
8344
8345         /* Add new/modified planes */
8346         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8347                 ret = dm_update_plane_state(dc, state, plane,
8348                                             old_plane_state,
8349                                             new_plane_state,
8350                                             true,
8351                                             &lock_and_validation_needed);
8352                 if (ret)
8353                         goto fail;
8354         }
8355
8356         /* Run this here since we want to validate the streams we created */
8357         ret = drm_atomic_helper_check_planes(dev, state);
8358         if (ret)
8359                 goto fail;
8360
8361         if (state->legacy_cursor_update) {
8362                 /*
8363                  * This is a fast cursor update coming from the plane update
8364                  * helper, check if it can be done asynchronously for better
8365                  * performance.
8366                  */
8367                 state->async_update =
8368                         !drm_atomic_helper_async_check(dev, state);
8369
8370                 /*
8371                  * Skip the remaining global validation if this is an async
8372                  * update. Cursor updates can be done without affecting
8373                  * state or bandwidth calcs and this avoids the performance
8374                  * penalty of locking the private state object and
8375                  * allocating a new dc_state.
8376                  */
8377                 if (state->async_update)
8378                         return 0;
8379         }
8380
8381         /* Check scaling and underscan changes */
8382         /* TODO: Scaling-change validation was removed due to the inability to
8383          * commit a new stream into the context w/o causing a full reset. Need
8384          * to decide how to handle this.
8385          */
8386         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8387                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8388                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8389                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8390
8391                 /* Skip any modesets/resets */
8392                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8393                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8394                         continue;
8395
8396                 /* Skip anything that is not a scaling or underscan change */
8397                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8398                         continue;
8399
8400                 overall_update_type = UPDATE_TYPE_FULL;
8401                 lock_and_validation_needed = true;
8402         }
8403
8404         ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8405         if (ret)
8406                 goto fail;
8407
8408         if (overall_update_type < update_type)
8409                 overall_update_type = update_type;
8410
8411         /*
8412          * lock_and_validation_needed was an old way to determine whether the
8413          * global lock is needed. It is kept to check that no corner cases broke:
8414          * lock_and_validation_needed == true implies UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8415          * lock_and_validation_needed == false implies UPDATE_TYPE_FAST
8416          */
8417         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8418                 WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8419
8420         if (overall_update_type > UPDATE_TYPE_FAST) {
8421                 ret = dm_atomic_get_state(state, &dm_state);
8422                 if (ret)
8423                         goto fail;
8424
8425                 ret = do_aquire_global_lock(dev, state);
8426                 if (ret)
8427                         goto fail;
8428
8429 #if defined(CONFIG_DRM_AMD_DC_DCN)
8430                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8431                         ret = -EINVAL;
8432                 else
8433                         ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8434                 if (ret)
8435                         goto fail;
8436 #endif
8437
8438                 /*
8439                  * Perform validation of MST topology in the state:
8440                  * We need to perform MST atomic check before calling
8441                  * dc_validate_global_state(), or there is a chance
8442                  * to get stuck in an infinite loop and hang eventually.
8443                  */
8444                 ret = drm_dp_mst_atomic_check(state);
8445                 if (ret)
8446                         goto fail;
8447
8448                 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8449                         ret = -EINVAL;
8450                         goto fail;
8451                 }
8452         } else {
8453                 /*
8454                  * The commit is a fast update. Fast updates shouldn't change
8455                  * the DC context, affect global validation, and can have their
8456                  * commit work done in parallel with other commits not touching
8457                  * the same resource. If we have a new DC context as part of
8458                  * the DM atomic state from validation we need to free it and
8459                  * retain the existing one instead.
8460                  */
8461                 struct dm_atomic_state *new_dm_state, *old_dm_state;
8462
8463                 new_dm_state = dm_atomic_get_new_state(state);
8464                 old_dm_state = dm_atomic_get_old_state(state);
8465
8466                 if (new_dm_state && old_dm_state) {
8467                         if (new_dm_state->context)
8468                                 dc_release_state(new_dm_state->context);
8469
8470                         new_dm_state->context = old_dm_state->context;
8471
8472                         if (old_dm_state->context)
8473                                 dc_retain_state(old_dm_state->context);
8474                 }
8475         }
8476
8477         /* Store the overall update type for use later in atomic check. */
8478         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8479                 struct dm_crtc_state *dm_new_crtc_state =
8480                         to_dm_crtc_state(new_crtc_state);
8481
8482                 dm_new_crtc_state->update_type = (int)overall_update_type;
8483         }
8484
8485         /* Must have succeeded at this point */
8486         WARN_ON(ret);
8487         return ret;
8488
8489 fail:
8490         if (ret == -EDEADLK)
8491                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8492         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8493                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8494         else
8495                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8496
8497         return ret;
8498 }
8499
8500 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8501                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8502 {
8503         uint8_t dpcd_data;
8504         bool capable = false;
8505
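             /*
              * DP_MSA_TIMING_PAR_IGNORED in the DP_DOWN_STREAM_PORT_COUNT DPCD
              * register indicates that the sink can ignore the MSA timing
              * parameters, a prerequisite for variable refresh over DP.
              */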
8506         if (amdgpu_dm_connector->dc_link &&
8507                 dm_helpers_dp_read_dpcd(
8508                                 NULL,
8509                                 amdgpu_dm_connector->dc_link,
8510                                 DP_DOWN_STREAM_PORT_COUNT,
8511                                 &dpcd_data,
8512                                 sizeof(dpcd_data))) {
8513                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8514         }
8515
8516         return capable;
8517 }
8518 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8519                                         struct edid *edid)
8520 {
8521         int i;
8522         bool edid_check_required;
8523         struct detailed_timing *timing;
8524         struct detailed_non_pixel *data;
8525         struct detailed_data_monitor_range *range;
8526         struct amdgpu_dm_connector *amdgpu_dm_connector =
8527                         to_amdgpu_dm_connector(connector);
8528         struct dm_connector_state *dm_con_state = NULL;
8529
8530         struct drm_device *dev = connector->dev;
8531         struct amdgpu_device *adev = dev->dev_private;
8532         bool freesync_capable = false;
8533
8534         if (!connector->state) {
8535                 DRM_ERROR("%s - Connector has no state\n", __func__);
8536                 goto update;
8537         }
8538
8539         if (!edid) {
8540                 dm_con_state = to_dm_connector_state(connector->state);
8541
8542                 amdgpu_dm_connector->min_vfreq = 0;
8543                 amdgpu_dm_connector->max_vfreq = 0;
8544                 amdgpu_dm_connector->pixel_clock_mhz = 0;
8545
8546                 goto update;
8547         }
8548
8549         dm_con_state = to_dm_connector_state(connector->state);
8550
8551         edid_check_required = false;
8552         if (!amdgpu_dm_connector->dc_sink) {
8553                 DRM_ERROR("dc_sink is NULL, could not add FreeSync module.\n");
8554                 goto update;
8555         }
8556         if (!adev->dm.freesync_module)
8557                 goto update;
8558         /*
8559          * If the EDID is non-NULL, restrict FreeSync support to DP and eDP sinks.
8560          */
8561         if (edid) {
8562                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8563                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8564                         edid_check_required = is_dp_capable_without_timing_msa(
8565                                                 adev->dm.dc,
8566                                                 amdgpu_dm_connector);
8567                 }
8568         }
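             /* Only parse the detailed descriptors on EDID 1.2 or newer. */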
8569         if (edid_check_required && (edid->version > 1 ||
8570            (edid->version == 1 && edid->revision > 1))) {
8571                 for (i = 0; i < 4; i++) {
8572
8573                         timing  = &edid->detailed_timings[i];
8574                         data    = &timing->data.other_data;
8575                         range   = &data->data.range;
8576                         /*
8577                          * Look for a monitor range descriptor, i.e. continuous frequency mode
8578                          */
8579                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
8580                                 continue;
8581                         /*
8582                          * Check for the range-limits-only flag. If flags == 1,
8583                          * no additional timing information is provided.
8584                          * Default GTF, GTF secondary curve and CVT are not
8585                          * supported.
8586                          */
8587                         if (range->flags != 1)
8588                                 continue;
8589
8590                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8591                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8592                         amdgpu_dm_connector->pixel_clock_mhz =
8593                                 range->pixel_clock_mhz * 10;
8594                         break;
8595                 }
8596
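                     /*
                      * Report FreeSync capability only when the supported
                      * refresh range is wider than 10 Hz.
                      */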
8597                 if (amdgpu_dm_connector->max_vfreq -
8598                     amdgpu_dm_connector->min_vfreq > 10) {
8599
8600                         freesync_capable = true;
8601                 }
8602         }
8603
8604 update:
8605         if (dm_con_state)
8606                 dm_con_state->freesync_capable = freesync_capable;
8607
8608         if (connector->vrr_capable_property)
8609                 drm_connector_set_vrr_capable_property(connector,
8610                                                        freesync_capable);
8611 }
8612
8613 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8614 {
8615         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8616
8617         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8618                 return;
8619         if (link->type == dc_connection_none)
8620                 return;
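             /*
              * DP_PSR_SUPPORT in the DPCD reports the sink's PSR version;
              * zero means PSR is not supported.
              */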
8621         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8622                                         dpcd_data, sizeof(dpcd_data))) {
8623                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8624
8625                 if (dpcd_data[0] == 0) {
8626                         link->psr_settings.psr_version = PSR_VERSION_UNSUPPORTED;
8627                         link->psr_settings.psr_feature_enabled = false;
8628                 } else {
8629                         link->psr_settings.psr_version = PSR_VERSION_1;
8630                         link->psr_settings.psr_feature_enabled = true;
8631                 }
8632
8633                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8634         }
8635 }
8636
8637 /*
8638  * amdgpu_dm_link_setup_psr() - configure the PSR link
8639  * @stream: stream state
8640  *
8641  * Return: true on success
8642  */
8643 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8644 {
8645         struct dc_link *link = NULL;
8646         struct psr_config psr_config = {0};
8647         struct psr_context psr_context = {0};
8648         struct dc *dc = NULL;
8649         bool ret = false;
8650
8651         if (stream == NULL)
8652                 return false;
8653
8654         link = stream->link;
8655         dc = link->ctx->dc;
8656
8657         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8658
8659         if (psr_config.psr_version > 0) {
8660                 psr_config.psr_exit_link_training_required = 0x1;
8661                 psr_config.psr_frame_capture_indication_req = 0;
8662                 psr_config.psr_rfb_setup_time = 0x37;
8663                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8664                 psr_config.allow_smu_optimizations = 0x0;
8665
8666                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8667
8668         }
8669         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8670
8671         return ret;
8672 }
8673
8674 /*
8675  * amdgpu_dm_psr_enable() - enable the PSR firmware
8676  * @stream: stream state
8677  *
8678  * Return: true on success
8679  */
8680 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8681 {
8682         struct dc_link *link = stream->link;
8683         unsigned int vsync_rate_hz = 0;
8684         struct dc_static_screen_params params = {0};
8685         /* Calculate the number of static frames before generating an
8686          * interrupt to enter PSR.
8687          */
8688         /* Initialize to a fail-safe of 2 static frames. */
8689         unsigned int num_frames_static = 2;
8690
8691         DRM_DEBUG_DRIVER("Enabling psr...\n");
8692
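             /*
              * vsync rate in Hz = pixel clock / (htotal * vtotal); e.g. a
              * 148.5 MHz pixel clock with 2200 x 1125 totals yields 60 Hz.
              */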
8693         vsync_rate_hz = div64_u64(div64_u64((
8694                         stream->timing.pix_clk_100hz * 100),
8695                         stream->timing.v_total),
8696                         stream->timing.h_total);
8697
8698         /*
8699          * Round up: calculate the number of frames such that at least
8700          * 30 ms of time has passed.
8701          */
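             /* e.g. at 60 Hz: frame_time = 16666 us, so 30000 / 16666 + 1 = 2 frames. */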
8702         if (vsync_rate_hz != 0) {
8703                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8704                 num_frames_static = (30000 / frame_time_microsec) + 1;
8705         }
8706
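             /*
              * Treat cursor, overlay and surface updates as screen activity
              * for the static-screen detection that gates PSR entry.
              */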
8707         params.triggers.cursor_update = true;
8708         params.triggers.overlay_update = true;
8709         params.triggers.surface_update = true;
8710         params.num_frames = num_frames_static;
8711
8712         dc_stream_set_static_screen_params(link->ctx->dc,
8713                                            &stream, 1,
8714                                            &params);
8715
8716         return dc_link_set_psr_allow_active(link, true, false);
8717 }
8718
8719 /*
8720  * amdgpu_dm_psr_disable() - disable the PSR firmware
8721  * @stream: stream state
8722  *
8723  * Return: true on success
8724  */
8725 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8726 {
8728         DRM_DEBUG_DRIVER("Disabling psr...\n");
8729
8730         return dc_link_set_psr_allow_active(stream->link, false, true);
8731 }