/* [extraction artifact from git web view — not part of the original source:
 *  Merge tag 'mailbox-v5.8' of git://git.linaro.org/landing-teams/working/fujitsu/integr...
 *  linux-2.6-microblaze.git / drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ] */
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57
58 #include "ivsrcid/ivsrcid_vislands30.h"
59
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97
98 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
109
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
119
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123
124 /*
125  * initializes drm_device display related structures, based on the information
126  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
127  * drm_encoder, drm_mode_config
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136                                 struct drm_plane *plane,
137                                 unsigned long possible_crtcs,
138                                 const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140                                struct drm_plane *plane,
141                                uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
144                                     uint32_t link_index,
145                                     struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147                                   struct amdgpu_encoder *aencoder,
148                                   uint32_t link_index);
149
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153                                    struct drm_atomic_state *state,
154                                    bool nonblock);
155
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159                                   struct drm_atomic_state *state);
160
161 static void handle_cursor_update(struct drm_plane *plane,
162                                  struct drm_plane_state *old_plane_state);
163
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168
169
170 /*
171  * dm_vblank_get_counter
172  *
173  * @brief
174  * Get counter for number of vertical blanks
175  *
176  * @param
177  * struct amdgpu_device *adev - [in] desired amdgpu device
178  * int disp_idx - [in] which CRTC to get the counter from
179  *
180  * @return
181  * Counter for vertical blanks
182  */
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
184 {
185         if (crtc >= adev->mode_info.num_crtc)
186                 return 0;
187         else {
188                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
190                                 acrtc->base.state);
191
192
193                 if (acrtc_state->stream == NULL) {
194                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
195                                   crtc);
196                         return 0;
197                 }
198
199                 return dc_stream_get_vblank_counter(acrtc_state->stream);
200         }
201 }
202
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204                                   u32 *vbl, u32 *position)
205 {
206         uint32_t v_blank_start, v_blank_end, h_position, v_position;
207
208         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209                 return -EINVAL;
210         else {
211                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213                                                 acrtc->base.state);
214
215                 if (acrtc_state->stream ==  NULL) {
216                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217                                   crtc);
218                         return 0;
219                 }
220
221                 /*
222                  * TODO rework base driver to use values directly.
223                  * for now parse it back into reg-format
224                  */
225                 dc_stream_get_scanoutpos(acrtc_state->stream,
226                                          &v_blank_start,
227                                          &v_blank_end,
228                                          &h_position,
229                                          &v_position);
230
231                 *position = v_position | (h_position << 16);
232                 *vbl = v_blank_start | (v_blank_end << 16);
233         }
234
235         return 0;
236 }
237
/* Stub: always reports the block as idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
243
/* Stub: waiting for idle always succeeds immediately. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
249
/* Stub: reports that no soft reset is required. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
254
/* Stub: soft reset is a no-op and always succeeds. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
260
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263                      int otg_inst)
264 {
265         struct drm_device *dev = adev->ddev;
266         struct drm_crtc *crtc;
267         struct amdgpu_crtc *amdgpu_crtc;
268
269         if (otg_inst == -1) {
270                 WARN_ON(1);
271                 return adev->mode_info.crtcs[0];
272         }
273
274         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275                 amdgpu_crtc = to_amdgpu_crtc(crtc);
276
277                 if (amdgpu_crtc->otg_inst == otg_inst)
278                         return amdgpu_crtc;
279         }
280
281         return NULL;
282 }
283
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289
290 /**
291  * dm_pflip_high_irq() - Handle pageflip interrupt
292  * @interrupt_params: ignored
293  *
294  * Handles the pageflip interrupt by notifying all interested parties
295  * that the pageflip has been completed.
296  */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	/* Map the IRQ source back to the CRTC whose flip completed. */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	/* event_lock serializes pflip_status/event against other irq paths. */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	/* Nothing to do unless a flip was actually submitted on this CRTC. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	/* A submitted flip should always carry a pending event. */
	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
396
/*
 * dm_vupdate_high_irq() - Handle the VUPDATE interrupt.
 * @interrupt_params: common_irq_params carrying adev and the irq source.
 *
 * In VRR mode, core vblank handling is deferred to this handler, which
 * runs after end of front-porch so vblank timestamps are valid. It also
 * performs BTR bookkeeping for pre-DCE12 ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	/* Map the IRQ source back to the CRTC it belongs to. */
	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				/* event_lock guards vrr_params updates. */
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
441
442 /**
443  * dm_crtc_high_irq() - Handles CRTC interrupt
444  * @interrupt_params: used for determining the CRTC instance
445  *
446  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
447  * event handler.
448  */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	/* Map the IRQ source back to the CRTC it belongs to. */
	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
			 amdgpu_dm_vrr_active(acrtc_state),
			 acrtc_state->active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	/* event_lock guards vrr_params and the pending pageflip event. */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
521
/* Stub: clockgating state changes are accepted but ignored. */
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}
527
/* Stub: powergating state changes are accepted but ignored. */
static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}
533
534 /* Prototypes of private functions */
535 static int dm_early_init(void* handle);
536
537 /* Allocate memory for FBC compressed data  */
538 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
539 {
540         struct drm_device *dev = connector->dev;
541         struct amdgpu_device *adev = dev->dev_private;
542         struct dm_comressor_info *compressor = &adev->dm.compressor;
543         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
544         struct drm_display_mode *mode;
545         unsigned long max_size = 0;
546
547         if (adev->dm.dc->fbc_compressor == NULL)
548                 return;
549
550         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
551                 return;
552
553         if (compressor->bo_ptr)
554                 return;
555
556
557         list_for_each_entry(mode, &connector->modes, head) {
558                 if (max_size < mode->htotal * mode->vtotal)
559                         max_size = mode->htotal * mode->vtotal;
560         }
561
562         if (max_size) {
563                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
564                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
565                             &compressor->gpu_addr, &compressor->cpu_addr);
566
567                 if (r)
568                         DRM_ERROR("DM: Failed to initialize FBC\n");
569                 else {
570                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
571                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
572                 }
573
574         }
575
576 }
577
578 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
579                                           int pipe, bool *enabled,
580                                           unsigned char *buf, int max_bytes)
581 {
582         struct drm_device *dev = dev_get_drvdata(kdev);
583         struct amdgpu_device *adev = dev->dev_private;
584         struct drm_connector *connector;
585         struct drm_connector_list_iter conn_iter;
586         struct amdgpu_dm_connector *aconnector;
587         int ret = 0;
588
589         *enabled = false;
590
591         mutex_lock(&adev->dm.audio_lock);
592
593         drm_connector_list_iter_begin(dev, &conn_iter);
594         drm_for_each_connector_iter(connector, &conn_iter) {
595                 aconnector = to_amdgpu_dm_connector(connector);
596                 if (aconnector->audio_inst != port)
597                         continue;
598
599                 *enabled = true;
600                 ret = drm_eld_size(connector->eld);
601                 memcpy(buf, connector->eld, min(max_bytes, ret));
602
603                 break;
604         }
605         drm_connector_list_iter_end(&conn_iter);
606
607         mutex_unlock(&adev->dm.audio_lock);
608
609         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
610
611         return ret;
612 }
613
/* Ops table handed to the audio component framework. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
617
618 static int amdgpu_dm_audio_component_bind(struct device *kdev,
619                                        struct device *hda_kdev, void *data)
620 {
621         struct drm_device *dev = dev_get_drvdata(kdev);
622         struct amdgpu_device *adev = dev->dev_private;
623         struct drm_audio_component *acomp = data;
624
625         acomp->ops = &amdgpu_dm_audio_component_ops;
626         acomp->dev = kdev;
627         adev->dm.audio_component = acomp;
628
629         return 0;
630 }
631
632 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
633                                           struct device *hda_kdev, void *data)
634 {
635         struct drm_device *dev = dev_get_drvdata(kdev);
636         struct amdgpu_device *adev = dev->dev_private;
637         struct drm_audio_component *acomp = data;
638
639         acomp->ops = NULL;
640         acomp->dev = NULL;
641         adev->dm.audio_component = NULL;
642 }
643
/* Component-framework bind/unbind callbacks for the audio component. */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind   = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
648
649 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
650 {
651         int i, ret;
652
653         if (!amdgpu_audio)
654                 return 0;
655
656         adev->mode_info.audio.enabled = true;
657
658         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
659
660         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
661                 adev->mode_info.audio.pin[i].channels = -1;
662                 adev->mode_info.audio.pin[i].rate = -1;
663                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
664                 adev->mode_info.audio.pin[i].status_bits = 0;
665                 adev->mode_info.audio.pin[i].category_code = 0;
666                 adev->mode_info.audio.pin[i].connected = false;
667                 adev->mode_info.audio.pin[i].id =
668                         adev->dm.dc->res_pool->audios[i]->inst;
669                 adev->mode_info.audio.pin[i].offset = 0;
670         }
671
672         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
673         if (ret < 0)
674                 return ret;
675
676         adev->dm.audio_registered = true;
677
678         return 0;
679 }
680
681 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
682 {
683         if (!amdgpu_audio)
684                 return;
685
686         if (!adev->mode_info.audio.enabled)
687                 return;
688
689         if (adev->dm.audio_registered) {
690                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
691                 adev->dm.audio_registered = false;
692         }
693
694         /* TODO: Disable audio? */
695
696         adev->mode_info.audio.enabled = false;
697 }
698
699 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
700 {
701         struct drm_audio_component *acomp = adev->dm.audio_component;
702
703         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
704                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
705
706                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
707                                                  pin, -1);
708         }
709 }
710
/*
 * dm_dmub_hw_init() - Initialize the DMUB service and load its firmware.
 * @adev: amdgpu device.
 *
 * Copies the DMUB firmware sections and the VBIOS into the framebuffer
 * regions described by adev->dm.dmub_fb_info, programs the hardware
 * parameters, starts the DMUB service and waits for firmware auto-load,
 * then initializes DMCU/ABM when present and creates the DC-side DMUB
 * server.
 *
 * Return: 0 on success (including when DMUB is not supported on this
 * ASIC), negative errno on failure.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/* Instruction/constant section starts after the PSP header. */
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
838
/*
 * amdgpu_dm_init() - Create and wire up the DAL display manager.
 * @adev: base amdgpu device
 *
 * Initializes DM locks and IRQ support, creates the CGS device and the
 * Display Core (DC) instance, brings up the DMUB interface and DC hardware,
 * then creates the freesync module, optional HDCP workqueue and the DRM
 * device state (connectors, vblank).
 *
 * Returns 0 on success, -EINVAL on any failure; on failure amdgpu_dm_fini()
 * is invoked to tear down whatever was already initialized.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* APUs in this list get GPU VM support for display buffers. */
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	/* Translate user-selected feature-mask module parameters to DC flags. */
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	/* Apply user-selected debug-mask module parameters. */
	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	/* DMUB must be running before dc_hardware_init() below. */
	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	/* Freesync failure is non-fatal: only log it. */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* HDCP workqueue only on Raven and newer; failure is non-fatal. */
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	/* Tears down whatever subset of the above succeeded. */
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
997
998 static void amdgpu_dm_fini(struct amdgpu_device *adev)
999 {
1000         amdgpu_dm_audio_fini(adev);
1001
1002         amdgpu_dm_destroy_drm_device(&adev->dm);
1003
1004 #ifdef CONFIG_DRM_AMD_DC_HDCP
1005         if (adev->dm.hdcp_workqueue) {
1006                 hdcp_destroy(adev->dm.hdcp_workqueue);
1007                 adev->dm.hdcp_workqueue = NULL;
1008         }
1009
1010         if (adev->dm.dc)
1011                 dc_deinit_callbacks(adev->dm.dc);
1012 #endif
1013         if (adev->dm.dc->ctx->dmub_srv) {
1014                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1015                 adev->dm.dc->ctx->dmub_srv = NULL;
1016         }
1017
1018         if (adev->dm.dmub_bo)
1019                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1020                                       &adev->dm.dmub_bo_gpu_addr,
1021                                       &adev->dm.dmub_bo_cpu_addr);
1022
1023         /* DC Destroy TODO: Replace destroy DAL */
1024         if (adev->dm.dc)
1025                 dc_destroy(&adev->dm.dc);
1026         /*
1027          * TODO: pageflip, vlank interrupt
1028          *
1029          * amdgpu_dm_irq_fini(adev);
1030          */
1031
1032         if (adev->dm.cgs_device) {
1033                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1034                 adev->dm.cgs_device = NULL;
1035         }
1036         if (adev->dm.freesync_module) {
1037                 mod_freesync_destroy(adev->dm.freesync_module);
1038                 adev->dm.freesync_module = NULL;
1039         }
1040
1041         mutex_destroy(&adev->dm.audio_lock);
1042         mutex_destroy(&adev->dm.dc_lock);
1043
1044         return;
1045 }
1046
1047 static int load_dmcu_fw(struct amdgpu_device *adev)
1048 {
1049         const char *fw_name_dmcu = NULL;
1050         int r;
1051         const struct dmcu_firmware_header_v1_0 *hdr;
1052
1053         switch(adev->asic_type) {
1054         case CHIP_BONAIRE:
1055         case CHIP_HAWAII:
1056         case CHIP_KAVERI:
1057         case CHIP_KABINI:
1058         case CHIP_MULLINS:
1059         case CHIP_TONGA:
1060         case CHIP_FIJI:
1061         case CHIP_CARRIZO:
1062         case CHIP_STONEY:
1063         case CHIP_POLARIS11:
1064         case CHIP_POLARIS10:
1065         case CHIP_POLARIS12:
1066         case CHIP_VEGAM:
1067         case CHIP_VEGA10:
1068         case CHIP_VEGA12:
1069         case CHIP_VEGA20:
1070         case CHIP_NAVI10:
1071         case CHIP_NAVI14:
1072         case CHIP_RENOIR:
1073                 return 0;
1074         case CHIP_NAVI12:
1075                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1076                 break;
1077         case CHIP_RAVEN:
1078                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1079                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1080                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1081                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1082                 else
1083                         return 0;
1084                 break;
1085         default:
1086                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1087                 return -EINVAL;
1088         }
1089
1090         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1091                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1092                 return 0;
1093         }
1094
1095         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1096         if (r == -ENOENT) {
1097                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1098                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1099                 adev->dm.fw_dmcu = NULL;
1100                 return 0;
1101         }
1102         if (r) {
1103                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1104                         fw_name_dmcu);
1105                 return r;
1106         }
1107
1108         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1109         if (r) {
1110                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1111                         fw_name_dmcu);
1112                 release_firmware(adev->dm.fw_dmcu);
1113                 adev->dm.fw_dmcu = NULL;
1114                 return r;
1115         }
1116
1117         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1118         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1119         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1120         adev->firmware.fw_size +=
1121                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1122
1123         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1124         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1125         adev->firmware.fw_size +=
1126                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1127
1128         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1129
1130         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1131
1132         return 0;
1133 }
1134
1135 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1136 {
1137         struct amdgpu_device *adev = ctx;
1138
1139         return dm_read_reg(adev->dm.dc->ctx, address);
1140 }
1141
1142 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1143                                      uint32_t value)
1144 {
1145         struct amdgpu_device *adev = ctx;
1146
1147         return dm_write_reg(adev->dm.dc->ctx, address, value);
1148 }
1149
1150 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1151 {
1152         struct dmub_srv_create_params create_params;
1153         struct dmub_srv_region_params region_params;
1154         struct dmub_srv_region_info region_info;
1155         struct dmub_srv_fb_params fb_params;
1156         struct dmub_srv_fb_info *fb_info;
1157         struct dmub_srv *dmub_srv;
1158         const struct dmcub_firmware_header_v1_0 *hdr;
1159         const char *fw_name_dmub;
1160         enum dmub_asic dmub_asic;
1161         enum dmub_status status;
1162         int r;
1163
1164         switch (adev->asic_type) {
1165         case CHIP_RENOIR:
1166                 dmub_asic = DMUB_ASIC_DCN21;
1167                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1168                 break;
1169
1170         default:
1171                 /* ASIC doesn't support DMUB. */
1172                 return 0;
1173         }
1174
1175         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1176         if (r) {
1177                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1178                 return 0;
1179         }
1180
1181         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1182         if (r) {
1183                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1184                 return 0;
1185         }
1186
1187         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1188
1189         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1190                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1191                         AMDGPU_UCODE_ID_DMCUB;
1192                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1193                         adev->dm.dmub_fw;
1194                 adev->firmware.fw_size +=
1195                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1196
1197                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1198                          adev->dm.dmcub_fw_version);
1199         }
1200
1201         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1202
1203         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1204         dmub_srv = adev->dm.dmub_srv;
1205
1206         if (!dmub_srv) {
1207                 DRM_ERROR("Failed to allocate DMUB service!\n");
1208                 return -ENOMEM;
1209         }
1210
1211         memset(&create_params, 0, sizeof(create_params));
1212         create_params.user_ctx = adev;
1213         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1214         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1215         create_params.asic = dmub_asic;
1216
1217         /* Create the DMUB service. */
1218         status = dmub_srv_create(dmub_srv, &create_params);
1219         if (status != DMUB_STATUS_OK) {
1220                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1221                 return -EINVAL;
1222         }
1223
1224         /* Calculate the size of all the regions for the DMUB service. */
1225         memset(&region_params, 0, sizeof(region_params));
1226
1227         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1228                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1229         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1230         region_params.vbios_size = adev->bios_size;
1231         region_params.fw_bss_data =
1232                 adev->dm.dmub_fw->data +
1233                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1234                 le32_to_cpu(hdr->inst_const_bytes);
1235         region_params.fw_inst_const =
1236                 adev->dm.dmub_fw->data +
1237                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1238                 PSP_HEADER_BYTES;
1239
1240         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1241                                            &region_info);
1242
1243         if (status != DMUB_STATUS_OK) {
1244                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1245                 return -EINVAL;
1246         }
1247
1248         /*
1249          * Allocate a framebuffer based on the total size of all the regions.
1250          * TODO: Move this into GART.
1251          */
1252         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1253                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1254                                     &adev->dm.dmub_bo_gpu_addr,
1255                                     &adev->dm.dmub_bo_cpu_addr);
1256         if (r)
1257                 return r;
1258
1259         /* Rebase the regions on the framebuffer address. */
1260         memset(&fb_params, 0, sizeof(fb_params));
1261         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1262         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1263         fb_params.region_info = &region_info;
1264
1265         adev->dm.dmub_fb_info =
1266                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1267         fb_info = adev->dm.dmub_fb_info;
1268
1269         if (!fb_info) {
1270                 DRM_ERROR(
1271                         "Failed to allocate framebuffer info for DMUB service!\n");
1272                 return -ENOMEM;
1273         }
1274
1275         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1276         if (status != DMUB_STATUS_OK) {
1277                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1278                 return -EINVAL;
1279         }
1280
1281         return 0;
1282 }
1283
/* IP-block sw_init hook: DMUB service setup, then the DMCU firmware. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = dm_dmub_sw_init(adev);

	if (ret)
		return ret;

	return load_dmcu_fw(adev);
}
1295
1296 static int dm_sw_fini(void *handle)
1297 {
1298         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1299
1300         kfree(adev->dm.dmub_fb_info);
1301         adev->dm.dmub_fb_info = NULL;
1302
1303         if (adev->dm.dmub_srv) {
1304                 dmub_srv_destroy(adev->dm.dmub_srv);
1305                 adev->dm.dmub_srv = NULL;
1306         }
1307
1308         if (adev->dm.dmub_fw) {
1309                 release_firmware(adev->dm.dmub_fw);
1310                 adev->dm.dmub_fw = NULL;
1311         }
1312
1313         if(adev->dm.fw_dmcu) {
1314                 release_firmware(adev->dm.fw_dmcu);
1315                 adev->dm.fw_dmcu = NULL;
1316         }
1317
1318         return 0;
1319 }
1320
1321 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1322 {
1323         struct amdgpu_dm_connector *aconnector;
1324         struct drm_connector *connector;
1325         struct drm_connector_list_iter iter;
1326         int ret = 0;
1327
1328         drm_connector_list_iter_begin(dev, &iter);
1329         drm_for_each_connector_iter(connector, &iter) {
1330                 aconnector = to_amdgpu_dm_connector(connector);
1331                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1332                     aconnector->mst_mgr.aux) {
1333                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1334                                          aconnector,
1335                                          aconnector->base.base.id);
1336
1337                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1338                         if (ret < 0) {
1339                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1340                                 aconnector->dc_link->type =
1341                                         dc_connection_single;
1342                                 break;
1343                         }
1344                 }
1345         }
1346         drm_connector_list_iter_end(&iter);
1347
1348         return ret;
1349 }
1350
1351 static int dm_late_init(void *handle)
1352 {
1353         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1354
1355         struct dmcu_iram_parameters params;
1356         unsigned int linear_lut[16];
1357         int i;
1358         struct dmcu *dmcu = NULL;
1359         bool ret;
1360
1361         if (!adev->dm.fw_dmcu)
1362                 return detect_mst_link_for_all_connectors(adev->ddev);
1363
1364         dmcu = adev->dm.dc->res_pool->dmcu;
1365
1366         for (i = 0; i < 16; i++)
1367                 linear_lut[i] = 0xFFFF * i / 15;
1368
1369         params.set = 0;
1370         params.backlight_ramping_start = 0xCCCC;
1371         params.backlight_ramping_reduction = 0xCCCCCCCC;
1372         params.backlight_lut_array_size = 16;
1373         params.backlight_lut_array = linear_lut;
1374
1375         /* Min backlight level after ABM reduction,  Don't allow below 1%
1376          * 0xFFFF x 0.01 = 0x28F
1377          */
1378         params.min_abm_backlight = 0x28F;
1379
1380         ret = dmcu_load_iram(dmcu, params);
1381
1382         if (!ret)
1383                 return -EINVAL;
1384
1385         return detect_mst_link_for_all_connectors(adev->ddev);
1386 }
1387
/*
 * s3_handle_mst() - Suspend or resume MST topology managers around S3.
 * @dev: DRM device
 * @suspend: true to suspend all MST managers, false to resume them
 *
 * Walks every connector and suspends/resumes the topology manager of each
 * root MST branch link. If a manager fails to resume, MST is torn down on
 * that link and a hotplug event is fired so userspace re-probes.
 */
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		/* Skip non-MST links; connectors with an mst_port are
		 * presumably downstream MST ports managed through their
		 * root connector — NOTE(review): verify that assumption.
		 */
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				/* Topology did not come back: disable MST on
				 * this link and re-probe via hotplug below.
				 */
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
1421
1422 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1423 {
1424         struct smu_context *smu = &adev->smu;
1425         int ret = 0;
1426
1427         if (!is_support_sw_smu(adev))
1428                 return 0;
1429
1430         /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1431          * on window driver dc implementation.
1432          * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1433          * should be passed to smu during boot up and resume from s3.
1434          * boot up: dc calculate dcn watermark clock settings within dc_create,
1435          * dcn20_resource_construct
1436          * then call pplib functions below to pass the settings to smu:
1437          * smu_set_watermarks_for_clock_ranges
1438          * smu_set_watermarks_table
1439          * navi10_set_watermarks_table
1440          * smu_write_watermarks_table
1441          *
1442          * For Renoir, clock settings of dcn watermark are also fixed values.
1443          * dc has implemented different flow for window driver:
1444          * dc_hardware_init / dc_set_power_state
1445          * dcn10_init_hw
1446          * notify_wm_ranges
1447          * set_wm_ranges
1448          * -- Linux
1449          * smu_set_watermarks_for_clock_ranges
1450          * renoir_set_watermarks_table
1451          * smu_write_watermarks_table
1452          *
1453          * For Linux,
1454          * dc_hardware_init -> amdgpu_dm_init
1455          * dc_set_power_state --> dm_resume
1456          *
1457          * therefore, this function apply to navi10/12/14 but not Renoir
1458          * *
1459          */
1460         switch(adev->asic_type) {
1461         case CHIP_NAVI10:
1462         case CHIP_NAVI14:
1463         case CHIP_NAVI12:
1464                 break;
1465         default:
1466                 return 0;
1467         }
1468
1469         mutex_lock(&smu->mutex);
1470
1471         /* pass data to smu controller */
1472         if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1473                         !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1474                 ret = smu_write_watermarks_table(smu);
1475
1476                 if (ret) {
1477                         mutex_unlock(&smu->mutex);
1478                         DRM_ERROR("Failed to update WMTABLE!\n");
1479                         return ret;
1480                 }
1481                 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1482         }
1483
1484         mutex_unlock(&smu->mutex);
1485
1486         return 0;
1487 }
1488
1489 /**
1490  * dm_hw_init() - Initialize DC device
1491  * @handle: The base driver device containing the amdgpu_dm device.
1492  *
1493  * Initialize the &struct amdgpu_display_manager device. This involves calling
1494  * the initializers of each DM component, then populating the struct with them.
1495  *
1496  * Although the function implies hardware initialization, both hardware and
1497  * software are initialized here. Splitting them out to their relevant init
1498  * hooks is a future TODO item.
1499  *
1500  * Some notable things that are initialized here:
1501  *
1502  * - Display Core, both software and hardware
1503  * - DC modules that we need (freesync and color management)
1504  * - DRM software states
1505  * - Interrupt sources and handlers
1506  * - Vblank support
1507  * - Debug FS entries, if enabled
1508  */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* Create DAL display manager. Propagate failure instead of
	 * silently continuing: amdgpu_dm_init() already tore itself down
	 * via amdgpu_dm_fini() on its error path, so running hpd init on
	 * top of that would touch freed state.
	 */
	r = amdgpu_dm_init(adev);
	if (r)
		return r;

	amdgpu_dm_hpd_init(adev);

	return 0;
}
1518
1519 /**
1520  * dm_hw_fini() - Teardown DC device
1521  * @handle: The base driver device containing the amdgpu_dm device.
1522  *
1523  * Teardown components within &struct amdgpu_display_manager that require
1524  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1525  * were loaded. Also flush IRQ workqueues and disable them.
1526  */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Stop hotplug detection first, then interrupts, then the DM. */
	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
1537
1538
1539 static int dm_enable_vblank(struct drm_crtc *crtc);
1540 static void dm_disable_vblank(struct drm_crtc *crtc);
1541
1542 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1543                                  struct dc_state *state, bool enable)
1544 {
1545         enum dc_irq_source irq_source;
1546         struct amdgpu_crtc *acrtc;
1547         int rc = -EBUSY;
1548         int i = 0;
1549
1550         for (i = 0; i < state->stream_count; i++) {
1551                 acrtc = get_crtc_by_otg_inst(
1552                                 adev, state->stream_status[i].primary_otg_inst);
1553
1554                 if (acrtc && state->stream_status[i].plane_count != 0) {
1555                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1556                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1557                         DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1558                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1559                         if (rc)
1560                                 DRM_WARN("Failed to %s pflip interrupts\n",
1561                                          enable ? "enable" : "disable");
1562
1563                         if (enable) {
1564                                 rc = dm_enable_vblank(&acrtc->base);
1565                                 if (rc)
1566                                         DRM_WARN("Failed to enable vblank interrupts\n");
1567                         } else {
1568                                 dm_disable_vblank(&acrtc->base);
1569                         }
1570
1571                 }
1572         }
1573
1574 }
1575
/*
 * amdgpu_dm_commit_zero_streams() - Commit a DC state with no streams.
 * @dc: display core instance
 *
 * Copies the current DC state, strips every stream (and its planes) from
 * the copy, validates the result and commits it — effectively blanking
 * all displays. Used around GPU reset (see dm_suspend).
 *
 * Return: DC_OK on success, or the DC error that aborted the process.
 */
enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}


	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	/* Drop our reference on the copied state in every outcome. */
	dc_release_state(context);

context_alloc_fail:
	return res;
}
1627
/*
 * dm_suspend() - IP-block suspend hook for the display manager.
 * @handle: base driver device containing the amdgpu_dm device
 *
 * Two paths: during GPU reset, the current DC state is cached, interrupts
 * for its streams are disabled and all streams are committed away; on a
 * regular S3 suspend, DRM atomic state is saved, MST managers are
 * suspended, DM IRQs quiesced and DC put into D3.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (adev->in_gpu_reset) {
		mutex_lock(&dm->dc_lock);
		/* Cached state is restored by the resume side of the reset;
		 * note the lock is intentionally held across the reset.
		 */
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	/* Regular S3 suspend path. */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);


	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
1659
1660 static struct amdgpu_dm_connector *
1661 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1662                                              struct drm_crtc *crtc)
1663 {
1664         uint32_t i;
1665         struct drm_connector_state *new_con_state;
1666         struct drm_connector *connector;
1667         struct drm_crtc *crtc_from_state;
1668
1669         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1670                 crtc_from_state = new_con_state->crtc;
1671
1672                 if (crtc_from_state == crtc)
1673                         return to_amdgpu_dm_connector(connector);
1674         }
1675
1676         return NULL;
1677 }
1678
1679 static void emulated_link_detect(struct dc_link *link)
1680 {
1681         struct dc_sink_init_data sink_init_data = { 0 };
1682         struct display_sink_capability sink_caps = { 0 };
1683         enum dc_edid_status edid_status;
1684         struct dc_context *dc_ctx = link->ctx;
1685         struct dc_sink *sink = NULL;
1686         struct dc_sink *prev_sink = NULL;
1687
1688         link->type = dc_connection_none;
1689         prev_sink = link->local_sink;
1690
1691         if (prev_sink != NULL)
1692                 dc_sink_retain(prev_sink);
1693
1694         switch (link->connector_signal) {
1695         case SIGNAL_TYPE_HDMI_TYPE_A: {
1696                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1697                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1698                 break;
1699         }
1700
1701         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1702                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1703                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1704                 break;
1705         }
1706
1707         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1708                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1709                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1710                 break;
1711         }
1712
1713         case SIGNAL_TYPE_LVDS: {
1714                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1715                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1716                 break;
1717         }
1718
1719         case SIGNAL_TYPE_EDP: {
1720                 sink_caps.transaction_type =
1721                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1722                 sink_caps.signal = SIGNAL_TYPE_EDP;
1723                 break;
1724         }
1725
1726         case SIGNAL_TYPE_DISPLAY_PORT: {
1727                 sink_caps.transaction_type =
1728                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1729                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1730                 break;
1731         }
1732
1733         default:
1734                 DC_ERROR("Invalid connector type! signal:%d\n",
1735                         link->connector_signal);
1736                 return;
1737         }
1738
1739         sink_init_data.link = link;
1740         sink_init_data.sink_signal = sink_caps.signal;
1741
1742         sink = dc_sink_create(&sink_init_data);
1743         if (!sink) {
1744                 DC_ERROR("Failed to create sink!\n");
1745                 return;
1746         }
1747
1748         /* dc_sink_create returns a new reference */
1749         link->local_sink = sink;
1750
1751         edid_status = dm_helpers_read_local_edid(
1752                         link->ctx,
1753                         link,
1754                         sink);
1755
1756         if (edid_status != EDID_OK)
1757                 DC_ERROR("Failed to read EDID");
1758
1759 }
1760
/*
 * Re-commit every stream/plane of @dc_state with force_full_update set,
 * so DC fully reprograms the hardware after a GPU reset.
 */
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	/* Heap-allocated scratch bundle: too large for the kernel stack. */
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} * bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		/*
		 * NOTE(review): dc_state->stream_status here always refers to
		 * the first stream's status even though the outer loop walks
		 * all streams - looks like it should be stream_status[k];
		 * confirm against the dc_state layout before changing.
		 */
		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			/* Force DC to reprogram this surface from scratch. */
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}
1800
/*
 * amd_ip_funcs::resume hook.
 *
 * GPU-reset path: re-initialize DMUB, power DC back up, replay the
 * dc_state cached by dm_suspend() (with full-update flags forced) and
 * release it.  This path also releases dm->dc_lock, which dm_suspend()
 * left held for the duration of the reset.
 *
 * Regular resume path: rebuild the private dc_state, re-init DMUB, power
 * on DC, redo connector detection, then restore the cached atomic state
 * via drm_atomic_helper_resume() with forced mode sets.
 */
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (adev->in_gpu_reset) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		/*
		 * Mark everything dirty so the cached state is committed as
		 * a full update, not an incremental diff.
		 *
		 * NOTE(review): stream_status is not indexed by i here (same
		 * pattern as dm_gpureset_commit_state) - only the first
		 * stream's planes are flagged; confirm intent.
		 */
		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
				dc_state->stream_status->plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		/* Drop the snapshot taken by dm_suspend(). */
		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		/* Pairs with the mutex_lock() in dm_suspend()'s reset path. */
		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Force-enabled connectors with nothing attached get a fake sink. */
		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
1950
1951 /**
1952  * DOC: DM Lifecycle
1953  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1955  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1956  * the base driver's device list to be initialized and torn down accordingly.
1957  *
1958  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1959  */
1960
/* IP-block callbacks hooking the display manager into the amdgpu core. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
1978
/* Version descriptor registered with the amdgpu IP-block framework. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
1987
1988
1989 /**
1990  * DOC: atomic
1991  *
1992  * *WIP*
1993  */
1994
/* DRM mode-config entry points routed to the DM atomic implementation. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};
2001
2002 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2003         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2004 };
2005
/*
 * Derive aux-backlight capabilities for @aconnector from the sink's DPCD
 * extended caps and the CTA-861-G HDR static metadata (max/min CLL),
 * storing the results in dm->backlight_caps.
 */
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	/* Rounded values of 50*2^(r/32) for r in 0..31 (see comment below). */
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	/* Any aux-controlled backlight mode enables aux support. */
	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From the Euclids division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * We just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	/*
	 * NOTE(review): DIV_ROUND_CLOSEST(min_cll, 255) rounds to 0 for any
	 * min_cll < 128, collapsing min to 0 - verify this coarse rounding
	 * is intended rather than a fixed-point scaling bug.
	 */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
2060
/*
 * Synchronize the DRM connector (@aconnector) with the sink state DC
 * produced during the latest link detection: swap in the new dc_sink,
 * refresh EDID/freesync/CEC/backlight caps on connect, and clear them on
 * disconnect.  Manages the dc_sink reference counts throughout; MST
 * connectors are handled by the DRM MST framework instead.
 */
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	/* Hold a local reference for the duration of this function. */
	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			/* Headless: fall back to the emulated sink. */
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* No EDID from the sink: clear cached EDID and CEC. */
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		/* Disconnect: tear down EDID, freesync, CEC and the sink ref. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	/* Drop the local reference taken at the top. */
	if (sink)
		dc_sink_release(sink);
}
2201
/*
 * Low-context handler for a long HPD pulse (connect/disconnect) on the
 * connector passed as @param.  Re-runs link detection (emulated for
 * force-enabled connectors with nothing attached), updates the connector
 * state and notifies userspace via a hotplug event.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* HDCP must restart from scratch after any (dis)connect. */
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		/* Force-enabled but nothing attached: fake the detection. */
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
2253
/*
 * Service DP short-pulse (HPD_RX) interrupts for an MST root connector:
 * read the sink's ESI/IRQ DPCD registers, hand them to the DRM MST
 * manager, ACK the handled bits back to the sink, and repeat while new
 * IRQs keep arriving (bounded by max_process_count).
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	/* Cap the drain loop to guard against a sink that never settles. */
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			/* Skip byte 0 (sink count); only the IRQ bytes are ACKed. */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
2331
/*
 * Low-context handler for a DP short pulse (HPD_RX) on the connector
 * passed as @param.  Lets DC service the IRQ data; when the downstream
 * port status changed (and this is not an MST root), re-runs detection
 * and sends a hotplug event.  Also forwards CP_IRQ to the HDCP workqueue
 * and MST short pulses to dm_handle_hpd_rx_irq().
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = dev->dev_private;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);


#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			/* Force-enabled but nothing attached: fake the detection. */
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Content-protection IRQ: hand off to the HDCP worker. */
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
	}
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
2410
2411 static void register_hpd_handlers(struct amdgpu_device *adev)
2412 {
2413         struct drm_device *dev = adev->ddev;
2414         struct drm_connector *connector;
2415         struct amdgpu_dm_connector *aconnector;
2416         const struct dc_link *dc_link;
2417         struct dc_interrupt_params int_params = {0};
2418
2419         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2420         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2421
2422         list_for_each_entry(connector,
2423                         &dev->mode_config.connector_list, head) {
2424
2425                 aconnector = to_amdgpu_dm_connector(connector);
2426                 dc_link = aconnector->dc_link;
2427
2428                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2429                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2430                         int_params.irq_source = dc_link->irq_source_hpd;
2431
2432                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2433                                         handle_hpd_irq,
2434                                         (void *) aconnector);
2435                 }
2436
2437                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2438
2439                         /* Also register for DP short pulse (hpd_rx). */
2440                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2441                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2442
2443                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2444                                         handle_hpd_rx_irq,
2445                                         (void *) aconnector);
2446                 }
2447         }
2448 }
2449
/*
 * Register IRQ sources and initialize IRQ callbacks for the DCE-based
 * ASICs (the CIK/VI/Vega cases in amdgpu_dm_initialize_drm_device()).
 *
 * For each per-display VBLANK, VUPDATE and GRPH_PFLIP source, plus the
 * single HPD source, an interrupt id is added with the base driver and
 * a DM high-IRQ handler is registered against the corresponding DC irq
 * source. Returns 0 on success or the amdgpu_irq_add_id() error code.
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
        unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

        /* Vega and newer use the SOC15 IH client id for DCE interrupts. */
        if (adev->asic_type >= CHIP_VEGA10)
                client_id = SOC15_IH_CLIENTID_DCE;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling. */

        /* Use VBLANK interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* Index the per-display params slot by DC irq source. */
                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_crtc_high_irq, c_irq_params);
        }

        /* Use VUPDATE interrupt; the stride of 2 matches the spacing of
         * the D1..D6 V_UPDATE srcids. */
        for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
                if (r) {
                        DRM_ERROR("Failed to add vupdate irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_vupdate_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt; srcids are likewise two apart. */
        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
                        i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD: one shared id; per-connector handlers are registered below. */
        r = amdgpu_irq_add_id(adev, client_id,
                        VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
2554
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Register IRQ sources and initialize IRQ callbacks for DCN-based ASICs
 * (Raven/Navi/Renoir cases in amdgpu_dm_initialize_drm_device()).
 *
 * Mirrors dce110_register_irq_handlers(), but DCN exposes per-CRTC
 * VSTARTUP, VUPDATE_NO_LOCK and HUBP flip srcids that are contiguous,
 * so each loop spans num_crtc consecutive ids. Returns 0 on success or
 * the amdgpu_irq_add_id() error code.
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling.
         */

        /* Use VSTARTUP interrupt */
        for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
                        i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* Index the per-display params slot by DC irq source. */
                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(
                        adev, &int_params, dm_crtc_high_irq, c_irq_params);
        }

        /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
         * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
         * to trigger at end of each vblank, regardless of state of the lock,
         * matching DCE behaviour.
         */
        for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
             i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
             i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

                if (r) {
                        DRM_ERROR("Failed to add vupdate irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_vupdate_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
                        i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD: one shared id; per-connector handlers are registered below. */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
                        &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
#endif
2669
2670 /*
2671  * Acquires the lock for the atomic state object and returns
2672  * the new atomic state.
2673  *
2674  * This should only be called during atomic check.
2675  */
2676 static int dm_atomic_get_state(struct drm_atomic_state *state,
2677                                struct dm_atomic_state **dm_state)
2678 {
2679         struct drm_device *dev = state->dev;
2680         struct amdgpu_device *adev = dev->dev_private;
2681         struct amdgpu_display_manager *dm = &adev->dm;
2682         struct drm_private_state *priv_state;
2683
2684         if (*dm_state)
2685                 return 0;
2686
2687         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2688         if (IS_ERR(priv_state))
2689                 return PTR_ERR(priv_state);
2690
2691         *dm_state = to_dm_atomic_state(priv_state);
2692
2693         return 0;
2694 }
2695
2696 struct dm_atomic_state *
2697 dm_atomic_get_new_state(struct drm_atomic_state *state)
2698 {
2699         struct drm_device *dev = state->dev;
2700         struct amdgpu_device *adev = dev->dev_private;
2701         struct amdgpu_display_manager *dm = &adev->dm;
2702         struct drm_private_obj *obj;
2703         struct drm_private_state *new_obj_state;
2704         int i;
2705
2706         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2707                 if (obj->funcs == dm->atomic_obj.funcs)
2708                         return to_dm_atomic_state(new_obj_state);
2709         }
2710
2711         return NULL;
2712 }
2713
2714 struct dm_atomic_state *
2715 dm_atomic_get_old_state(struct drm_atomic_state *state)
2716 {
2717         struct drm_device *dev = state->dev;
2718         struct amdgpu_device *adev = dev->dev_private;
2719         struct amdgpu_display_manager *dm = &adev->dm;
2720         struct drm_private_obj *obj;
2721         struct drm_private_state *old_obj_state;
2722         int i;
2723
2724         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2725                 if (obj->funcs == dm->atomic_obj.funcs)
2726                         return to_dm_atomic_state(old_obj_state);
2727         }
2728
2729         return NULL;
2730 }
2731
/*
 * .atomic_duplicate_state hook for DM's private object: allocate a new
 * dm_atomic_state and give it a copy of the current dc_state, so an
 * atomic transaction can be validated against its own context.
 *
 * Returns NULL if allocation fails or no context could be copied
 * (i.e. there is no old state/context to duplicate from).
 */
static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
        struct dm_atomic_state *old_state, *new_state;

        new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
        if (!new_state)
                return NULL;

        /* Wire up the base drm_private_state bookkeeping. */
        __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

        old_state = to_dm_atomic_state(obj->state);

        /* Deep-copy the DC context; new_state owns the copy. */
        if (old_state && old_state->context)
                new_state->context = dc_copy_state(old_state->context);

        /* Without a context the duplicated state is useless — bail. */
        if (!new_state->context) {
                kfree(new_state);
                return NULL;
        }

        return &new_state->base;
}
2755
/*
 * .atomic_destroy_state hook for DM's private object: drop the state's
 * reference on its dc_state (taken in dm_atomic_duplicate_state() /
 * amdgpu_dm_mode_config_init()) and free the wrapper.
 */
static void dm_atomic_destroy_state(struct drm_private_obj *obj,
                                    struct drm_private_state *state)
{
        struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

        if (dm_state && dm_state->context)
                dc_release_state(dm_state->context);

        kfree(dm_state);
}
2766
/* Vtable for DM's atomic private object; also used to identify the
 * object in dm_atomic_get_new_state()/dm_atomic_get_old_state(). */
static struct drm_private_state_funcs dm_atomic_state_funcs = {
        .atomic_duplicate_state = dm_atomic_duplicate_state,
        .atomic_destroy_state = dm_atomic_destroy_state,
};
2771
/*
 * Initialize the DRM mode config for the device: limits, preferred
 * depth, async flip support, plus DM's private atomic object seeded
 * with a copy of the current DC resource state. Finishes by creating
 * the modeset properties and the audio components.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from the props/audio init helpers.
 *
 * NOTE(review): on failure of amdgpu_display_modeset_create_props() or
 * amdgpu_dm_audio_init() the already-initialized private obj is not
 * torn down here — presumably the caller's unwind path handles it via
 * amdgpu_dm_destroy_drm_device(); verify.
 */
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
        struct dm_atomic_state *state;
        int r;

        adev->mode_info.mode_config_initialized = true;

        adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
        adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

        adev->ddev->mode_config.max_width = 16384;
        adev->ddev->mode_config.max_height = 16384;

        adev->ddev->mode_config.preferred_depth = 24;
        adev->ddev->mode_config.prefer_shadow = 1;
        /* indicates support for immediate flip */
        adev->ddev->mode_config.async_page_flip = true;

        adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        state->context = dc_create_state(adev->dm.dc);
        if (!state->context) {
                kfree(state);
                return -ENOMEM;
        }

        /* Seed the private state with the current DC resource state. */
        dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

        /* The private obj takes ownership of @state from here on. */
        drm_atomic_private_obj_init(adev->ddev,
                                    &adev->dm.atomic_obj,
                                    &state->base,
                                    &dm_atomic_state_funcs);

        r = amdgpu_display_modeset_create_props(adev);
        if (r)
                return r;

        r = amdgpu_dm_audio_init(adev);
        if (r)
                return r;

        return 0;
}
2819
2820 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2821 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2822 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2823
2824 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2825         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2826
/*
 * Populate dm->backlight_caps once. With ACPI, query the platform for
 * backlight caps; for AUX-capable panels only the validity/aux flags
 * matter, so the input-signal range is left alone. Without ACPI (or
 * when the query fails) fall back to the driver defaults.
 */
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
        struct amdgpu_dm_backlight_caps caps;

        /* Already resolved on a previous call — nothing to do. */
        if (dm->backlight_caps.caps_valid)
                return;

        amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
        if (caps.caps_valid) {
                dm->backlight_caps.caps_valid = true;
                /* AUX panels don't use the min/max input signal range. */
                if (caps.aux_support)
                        return;
                dm->backlight_caps.min_input_signal = caps.min_input_signal;
                dm->backlight_caps.max_input_signal = caps.max_input_signal;
        } else {
                dm->backlight_caps.min_input_signal =
                                AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
                dm->backlight_caps.max_input_signal =
                                AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
        }
#else
        if (dm->backlight_caps.aux_support)
                return;

        dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
        dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
2856
2857 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2858 {
2859         bool rc;
2860
2861         if (!link)
2862                 return 1;
2863
2864         rc = dc_link_set_backlight_level_nits(link, true, brightness,
2865                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2866
2867         return rc ? 0 : 1;
2868 }
2869
2870 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2871                               const uint32_t user_brightness)
2872 {
2873         u32 min, max, conversion_pace;
2874         u32 brightness = user_brightness;
2875
2876         if (!caps)
2877                 goto out;
2878
2879         if (!caps->aux_support) {
2880                 max = caps->max_input_signal;
2881                 min = caps->min_input_signal;
2882                 /*
2883                  * The brightness input is in the range 0-255
2884                  * It needs to be rescaled to be between the
2885                  * requested min and max input signal
2886                  * It also needs to be scaled up by 0x101 to
2887                  * match the DC interface which has a range of
2888                  * 0 to 0xffff
2889                  */
2890                 conversion_pace = 0x101;
2891                 brightness =
2892                         user_brightness
2893                         * conversion_pace
2894                         * (max - min)
2895                         / AMDGPU_MAX_BL_LEVEL
2896                         + min * conversion_pace;
2897         } else {
2898                 /* TODO
2899                  * We are doing a linear interpolation here, which is OK but
2900                  * does not provide the optimal result. We probably want
2901                  * something close to the Perceptual Quantizer (PQ) curve.
2902                  */
2903                 max = caps->aux_max_input_signal;
2904                 min = caps->aux_min_input_signal;
2905
2906                 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2907                                + user_brightness * max;
2908                 // Multiple the value by 1000 since we use millinits
2909                 brightness *= 1000;
2910                 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2911         }
2912
2913 out:
2914         return brightness;
2915 }
2916
2917 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2918 {
2919         struct amdgpu_display_manager *dm = bl_get_data(bd);
2920         struct amdgpu_dm_backlight_caps caps;
2921         struct dc_link *link = NULL;
2922         u32 brightness;
2923         bool rc;
2924
2925         amdgpu_dm_update_backlight_caps(dm);
2926         caps = dm->backlight_caps;
2927
2928         link = (struct dc_link *)dm->backlight_link;
2929
2930         brightness = convert_brightness(&caps, bd->props.brightness);
2931         // Change brightness based on AUX property
2932         if (caps.aux_support)
2933                 return set_backlight_via_aux(link, brightness);
2934
2935         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2936
2937         return rc ? 0 : 1;
2938 }
2939
2940 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2941 {
2942         struct amdgpu_display_manager *dm = bl_get_data(bd);
2943         int ret = dc_link_get_backlight_level(dm->backlight_link);
2944
2945         if (ret == DC_ERROR_UNEXPECTED)
2946                 return bd->props.brightness;
2947         return ret;
2948 }
2949
/* Backlight class hooks; the core handles suspend/resume for us. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
        .options = BL_CORE_SUSPENDRESUME,
        .get_brightness = amdgpu_dm_backlight_get_brightness,
        .update_status  = amdgpu_dm_backlight_update_status,
};
2955
/*
 * Register an "amdgpu_blN" backlight class device for this DM instance,
 * initialized to full brightness. Failure is logged but not fatal —
 * dm->backlight_dev holds an ERR_PTR which register_backlight_device()
 * checks before wiring up dm->backlight_link.
 */
static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
        char bl_name[16];
        struct backlight_properties props = { 0 };

        amdgpu_dm_update_backlight_caps(dm);

        props.max_brightness = AMDGPU_MAX_BL_LEVEL;
        props.brightness = AMDGPU_MAX_BL_LEVEL;
        props.type = BACKLIGHT_RAW;

        /* Name is unique per DRM primary node, e.g. "amdgpu_bl0". */
        snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
                        dm->adev->ddev->primary->index);

        dm->backlight_dev = backlight_device_register(bl_name,
                        dm->adev->ddev->dev,
                        dm,
                        &amdgpu_dm_backlight_ops,
                        &props);

        if (IS_ERR(dm->backlight_dev))
                DRM_ERROR("DM: Backlight registration failed!\n");
        else
                DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
2982
2983 #endif
2984
2985 static int initialize_plane(struct amdgpu_display_manager *dm,
2986                             struct amdgpu_mode_info *mode_info, int plane_id,
2987                             enum drm_plane_type plane_type,
2988                             const struct dc_plane_cap *plane_cap)
2989 {
2990         struct drm_plane *plane;
2991         unsigned long possible_crtcs;
2992         int ret = 0;
2993
2994         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2995         if (!plane) {
2996                 DRM_ERROR("KMS: Failed to allocate plane\n");
2997                 return -ENOMEM;
2998         }
2999         plane->type = plane_type;
3000
3001         /*
3002          * HACK: IGT tests expect that the primary plane for a CRTC
3003          * can only have one possible CRTC. Only expose support for
3004          * any CRTC if they're not going to be used as a primary plane
3005          * for a CRTC - like overlay or underlay planes.
3006          */
3007         possible_crtcs = 1 << plane_id;
3008         if (plane_id >= dm->dc->caps.max_streams)
3009                 possible_crtcs = 0xff;
3010
3011         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3012
3013         if (ret) {
3014                 DRM_ERROR("KMS: Failed to initialize plane\n");
3015                 kfree(plane);
3016                 return ret;
3017         }
3018
3019         if (mode_info)
3020                 mode_info->planes[plane_id] = plane;
3021
3022         return ret;
3023 }
3024
3025
/*
 * If @link is an eDP/LVDS panel with something connected, register a
 * backlight device for it and remember the link the backlight drives.
 * No-op when backlight class support is not built in.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
                                      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
            link->type != dc_connection_none) {
                /*
                 * Even if registration failed, we should continue with
                 * DM initialization because not having a backlight control
                 * is better than a black screen.
                 */
                amdgpu_dm_register_backlight_device(dm);

                /* Only remember the link when registration succeeded. */
                if (dm->backlight_dev)
                        dm->backlight_link = link;
        }
#endif
}
3046
3047
3048 /*
3049  * In this architecture, the association
3050  * connector -> encoder -> crtc
3051  * id not really requried. The crtc and connector will hold the
3052  * display_index as an abstraction to use with DAL component
3053  *
3054  * Returns 0 on success
3055  */
3056 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3057 {
3058         struct amdgpu_display_manager *dm = &adev->dm;
3059         int32_t i;
3060         struct amdgpu_dm_connector *aconnector = NULL;
3061         struct amdgpu_encoder *aencoder = NULL;
3062         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3063         uint32_t link_cnt;
3064         int32_t primary_planes;
3065         enum dc_connection_type new_connection_type = dc_connection_none;
3066         const struct dc_plane_cap *plane;
3067
3068         link_cnt = dm->dc->caps.max_links;
3069         if (amdgpu_dm_mode_config_init(dm->adev)) {
3070                 DRM_ERROR("DM: Failed to initialize mode config\n");
3071                 return -EINVAL;
3072         }
3073
3074         /* There is one primary plane per CRTC */
3075         primary_planes = dm->dc->caps.max_streams;
3076         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3077
3078         /*
3079          * Initialize primary planes, implicit planes for legacy IOCTLS.
3080          * Order is reversed to match iteration order in atomic check.
3081          */
3082         for (i = (primary_planes - 1); i >= 0; i--) {
3083                 plane = &dm->dc->caps.planes[i];
3084
3085                 if (initialize_plane(dm, mode_info, i,
3086                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3087                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3088                         goto fail;
3089                 }
3090         }
3091
3092         /*
3093          * Initialize overlay planes, index starting after primary planes.
3094          * These planes have a higher DRM index than the primary planes since
3095          * they should be considered as having a higher z-order.
3096          * Order is reversed to match iteration order in atomic check.
3097          *
3098          * Only support DCN for now, and only expose one so we don't encourage
3099          * userspace to use up all the pipes.
3100          */
3101         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3102                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3103
3104                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3105                         continue;
3106
3107                 if (!plane->blends_with_above || !plane->blends_with_below)
3108                         continue;
3109
3110                 if (!plane->pixel_format_support.argb8888)
3111                         continue;
3112
3113                 if (initialize_plane(dm, NULL, primary_planes + i,
3114                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3115                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3116                         goto fail;
3117                 }
3118
3119                 /* Only create one overlay plane. */
3120                 break;
3121         }
3122
3123         for (i = 0; i < dm->dc->caps.max_streams; i++)
3124                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3125                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3126                         goto fail;
3127                 }
3128
3129         dm->display_indexes_num = dm->dc->caps.max_streams;
3130
3131         /* loops over all connectors on the board */
3132         for (i = 0; i < link_cnt; i++) {
3133                 struct dc_link *link = NULL;
3134
3135                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3136                         DRM_ERROR(
3137                                 "KMS: Cannot support more than %d display indexes\n",
3138                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3139                         continue;
3140                 }
3141
3142                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3143                 if (!aconnector)
3144                         goto fail;
3145
3146                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3147                 if (!aencoder)
3148                         goto fail;
3149
3150                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3151                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3152                         goto fail;
3153                 }
3154
3155                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3156                         DRM_ERROR("KMS: Failed to initialize connector\n");
3157                         goto fail;
3158                 }
3159
3160                 link = dc_get_link_at_index(dm->dc, i);
3161
3162                 if (!dc_link_detect_sink(link, &new_connection_type))
3163                         DRM_ERROR("KMS: Failed to detect connector\n");
3164
3165                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3166                         emulated_link_detect(link);
3167                         amdgpu_dm_update_connector_after_detect(aconnector);
3168
3169                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3170                         amdgpu_dm_update_connector_after_detect(aconnector);
3171                         register_backlight_device(dm, link);
3172                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3173                                 amdgpu_dm_set_psr_caps(link);
3174                 }
3175
3176
3177         }
3178
3179         /* Software is initialized. Now we can register interrupt handlers. */
3180         switch (adev->asic_type) {
3181         case CHIP_BONAIRE:
3182         case CHIP_HAWAII:
3183         case CHIP_KAVERI:
3184         case CHIP_KABINI:
3185         case CHIP_MULLINS:
3186         case CHIP_TONGA:
3187         case CHIP_FIJI:
3188         case CHIP_CARRIZO:
3189         case CHIP_STONEY:
3190         case CHIP_POLARIS11:
3191         case CHIP_POLARIS10:
3192         case CHIP_POLARIS12:
3193         case CHIP_VEGAM:
3194         case CHIP_VEGA10:
3195         case CHIP_VEGA12:
3196         case CHIP_VEGA20:
3197                 if (dce110_register_irq_handlers(dm->adev)) {
3198                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3199                         goto fail;
3200                 }
3201                 break;
3202 #if defined(CONFIG_DRM_AMD_DC_DCN)
3203         case CHIP_RAVEN:
3204         case CHIP_NAVI12:
3205         case CHIP_NAVI10:
3206         case CHIP_NAVI14:
3207         case CHIP_RENOIR:
3208                 if (dcn10_register_irq_handlers(dm->adev)) {
3209                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3210                         goto fail;
3211                 }
3212                 break;
3213 #endif
3214         default:
3215                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3216                 goto fail;
3217         }
3218
3219         /* No userspace support. */
3220         dm->dc->debug.disable_tri_buf = true;
3221
3222         return 0;
3223 fail:
3224         kfree(aencoder);
3225         kfree(aconnector);
3226
3227         return -EINVAL;
3228 }
3229
3230 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3231 {
3232         drm_mode_config_cleanup(dm->ddev);
3233         drm_atomic_private_obj_fini(&dm->atomic_obj);
3234         return;
3235 }
3236
3237 /******************************************************************************
3238  * amdgpu_display_funcs functions
3239  *****************************************************************************/
3240
3241 /*
3242  * dm_bandwidth_update - program display watermarks
3243  *
3244  * @adev: amdgpu_device pointer
3245  *
3246  * Calculate and program the display watermarks and line buffer allocation.
3247  */
3248 static void dm_bandwidth_update(struct amdgpu_device *adev)
3249 {
3250         /* TODO: implement later */
3251 }
3252
/*
 * Display callbacks the amdgpu core invokes for this device.  Entries
 * left NULL are either never called when DC is active or are handled by
 * DAL/VBIOS parsing internally, as noted per member.
 */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3266
3267 #if defined(CONFIG_DEBUG_KERNEL_DC)
3268
3269 static ssize_t s3_debug_store(struct device *device,
3270                               struct device_attribute *attr,
3271                               const char *buf,
3272                               size_t count)
3273 {
3274         int ret;
3275         int s3_state;
3276         struct drm_device *drm_dev = dev_get_drvdata(device);
3277         struct amdgpu_device *adev = drm_dev->dev_private;
3278
3279         ret = kstrtoint(buf, 0, &s3_state);
3280
3281         if (ret == 0) {
3282                 if (s3_state) {
3283                         dm_resume(adev);
3284                         drm_kms_helper_hotplug_event(adev->ddev);
3285                 } else
3286                         dm_suspend(adev);
3287         }
3288
3289         return ret == 0 ? count : 0;
3290 }
3291
3292 DEVICE_ATTR_WO(s3_debug);
3293
3294 #endif
3295
/**
 * dm_early_init - set per-ASIC display parameters before DM init
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Fills mode_info with the number of CRTCs, HPD lines and digital
 * encoders the detected ASIC provides, installs the DM IRQ helpers and
 * the display function table, and on CONFIG_DEBUG_KERNEL_DC kernels
 * creates the s3_debug sysfs attribute.
 *
 * Return: 0 on success, -EINVAL for an unsupported ASIC.
 */
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	/*
	 * NOTE(review): the NAVI/RENOIR cases below are NOT wrapped in
	 * CONFIG_DRM_AMD_DC_DCN, unlike both the CHIP_RAVEN case above
	 * and the IRQ-registration switch elsewhere in this file —
	 * confirm this asymmetry is intentional.
	 */
	case CHIP_NAVI10:
	case CHIP_NAVI12:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_RENOIR:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
3399
3400 static bool modeset_required(struct drm_crtc_state *crtc_state,
3401                              struct dc_stream_state *new_stream,
3402                              struct dc_stream_state *old_stream)
3403 {
3404         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3405                 return false;
3406
3407         if (!crtc_state->enable)
3408                 return false;
3409
3410         return crtc_state->active;
3411 }
3412
3413 static bool modereset_required(struct drm_crtc_state *crtc_state)
3414 {
3415         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3416                 return false;
3417
3418         return !crtc_state->enable || !crtc_state->active;
3419 }
3420
/* Encoder .destroy hook: unhook from DRM core, then free our wrapper. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3426
/* Vtable for encoders created by amdgpu_dm; only teardown is needed. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
3430
3431
3432 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3433                                 struct dc_scaling_info *scaling_info)
3434 {
3435         int scale_w, scale_h;
3436
3437         memset(scaling_info, 0, sizeof(*scaling_info));
3438
3439         /* Source is fixed 16.16 but we ignore mantissa for now... */
3440         scaling_info->src_rect.x = state->src_x >> 16;
3441         scaling_info->src_rect.y = state->src_y >> 16;
3442
3443         scaling_info->src_rect.width = state->src_w >> 16;
3444         if (scaling_info->src_rect.width == 0)
3445                 return -EINVAL;
3446
3447         scaling_info->src_rect.height = state->src_h >> 16;
3448         if (scaling_info->src_rect.height == 0)
3449                 return -EINVAL;
3450
3451         scaling_info->dst_rect.x = state->crtc_x;
3452         scaling_info->dst_rect.y = state->crtc_y;
3453
3454         if (state->crtc_w == 0)
3455                 return -EINVAL;
3456
3457         scaling_info->dst_rect.width = state->crtc_w;
3458
3459         if (state->crtc_h == 0)
3460                 return -EINVAL;
3461
3462         scaling_info->dst_rect.height = state->crtc_h;
3463
3464         /* DRM doesn't specify clipping on destination output. */
3465         scaling_info->clip_rect = scaling_info->dst_rect;
3466
3467         /* TODO: Validate scaling per-format with DC plane caps */
3468         scale_w = scaling_info->dst_rect.width * 1000 /
3469                   scaling_info->src_rect.width;
3470
3471         if (scale_w < 250 || scale_w > 16000)
3472                 return -EINVAL;
3473
3474         scale_h = scaling_info->dst_rect.height * 1000 /
3475                   scaling_info->src_rect.height;
3476
3477         if (scale_h < 250 || scale_h > 16000)
3478                 return -EINVAL;
3479
3480         /*
3481          * The "scaling_quality" can be ignored for now, quality = 0 has DC
3482          * assume reasonable defaults based on the format.
3483          */
3484
3485         return 0;
3486 }
3487
3488 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3489                        uint64_t *tiling_flags, bool *tmz_surface)
3490 {
3491         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3492         int r = amdgpu_bo_reserve(rbo, false);
3493
3494         if (unlikely(r)) {
3495                 /* Don't show error message when returning -ERESTARTSYS */
3496                 if (r != -ERESTARTSYS)
3497                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3498                 return r;
3499         }
3500
3501         if (tiling_flags)
3502                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3503
3504         if (tmz_surface)
3505                 *tmz_surface = amdgpu_bo_encrypted(rbo);
3506
3507         amdgpu_bo_unreserve(rbo);
3508
3509         return r;
3510 }
3511
3512 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3513 {
3514         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3515
3516         return offset ? (address + offset * 256) : 0;
3517 }
3518
/*
 * fill_plane_dcc_attributes - derive DCC (Delta Color Compression)
 * parameters for a GFX9+ surface from its tiling metadata.
 *
 * Leaves @dcc/@address untouched (DCC stays disabled) and returns 0
 * when DCC is not applicable: explicitly force-disabled, no DCC offset
 * present in the tiling flags, or a video (non-RGB) format.  Returns
 * -EINVAL when DC reports the surface cannot be compressed with the
 * given parameters.
 */
static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
			  const struct amdgpu_framebuffer *afb,
			  const enum surface_pixel_format format,
			  const enum dc_rotation_angle rotation,
			  const struct plane_size *plane_size,
			  const union dc_tiling_info *tiling_info,
			  const uint64_t info,
			  struct dc_plane_dcc_param *dcc,
			  struct dc_plane_address *address,
			  bool force_disable_dcc)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;
	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
	uint64_t dcc_address;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (force_disable_dcc)
		return 0;

	/* No DCC metadata offset: the BO has no DCC surface at all. */
	if (!offset)
		return 0;

	/* DCC only applies to RGB graphics formats. */
	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	if (!dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	/* Ask DC whether the hardware can decompress this configuration. */
	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	/* HW wants independent 64B blocks but the BO wasn't allocated so. */
	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	dcc->enable = 1;
	dcc->meta_pitch =
		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
	dcc->independent_64b_blks = i64b;

	/* DCC metadata lives at a 256B-granular offset from the surface. */
	dcc_address = get_dcc_address(afb->address, info);
	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

	return 0;
}
3583
/*
 * fill_plane_buffer_attributes - translate a framebuffer's memory
 * layout into DC's tiling, plane-size, DCC and address descriptors.
 *
 * RGB surfaces use a single plane (PLN_ADDR_TYPE_GRAPHICS); YCbCr
 * surfaces get separate luma/chroma planes (VIDEO_PROGRESSIVE).  GFX8
 * tiling parameters come from the per-BO tiling flags; on GFX9+ ASICs
 * the device-global gb_addr_config plus the swizzle mode are used
 * instead, and DCC attributes are filled in as well.
 *
 * Returns 0 on success or a negative errno from the DCC helper.
 */
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* Single-plane RGB surface. */
		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		/* DRM pitch is in bytes; DC wants pixels. */
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(afb->address);
		address->grph.addr.high_part = upper_32_bits(afb->address);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		/* Two-plane YCbCr: chroma plane starts at offsets[1]. */
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(afb->address);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(afb->address);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14 ||
	    adev->asic_type == CHIP_NAVI12 ||
	    adev->asic_type == CHIP_RENOIR ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		tiling_info->gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		tiling_info->gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		tiling_info->gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		tiling_info->gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		tiling_info->gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		tiling_info->gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		tiling_info->gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		tiling_info->gfx9.shaderEnable = 1;

		/* DCC is only supported on the GFX9+ addressing path. */
		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
						plane_size, tiling_info,
						tiling_flags, dcc, address,
						force_disable_dcc);
		if (ret)
			return ret;
	}

	return 0;
}
3711
3712 static void
3713 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3714                                bool *per_pixel_alpha, bool *global_alpha,
3715                                int *global_alpha_value)
3716 {
3717         *per_pixel_alpha = false;
3718         *global_alpha = false;
3719         *global_alpha_value = 0xff;
3720
3721         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3722                 return;
3723
3724         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3725                 static const uint32_t alpha_formats[] = {
3726                         DRM_FORMAT_ARGB8888,
3727                         DRM_FORMAT_RGBA8888,
3728                         DRM_FORMAT_ABGR8888,
3729                 };
3730                 uint32_t format = plane_state->fb->format->format;
3731                 unsigned int i;
3732
3733                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3734                         if (format == alpha_formats[i]) {
3735                                 *per_pixel_alpha = true;
3736                                 break;
3737                         }
3738                 }
3739         }
3740
3741         if (plane_state->alpha < 0xffff) {
3742                 *global_alpha = true;
3743                 *global_alpha_value = plane_state->alpha >> 8;
3744         }
3745 }
3746
3747 static int
3748 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3749                             const enum surface_pixel_format format,
3750                             enum dc_color_space *color_space)
3751 {
3752         bool full_range;
3753
3754         *color_space = COLOR_SPACE_SRGB;
3755
3756         /* DRM color properties only affect non-RGB formats. */
3757         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3758                 return 0;
3759
3760         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3761
3762         switch (plane_state->color_encoding) {
3763         case DRM_COLOR_YCBCR_BT601:
3764                 if (full_range)
3765                         *color_space = COLOR_SPACE_YCBCR601;
3766                 else
3767                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3768                 break;
3769
3770         case DRM_COLOR_YCBCR_BT709:
3771                 if (full_range)
3772                         *color_space = COLOR_SPACE_YCBCR709;
3773                 else
3774                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3775                 break;
3776
3777         case DRM_COLOR_YCBCR_BT2020:
3778                 if (full_range)
3779                         *color_space = COLOR_SPACE_2020_YCBCR;
3780                 else
3781                         return -EINVAL;
3782                 break;
3783
3784         default:
3785                 return -EINVAL;
3786         }
3787
3788         return 0;
3789 }
3790
/*
 * fill_dc_plane_info_and_addr - build a dc_plane_info and plane address
 * from a DRM plane state plus the BO tiling flags.
 *
 * Maps the DRM fourcc onto a DC surface pixel format (unsupported
 * formats are rejected with -EINVAL), translates the rotation property,
 * then delegates color space, buffer/tiling/address and blending
 * attributes to the helpers above.
 *
 * Returns 0 on success or a negative errno from any helper.
 */
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	/* DRM fourcc -> DC surface pixel format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	/* Only the four cardinal rotations exist; reflections fall to 0. */
	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
3899
3900 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3901                                     struct dc_plane_state *dc_plane_state,
3902                                     struct drm_plane_state *plane_state,
3903                                     struct drm_crtc_state *crtc_state)
3904 {
3905         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3906         const struct amdgpu_framebuffer *amdgpu_fb =
3907                 to_amdgpu_framebuffer(plane_state->fb);
3908         struct dc_scaling_info scaling_info;
3909         struct dc_plane_info plane_info;
3910         uint64_t tiling_flags;
3911         int ret;
3912         bool tmz_surface = false;
3913         bool force_disable_dcc = false;
3914
3915         ret = fill_dc_scaling_info(plane_state, &scaling_info);
3916         if (ret)
3917                 return ret;
3918
3919         dc_plane_state->src_rect = scaling_info.src_rect;
3920         dc_plane_state->dst_rect = scaling_info.dst_rect;
3921         dc_plane_state->clip_rect = scaling_info.clip_rect;
3922         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3923
3924         ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3925         if (ret)
3926                 return ret;
3927
3928         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3929         ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3930                                           &plane_info,
3931                                           &dc_plane_state->address,
3932                                           tmz_surface,
3933                                           force_disable_dcc);
3934         if (ret)
3935                 return ret;
3936
3937         dc_plane_state->format = plane_info.format;
3938         dc_plane_state->color_space = plane_info.color_space;
3939         dc_plane_state->format = plane_info.format;
3940         dc_plane_state->plane_size = plane_info.plane_size;
3941         dc_plane_state->rotation = plane_info.rotation;
3942         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3943         dc_plane_state->stereo_format = plane_info.stereo_format;
3944         dc_plane_state->tiling_info = plane_info.tiling_info;
3945         dc_plane_state->visible = plane_info.visible;
3946         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3947         dc_plane_state->global_alpha = plane_info.global_alpha;
3948         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3949         dc_plane_state->dcc = plane_info.dcc;
3950         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3951
3952         /*
3953          * Always set input transfer function, since plane state is refreshed
3954          * every time.
3955          */
3956         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3957         if (ret)
3958                 return ret;
3959
3960         return 0;
3961 }
3962
3963 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3964                                            const struct dm_connector_state *dm_state,
3965                                            struct dc_stream_state *stream)
3966 {
3967         enum amdgpu_rmx_type rmx_type;
3968
3969         struct rect src = { 0 }; /* viewport in composition space*/
3970         struct rect dst = { 0 }; /* stream addressable area */
3971
3972         /* no mode. nothing to be done */
3973         if (!mode)
3974                 return;
3975
3976         /* Full screen scaling by default */
3977         src.width = mode->hdisplay;
3978         src.height = mode->vdisplay;
3979         dst.width = stream->timing.h_addressable;
3980         dst.height = stream->timing.v_addressable;
3981
3982         if (dm_state) {
3983                 rmx_type = dm_state->scaling;
3984                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3985                         if (src.width * dst.height <
3986                                         src.height * dst.width) {
3987                                 /* height needs less upscaling/more downscaling */
3988                                 dst.width = src.width *
3989                                                 dst.height / src.height;
3990                         } else {
3991                                 /* width needs less upscaling/more downscaling */
3992                                 dst.height = src.height *
3993                                                 dst.width / src.width;
3994                         }
3995                 } else if (rmx_type == RMX_CENTER) {
3996                         dst = src;
3997                 }
3998
3999                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4000                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4001
4002                 if (dm_state->underscan_enable) {
4003                         dst.x += dm_state->underscan_hborder / 2;
4004                         dst.y += dm_state->underscan_vborder / 2;
4005                         dst.width -= dm_state->underscan_hborder;
4006                         dst.height -= dm_state->underscan_vborder;
4007                 }
4008         }
4009
4010         stream->src = src;
4011         stream->dst = dst;
4012
4013         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4014                         dst.x, dst.y, dst.width, dst.height);
4015
4016 }
4017
4018 static enum dc_color_depth
4019 convert_color_depth_from_display_info(const struct drm_connector *connector,
4020                                       bool is_y420, int requested_bpc)
4021 {
4022         uint8_t bpc;
4023
4024         if (is_y420) {
4025                 bpc = 8;
4026
4027                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4028                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4029                         bpc = 16;
4030                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4031                         bpc = 12;
4032                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4033                         bpc = 10;
4034         } else {
4035                 bpc = (uint8_t)connector->display_info.bpc;
4036                 /* Assume 8 bpc by default if no bpc is specified. */
4037                 bpc = bpc ? bpc : 8;
4038         }
4039
4040         if (requested_bpc > 0) {
4041                 /*
4042                  * Cap display bpc based on the user requested value.
4043                  *
4044                  * The value for state->max_bpc may not correctly updated
4045                  * depending on when the connector gets added to the state
4046                  * or if this was called outside of atomic check, so it
4047                  * can't be used directly.
4048                  */
4049                 bpc = min_t(u8, bpc, requested_bpc);
4050
4051                 /* Round down to the nearest even number. */
4052                 bpc = bpc - (bpc & 1);
4053         }
4054
4055         switch (bpc) {
4056         case 0:
4057                 /*
4058                  * Temporary Work around, DRM doesn't parse color depth for
4059                  * EDID revision before 1.4
4060                  * TODO: Fix edid parsing
4061                  */
4062                 return COLOR_DEPTH_888;
4063         case 6:
4064                 return COLOR_DEPTH_666;
4065         case 8:
4066                 return COLOR_DEPTH_888;
4067         case 10:
4068                 return COLOR_DEPTH_101010;
4069         case 12:
4070                 return COLOR_DEPTH_121212;
4071         case 14:
4072                 return COLOR_DEPTH_141414;
4073         case 16:
4074                 return COLOR_DEPTH_161616;
4075         default:
4076                 return COLOR_DEPTH_UNDEFINED;
4077         }
4078 }
4079
4080 static enum dc_aspect_ratio
4081 get_aspect_ratio(const struct drm_display_mode *mode_in)
4082 {
4083         /* 1-1 mapping, since both enums follow the HDMI spec. */
4084         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4085 }
4086
4087 static enum dc_color_space
4088 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4089 {
4090         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4091
4092         switch (dc_crtc_timing->pixel_encoding) {
4093         case PIXEL_ENCODING_YCBCR422:
4094         case PIXEL_ENCODING_YCBCR444:
4095         case PIXEL_ENCODING_YCBCR420:
4096         {
4097                 /*
4098                  * 27030khz is the separation point between HDTV and SDTV
4099                  * according to HDMI spec, we use YCbCr709 and YCbCr601
4100                  * respectively
4101                  */
4102                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4103                         if (dc_crtc_timing->flags.Y_ONLY)
4104                                 color_space =
4105                                         COLOR_SPACE_YCBCR709_LIMITED;
4106                         else
4107                                 color_space = COLOR_SPACE_YCBCR709;
4108                 } else {
4109                         if (dc_crtc_timing->flags.Y_ONLY)
4110                                 color_space =
4111                                         COLOR_SPACE_YCBCR601_LIMITED;
4112                         else
4113                                 color_space = COLOR_SPACE_YCBCR601;
4114                 }
4115
4116         }
4117         break;
4118         case PIXEL_ENCODING_RGB:
4119                 color_space = COLOR_SPACE_SRGB;
4120                 break;
4121
4122         default:
4123                 WARN_ON(1);
4124                 break;
4125         }
4126
4127         return color_space;
4128 }
4129
/*
 * Walk downward from the timing's current colour depth until the
 * resulting pixel clock fits under the sink's maximum TMDS clock
 * (HDMI). Writes the first depth that fits back into @timing_out and
 * returns true; returns false if no valid HDMI depth fits (or the
 * starting depth is not an HDMI-valid one).
 */
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		/* pix_clk_100hz / 10 == clock in kHz, the unit of max_tmds_clock */
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666); /* try the next lower depth */
	return false;
}
4165
/*
 * Fill the dc_crtc_timing of @stream from a DRM display mode and the
 * connector's capabilities: pick the pixel encoding (YCbCr 4:2:0/4:4:4
 * or RGB), colour depth, VIC / HDMI-VIC and sync polarities. When
 * @old_stream is given, its VIC and polarities are reused instead of
 * being rederived. For HDMI sinks the colour depth (and, as a last
 * resort, the encoding) is lowered so the pixel clock fits the sink's
 * max TMDS clock.
 */
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	/* Depth depends on the encoding chosen above (4:2:0 uses HF-VSDB caps) */
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if(old_stream) {
		/* Reuse the old stream's VIC and polarities (scaled-mode path) */
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		/* For HDMI, take VIC/HDMI-VIC from the infoframe helpers,
		 * overriding the CEA match above. */
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	/* Translate the hardware (crtc_*) timing into dc terms */
	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		/* If no depth fits the TMDS clock at the current encoding,
		 * retry at YCbCr 4:2:0 (half the pixel rate) when allowed. */
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
4255
4256 static void fill_audio_info(struct audio_info *audio_info,
4257                             const struct drm_connector *drm_connector,
4258                             const struct dc_sink *dc_sink)
4259 {
4260         int i = 0;
4261         int cea_revision = 0;
4262         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4263
4264         audio_info->manufacture_id = edid_caps->manufacturer_id;
4265         audio_info->product_id = edid_caps->product_id;
4266
4267         cea_revision = drm_connector->display_info.cea_rev;
4268
4269         strscpy(audio_info->display_name,
4270                 edid_caps->display_name,
4271                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4272
4273         if (cea_revision >= 3) {
4274                 audio_info->mode_count = edid_caps->audio_mode_count;
4275
4276                 for (i = 0; i < audio_info->mode_count; ++i) {
4277                         audio_info->modes[i].format_code =
4278                                         (enum audio_format_code)
4279                                         (edid_caps->audio_modes[i].format_code);
4280                         audio_info->modes[i].channel_count =
4281                                         edid_caps->audio_modes[i].channel_count;
4282                         audio_info->modes[i].sample_rates.all =
4283                                         edid_caps->audio_modes[i].sample_rate;
4284                         audio_info->modes[i].sample_size =
4285                                         edid_caps->audio_modes[i].sample_size;
4286                 }
4287         }
4288
4289         audio_info->flags.all = edid_caps->speaker_flags;
4290
4291         /* TODO: We only check for the progressive mode, check for interlace mode too */
4292         if (drm_connector->latency_present[0]) {
4293                 audio_info->video_latency = drm_connector->video_latency[0];
4294                 audio_info->audio_latency = drm_connector->audio_latency[0];
4295         }
4296
4297         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4298
4299 }
4300
4301 static void
4302 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4303                                       struct drm_display_mode *dst_mode)
4304 {
4305         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4306         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4307         dst_mode->crtc_clock = src_mode->crtc_clock;
4308         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4309         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4310         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4311         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4312         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4313         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4314         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4315         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4316         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4317         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4318         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4319 }
4320
4321 static void
4322 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4323                                         const struct drm_display_mode *native_mode,
4324                                         bool scale_enabled)
4325 {
4326         if (scale_enabled) {
4327                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4328         } else if (native_mode->clock == drm_mode->clock &&
4329                         native_mode->htotal == drm_mode->htotal &&
4330                         native_mode->vtotal == drm_mode->vtotal) {
4331                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4332         } else {
4333                 /* no scaling nor amdgpu inserted, no need to patch */
4334         }
4335 }
4336
4337 static struct dc_sink *
4338 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4339 {
4340         struct dc_sink_init_data sink_init_data = { 0 };
4341         struct dc_sink *sink = NULL;
4342         sink_init_data.link = aconnector->dc_link;
4343         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4344
4345         sink = dc_sink_create(&sink_init_data);
4346         if (!sink) {
4347                 DRM_ERROR("Failed to create sink!\n");
4348                 return NULL;
4349         }
4350         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4351
4352         return sink;
4353 }
4354
4355 static void set_multisync_trigger_params(
4356                 struct dc_stream_state *stream)
4357 {
4358         if (stream->triggered_crtc_reset.enabled) {
4359                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4360                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4361         }
4362 }
4363
4364 static void set_master_stream(struct dc_stream_state *stream_set[],
4365                               int stream_count)
4366 {
4367         int j, highest_rfr = 0, master_stream = 0;
4368
4369         for (j = 0;  j < stream_count; j++) {
4370                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4371                         int refresh_rate = 0;
4372
4373                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4374                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4375                         if (refresh_rate > highest_rfr) {
4376                                 highest_rfr = refresh_rate;
4377                                 master_stream = j;
4378                         }
4379                 }
4380         }
4381         for (j = 0;  j < stream_count; j++) {
4382                 if (stream_set[j])
4383                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4384         }
4385 }
4386
4387 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4388 {
4389         int i = 0;
4390
4391         if (context->stream_count < 2)
4392                 return;
4393         for (i = 0; i < context->stream_count ; i++) {
4394                 if (!context->streams[i])
4395                         continue;
4396                 /*
4397                  * TODO: add a function to read AMD VSDB bits and set
4398                  * crtc_sync_master.multi_sync_enabled flag
4399                  * For now it's set to false
4400                  */
4401                 set_multisync_trigger_params(context->streams[i]);
4402         }
4403         set_master_stream(context->streams, context->stream_count);
4404 }
4405
/*
 * Build a dc_stream_state for @aconnector from @drm_mode.
 *
 * If the connector has no physical sink a virtual one is created so a
 * stream can still be constructed. The CRTC timing is patched against
 * the connector's preferred/native mode, DSC is enabled for DP sinks
 * that support it when a config fits the link bandwidth, and PSR / VSC
 * SDP colorimetry state is derived from the sink caps.
 *
 * Returns a new stream reference (caller releases via
 * dc_stream_release()), or NULL on failure.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	/* Hold a sink reference for the duration; fake one if none exists */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	/* Fall back to the first listed mode when none is marked preferred */
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	/* DSC stays off unless a supported config is computed below */
	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_settings.psr_feature_enabled)	{
		struct dc  *core_dc = stream->link->ctx->dc;

		if (dc_is_dmcu_initialized(core_dc)) {
			//
			// should decide stream support vsc sdp colorimetry capability
			// before building vsc info packet
			//
			stream->use_vsc_sdp_for_colorimetry = false;
			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
				stream->use_vsc_sdp_for_colorimetry =
					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
			} else {
				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
					stream->use_vsc_sdp_for_colorimetry = true;
				}
			}
			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		}
	}
finish:
	/* Drop the reference taken (or created) at the top */
	dc_sink_release(sink);

	return stream;
}
4562
/* drm_crtc_funcs.destroy: release DRM core CRTC state, then free the
 * CRTC object itself. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
4568
4569 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4570                                   struct drm_crtc_state *state)
4571 {
4572         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4573
4574         /* TODO Destroy dc_stream objects are stream object is flattened */
4575         if (cur->stream)
4576                 dc_stream_release(cur->stream);
4577
4578
4579         __drm_atomic_helper_crtc_destroy_state(state);
4580
4581
4582         kfree(state);
4583 }
4584
4585 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4586 {
4587         struct dm_crtc_state *state;
4588
4589         if (crtc->state)
4590                 dm_crtc_destroy_state(crtc, crtc->state);
4591
4592         state = kzalloc(sizeof(*state), GFP_KERNEL);
4593         if (WARN_ON(!state))
4594                 return;
4595
4596         crtc->state = &state->base;
4597         crtc->state->crtc = crtc;
4598
4599 }
4600
4601 static struct drm_crtc_state *
4602 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4603 {
4604         struct dm_crtc_state *state, *cur;
4605
4606         cur = to_dm_crtc_state(crtc->state);
4607
4608         if (WARN_ON(!crtc->state))
4609                 return NULL;
4610
4611         state = kzalloc(sizeof(*state), GFP_KERNEL);
4612         if (!state)
4613                 return NULL;
4614
4615         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4616
4617         if (cur->stream) {
4618                 state->stream = cur->stream;
4619                 dc_stream_retain(state->stream);
4620         }
4621
4622         state->active_planes = cur->active_planes;
4623         state->interrupts_enabled = cur->interrupts_enabled;
4624         state->vrr_params = cur->vrr_params;
4625         state->vrr_infopacket = cur->vrr_infopacket;
4626         state->abm_level = cur->abm_level;
4627         state->vrr_supported = cur->vrr_supported;
4628         state->freesync_config = cur->freesync_config;
4629         state->crc_src = cur->crc_src;
4630         state->cm_has_degamma = cur->cm_has_degamma;
4631         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4632
4633         /* TODO Duplicate dc_stream after objects are stream object is flattened */
4634
4635         return &state->base;
4636 }
4637
4638 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4639 {
4640         enum dc_irq_source irq_source;
4641         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4642         struct amdgpu_device *adev = crtc->dev->dev_private;
4643         int rc;
4644
4645         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4646
4647         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4648
4649         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4650                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4651         return rc;
4652 }
4653
4654 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4655 {
4656         enum dc_irq_source irq_source;
4657         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4658         struct amdgpu_device *adev = crtc->dev->dev_private;
4659         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4660         int rc = 0;
4661
4662         if (enable) {
4663                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4664                 if (amdgpu_dm_vrr_active(acrtc_state))
4665                         rc = dm_set_vupdate_irq(crtc, true);
4666         } else {
4667                 /* vblank irq off -> vupdate irq off */
4668                 rc = dm_set_vupdate_irq(crtc, false);
4669         }
4670
4671         if (rc)
4672                 return rc;
4673
4674         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4675         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4676 }
4677
/* drm_crtc_funcs.enable_vblank hook: thin wrapper around dm_set_vblank(). */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
4682
/* drm_crtc_funcs.disable_vblank hook: return value is intentionally dropped. */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
4687
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
4705
4706 static enum drm_connector_status
4707 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4708 {
4709         bool connected;
4710         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4711
4712         /*
4713          * Notes:
4714          * 1. This interface is NOT called in context of HPD irq.
4715          * 2. This interface *is called* in context of user-mode ioctl. Which
4716          * makes it a bad place for *any* MST-related activity.
4717          */
4718
4719         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4720             !aconnector->fake_enable)
4721                 connected = (aconnector->dc_sink != NULL);
4722         else
4723                 connected = (aconnector->base.force == DRM_FORCE_ON);
4724
4725         return (connected ? connector_status_connected :
4726                         connector_status_disconnected);
4727 }
4728
4729 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4730                                             struct drm_connector_state *connector_state,
4731                                             struct drm_property *property,
4732                                             uint64_t val)
4733 {
4734         struct drm_device *dev = connector->dev;
4735         struct amdgpu_device *adev = dev->dev_private;
4736         struct dm_connector_state *dm_old_state =
4737                 to_dm_connector_state(connector->state);
4738         struct dm_connector_state *dm_new_state =
4739                 to_dm_connector_state(connector_state);
4740
4741         int ret = -EINVAL;
4742
4743         if (property == dev->mode_config.scaling_mode_property) {
4744                 enum amdgpu_rmx_type rmx_type;
4745
4746                 switch (val) {
4747                 case DRM_MODE_SCALE_CENTER:
4748                         rmx_type = RMX_CENTER;
4749                         break;
4750                 case DRM_MODE_SCALE_ASPECT:
4751                         rmx_type = RMX_ASPECT;
4752                         break;
4753                 case DRM_MODE_SCALE_FULLSCREEN:
4754                         rmx_type = RMX_FULL;
4755                         break;
4756                 case DRM_MODE_SCALE_NONE:
4757                 default:
4758                         rmx_type = RMX_OFF;
4759                         break;
4760                 }
4761
4762                 if (dm_old_state->scaling == rmx_type)
4763                         return 0;
4764
4765                 dm_new_state->scaling = rmx_type;
4766                 ret = 0;
4767         } else if (property == adev->mode_info.underscan_hborder_property) {
4768                 dm_new_state->underscan_hborder = val;
4769                 ret = 0;
4770         } else if (property == adev->mode_info.underscan_vborder_property) {
4771                 dm_new_state->underscan_vborder = val;
4772                 ret = 0;
4773         } else if (property == adev->mode_info.underscan_property) {
4774                 dm_new_state->underscan_enable = val;
4775                 ret = 0;
4776         } else if (property == adev->mode_info.abm_level_property) {
4777                 dm_new_state->abm_level = val;
4778                 ret = 0;
4779         }
4780
4781         return ret;
4782 }
4783
4784 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4785                                             const struct drm_connector_state *state,
4786                                             struct drm_property *property,
4787                                             uint64_t *val)
4788 {
4789         struct drm_device *dev = connector->dev;
4790         struct amdgpu_device *adev = dev->dev_private;
4791         struct dm_connector_state *dm_state =
4792                 to_dm_connector_state(state);
4793         int ret = -EINVAL;
4794
4795         if (property == dev->mode_config.scaling_mode_property) {
4796                 switch (dm_state->scaling) {
4797                 case RMX_CENTER:
4798                         *val = DRM_MODE_SCALE_CENTER;
4799                         break;
4800                 case RMX_ASPECT:
4801                         *val = DRM_MODE_SCALE_ASPECT;
4802                         break;
4803                 case RMX_FULL:
4804                         *val = DRM_MODE_SCALE_FULLSCREEN;
4805                         break;
4806                 case RMX_OFF:
4807                 default:
4808                         *val = DRM_MODE_SCALE_NONE;
4809                         break;
4810                 }
4811                 ret = 0;
4812         } else if (property == adev->mode_info.underscan_hborder_property) {
4813                 *val = dm_state->underscan_hborder;
4814                 ret = 0;
4815         } else if (property == adev->mode_info.underscan_vborder_property) {
4816                 *val = dm_state->underscan_vborder;
4817                 ret = 0;
4818         } else if (property == adev->mode_info.underscan_property) {
4819                 *val = dm_state->underscan_enable;
4820                 ret = 0;
4821         } else if (property == adev->mode_info.abm_level_property) {
4822                 *val = dm_state->abm_level;
4823                 ret = 0;
4824         }
4825
4826         return ret;
4827 }
4828
/* early_unregister hook: tear down the DP AUX channel registration. */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
4835
/*
 * drm_connector_funcs.destroy: release everything the connector owns —
 * the backlight device (internal panels only), emulated and detected DC
 * sinks, CEC/aux registrations, the connector's i2c adapter, and finally
 * the connector object itself.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* Only a connected eDP/LVDS link owns the registered backlight. */
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	/* Drop our references on the emulated and real sinks, if any. */
	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	/* aux.name was allocated when the AUX channel was created. */
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
4872
/*
 * drm_connector_funcs.reset: free the current connector state and
 * install a fresh one with driver defaults (scaling off, no underscan,
 * max 8 bpc requested, no MST VCPI/PBN reservation).
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		/* Only eDP panels get a default ABM level (from amdgpu_dm_abm_level). */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}
4899
4900 struct drm_connector_state *
4901 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4902 {
4903         struct dm_connector_state *state =
4904                 to_dm_connector_state(connector->state);
4905
4906         struct dm_connector_state *new_state =
4907                         kmemdup(state, sizeof(*state), GFP_KERNEL);
4908
4909         if (!new_state)
4910                 return NULL;
4911
4912         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4913
4914         new_state->freesync_capable = state->freesync_capable;
4915         new_state->abm_level = state->abm_level;
4916         new_state->scaling = state->scaling;
4917         new_state->underscan_enable = state->underscan_enable;
4918         new_state->underscan_hborder = state->underscan_hborder;
4919         new_state->underscan_vborder = state->underscan_vborder;
4920         new_state->vcpi_slots = state->vcpi_slots;
4921         new_state->pbn = state->pbn;
4922         return &new_state->base;
4923 }
4924
4925 static int
4926 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4927 {
4928         struct amdgpu_dm_connector *amdgpu_dm_connector =
4929                 to_amdgpu_dm_connector(connector);
4930         int r;
4931
4932         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4933             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4934                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4935                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4936                 if (r)
4937                         return r;
4938         }
4939
4940 #if defined(CONFIG_DEBUG_FS)
4941         connector_debugfs_init(amdgpu_dm_connector);
4942 #endif
4943
4944         return 0;
4945 }
4946
/* Connector hooks shared by all amdgpu_dm connectors. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
4959
/* Thin wrapper delegating mode enumeration to amdgpu_dm_connector_get_modes(). */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
4964
/*
 * Create an emulated (virtual) sink from the connector's override-EDID
 * blob, for forced connectors with no physically detected sink. If no
 * EDID blob is present, the connector is forced OFF instead.
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
				aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	/* Register the EDID with DC as a remote sink on this link. */
	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	/*
	 * When forced ON, prefer the link's real local sink if one exists,
	 * otherwise fall back to the emulated sink; retain either way.
	 */
	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}
4999
/*
 * Set up EDID management for a forced connector: seed plausible link
 * capabilities for DP and build an emulated sink from the override EDID.
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
5017
5018 static struct dc_stream_state *
5019 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5020                                 const struct drm_display_mode *drm_mode,
5021                                 const struct dm_connector_state *dm_state,
5022                                 const struct dc_stream_state *old_stream)
5023 {
5024         struct drm_connector *connector = &aconnector->base;
5025         struct amdgpu_device *adev = connector->dev->dev_private;
5026         struct dc_stream_state *stream;
5027         int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
5028         enum dc_status dc_result = DC_OK;
5029
5030         do {
5031                 stream = create_stream_for_sink(aconnector, drm_mode,
5032                                                 dm_state, old_stream,
5033                                                 requested_bpc);
5034                 if (stream == NULL) {
5035                         DRM_ERROR("Failed to create stream for sink!\n");
5036                         break;
5037                 }
5038
5039                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5040
5041                 if (dc_result != DC_OK) {
5042                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
5043                                       drm_mode->hdisplay,
5044                                       drm_mode->vdisplay,
5045                                       drm_mode->clock,
5046                                       dc_result);
5047
5048                         dc_stream_release(stream);
5049                         stream = NULL;
5050                         requested_bpc -= 2; /* lower bpc to retry validation */
5051                 }
5052
5053         } while (stream == NULL && requested_bpc >= 6);
5054
5055         return stream;
5056 }
5057
/*
 * drm_connector_helper_funcs.mode_valid: a mode is acceptable only if
 * DC can build and validate a stream for it on the current sink.
 * Interlaced and doublescan modes are rejected outright.
 */
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initilialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
		!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	/* Validate by actually constructing (then discarding) a stream. */
	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling*/
	return result;
}
5096
/*
 * Pack the connector state's HDR static metadata blob into a DC info
 * packet: an HDMI DRM (Dynamic Range and Mastering) infoframe for HDMI,
 * or the equivalent SDP for DP/eDP. With no metadata set, @out is
 * zeroed and 0 is returned. Returns a negative error code on failure.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	/* Copy the 26-byte payload after the per-signal header bytes. */
	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
5155
5156 static bool
5157 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5158                           const struct drm_connector_state *new_state)
5159 {
5160         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5161         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5162
5163         if (old_blob != new_blob) {
5164                 if (old_blob && new_blob &&
5165                     old_blob->length == new_blob->length)
5166                         return memcmp(old_blob->data, new_blob->data,
5167                                       old_blob->length);
5168
5169                 return true;
5170         }
5171
5172         return false;
5173 }
5174
/*
 * drm_connector_helper_funcs.atomic_check: when the HDR static metadata
 * changed, pre-validate the packed infoframe and pull the CRTC into the
 * state; force a modeset only when HDR is being entered or exited.
 */
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		/* Validate the metadata now; packing errors fail the check. */
		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
5219
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * When hotplugging a second, larger display in fbcon mode, the larger
	 * resolution modes are filtered by drm_mode_validate_size() and would
	 * be missing once the user starts lightdm. So the modes list must be
	 * rebuilt in the get_modes callback, not just return the mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
5232
/* Intentionally empty: no helper-level CRTC disable work is needed here. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
5236
5237 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5238 {
5239         struct drm_device *dev = new_crtc_state->crtc->dev;
5240         struct drm_plane *plane;
5241
5242         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5243                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5244                         return true;
5245         }
5246
5247         return false;
5248 }
5249
5250 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5251 {
5252         struct drm_atomic_state *state = new_crtc_state->state;
5253         struct drm_plane *plane;
5254         int num_active = 0;
5255
5256         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5257                 struct drm_plane_state *new_plane_state;
5258
5259                 /* Cursor planes are "fake". */
5260                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5261                         continue;
5262
5263                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5264
5265                 if (!new_plane_state) {
5266                         /*
5267                          * The plane is enable on the CRTC and hasn't changed
5268                          * state. This means that it previously passed
5269                          * validation and is therefore enabled.
5270                          */
5271                         num_active += 1;
5272                         continue;
5273                 }
5274
5275                 /* We need a framebuffer to be considered enabled. */
5276                 num_active += (new_plane_state->fb != NULL);
5277         }
5278
5279         return num_active;
5280 }
5281
5282 /*
5283  * Sets whether interrupts should be enabled on a specific CRTC.
5284  * We require that the stream be enabled and that there exist active
5285  * DC planes on the stream.
5286  */
5287 static void
5288 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5289                                struct drm_crtc_state *new_crtc_state)
5290 {
5291         struct dm_crtc_state *dm_new_crtc_state =
5292                 to_dm_crtc_state(new_crtc_state);
5293
5294         dm_new_crtc_state->active_planes = 0;
5295         dm_new_crtc_state->interrupts_enabled = false;
5296
5297         if (!dm_new_crtc_state->stream)
5298                 return;
5299
5300         dm_new_crtc_state->active_planes =
5301                 count_crtc_active_planes(new_crtc_state);
5302
5303         dm_new_crtc_state->interrupts_enabled =
5304                 dm_new_crtc_state->active_planes > 0;
5305 }
5306
/*
 * drm_crtc_helper_funcs.atomic_check: refresh the CRTC's interrupt
 * state, reject a cursor-only configuration (no hardware planes), and
 * let DC validate the stream. Returns 0 on success or -EINVAL.
 */
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	/*
	 * Update interrupt state for the CRTC. This needs to happen whenever
	 * the CRTC has changed or whenever any of its planes have changed.
	 * Atomic check satisfies both of these requirements since the CRTC
	 * is added to the state by DRM during drm_atomic_helper_check_planes.
	 */
	dm_update_crtc_interrupt_state(crtc, state);

	/* A modeset that requires a stream but has none is a driver bug. */
	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
	if (state->enable && state->active &&
	    does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}
5347
/* No fixup performed: the adjusted mode is accepted as-is. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
5354
/* CRTC helper hooks shared by all amdgpu_dm CRTCs. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
5361
/* Intentionally empty: no encoder-disable-time work is needed here. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
5366
5367 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5368 {
5369         switch (display_color_depth) {
5370                 case COLOR_DEPTH_666:
5371                         return 6;
5372                 case COLOR_DEPTH_888:
5373                         return 8;
5374                 case COLOR_DEPTH_101010:
5375                         return 10;
5376                 case COLOR_DEPTH_121212:
5377                         return 12;
5378                 case COLOR_DEPTH_141414:
5379                         return 14;
5380                 case COLOR_DEPTH_161616:
5381                         return 16;
5382                 default:
5383                         break;
5384                 }
5385         return 0;
5386 }
5387
/*
 * drm_encoder_helper_funcs.atomic_check for MST connectors: compute the
 * stream's PBN from pixel clock and bpp, then reserve VCPI time slots
 * in the MST topology state. Connectors without an MST port or sink are
 * a no-op. Returns 0 on success or a negative error from the VCPI
 * allocation.
 */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	/* Duplicated (suspend/resume) states keep their saved PBN. */
	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   0);
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
5434
/* Encoder helper hooks shared by all amdgpu_dm encoders. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
5439
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Walk every connector in the atomic state and, for each MST connector that
 * maps to a DC stream, enable or disable DSC on its MST port and record the
 * resulting PBN/VCPI allocation in the dm connector state.
 *
 * Returns 0 on success or a negative errno propagated from the DRM MST
 * atomic helpers.
 */
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		/* Only MST connectors have a port. */
		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		/* Find the DC stream backing this connector, if any. */
		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			/*
			 * DSC is off for this stream: make sure the MST
			 * atomic state agrees. This can still fail (e.g.
			 * -EDEADLK from the atomic helpers), so the result
			 * must not be ignored.
			 */
			vcpi = drm_dp_mst_atomic_enable_dsc(state,
							    aconnector->port,
							    dm_conn_state->pbn,
							    0,
							    false);
			if (vcpi < 0)
				return vcpi;
			continue;
		}

		/* DSC is on: recompute PBN from the compressed bpp/clock. */
		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
5504
5505 static void dm_drm_plane_reset(struct drm_plane *plane)
5506 {
5507         struct dm_plane_state *amdgpu_state = NULL;
5508
5509         if (plane->state)
5510                 plane->funcs->atomic_destroy_state(plane, plane->state);
5511
5512         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5513         WARN_ON(amdgpu_state == NULL);
5514
5515         if (amdgpu_state)
5516                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5517 }
5518
5519 static struct drm_plane_state *
5520 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5521 {
5522         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5523
5524         old_dm_plane_state = to_dm_plane_state(plane->state);
5525         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5526         if (!dm_plane_state)
5527                 return NULL;
5528
5529         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5530
5531         if (old_dm_plane_state->dc_state) {
5532                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5533                 dc_plane_state_retain(dm_plane_state->dc_state);
5534         }
5535
5536         return &dm_plane_state->base;
5537 }
5538
5539 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5540                                 struct drm_plane_state *state)
5541 {
5542         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5543
5544         if (dm_plane_state->dc_state)
5545                 dc_plane_state_release(dm_plane_state->dc_state);
5546
5547         drm_atomic_helper_plane_destroy_state(plane, state);
5548 }
5549
/* Plane vtable: stock atomic helpers plus dm-specific state management. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
5558
/*
 * Prepare a framebuffer for scanout: reserve its BO, pin it in a scanout-
 * capable domain, bind it to GART, and record the resulting GPU address
 * (plus tiling/TMZ attributes) in the new DC plane state.
 *
 * Returns 0 on success or a negative errno from the reservation/pin/GART
 * steps; each error path unwinds what was done before it.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint64_t tiling_flags;
	uint32_t domain;
	int r;
	bool tmz_surface = false;
	bool force_disable_dcc = false;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	/* Nothing to prepare when no framebuffer is bound. */
	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	/* Reserve the BO via TTM's execution-utility list helpers. */
	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	/* Cursors must live in VRAM; other planes take any scanout domain. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	/* Snapshot attributes while the BO is still reserved. */
	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

	tmz_surface = amdgpu_bo_encrypted(rbo);

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	/* Hold a reference until cleanup_fb drops it. */
	amdgpu_bo_ref(rbo);

	/* Only refresh buffer attributes when the DC state actually changed. */
	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		/* DCC must stay off while a Raven part is suspending. */
		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			tiling_flags, &plane_state->tiling_info,
			&plane_state->plane_size, &plane_state->dcc,
			&plane_state->address, tmz_surface,
			force_disable_dcc);
	}

	return 0;
}
5646
5647 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5648                                        struct drm_plane_state *old_state)
5649 {
5650         struct amdgpu_bo *rbo;
5651         int r;
5652
5653         if (!old_state->fb)
5654                 return;
5655
5656         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5657         r = amdgpu_bo_reserve(rbo, false);
5658         if (unlikely(r)) {
5659                 DRM_ERROR("failed to reserve rbo before unpin\n");
5660                 return;
5661         }
5662
5663         amdgpu_bo_unpin(rbo);
5664         amdgpu_bo_unreserve(rbo);
5665         amdgpu_bo_unref(&rbo);
5666 }
5667
5668 static int dm_plane_atomic_check(struct drm_plane *plane,
5669                                  struct drm_plane_state *state)
5670 {
5671         struct amdgpu_device *adev = plane->dev->dev_private;
5672         struct dc *dc = adev->dm.dc;
5673         struct dm_plane_state *dm_plane_state;
5674         struct dc_scaling_info scaling_info;
5675         int ret;
5676
5677         dm_plane_state = to_dm_plane_state(state);
5678
5679         if (!dm_plane_state->dc_state)
5680                 return 0;
5681
5682         ret = fill_dc_scaling_info(state, &scaling_info);
5683         if (ret)
5684                 return ret;
5685
5686         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5687                 return 0;
5688
5689         return -EINVAL;
5690 }
5691
5692 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5693                                        struct drm_plane_state *new_plane_state)
5694 {
5695         /* Only support async updates on cursor planes. */
5696         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5697                 return -EINVAL;
5698
5699         return 0;
5700 }
5701
/*
 * Commit an async plane update without a full atomic commit. Only reached
 * for cursor planes (see dm_plane_atomic_async_check): copy the new
 * source/destination rectangles into the live plane state, swap in the new
 * framebuffer, and program the cursor immediately.
 */
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	/* Swap fb pointers so each state keeps a valid reference to free. */
	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}
5721
/* Plane helper vtable: fb pin/unpin plus sync and async atomic checks. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
5729
5730 /*
5731  * TODO: these are currently initialized to rgb formats only.
5732  * For future use cases we should either initialize them dynamically based on
5733  * plane capabilities, or initialize this array to all formats, so internal drm
5734  * check will succeed, and let DC implement proper check
5735  */
5736 static const uint32_t rgb_formats[] = {
5737         DRM_FORMAT_XRGB8888,
5738         DRM_FORMAT_ARGB8888,
5739         DRM_FORMAT_RGBA8888,
5740         DRM_FORMAT_XRGB2101010,
5741         DRM_FORMAT_XBGR2101010,
5742         DRM_FORMAT_ARGB2101010,
5743         DRM_FORMAT_ABGR2101010,
5744         DRM_FORMAT_XBGR8888,
5745         DRM_FORMAT_ABGR8888,
5746         DRM_FORMAT_RGB565,
5747 };
5748
5749 static const uint32_t overlay_formats[] = {
5750         DRM_FORMAT_XRGB8888,
5751         DRM_FORMAT_ARGB8888,
5752         DRM_FORMAT_RGBA8888,
5753         DRM_FORMAT_XBGR8888,
5754         DRM_FORMAT_ABGR8888,
5755         DRM_FORMAT_RGB565
5756 };
5757
5758 static const u32 cursor_formats[] = {
5759         DRM_FORMAT_ARGB8888
5760 };
5761
5762 static int get_plane_formats(const struct drm_plane *plane,
5763                              const struct dc_plane_cap *plane_cap,
5764                              uint32_t *formats, int max_formats)
5765 {
5766         int i, num_formats = 0;
5767
5768         /*
5769          * TODO: Query support for each group of formats directly from
5770          * DC plane caps. This will require adding more formats to the
5771          * caps list.
5772          */
5773
5774         switch (plane->type) {
5775         case DRM_PLANE_TYPE_PRIMARY:
5776                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5777                         if (num_formats >= max_formats)
5778                                 break;
5779
5780                         formats[num_formats++] = rgb_formats[i];
5781                 }
5782
5783                 if (plane_cap && plane_cap->pixel_format_support.nv12)
5784                         formats[num_formats++] = DRM_FORMAT_NV12;
5785                 if (plane_cap && plane_cap->pixel_format_support.p010)
5786                         formats[num_formats++] = DRM_FORMAT_P010;
5787                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5788                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5789                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5790                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5791                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5792                 }
5793                 break;
5794
5795         case DRM_PLANE_TYPE_OVERLAY:
5796                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5797                         if (num_formats >= max_formats)
5798                                 break;
5799
5800                         formats[num_formats++] = overlay_formats[i];
5801                 }
5802                 break;
5803
5804         case DRM_PLANE_TYPE_CURSOR:
5805                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5806                         if (num_formats >= max_formats)
5807                                 break;
5808
5809                         formats[num_formats++] = cursor_formats[i];
5810                 }
5811                 break;
5812         }
5813
5814         return num_formats;
5815 }
5816
5817 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5818                                 struct drm_plane *plane,
5819                                 unsigned long possible_crtcs,
5820                                 const struct dc_plane_cap *plane_cap)
5821 {
5822         uint32_t formats[32];
5823         int num_formats;
5824         int res = -EPERM;
5825
5826         num_formats = get_plane_formats(plane, plane_cap, formats,
5827                                         ARRAY_SIZE(formats));
5828
5829         res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5830                                        &dm_plane_funcs, formats, num_formats,
5831                                        NULL, plane->type, NULL);
5832         if (res)
5833                 return res;
5834
5835         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5836             plane_cap && plane_cap->per_pixel_alpha) {
5837                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5838                                           BIT(DRM_MODE_BLEND_PREMULTI);
5839
5840                 drm_plane_create_alpha_property(plane);
5841                 drm_plane_create_blend_mode_property(plane, blend_caps);
5842         }
5843
5844         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5845             plane_cap &&
5846             (plane_cap->pixel_format_support.nv12 ||
5847              plane_cap->pixel_format_support.p010)) {
5848                 /* This only affects YUV formats. */
5849                 drm_plane_create_color_properties(
5850                         plane,
5851                         BIT(DRM_COLOR_YCBCR_BT601) |
5852                         BIT(DRM_COLOR_YCBCR_BT709) |
5853                         BIT(DRM_COLOR_YCBCR_BT2020),
5854                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5855                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5856                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5857         }
5858
5859         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5860
5861         /* Create (reset) the plane state */
5862         if (plane->funcs->reset)
5863                 plane->funcs->reset(plane);
5864
5865         return 0;
5866 }
5867
5868 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5869                                struct drm_plane *plane,
5870                                uint32_t crtc_index)
5871 {
5872         struct amdgpu_crtc *acrtc = NULL;
5873         struct drm_plane *cursor_plane;
5874
5875         int res = -ENOMEM;
5876
5877         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5878         if (!cursor_plane)
5879                 goto fail;
5880
5881         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5882         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5883
5884         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5885         if (!acrtc)
5886                 goto fail;
5887
5888         res = drm_crtc_init_with_planes(
5889                         dm->ddev,
5890                         &acrtc->base,
5891                         plane,
5892                         cursor_plane,
5893                         &amdgpu_dm_crtc_funcs, NULL);
5894
5895         if (res)
5896                 goto fail;
5897
5898         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5899
5900         /* Create (reset) the plane state */
5901         if (acrtc->base.funcs->reset)
5902                 acrtc->base.funcs->reset(&acrtc->base);
5903
5904         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5905         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5906
5907         acrtc->crtc_id = crtc_index;
5908         acrtc->base.enabled = false;
5909         acrtc->otg_inst = -1;
5910
5911         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5912         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5913                                    true, MAX_COLOR_LUT_ENTRIES);
5914         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5915
5916         return 0;
5917
5918 fail:
5919         kfree(acrtc);
5920         kfree(cursor_plane);
5921         return res;
5922 }
5923
5924
5925 static int to_drm_connector_type(enum signal_type st)
5926 {
5927         switch (st) {
5928         case SIGNAL_TYPE_HDMI_TYPE_A:
5929                 return DRM_MODE_CONNECTOR_HDMIA;
5930         case SIGNAL_TYPE_EDP:
5931                 return DRM_MODE_CONNECTOR_eDP;
5932         case SIGNAL_TYPE_LVDS:
5933                 return DRM_MODE_CONNECTOR_LVDS;
5934         case SIGNAL_TYPE_RGB:
5935                 return DRM_MODE_CONNECTOR_VGA;
5936         case SIGNAL_TYPE_DISPLAY_PORT:
5937         case SIGNAL_TYPE_DISPLAY_PORT_MST:
5938                 return DRM_MODE_CONNECTOR_DisplayPort;
5939         case SIGNAL_TYPE_DVI_DUAL_LINK:
5940         case SIGNAL_TYPE_DVI_SINGLE_LINK:
5941                 return DRM_MODE_CONNECTOR_DVID;
5942         case SIGNAL_TYPE_VIRTUAL:
5943                 return DRM_MODE_CONNECTOR_VIRTUAL;
5944
5945         default:
5946                 return DRM_MODE_CONNECTOR_Unknown;
5947         }
5948 }
5949
/*
 * Return the encoder attached to @connector, or NULL when none exists.
 * The loop returns on the first iteration because each dm connector has
 * exactly one possible encoder.
 */
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}
5960
/*
 * Cache the connector's preferred display mode in the encoder's
 * native_mode, or leave native_mode.clock at 0 if the first probed mode is
 * not flagged preferred.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	/* Clock 0 marks "no native mode known". */
	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			/*
			 * NOTE: this break is unconditional, so only the
			 * first probed mode is ever examined. probed_modes
			 * is sorted beforehand (see
			 * amdgpu_dm_connector_ddc_get_modes), which places
			 * the intended mode first.
			 */
			break;
		}

	}
}
5989
5990 static struct drm_display_mode *
5991 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5992                              char *name,
5993                              int hdisplay, int vdisplay)
5994 {
5995         struct drm_device *dev = encoder->dev;
5996         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5997         struct drm_display_mode *mode = NULL;
5998         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5999
6000         mode = drm_mode_duplicate(dev, native_mode);
6001
6002         if (mode == NULL)
6003                 return NULL;
6004
6005         mode->hdisplay = hdisplay;
6006         mode->vdisplay = vdisplay;
6007         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6008         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6009
6010         return mode;
6011
6012 }
6013
6014 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6015                                                  struct drm_connector *connector)
6016 {
6017         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6018         struct drm_display_mode *mode = NULL;
6019         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6020         struct amdgpu_dm_connector *amdgpu_dm_connector =
6021                                 to_amdgpu_dm_connector(connector);
6022         int i;
6023         int n;
6024         struct mode_size {
6025                 char name[DRM_DISPLAY_MODE_LEN];
6026                 int w;
6027                 int h;
6028         } common_modes[] = {
6029                 {  "640x480",  640,  480},
6030                 {  "800x600",  800,  600},
6031                 { "1024x768", 1024,  768},
6032                 { "1280x720", 1280,  720},
6033                 { "1280x800", 1280,  800},
6034                 {"1280x1024", 1280, 1024},
6035                 { "1440x900", 1440,  900},
6036                 {"1680x1050", 1680, 1050},
6037                 {"1600x1200", 1600, 1200},
6038                 {"1920x1080", 1920, 1080},
6039                 {"1920x1200", 1920, 1200}
6040         };
6041
6042         n = ARRAY_SIZE(common_modes);
6043
6044         for (i = 0; i < n; i++) {
6045                 struct drm_display_mode *curmode = NULL;
6046                 bool mode_existed = false;
6047
6048                 if (common_modes[i].w > native_mode->hdisplay ||
6049                     common_modes[i].h > native_mode->vdisplay ||
6050                    (common_modes[i].w == native_mode->hdisplay &&
6051                     common_modes[i].h == native_mode->vdisplay))
6052                         continue;
6053
6054                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6055                         if (common_modes[i].w == curmode->hdisplay &&
6056                             common_modes[i].h == curmode->vdisplay) {
6057                                 mode_existed = true;
6058                                 break;
6059                         }
6060                 }
6061
6062                 if (mode_existed)
6063                         continue;
6064
6065                 mode = amdgpu_dm_create_common_mode(encoder,
6066                                 common_modes[i].name, common_modes[i].w,
6067                                 common_modes[i].h);
6068                 drm_mode_probed_add(connector, mode);
6069                 amdgpu_dm_connector->num_modes++;
6070         }
6071 }
6072
/*
 * Populate the connector's probed mode list from @edid (or clear the mode
 * count when no EDID is available), then sort the list and refresh the
 * cached native mode.
 */
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* sorting the probed modes before calling function
		 * amdgpu_dm_get_native_mode() since EDID can have
		 * more than one preferred mode. The modes that are
		 * later in the probed mode list could be of higher
		 * and preferred resolution. For example, 3840x2160
		 * resolution in base EDID preferred timing and 4096x2160
		 * preferred resolution in DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}
6099
6100 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6101 {
6102         struct amdgpu_dm_connector *amdgpu_dm_connector =
6103                         to_amdgpu_dm_connector(connector);
6104         struct drm_encoder *encoder;
6105         struct edid *edid = amdgpu_dm_connector->edid;
6106
6107         encoder = amdgpu_dm_connector_to_encoder(connector);
6108
6109         if (!edid || !drm_edid_is_valid(edid)) {
6110                 amdgpu_dm_connector->num_modes =
6111                                 drm_add_modes_noedid(connector, 640, 480);
6112         } else {
6113                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6114                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6115         }
6116         amdgpu_dm_fbc_init(connector);
6117
6118         return amdgpu_dm_connector->num_modes;
6119 }
6120
/*
 * Finish initializing a dm connector: set defaults, choose HPD polling and
 * YCbCr 4:2:0 support per connector type, and attach the scaling,
 * underscan, bpc, HDR, VRR, ABM, and (optionally) HDCP properties.
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * configure support HPD hot plug; connector->polled default value is 0
	 * which means HPD hot plug not supported
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	/* MST ports get their max_bpc property from the MST code instead. */
	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	/* ABM (backlight modulation) only applies to eDP with a working DMCU. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    dc_is_dmcu_initialized(adev->dm.dc)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
6211
6212 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6213                               struct i2c_msg *msgs, int num)
6214 {
6215         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6216         struct ddc_service *ddc_service = i2c->ddc_service;
6217         struct i2c_command cmd;
6218         int i;
6219         int result = -EIO;
6220
6221         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6222
6223         if (!cmd.payloads)
6224                 return result;
6225
6226         cmd.number_of_payloads = num;
6227         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6228         cmd.speed = 100;
6229
6230         for (i = 0; i < num; i++) {
6231                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6232                 cmd.payloads[i].address = msgs[i].addr;
6233                 cmd.payloads[i].length = msgs[i].len;
6234                 cmd.payloads[i].data = msgs[i].buf;
6235         }
6236
6237         if (dc_submit_i2c(
6238                         ddc_service->ctx->dc,
6239                         ddc_service->ddc_pin->hw_info.ddc_channel,
6240                         &cmd))
6241                 result = num;
6242
6243         kfree(cmd.payloads);
6244         return result;
6245 }
6246
6247 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6248 {
6249         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6250 }
6251
/* i2c algorithm backed by DC's hardware DDC engine. */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
6256
6257 static struct amdgpu_i2c_adapter *
6258 create_i2c(struct ddc_service *ddc_service,
6259            int link_index,
6260            int *res)
6261 {
6262         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6263         struct amdgpu_i2c_adapter *i2c;
6264
6265         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6266         if (!i2c)
6267                 return NULL;
6268         i2c->base.owner = THIS_MODULE;
6269         i2c->base.class = I2C_CLASS_DDC;
6270         i2c->base.dev.parent = &adev->pdev->dev;
6271         i2c->base.algo = &amdgpu_dm_i2c_algo;
6272         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6273         i2c_set_adapdata(&i2c->base, i2c);
6274         i2c->ddc_service = ddc_service;
6275         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6276
6277         return i2c;
6278 }
6279
6280
6281 /*
6282  * Note: this function assumes that dc_link_detect() was called for the
6283  * dc_link which will be represented by this aconnector.
6284  */
6285 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6286                                     struct amdgpu_dm_connector *aconnector,
6287                                     uint32_t link_index,
6288                                     struct amdgpu_encoder *aencoder)
6289 {
6290         int res = 0;
6291         int connector_type;
6292         struct dc *dc = dm->dc;
6293         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6294         struct amdgpu_i2c_adapter *i2c;
6295
6296         link->priv = aconnector;
6297
6298         DRM_DEBUG_DRIVER("%s()\n", __func__);
6299
6300         i2c = create_i2c(link->ddc, link->link_index, &res);
6301         if (!i2c) {
6302                 DRM_ERROR("Failed to create i2c adapter data\n");
6303                 return -ENOMEM;
6304         }
6305
6306         aconnector->i2c = i2c;
6307         res = i2c_add_adapter(&i2c->base);
6308
6309         if (res) {
6310                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6311                 goto out_free;
6312         }
6313
6314         connector_type = to_drm_connector_type(link->connector_signal);
6315
6316         res = drm_connector_init_with_ddc(
6317                         dm->ddev,
6318                         &aconnector->base,
6319                         &amdgpu_dm_connector_funcs,
6320                         connector_type,
6321                         &i2c->base);
6322
6323         if (res) {
6324                 DRM_ERROR("connector_init failed\n");
6325                 aconnector->connector_id = -1;
6326                 goto out_free;
6327         }
6328
6329         drm_connector_helper_add(
6330                         &aconnector->base,
6331                         &amdgpu_dm_connector_helper_funcs);
6332
6333         amdgpu_dm_connector_init_helper(
6334                 dm,
6335                 aconnector,
6336                 connector_type,
6337                 link,
6338                 link_index);
6339
6340         drm_connector_attach_encoder(
6341                 &aconnector->base, &aencoder->base);
6342
6343         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6344                 || connector_type == DRM_MODE_CONNECTOR_eDP)
6345                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6346
6347 out_free:
6348         if (res) {
6349                 kfree(i2c);
6350                 aconnector->i2c = NULL;
6351         }
6352         return res;
6353 }
6354
6355 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6356 {
6357         switch (adev->mode_info.num_crtc) {
6358         case 1:
6359                 return 0x1;
6360         case 2:
6361                 return 0x3;
6362         case 3:
6363                 return 0x7;
6364         case 4:
6365                 return 0xf;
6366         case 5:
6367                 return 0x1f;
6368         case 6:
6369         default:
6370                 return 0x3f;
6371         }
6372 }
6373
6374 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6375                                   struct amdgpu_encoder *aencoder,
6376                                   uint32_t link_index)
6377 {
6378         struct amdgpu_device *adev = dev->dev_private;
6379
6380         int res = drm_encoder_init(dev,
6381                                    &aencoder->base,
6382                                    &amdgpu_dm_encoder_funcs,
6383                                    DRM_MODE_ENCODER_TMDS,
6384                                    NULL);
6385
6386         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6387
6388         if (!res)
6389                 aencoder->encoder_id = link_index;
6390         else
6391                 aencoder->encoder_id = -1;
6392
6393         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6394
6395         return res;
6396 }
6397
6398 static void manage_dm_interrupts(struct amdgpu_device *adev,
6399                                  struct amdgpu_crtc *acrtc,
6400                                  bool enable)
6401 {
6402         /*
6403          * this is not correct translation but will work as soon as VBLANK
6404          * constant is the same as PFLIP
6405          */
6406         int irq_type =
6407                 amdgpu_display_crtc_idx_to_irq_type(
6408                         adev,
6409                         acrtc->crtc_id);
6410
6411         if (enable) {
6412                 drm_crtc_vblank_on(&acrtc->base);
6413                 amdgpu_irq_get(
6414                         adev,
6415                         &adev->pageflip_irq,
6416                         irq_type);
6417         } else {
6418
6419                 amdgpu_irq_put(
6420                         adev,
6421                         &adev->pageflip_irq,
6422                         irq_type);
6423                 drm_crtc_vblank_off(&acrtc->base);
6424         }
6425 }
6426
6427 static bool
6428 is_scaling_state_different(const struct dm_connector_state *dm_state,
6429                            const struct dm_connector_state *old_dm_state)
6430 {
6431         if (dm_state->scaling != old_dm_state->scaling)
6432                 return true;
6433         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6434                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6435                         return true;
6436         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6437                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6438                         return true;
6439         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6440                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6441                 return true;
6442         return false;
6443 }
6444
#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether HDCP work should be (re)started for this connector based
 * on the old vs. new content-protection state.  May rewrite
 * state->content_protection as a side effect to normalize transitional
 * values.  Returns true when the caller should kick off HDCP enable/disable.
 * NOTE(review): hdcp_w is currently unused here — confirm with callers.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Content type changed while protection is still requested: drop back
	 * to DESIRED so HDCP restarts with the new type.
	 */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	/* No state change: nothing to do. */
	if (old_state->content_protection == state->content_protection)
		return false;

	/* Protection newly unwanted: tear HDCP down. */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}

#endif
/*
 * Detach a stream from its CRTC bookkeeping: clear the OTG instance and
 * mark the CRTC disabled.  adev and stream are currently unused; they are
 * presumably kept for symmetry with the stream-creation path — TODO confirm.
 */
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
6496
6497 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6498                                struct dc_cursor_position *position)
6499 {
6500         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6501         int x, y;
6502         int xorigin = 0, yorigin = 0;
6503
6504         position->enable = false;
6505         position->x = 0;
6506         position->y = 0;
6507
6508         if (!crtc || !plane->state->fb)
6509                 return 0;
6510
6511         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6512             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6513                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6514                           __func__,
6515                           plane->state->crtc_w,
6516                           plane->state->crtc_h);
6517                 return -EINVAL;
6518         }
6519
6520         x = plane->state->crtc_x;
6521         y = plane->state->crtc_y;
6522
6523         if (x <= -amdgpu_crtc->max_cursor_width ||
6524             y <= -amdgpu_crtc->max_cursor_height)
6525                 return 0;
6526
6527         if (x < 0) {
6528                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6529                 x = 0;
6530         }
6531         if (y < 0) {
6532                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6533                 y = 0;
6534         }
6535         position->enable = true;
6536         position->translate_by_source = true;
6537         position->x = x;
6538         position->y = y;
6539         position->x_hotspot = xorigin;
6540         position->y_hotspot = yorigin;
6541
6542         return 0;
6543 }
6544
/*
 * Program the hardware cursor for a cursor-plane update: computes the new
 * position and, if the cursor is visible, pushes position and attributes
 * (address, size, format) to DC under dm.dc_lock.  A disabled/off-screen
 * cursor is turned off via a position update with enable=false.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	/* New fb if present, otherwise fall back to the old state's CRTC. */
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	/* Neither old nor new state has a cursor fb: nothing to update. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	/* Cursor surfaces are tightly packed: pitch equals width. */
	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
6608
/*
 * Arm the pageflip completion for this CRTC: take ownership of the pending
 * drm event and mark the flip as submitted so the pflip IRQ handler can
 * deliver the completion.  Caller must hold the device's event_lock.
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
						 acrtc->crtc_id);
}
6626
/*
 * Per-flip freesync/VRR bookkeeping: feed the flip into the freesync module,
 * rebuild the VRR infopacket, and record whether timing or infopacket
 * changed so the commit path knows to send stream updates.  The CRTC's VRR
 * state is read and written under the device event_lock.
 */
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	/* Work on a local copy; committed back below under the same lock. */
	vrr_params = new_crtc_state->vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		/* Pre-AI (pre-Vega) hardware adjusts vmin/vmax from here. */
		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	/* Flag changes before overwriting the cached copies below. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = new_crtc_state->vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
6705
/*
 * Pre-commit freesync/VRR setup for a stream: derive the VRR state from the
 * CRTC's freesync config and vrr_enabled property, rebuild the VRR params,
 * and record whether the timing adjustment changed.  Runs under the device
 * event_lock like update_freesync_state_on_stream().
 */
static void pre_update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	/* VRR is active only when supported and a valid refresh range exists. */
	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	/* Flag a timing change before caching the new params. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
6751
6752 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6753                                             struct dm_crtc_state *new_state)
6754 {
6755         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6756         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6757
6758         if (!old_vrr_active && new_vrr_active) {
6759                 /* Transition VRR inactive -> active:
6760                  * While VRR is active, we must not disable vblank irq, as a
6761                  * reenable after disable would compute bogus vblank/pflip
6762                  * timestamps if it likely happened inside display front-porch.
6763                  *
6764                  * We also need vupdate irq for the actual core vblank handling
6765                  * at end of vblank.
6766                  */
6767                 dm_set_vupdate_irq(new_state->base.crtc, true);
6768                 drm_crtc_vblank_get(new_state->base.crtc);
6769                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6770                                  __func__, new_state->base.crtc->base.id);
6771         } else if (old_vrr_active && !new_vrr_active) {
6772                 /* Transition VRR active -> inactive:
6773                  * Allow vblank irq disable again for fixed refresh rate.
6774                  */
6775                 dm_set_vupdate_irq(new_state->base.crtc, false);
6776                 drm_crtc_vblank_put(new_state->base.crtc);
6777                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6778                                  __func__, new_state->base.crtc->base.id);
6779         }
6780 }
6781
6782 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6783 {
6784         struct drm_plane *plane;
6785         struct drm_plane_state *old_plane_state, *new_plane_state;
6786         int i;
6787
6788         /*
6789          * TODO: Make this per-stream so we don't issue redundant updates for
6790          * commits with multiple streams.
6791          */
6792         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6793                                        new_plane_state, i)
6794                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6795                         handle_cursor_update(plane, old_plane_state);
6796 }
6797
6798 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6799                                     struct dc_state *dc_state,
6800                                     struct drm_device *dev,
6801                                     struct amdgpu_display_manager *dm,
6802                                     struct drm_crtc *pcrtc,
6803                                     bool wait_for_vblank)
6804 {
6805         uint32_t i;
6806         uint64_t timestamp_ns;
6807         struct drm_plane *plane;
6808         struct drm_plane_state *old_plane_state, *new_plane_state;
6809         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6810         struct drm_crtc_state *new_pcrtc_state =
6811                         drm_atomic_get_new_crtc_state(state, pcrtc);
6812         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6813         struct dm_crtc_state *dm_old_crtc_state =
6814                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6815         int planes_count = 0, vpos, hpos;
6816         long r;
6817         unsigned long flags;
6818         struct amdgpu_bo *abo;
6819         uint64_t tiling_flags;
6820         bool tmz_surface = false;
6821         uint32_t target_vblank, last_flip_vblank;
6822         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6823         bool pflip_present = false;
6824         struct {
6825                 struct dc_surface_update surface_updates[MAX_SURFACES];
6826                 struct dc_plane_info plane_infos[MAX_SURFACES];
6827                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6828                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6829                 struct dc_stream_update stream_update;
6830         } *bundle;
6831
6832         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6833
6834         if (!bundle) {
6835                 dm_error("Failed to allocate update bundle\n");
6836                 goto cleanup;
6837         }
6838
6839         /*
6840          * Disable the cursor first if we're disabling all the planes.
6841          * It'll remain on the screen after the planes are re-enabled
6842          * if we don't.
6843          */
6844         if (acrtc_state->active_planes == 0)
6845                 amdgpu_dm_commit_cursors(state);
6846
6847         /* update planes when needed */
6848         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6849                 struct drm_crtc *crtc = new_plane_state->crtc;
6850                 struct drm_crtc_state *new_crtc_state;
6851                 struct drm_framebuffer *fb = new_plane_state->fb;
6852                 bool plane_needs_flip;
6853                 struct dc_plane_state *dc_plane;
6854                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6855
6856                 /* Cursor plane is handled after stream updates */
6857                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6858                         continue;
6859
6860                 if (!fb || !crtc || pcrtc != crtc)
6861                         continue;
6862
6863                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6864                 if (!new_crtc_state->active)
6865                         continue;
6866
6867                 dc_plane = dm_new_plane_state->dc_state;
6868
6869                 bundle->surface_updates[planes_count].surface = dc_plane;
6870                 if (new_pcrtc_state->color_mgmt_changed) {
6871                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6872                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6873                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6874                 }
6875
6876                 fill_dc_scaling_info(new_plane_state,
6877                                      &bundle->scaling_infos[planes_count]);
6878
6879                 bundle->surface_updates[planes_count].scaling_info =
6880                         &bundle->scaling_infos[planes_count];
6881
6882                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6883
6884                 pflip_present = pflip_present || plane_needs_flip;
6885
6886                 if (!plane_needs_flip) {
6887                         planes_count += 1;
6888                         continue;
6889                 }
6890
6891                 abo = gem_to_amdgpu_bo(fb->obj[0]);
6892
6893                 /*
6894                  * Wait for all fences on this FB. Do limited wait to avoid
6895                  * deadlock during GPU reset when this fence will not signal
6896                  * but we hold reservation lock for the BO.
6897                  */
6898                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6899                                                         false,
6900                                                         msecs_to_jiffies(5000));
6901                 if (unlikely(r <= 0))
6902                         DRM_ERROR("Waiting for fences timed out!");
6903
6904                 /*
6905                  * TODO This might fail and hence better not used, wait
6906                  * explicitly on fences instead
6907                  * and in general should be called for
6908                  * blocking commit to as per framework helpers
6909                  */
6910                 r = amdgpu_bo_reserve(abo, true);
6911                 if (unlikely(r != 0))
6912                         DRM_ERROR("failed to reserve buffer before flip\n");
6913
6914                 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6915
6916                 tmz_surface = amdgpu_bo_encrypted(abo);
6917
6918                 amdgpu_bo_unreserve(abo);
6919
6920                 fill_dc_plane_info_and_addr(
6921                         dm->adev, new_plane_state, tiling_flags,
6922                         &bundle->plane_infos[planes_count],
6923                         &bundle->flip_addrs[planes_count].address,
6924                         tmz_surface,
6925                         false);
6926
6927                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6928                                  new_plane_state->plane->index,
6929                                  bundle->plane_infos[planes_count].dcc.enable);
6930
6931                 bundle->surface_updates[planes_count].plane_info =
6932                         &bundle->plane_infos[planes_count];
6933
6934                 /*
6935                  * Only allow immediate flips for fast updates that don't
6936                  * change FB pitch, DCC state, rotation or mirroing.
6937                  */
6938                 bundle->flip_addrs[planes_count].flip_immediate =
6939                         crtc->state->async_flip &&
6940                         acrtc_state->update_type == UPDATE_TYPE_FAST;
6941
6942                 timestamp_ns = ktime_get_ns();
6943                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6944                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6945                 bundle->surface_updates[planes_count].surface = dc_plane;
6946
6947                 if (!bundle->surface_updates[planes_count].surface) {
6948                         DRM_ERROR("No surface for CRTC: id=%d\n",
6949                                         acrtc_attach->crtc_id);
6950                         continue;
6951                 }
6952
6953                 if (plane == pcrtc->primary)
6954                         update_freesync_state_on_stream(
6955                                 dm,
6956                                 acrtc_state,
6957                                 acrtc_state->stream,
6958                                 dc_plane,
6959                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6960
6961                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6962                                  __func__,
6963                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6964                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6965
6966                 planes_count += 1;
6967
6968         }
6969
6970         if (pflip_present) {
6971                 if (!vrr_active) {
6972                         /* Use old throttling in non-vrr fixed refresh rate mode
6973                          * to keep flip scheduling based on target vblank counts
6974                          * working in a backwards compatible way, e.g., for
6975                          * clients using the GLX_OML_sync_control extension or
6976                          * DRI3/Present extension with defined target_msc.
6977                          */
6978                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6979                 }
6980                 else {
6981                         /* For variable refresh rate mode only:
6982                          * Get vblank of last completed flip to avoid > 1 vrr
6983                          * flips per video frame by use of throttling, but allow
6984                          * flip programming anywhere in the possibly large
6985                          * variable vrr vblank interval for fine-grained flip
6986                          * timing control and more opportunity to avoid stutter
6987                          * on late submission of flips.
6988                          */
6989                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6990                         last_flip_vblank = acrtc_attach->last_flip_vblank;
6991                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6992                 }
6993
6994                 target_vblank = last_flip_vblank + wait_for_vblank;
6995
6996                 /*
6997                  * Wait until we're out of the vertical blank period before the one
6998                  * targeted by the flip
6999                  */
7000                 while ((acrtc_attach->enabled &&
7001                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7002                                                             0, &vpos, &hpos, NULL,
7003                                                             NULL, &pcrtc->hwmode)
7004                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7005                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7006                         (int)(target_vblank -
7007                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7008                         usleep_range(1000, 1100);
7009                 }
7010
7011                 if (acrtc_attach->base.state->event) {
7012                         drm_crtc_vblank_get(pcrtc);
7013
7014                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7015
7016                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7017                         prepare_flip_isr(acrtc_attach);
7018
7019                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7020                 }
7021
7022                 if (acrtc_state->stream) {
7023                         if (acrtc_state->freesync_vrr_info_changed)
7024                                 bundle->stream_update.vrr_infopacket =
7025                                         &acrtc_state->stream->vrr_infopacket;
7026                 }
7027         }
7028
7029         /* Update the planes if changed or disable if we don't have any. */
7030         if ((planes_count || acrtc_state->active_planes == 0) &&
7031                 acrtc_state->stream) {
7032                 bundle->stream_update.stream = acrtc_state->stream;
7033                 if (new_pcrtc_state->mode_changed) {
7034                         bundle->stream_update.src = acrtc_state->stream->src;
7035                         bundle->stream_update.dst = acrtc_state->stream->dst;
7036                 }
7037
7038                 if (new_pcrtc_state->color_mgmt_changed) {
7039                         /*
7040                          * TODO: This isn't fully correct since we've actually
7041                          * already modified the stream in place.
7042                          */
7043                         bundle->stream_update.gamut_remap =
7044                                 &acrtc_state->stream->gamut_remap_matrix;
7045                         bundle->stream_update.output_csc_transform =
7046                                 &acrtc_state->stream->csc_color_matrix;
7047                         bundle->stream_update.out_transfer_func =
7048                                 acrtc_state->stream->out_transfer_func;
7049                 }
7050
7051                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7052                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7053                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7054
7055                 /*
7056                  * If FreeSync state on the stream has changed then we need to
7057                  * re-adjust the min/max bounds now that DC doesn't handle this
7058                  * as part of commit.
7059                  */
7060                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7061                     amdgpu_dm_vrr_active(acrtc_state)) {
7062                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7063                         dc_stream_adjust_vmin_vmax(
7064                                 dm->dc, acrtc_state->stream,
7065                                 &acrtc_state->vrr_params.adjust);
7066                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7067                 }
7068                 mutex_lock(&dm->dc_lock);
7069                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7070                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7071                         amdgpu_dm_psr_disable(acrtc_state->stream);
7072
7073                 dc_commit_updates_for_stream(dm->dc,
7074                                                      bundle->surface_updates,
7075                                                      planes_count,
7076                                                      acrtc_state->stream,
7077                                                      &bundle->stream_update,
7078                                                      dc_state);
7079
7080                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7081                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7082                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7083                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7084                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7085                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7086                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7087                         amdgpu_dm_psr_enable(acrtc_state->stream);
7088                 }
7089
7090                 mutex_unlock(&dm->dc_lock);
7091         }
7092
7093         /*
7094          * Update cursor state *after* programming all the planes.
7095          * This avoids redundant programming in the case where we're going
7096          * to be disabling a single plane - those pipes are being disabled.
7097          */
7098         if (acrtc_state->active_planes)
7099                 amdgpu_dm_commit_cursors(state);
7100
7101 cleanup:
7102         kfree(bundle);
7103 }
7104
7105 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7106                                    struct drm_atomic_state *state)
7107 {
7108         struct amdgpu_device *adev = dev->dev_private;
7109         struct amdgpu_dm_connector *aconnector;
7110         struct drm_connector *connector;
7111         struct drm_connector_state *old_con_state, *new_con_state;
7112         struct drm_crtc_state *new_crtc_state;
7113         struct dm_crtc_state *new_dm_crtc_state;
7114         const struct dc_stream_status *status;
7115         int i, inst;
7116
7117         /* Notify device removals. */
7118         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7119                 if (old_con_state->crtc != new_con_state->crtc) {
7120                         /* CRTC changes require notification. */
7121                         goto notify;
7122                 }
7123
7124                 if (!new_con_state->crtc)
7125                         continue;
7126
7127                 new_crtc_state = drm_atomic_get_new_crtc_state(
7128                         state, new_con_state->crtc);
7129
7130                 if (!new_crtc_state)
7131                         continue;
7132
7133                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7134                         continue;
7135
7136         notify:
7137                 aconnector = to_amdgpu_dm_connector(connector);
7138
7139                 mutex_lock(&adev->dm.audio_lock);
7140                 inst = aconnector->audio_inst;
7141                 aconnector->audio_inst = -1;
7142                 mutex_unlock(&adev->dm.audio_lock);
7143
7144                 amdgpu_dm_audio_eld_notify(adev, inst);
7145         }
7146
7147         /* Notify audio device additions. */
7148         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7149                 if (!new_con_state->crtc)
7150                         continue;
7151
7152                 new_crtc_state = drm_atomic_get_new_crtc_state(
7153                         state, new_con_state->crtc);
7154
7155                 if (!new_crtc_state)
7156                         continue;
7157
7158                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7159                         continue;
7160
7161                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7162                 if (!new_dm_crtc_state->stream)
7163                         continue;
7164
7165                 status = dc_stream_get_status(new_dm_crtc_state->stream);
7166                 if (!status)
7167                         continue;
7168
7169                 aconnector = to_amdgpu_dm_connector(connector);
7170
7171                 mutex_lock(&adev->dm.audio_lock);
7172                 inst = status->audio_inst;
7173                 aconnector->audio_inst = inst;
7174                 mutex_unlock(&adev->dm.audio_lock);
7175
7176                 amdgpu_dm_audio_eld_notify(adev, inst);
7177         }
7178 }
7179
7180 /*
7181  * Enable interrupts on CRTCs that are newly active, undergone
7182  * a modeset, or have active planes again.
7183  *
7184  * Done in two passes, based on the for_modeset flag:
7185  * Pass 1: For CRTCs going through modeset
7186  * Pass 2: For CRTCs going from 0 to n active planes
7187  *
7188  * Interrupts can only be enabled after the planes are programmed,
7189  * so this requires a two-pass approach since we don't want to
7190  * just defer the interrupts until after commit planes every time.
7191  */
7192 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
7193                                              struct drm_atomic_state *state,
7194                                              bool for_modeset)
7195 {
7196         struct amdgpu_device *adev = dev->dev_private;
7197         struct drm_crtc *crtc;
7198         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7199         int i;
7200 #ifdef CONFIG_DEBUG_FS
7201         enum amdgpu_dm_pipe_crc_source source;
7202 #endif
7203
7204         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7205                                       new_crtc_state, i) {
7206                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7207                 struct dm_crtc_state *dm_new_crtc_state =
7208                         to_dm_crtc_state(new_crtc_state);
7209                 struct dm_crtc_state *dm_old_crtc_state =
7210                         to_dm_crtc_state(old_crtc_state);
7211                 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7212                 bool run_pass;
7213
7214                 run_pass = (for_modeset && modeset) ||
7215                            (!for_modeset && !modeset &&
7216                             !dm_old_crtc_state->interrupts_enabled);
7217
7218                 if (!run_pass)
7219                         continue;
7220
7221                 if (!dm_new_crtc_state->interrupts_enabled)
7222                         continue;
7223
7224                 manage_dm_interrupts(adev, acrtc, true);
7225
7226 #ifdef CONFIG_DEBUG_FS
7227                 /* The stream has changed so CRC capture needs to re-enabled. */
7228                 source = dm_new_crtc_state->crc_src;
7229                 if (amdgpu_dm_is_valid_crc_source(source)) {
7230                         amdgpu_dm_crtc_configure_crc_source(
7231                                 crtc, dm_new_crtc_state,
7232                                 dm_new_crtc_state->crc_src);
7233                 }
7234 #endif
7235         }
7236 }
7237
/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	/* Mirror DRM's "needs full modeset" decision into the DC stream. */
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
7251
7252 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7253                                    struct drm_atomic_state *state,
7254                                    bool nonblock)
7255 {
7256         struct drm_crtc *crtc;
7257         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7258         struct amdgpu_device *adev = dev->dev_private;
7259         int i;
7260
7261         /*
7262          * We evade vblank and pflip interrupts on CRTCs that are undergoing
7263          * a modeset, being disabled, or have no active planes.
7264          *
7265          * It's done in atomic commit rather than commit tail for now since
7266          * some of these interrupt handlers access the current CRTC state and
7267          * potentially the stream pointer itself.
7268          *
7269          * Since the atomic state is swapped within atomic commit and not within
7270          * commit tail this would leave to new state (that hasn't been committed yet)
7271          * being accesssed from within the handlers.
7272          *
7273          * TODO: Fix this so we can do this in commit tail and not have to block
7274          * in atomic check.
7275          */
7276         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7277                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7278                 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7279                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7280
7281                 if (dm_old_crtc_state->interrupts_enabled &&
7282                     (!dm_new_crtc_state->interrupts_enabled ||
7283                      drm_atomic_crtc_needs_modeset(new_crtc_state)))
7284                         manage_dm_interrupts(adev, acrtc, false);
7285         }
7286         /*
7287          * Add check here for SoC's that support hardware cursor plane, to
7288          * unset legacy_cursor_update
7289          */
7290
7291         return drm_atomic_helper_commit(dev, state, nonblock);
7292
7293         /*TODO Handle EINTR, reenable IRQ*/
7294 }
7295
7296 /**
7297  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7298  * @state: The atomic state to commit
7299  *
7300  * This will tell DC to commit the constructed DC state from atomic_check,
7301  * programming the hardware. Any failures here implies a hardware failure, since
7302  * atomic check should have filtered anything non-kosher.
7303  */
7304 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7305 {
7306         struct drm_device *dev = state->dev;
7307         struct amdgpu_device *adev = dev->dev_private;
7308         struct amdgpu_display_manager *dm = &adev->dm;
7309         struct dm_atomic_state *dm_state;
7310         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7311         uint32_t i, j;
7312         struct drm_crtc *crtc;
7313         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7314         unsigned long flags;
7315         bool wait_for_vblank = true;
7316         struct drm_connector *connector;
7317         struct drm_connector_state *old_con_state, *new_con_state;
7318         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7319         int crtc_disable_count = 0;
7320
7321         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7322
7323         dm_state = dm_atomic_get_new_state(state);
7324         if (dm_state && dm_state->context) {
7325                 dc_state = dm_state->context;
7326         } else {
7327                 /* No state changes, retain current state. */
7328                 dc_state_temp = dc_create_state(dm->dc);
7329                 ASSERT(dc_state_temp);
7330                 dc_state = dc_state_temp;
7331                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7332         }
7333
7334         /* update changed items */
7335         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7336                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7337
7338                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7339                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7340
7341                 DRM_DEBUG_DRIVER(
7342                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7343                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7344                         "connectors_changed:%d\n",
7345                         acrtc->crtc_id,
7346                         new_crtc_state->enable,
7347                         new_crtc_state->active,
7348                         new_crtc_state->planes_changed,
7349                         new_crtc_state->mode_changed,
7350                         new_crtc_state->active_changed,
7351                         new_crtc_state->connectors_changed);
7352
7353                 /* Copy all transient state flags into dc state */
7354                 if (dm_new_crtc_state->stream) {
7355                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7356                                                             dm_new_crtc_state->stream);
7357                 }
7358
7359                 /* handles headless hotplug case, updating new_state and
7360                  * aconnector as needed
7361                  */
7362
7363                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7364
7365                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7366
7367                         if (!dm_new_crtc_state->stream) {
7368                                 /*
7369                                  * this could happen because of issues with
7370                                  * userspace notifications delivery.
7371                                  * In this case userspace tries to set mode on
7372                                  * display which is disconnected in fact.
7373                                  * dc_sink is NULL in this case on aconnector.
7374                                  * We expect reset mode will come soon.
7375                                  *
7376                                  * This can also happen when unplug is done
7377                                  * during resume sequence ended
7378                                  *
7379                                  * In this case, we want to pretend we still
7380                                  * have a sink to keep the pipe running so that
7381                                  * hw state is consistent with the sw state
7382                                  */
7383                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7384                                                 __func__, acrtc->base.base.id);
7385                                 continue;
7386                         }
7387
7388                         if (dm_old_crtc_state->stream)
7389                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7390
7391                         pm_runtime_get_noresume(dev->dev);
7392
7393                         acrtc->enabled = true;
7394                         acrtc->hw_mode = new_crtc_state->mode;
7395                         crtc->hwmode = new_crtc_state->mode;
7396                 } else if (modereset_required(new_crtc_state)) {
7397                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7398                         /* i.e. reset mode */
7399                         if (dm_old_crtc_state->stream) {
7400                                 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7401                                         amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7402
7403                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7404                         }
7405                 }
7406         } /* for_each_crtc_in_state() */
7407
7408         if (dc_state) {
7409                 dm_enable_per_frame_crtc_master_sync(dc_state);
7410                 mutex_lock(&dm->dc_lock);
7411                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7412                 mutex_unlock(&dm->dc_lock);
7413         }
7414
7415         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7416                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7417
7418                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7419
7420                 if (dm_new_crtc_state->stream != NULL) {
7421                         const struct dc_stream_status *status =
7422                                         dc_stream_get_status(dm_new_crtc_state->stream);
7423
7424                         if (!status)
7425                                 status = dc_stream_get_status_from_state(dc_state,
7426                                                                          dm_new_crtc_state->stream);
7427
7428                         if (!status)
7429                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7430                         else
7431                                 acrtc->otg_inst = status->primary_otg_inst;
7432                 }
7433         }
7434 #ifdef CONFIG_DRM_AMD_DC_HDCP
7435         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7436                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7437                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7438                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7439
7440                 new_crtc_state = NULL;
7441
7442                 if (acrtc)
7443                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7444
7445                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7446
7447                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7448                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7449                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7450                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7451                         continue;
7452                 }
7453
7454                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7455                         hdcp_update_display(
7456                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7457                                 new_con_state->hdcp_content_type,
7458                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7459                                                                                                          : false);
7460         }
7461 #endif
7462
7463         /* Handle connector state changes */
7464         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7465                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7466                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7467                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7468                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7469                 struct dc_stream_update stream_update;
7470                 struct dc_info_packet hdr_packet;
7471                 struct dc_stream_status *status = NULL;
7472                 bool abm_changed, hdr_changed, scaling_changed;
7473
7474                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7475                 memset(&stream_update, 0, sizeof(stream_update));
7476
7477                 if (acrtc) {
7478                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7479                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7480                 }
7481
7482                 /* Skip any modesets/resets */
7483                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7484                         continue;
7485
7486                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7487                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7488
7489                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7490                                                              dm_old_con_state);
7491
7492                 abm_changed = dm_new_crtc_state->abm_level !=
7493                               dm_old_crtc_state->abm_level;
7494
7495                 hdr_changed =
7496                         is_hdr_metadata_different(old_con_state, new_con_state);
7497
7498                 if (!scaling_changed && !abm_changed && !hdr_changed)
7499                         continue;
7500
7501                 stream_update.stream = dm_new_crtc_state->stream;
7502                 if (scaling_changed) {
7503                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7504                                         dm_new_con_state, dm_new_crtc_state->stream);
7505
7506                         stream_update.src = dm_new_crtc_state->stream->src;
7507                         stream_update.dst = dm_new_crtc_state->stream->dst;
7508                 }
7509
7510                 if (abm_changed) {
7511                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7512
7513                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7514                 }
7515
7516                 if (hdr_changed) {
7517                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7518                         stream_update.hdr_static_metadata = &hdr_packet;
7519                 }
7520
7521                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7522                 WARN_ON(!status);
7523                 WARN_ON(!status->plane_count);
7524
7525                 /*
7526                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7527                  * Here we create an empty update on each plane.
7528                  * To fix this, DC should permit updating only stream properties.
7529                  */
7530                 for (j = 0; j < status->plane_count; j++)
7531                         dummy_updates[j].surface = status->plane_states[0];
7532
7533
7534                 mutex_lock(&dm->dc_lock);
7535                 dc_commit_updates_for_stream(dm->dc,
7536                                                      dummy_updates,
7537                                                      status->plane_count,
7538                                                      dm_new_crtc_state->stream,
7539                                                      &stream_update,
7540                                                      dc_state);
7541                 mutex_unlock(&dm->dc_lock);
7542         }
7543
7544         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7545         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7546                                       new_crtc_state, i) {
7547                 if (old_crtc_state->active && !new_crtc_state->active)
7548                         crtc_disable_count++;
7549
7550                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7551                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7552
7553                 /* Update freesync active state. */
7554                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7555
7556                 /* Handle vrr on->off / off->on transitions */
7557                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7558                                                 dm_new_crtc_state);
7559         }
7560
7561         /* Enable interrupts for CRTCs going through a modeset. */
7562         amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7563
7564         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7565                 if (new_crtc_state->async_flip)
7566                         wait_for_vblank = false;
7567
7568         /* update planes when needed per crtc*/
7569         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7570                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7571
7572                 if (dm_new_crtc_state->stream)
7573                         amdgpu_dm_commit_planes(state, dc_state, dev,
7574                                                 dm, crtc, wait_for_vblank);
7575         }
7576
7577         /* Enable interrupts for CRTCs going from 0 to n active planes. */
7578         amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7579
7580         /* Update audio instances for each connector. */
7581         amdgpu_dm_commit_audio(dev, state);
7582
7583         /*
7584          * send vblank event on all events not handled in flip and
7585          * mark consumed event for drm_atomic_helper_commit_hw_done
7586          */
7587         spin_lock_irqsave(&adev->ddev->event_lock, flags);
7588         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7589
7590                 if (new_crtc_state->event)
7591                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7592
7593                 new_crtc_state->event = NULL;
7594         }
7595         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7596
7597         /* Signal HW programming completion */
7598         drm_atomic_helper_commit_hw_done(state);
7599
7600         if (wait_for_vblank)
7601                 drm_atomic_helper_wait_for_flip_done(dev, state);
7602
7603         drm_atomic_helper_cleanup_planes(dev, state);
7604
7605         /*
7606          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7607          * so we can put the GPU into runtime suspend if we're not driving any
7608          * displays anymore
7609          */
7610         for (i = 0; i < crtc_disable_count; i++)
7611                 pm_runtime_put_autosuspend(dev->dev);
7612         pm_runtime_mark_last_busy(dev->dev);
7613
7614         if (dc_state_temp)
7615                 dc_release_state(dc_state_temp);
7616 }
7617
7618
7619 static int dm_force_atomic_commit(struct drm_connector *connector)
7620 {
7621         int ret = 0;
7622         struct drm_device *ddev = connector->dev;
7623         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7624         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7625         struct drm_plane *plane = disconnected_acrtc->base.primary;
7626         struct drm_connector_state *conn_state;
7627         struct drm_crtc_state *crtc_state;
7628         struct drm_plane_state *plane_state;
7629
7630         if (!state)
7631                 return -ENOMEM;
7632
7633         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7634
7635         /* Construct an atomic state to restore previous display setting */
7636
7637         /*
7638          * Attach connectors to drm_atomic_state
7639          */
7640         conn_state = drm_atomic_get_connector_state(state, connector);
7641
7642         ret = PTR_ERR_OR_ZERO(conn_state);
7643         if (ret)
7644                 goto err;
7645
7646         /* Attach crtc to drm_atomic_state*/
7647         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7648
7649         ret = PTR_ERR_OR_ZERO(crtc_state);
7650         if (ret)
7651                 goto err;
7652
7653         /* force a restore */
7654         crtc_state->mode_changed = true;
7655
7656         /* Attach plane to drm_atomic_state */
7657         plane_state = drm_atomic_get_plane_state(state, plane);
7658
7659         ret = PTR_ERR_OR_ZERO(plane_state);
7660         if (ret)
7661                 goto err;
7662
7663
7664         /* Call commit internally with the state we just constructed */
7665         ret = drm_atomic_commit(state);
7666         if (!ret)
7667                 return 0;
7668
7669 err:
7670         DRM_ERROR("Restoring old state failed with %i\n", ret);
7671         drm_atomic_state_put(state);
7672
7673         return ret;
7674 }
7675
7676 /*
7677  * This function handles all cases when set mode does not come upon hotplug.
7678  * This includes when a display is unplugged then plugged back into the
7679  * same port and when running without usermode desktop manager supprot
7680  */
7681 void dm_restore_drm_connector_state(struct drm_device *dev,
7682                                     struct drm_connector *connector)
7683 {
7684         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7685         struct amdgpu_crtc *disconnected_acrtc;
7686         struct dm_crtc_state *acrtc_state;
7687
7688         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7689                 return;
7690
7691         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7692         if (!disconnected_acrtc)
7693                 return;
7694
7695         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7696         if (!acrtc_state->stream)
7697                 return;
7698
7699         /*
7700          * If the previous sink is not released and different from the current,
7701          * we deduce we are in a state where we can not rely on usermode call
7702          * to turn on the display, so we do it here
7703          */
7704         if (acrtc_state->stream->sink != aconnector->dc_sink)
7705                 dm_force_atomic_commit(&aconnector->base);
7706 }
7707
7708 /*
7709  * Grabs all modesetting locks to serialize against any blocking commits,
7710  * Waits for completion of all non blocking commits.
7711  */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it the
	 * extra locks we are locking here will get released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/*
		 * Take a reference on the newest commit of this CRTC under
		 * the commit_lock so it can't be freed while we wait on it.
		 */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		/* ret == 0 means the 10 second timeout expired. */
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	/* Interruption (ret < 0) is fatal; a timeout was only logged above. */
	return ret < 0 ? ret : 0;
}
7758
7759 static void get_freesync_config_for_crtc(
7760         struct dm_crtc_state *new_crtc_state,
7761         struct dm_connector_state *new_con_state)
7762 {
7763         struct mod_freesync_config config = {0};
7764         struct amdgpu_dm_connector *aconnector =
7765                         to_amdgpu_dm_connector(new_con_state->base.connector);
7766         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7767         int vrefresh = drm_mode_vrefresh(mode);
7768
7769         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7770                                         vrefresh >= aconnector->min_vfreq &&
7771                                         vrefresh <= aconnector->max_vfreq;
7772
7773         if (new_crtc_state->vrr_supported) {
7774                 new_crtc_state->stream->ignore_msa_timing_param = true;
7775                 config.state = new_crtc_state->base.vrr_enabled ?
7776                                 VRR_STATE_ACTIVE_VARIABLE :
7777                                 VRR_STATE_INACTIVE;
7778                 config.min_refresh_in_uhz =
7779                                 aconnector->min_vfreq * 1000000;
7780                 config.max_refresh_in_uhz =
7781                                 aconnector->max_vfreq * 1000000;
7782                 config.vsif_supported = true;
7783                 config.btr = true;
7784         }
7785
7786         new_crtc_state->freesync_config = config;
7787 }
7788
7789 static void reset_freesync_config_for_crtc(
7790         struct dm_crtc_state *new_crtc_state)
7791 {
7792         new_crtc_state->vrr_supported = false;
7793
7794         memset(&new_crtc_state->vrr_params, 0,
7795                sizeof(new_crtc_state->vrr_params));
7796         memset(&new_crtc_state->vrr_infopacket, 0,
7797                sizeof(new_crtc_state->vrr_infopacket));
7798 }
7799
/*
 * Validate a CRTC transition and keep the DC context's stream list in sync
 * with it. Called twice from atomic check per CRTC: once with @enable ==
 * false to remove streams for changed/disabled CRTCs, and once with
 * @enable == true to (re)create and add streams for enabled CRTCs. Also
 * applies non-modeset stream updates (scaling, ABM, color management,
 * freesync) when the CRTC stays active.
 *
 * Sets *lock_and_validation_needed whenever the DC context was modified.
 * Returns 0 on success or a negative errno on failure.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		/* new_stream holds one reference we must drop at skip_modeset/fail */
		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		/* Drop the CRTC state's reference to the removed stream. */
		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		/* DC context changed: caller must run full validation. */
		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			/* Extra ref for the CRTC state, on top of create's ref. */
			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
						crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			/* DC context changed: caller must run full validation. */
			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		 dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
8029
8030 static bool should_reset_plane(struct drm_atomic_state *state,
8031                                struct drm_plane *plane,
8032                                struct drm_plane_state *old_plane_state,
8033                                struct drm_plane_state *new_plane_state)
8034 {
8035         struct drm_plane *other;
8036         struct drm_plane_state *old_other_state, *new_other_state;
8037         struct drm_crtc_state *new_crtc_state;
8038         int i;
8039
8040         /*
8041          * TODO: Remove this hack once the checks below are sufficient
8042          * enough to determine when we need to reset all the planes on
8043          * the stream.
8044          */
8045         if (state->allow_modeset)
8046                 return true;
8047
8048         /* Exit early if we know that we're adding or removing the plane. */
8049         if (old_plane_state->crtc != new_plane_state->crtc)
8050                 return true;
8051
8052         /* old crtc == new_crtc == NULL, plane not in context. */
8053         if (!new_plane_state->crtc)
8054                 return false;
8055
8056         new_crtc_state =
8057                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8058
8059         if (!new_crtc_state)
8060                 return true;
8061
8062         /* CRTC Degamma changes currently require us to recreate planes. */
8063         if (new_crtc_state->color_mgmt_changed)
8064                 return true;
8065
8066         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8067                 return true;
8068
8069         /*
8070          * If there are any new primary or overlay planes being added or
8071          * removed then the z-order can potentially change. To ensure
8072          * correct z-order and pipe acquisition the current DC architecture
8073          * requires us to remove and recreate all existing planes.
8074          *
8075          * TODO: Come up with a more elegant solution for this.
8076          */
8077         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8078                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8079                         continue;
8080
8081                 if (old_other_state->crtc != new_plane_state->crtc &&
8082                     new_other_state->crtc != new_plane_state->crtc)
8083                         continue;
8084
8085                 if (old_other_state->crtc != new_other_state->crtc)
8086                         return true;
8087
8088                 /* TODO: Remove this once we can handle fast format changes. */
8089                 if (old_other_state->fb && new_other_state->fb &&
8090                     old_other_state->fb->format != new_other_state->fb->format)
8091                         return true;
8092         }
8093
8094         return false;
8095 }
8096
8097 static int dm_update_plane_state(struct dc *dc,
8098                                  struct drm_atomic_state *state,
8099                                  struct drm_plane *plane,
8100                                  struct drm_plane_state *old_plane_state,
8101                                  struct drm_plane_state *new_plane_state,
8102                                  bool enable,
8103                                  bool *lock_and_validation_needed)
8104 {
8105
8106         struct dm_atomic_state *dm_state = NULL;
8107         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8108         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8109         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8110         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8111         struct amdgpu_crtc *new_acrtc;
8112         bool needs_reset;
8113         int ret = 0;
8114
8115
8116         new_plane_crtc = new_plane_state->crtc;
8117         old_plane_crtc = old_plane_state->crtc;
8118         dm_new_plane_state = to_dm_plane_state(new_plane_state);
8119         dm_old_plane_state = to_dm_plane_state(old_plane_state);
8120
8121         /*TODO Implement better atomic check for cursor plane */
8122         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8123                 if (!enable || !new_plane_crtc ||
8124                         drm_atomic_plane_disabling(plane->state, new_plane_state))
8125                         return 0;
8126
8127                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8128
8129                 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8130                         (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8131                         DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8132                                                          new_plane_state->crtc_w, new_plane_state->crtc_h);
8133                         return -EINVAL;
8134                 }
8135
8136                 return 0;
8137         }
8138
8139         needs_reset = should_reset_plane(state, plane, old_plane_state,
8140                                          new_plane_state);
8141
8142         /* Remove any changed/removed planes */
8143         if (!enable) {
8144                 if (!needs_reset)
8145                         return 0;
8146
8147                 if (!old_plane_crtc)
8148                         return 0;
8149
8150                 old_crtc_state = drm_atomic_get_old_crtc_state(
8151                                 state, old_plane_crtc);
8152                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8153
8154                 if (!dm_old_crtc_state->stream)
8155                         return 0;
8156
8157                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8158                                 plane->base.id, old_plane_crtc->base.id);
8159
8160                 ret = dm_atomic_get_state(state, &dm_state);
8161                 if (ret)
8162                         return ret;
8163
8164                 if (!dc_remove_plane_from_context(
8165                                 dc,
8166                                 dm_old_crtc_state->stream,
8167                                 dm_old_plane_state->dc_state,
8168                                 dm_state->context)) {
8169
8170                         ret = EINVAL;
8171                         return ret;
8172                 }
8173
8174
8175                 dc_plane_state_release(dm_old_plane_state->dc_state);
8176                 dm_new_plane_state->dc_state = NULL;
8177
8178                 *lock_and_validation_needed = true;
8179
8180         } else { /* Add new planes */
8181                 struct dc_plane_state *dc_new_plane_state;
8182
8183                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8184                         return 0;
8185
8186                 if (!new_plane_crtc)
8187                         return 0;
8188
8189                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8190                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8191
8192                 if (!dm_new_crtc_state->stream)
8193                         return 0;
8194
8195                 if (!needs_reset)
8196                         return 0;
8197
8198                 WARN_ON(dm_new_plane_state->dc_state);
8199
8200                 dc_new_plane_state = dc_create_plane_state(dc);
8201                 if (!dc_new_plane_state)
8202                         return -ENOMEM;
8203
8204                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8205                                 plane->base.id, new_plane_crtc->base.id);
8206
8207                 ret = fill_dc_plane_attributes(
8208                         new_plane_crtc->dev->dev_private,
8209                         dc_new_plane_state,
8210                         new_plane_state,
8211                         new_crtc_state);
8212                 if (ret) {
8213                         dc_plane_state_release(dc_new_plane_state);
8214                         return ret;
8215                 }
8216
8217                 ret = dm_atomic_get_state(state, &dm_state);
8218                 if (ret) {
8219                         dc_plane_state_release(dc_new_plane_state);
8220                         return ret;
8221                 }
8222
8223                 /*
8224                  * Any atomic check errors that occur after this will
8225                  * not need a release. The plane state will be attached
8226                  * to the stream, and therefore part of the atomic
8227                  * state. It'll be released when the atomic state is
8228                  * cleaned.
8229                  */
8230                 if (!dc_add_plane_to_context(
8231                                 dc,
8232                                 dm_new_crtc_state->stream,
8233                                 dc_new_plane_state,
8234                                 dm_state->context)) {
8235
8236                         dc_plane_state_release(dc_new_plane_state);
8237                         return -EINVAL;
8238                 }
8239
8240                 dm_new_plane_state->dc_state = dc_new_plane_state;
8241
8242                 /* Tell DC to do a full surface update every time there
8243                  * is a plane change. Inefficient, but works for now.
8244                  */
8245                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8246
8247                 *lock_and_validation_needed = true;
8248         }
8249
8250
8251         return ret;
8252 }
8253
8254 static int
8255 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8256                                     struct drm_atomic_state *state,
8257                                     enum surface_update_type *out_type)
8258 {
8259         struct dc *dc = dm->dc;
8260         struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8261         int i, j, num_plane, ret = 0;
8262         struct drm_plane_state *old_plane_state, *new_plane_state;
8263         struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8264         struct drm_crtc *new_plane_crtc;
8265         struct drm_plane *plane;
8266
8267         struct drm_crtc *crtc;
8268         struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8269         struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8270         struct dc_stream_status *status = NULL;
8271         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8272         struct surface_info_bundle {
8273                 struct dc_surface_update surface_updates[MAX_SURFACES];
8274                 struct dc_plane_info plane_infos[MAX_SURFACES];
8275                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8276                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8277                 struct dc_stream_update stream_update;
8278         } *bundle;
8279
8280         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8281
8282         if (!bundle) {
8283                 DRM_ERROR("Failed to allocate update bundle\n");
8284                 /* Set type to FULL to avoid crashing in DC*/
8285                 update_type = UPDATE_TYPE_FULL;
8286                 goto cleanup;
8287         }
8288
8289         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8290
8291                 memset(bundle, 0, sizeof(struct surface_info_bundle));
8292
8293                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8294                 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8295                 num_plane = 0;
8296
8297                 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8298                         update_type = UPDATE_TYPE_FULL;
8299                         goto cleanup;
8300                 }
8301
8302                 if (!new_dm_crtc_state->stream)
8303                         continue;
8304
8305                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8306                         const struct amdgpu_framebuffer *amdgpu_fb =
8307                                 to_amdgpu_framebuffer(new_plane_state->fb);
8308                         struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8309                         struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8310                         struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8311                         uint64_t tiling_flags;
8312                         bool tmz_surface = false;
8313
8314                         new_plane_crtc = new_plane_state->crtc;
8315                         new_dm_plane_state = to_dm_plane_state(new_plane_state);
8316                         old_dm_plane_state = to_dm_plane_state(old_plane_state);
8317
8318                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8319                                 continue;
8320
8321                         if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8322                                 update_type = UPDATE_TYPE_FULL;
8323                                 goto cleanup;
8324                         }
8325
8326                         if (crtc != new_plane_crtc)
8327                                 continue;
8328
8329                         bundle->surface_updates[num_plane].surface =
8330                                         new_dm_plane_state->dc_state;
8331
8332                         if (new_crtc_state->mode_changed) {
8333                                 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8334                                 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8335                         }
8336
8337                         if (new_crtc_state->color_mgmt_changed) {
8338                                 bundle->surface_updates[num_plane].gamma =
8339                                                 new_dm_plane_state->dc_state->gamma_correction;
8340                                 bundle->surface_updates[num_plane].in_transfer_func =
8341                                                 new_dm_plane_state->dc_state->in_transfer_func;
8342                                 bundle->surface_updates[num_plane].gamut_remap_matrix =
8343                                                 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8344                                 bundle->stream_update.gamut_remap =
8345                                                 &new_dm_crtc_state->stream->gamut_remap_matrix;
8346                                 bundle->stream_update.output_csc_transform =
8347                                                 &new_dm_crtc_state->stream->csc_color_matrix;
8348                                 bundle->stream_update.out_transfer_func =
8349                                                 new_dm_crtc_state->stream->out_transfer_func;
8350                         }
8351
8352                         ret = fill_dc_scaling_info(new_plane_state,
8353                                                    scaling_info);
8354                         if (ret)
8355                                 goto cleanup;
8356
8357                         bundle->surface_updates[num_plane].scaling_info = scaling_info;
8358
8359                         if (amdgpu_fb) {
8360                                 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8361                                 if (ret)
8362                                         goto cleanup;
8363
8364                                 ret = fill_dc_plane_info_and_addr(
8365                                         dm->adev, new_plane_state, tiling_flags,
8366                                         plane_info,
8367                                         &flip_addr->address, tmz_surface,
8368                                         false);
8369                                 if (ret)
8370                                         goto cleanup;
8371
8372                                 bundle->surface_updates[num_plane].plane_info = plane_info;
8373                                 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8374                         }
8375
8376                         num_plane++;
8377                 }
8378
8379                 if (num_plane == 0)
8380                         continue;
8381
8382                 ret = dm_atomic_get_state(state, &dm_state);
8383                 if (ret)
8384                         goto cleanup;
8385
8386                 old_dm_state = dm_atomic_get_old_state(state);
8387                 if (!old_dm_state) {
8388                         ret = -EINVAL;
8389                         goto cleanup;
8390                 }
8391
8392                 status = dc_stream_get_status_from_state(old_dm_state->context,
8393                                                          new_dm_crtc_state->stream);
8394                 bundle->stream_update.stream = new_dm_crtc_state->stream;
8395                 /*
8396                  * TODO: DC modifies the surface during this call so we need
8397                  * to lock here - find a way to do this without locking.
8398                  */
8399                 mutex_lock(&dm->dc_lock);
8400                 update_type = dc_check_update_surfaces_for_stream(
8401                                 dc,     bundle->surface_updates, num_plane,
8402                                 &bundle->stream_update, status);
8403                 mutex_unlock(&dm->dc_lock);
8404
8405                 if (update_type > UPDATE_TYPE_MED) {
8406                         update_type = UPDATE_TYPE_FULL;
8407                         goto cleanup;
8408                 }
8409         }
8410
8411 cleanup:
8412         kfree(bundle);
8413
8414         *out_type = update_type;
8415         return ret;
8416 }
8417
8418 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8419 {
8420         struct drm_connector *connector;
8421         struct drm_connector_state *conn_state;
8422         struct amdgpu_dm_connector *aconnector = NULL;
8423         int i;
8424         for_each_new_connector_in_state(state, connector, conn_state, i) {
8425                 if (conn_state->crtc != crtc)
8426                         continue;
8427
8428                 aconnector = to_amdgpu_dm_connector(connector);
8429                 if (!aconnector->port || !aconnector->mst_port)
8430                         aconnector = NULL;
8431                 else
8432                         break;
8433         }
8434
8435         if (!aconnector)
8436                 return 0;
8437
8438         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8439 }
8440
8441 /**
8442  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8443  * @dev: The DRM device
8444  * @state: The atomic state to commit
8445  *
8446  * Validate that the given atomic state is programmable by DC into hardware.
8447  * This involves constructing a &struct dc_state reflecting the new hardware
8448  * state we wish to commit, then querying DC to see if it is programmable. It's
8449  * important not to modify the existing DC state. Otherwise, atomic_check
8450  * may unexpectedly commit hardware changes.
8451  *
8452  * When validating the DC state, it's important that the right locks are
8453  * acquired. For full updates case which removes/adds/updates streams on one
8454  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8455  * that any such full update commit will wait for completion of any outstanding
8456  * flip using DRMs synchronization events. See
8457  * dm_determine_update_type_for_commit()
8458  *
8459  * Note that DM adds the affected connectors for all CRTCs in state, when that
8460  * might not seem necessary. This is because DC stream creation requires the
8461  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8462  * be possible but non-trivial - a possible TODO item.
8463  *
 * Return: 0 on success, or a negative error code if validation failed.
8465  */
8466 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8467                                   struct drm_atomic_state *state)
8468 {
8469         struct amdgpu_device *adev = dev->dev_private;
8470         struct dm_atomic_state *dm_state = NULL;
8471         struct dc *dc = adev->dm.dc;
8472         struct drm_connector *connector;
8473         struct drm_connector_state *old_con_state, *new_con_state;
8474         struct drm_crtc *crtc;
8475         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8476         struct drm_plane *plane;
8477         struct drm_plane_state *old_plane_state, *new_plane_state;
8478         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8479         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8480
8481         int ret, i;
8482
8483         /*
8484          * This bool will be set for true for any modeset/reset
8485          * or plane update which implies non fast surface update.
8486          */
8487         bool lock_and_validation_needed = false;
8488
8489         ret = drm_atomic_helper_check_modeset(dev, state);
8490         if (ret)
8491                 goto fail;
8492
8493         if (adev->asic_type >= CHIP_NAVI10) {
8494                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8495                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8496                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8497                                 if (ret)
8498                                         goto fail;
8499                         }
8500                 }
8501         }
8502
8503         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8504                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8505                     !new_crtc_state->color_mgmt_changed &&
8506                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8507                         continue;
8508
8509                 if (!new_crtc_state->enable)
8510                         continue;
8511
8512                 ret = drm_atomic_add_affected_connectors(state, crtc);
8513                 if (ret)
8514                         return ret;
8515
8516                 ret = drm_atomic_add_affected_planes(state, crtc);
8517                 if (ret)
8518                         goto fail;
8519         }
8520
8521         /*
8522          * Add all primary and overlay planes on the CRTC to the state
8523          * whenever a plane is enabled to maintain correct z-ordering
8524          * and to enable fast surface updates.
8525          */
8526         drm_for_each_crtc(crtc, dev) {
8527                 bool modified = false;
8528
8529                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8530                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8531                                 continue;
8532
8533                         if (new_plane_state->crtc == crtc ||
8534                             old_plane_state->crtc == crtc) {
8535                                 modified = true;
8536                                 break;
8537                         }
8538                 }
8539
8540                 if (!modified)
8541                         continue;
8542
8543                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8544                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8545                                 continue;
8546
8547                         new_plane_state =
8548                                 drm_atomic_get_plane_state(state, plane);
8549
8550                         if (IS_ERR(new_plane_state)) {
8551                                 ret = PTR_ERR(new_plane_state);
8552                                 goto fail;
8553                         }
8554                 }
8555         }
8556
8557         /* Remove exiting planes if they are modified */
8558         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8559                 ret = dm_update_plane_state(dc, state, plane,
8560                                             old_plane_state,
8561                                             new_plane_state,
8562                                             false,
8563                                             &lock_and_validation_needed);
8564                 if (ret)
8565                         goto fail;
8566         }
8567
8568         /* Disable all crtcs which require disable */
8569         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8570                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8571                                            old_crtc_state,
8572                                            new_crtc_state,
8573                                            false,
8574                                            &lock_and_validation_needed);
8575                 if (ret)
8576                         goto fail;
8577         }
8578
8579         /* Enable all crtcs which require enable */
8580         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8581                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8582                                            old_crtc_state,
8583                                            new_crtc_state,
8584                                            true,
8585                                            &lock_and_validation_needed);
8586                 if (ret)
8587                         goto fail;
8588         }
8589
8590         /* Add new/modified planes */
8591         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8592                 ret = dm_update_plane_state(dc, state, plane,
8593                                             old_plane_state,
8594                                             new_plane_state,
8595                                             true,
8596                                             &lock_and_validation_needed);
8597                 if (ret)
8598                         goto fail;
8599         }
8600
8601         /* Run this here since we want to validate the streams we created */
8602         ret = drm_atomic_helper_check_planes(dev, state);
8603         if (ret)
8604                 goto fail;
8605
8606         if (state->legacy_cursor_update) {
8607                 /*
8608                  * This is a fast cursor update coming from the plane update
8609                  * helper, check if it can be done asynchronously for better
8610                  * performance.
8611                  */
8612                 state->async_update =
8613                         !drm_atomic_helper_async_check(dev, state);
8614
8615                 /*
8616                  * Skip the remaining global validation if this is an async
8617                  * update. Cursor updates can be done without affecting
8618                  * state or bandwidth calcs and this avoids the performance
8619                  * penalty of locking the private state object and
8620                  * allocating a new dc_state.
8621                  */
8622                 if (state->async_update)
8623                         return 0;
8624         }
8625
8626         /* Check scaling and underscan changes*/
8627         /* TODO Removed scaling changes validation due to inability to commit
8628          * new stream into context w\o causing full reset. Need to
8629          * decide how to handle.
8630          */
8631         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8632                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8633                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8634                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8635
8636                 /* Skip any modesets/resets */
8637                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8638                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8639                         continue;
8640
8641                 /* Skip any thing not scale or underscan changes */
8642                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8643                         continue;
8644
8645                 overall_update_type = UPDATE_TYPE_FULL;
8646                 lock_and_validation_needed = true;
8647         }
8648
8649         ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8650         if (ret)
8651                 goto fail;
8652
8653         if (overall_update_type < update_type)
8654                 overall_update_type = update_type;
8655
8656         /*
8657          * lock_and_validation_needed was an old way to determine if we need to set
8658          * the global lock. Leaving it in to check if we broke any corner cases
8659          * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8660          * lock_and_validation_needed false = UPDATE_TYPE_FAST
8661          */
8662         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8663                 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8664
8665         if (overall_update_type > UPDATE_TYPE_FAST) {
8666                 ret = dm_atomic_get_state(state, &dm_state);
8667                 if (ret)
8668                         goto fail;
8669
8670                 ret = do_aquire_global_lock(dev, state);
8671                 if (ret)
8672                         goto fail;
8673
8674 #if defined(CONFIG_DRM_AMD_DC_DCN)
8675                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8676                         goto fail;
8677
8678                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8679                 if (ret)
8680                         goto fail;
8681 #endif
8682
8683                 /*
8684                  * Perform validation of MST topology in the state:
8685                  * We need to perform MST atomic check before calling
8686                  * dc_validate_global_state(), or there is a chance
8687                  * to get stuck in an infinite loop and hang eventually.
8688                  */
8689                 ret = drm_dp_mst_atomic_check(state);
8690                 if (ret)
8691                         goto fail;
8692
8693                 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8694                         ret = -EINVAL;
8695                         goto fail;
8696                 }
8697         } else {
8698                 /*
8699                  * The commit is a fast update. Fast updates shouldn't change
8700                  * the DC context, affect global validation, and can have their
8701                  * commit work done in parallel with other commits not touching
8702                  * the same resource. If we have a new DC context as part of
8703                  * the DM atomic state from validation we need to free it and
8704                  * retain the existing one instead.
8705                  */
8706                 struct dm_atomic_state *new_dm_state, *old_dm_state;
8707
8708                 new_dm_state = dm_atomic_get_new_state(state);
8709                 old_dm_state = dm_atomic_get_old_state(state);
8710
8711                 if (new_dm_state && old_dm_state) {
8712                         if (new_dm_state->context)
8713                                 dc_release_state(new_dm_state->context);
8714
8715                         new_dm_state->context = old_dm_state->context;
8716
8717                         if (old_dm_state->context)
8718                                 dc_retain_state(old_dm_state->context);
8719                 }
8720         }
8721
8722         /* Store the overall update type for use later in atomic check. */
8723         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8724                 struct dm_crtc_state *dm_new_crtc_state =
8725                         to_dm_crtc_state(new_crtc_state);
8726
8727                 dm_new_crtc_state->update_type = (int)overall_update_type;
8728         }
8729
8730         /* Must be success */
8731         WARN_ON(ret);
8732         return ret;
8733
8734 fail:
8735         if (ret == -EDEADLK)
8736                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8737         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8738                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8739         else
8740                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8741
8742         return ret;
8743 }
8744
8745 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8746                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8747 {
8748         uint8_t dpcd_data;
8749         bool capable = false;
8750
8751         if (amdgpu_dm_connector->dc_link &&
8752                 dm_helpers_dp_read_dpcd(
8753                                 NULL,
8754                                 amdgpu_dm_connector->dc_link,
8755                                 DP_DOWN_STREAM_PORT_COUNT,
8756                                 &dpcd_data,
8757                                 sizeof(dpcd_data))) {
8758                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8759         }
8760
8761         return capable;
8762 }
/*
 * amdgpu_dm_update_freesync_caps - refresh the connector's FreeSync state
 * from a (possibly NULL) EDID.
 *
 * Parses the EDID detailed-timing descriptors for a monitor range block and
 * records min/max vertical refresh and pixel clock on the amdgpu connector.
 * A NULL @edid clears those fields. The resulting capability is stored in
 * the DM connector state and mirrored to the DRM vrr_capable property.
 */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	/* Without connector state there is nowhere to record capability;
	 * fall through to update with dm_con_state == NULL. */
	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	/* No EDID: clear any previously cached range limits. */
	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * if edid non zero restrict freesync only for dp and edp
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			/* DP/eDP sinks must also ignore MSA timing params. */
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	/* Range descriptors require EDID 1.2 or newer. */
	if (edid_check_required == true && (edid->version > 1 ||
	   (edid->version == 1 && edid->revision > 1))) {
		/* Scan the four detailed timing descriptor slots. */
		for (i = 0; i < 4; i++) {

			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;
			range	= &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		/* Require a usable refresh window (> 10 Hz span) for VRR. */
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
8857
8858 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8859 {
8860         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8861
8862         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8863                 return;
8864         if (link->type == dc_connection_none)
8865                 return;
8866         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8867                                         dpcd_data, sizeof(dpcd_data))) {
8868                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8869
8870                 if (dpcd_data[0] == 0) {
8871                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8872                         link->psr_settings.psr_feature_enabled = false;
8873                 } else {
8874                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
8875                         link->psr_settings.psr_feature_enabled = true;
8876                 }
8877
8878                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8879         }
8880 }
8881
8882 /*
8883  * amdgpu_dm_link_setup_psr() - configure psr link
8884  * @stream: stream state
8885  *
8886  * Return: true if success
8887  */
8888 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8889 {
8890         struct dc_link *link = NULL;
8891         struct psr_config psr_config = {0};
8892         struct psr_context psr_context = {0};
8893         bool ret = false;
8894
8895         if (stream == NULL)
8896                 return false;
8897
8898         link = stream->link;
8899
8900         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8901
8902         if (psr_config.psr_version > 0) {
8903                 psr_config.psr_exit_link_training_required = 0x1;
8904                 psr_config.psr_frame_capture_indication_req = 0;
8905                 psr_config.psr_rfb_setup_time = 0x37;
8906                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8907                 psr_config.allow_smu_optimizations = 0x0;
8908
8909                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8910
8911         }
8912         DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
8913
8914         return ret;
8915 }
8916
8917 /*
8918  * amdgpu_dm_psr_enable() - enable psr f/w
8919  * @stream: stream state
8920  *
8921  * Return: true if success
8922  */
8923 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8924 {
8925         struct dc_link *link = stream->link;
8926         unsigned int vsync_rate_hz = 0;
8927         struct dc_static_screen_params params = {0};
8928         /* Calculate number of static frames before generating interrupt to
8929          * enter PSR.
8930          */
8931         // Init fail safe of 2 frames static
8932         unsigned int num_frames_static = 2;
8933
8934         DRM_DEBUG_DRIVER("Enabling psr...\n");
8935
8936         vsync_rate_hz = div64_u64(div64_u64((
8937                         stream->timing.pix_clk_100hz * 100),
8938                         stream->timing.v_total),
8939                         stream->timing.h_total);
8940
8941         /* Round up
8942          * Calculate number of frames such that at least 30 ms of time has
8943          * passed.
8944          */
8945         if (vsync_rate_hz != 0) {
8946                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8947                 num_frames_static = (30000 / frame_time_microsec) + 1;
8948         }
8949
8950         params.triggers.cursor_update = true;
8951         params.triggers.overlay_update = true;
8952         params.triggers.surface_update = true;
8953         params.num_frames = num_frames_static;
8954
8955         dc_stream_set_static_screen_params(link->ctx->dc,
8956                                            &stream, 1,
8957                                            &params);
8958
8959         return dc_link_set_psr_allow_active(link, true, false);
8960 }
8961
8962 /*
8963  * amdgpu_dm_psr_disable() - disable psr f/w
8964  * @stream:  stream state
8965  *
8966  * Return: true if success
8967  */
8968 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8969 {
8970
8971         DRM_DEBUG_DRIVER("Disabling psr...\n");
8972
8973         return dc_link_set_psr_allow_active(stream->link, false, true);
8974 }