drm/amdgpu: Get DRM dev from adev by inline-f
[linux-2.6-microblaze.git] / drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57
58 #include "ivsrcid/ivsrcid_vislands30.h"
59
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103
104 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
106
107 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
109
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
112
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
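
/*
 * Editor's sketch of the DMUB firmware image layout, as implied by the
 * offset arithmetic in dm_dmub_sw_init() and dm_dmub_hw_init() below
 * (illustration only; the DMCUB firmware header is authoritative):
 *
 *  ucode_array_offset_bytes --> +--------------------------------+
 *                               | PSP header (PSP_HEADER_BYTES)  |
 *                               +--------------------------------+
 *                               | inst_const (code, window 0)    |
 *                               +--------------------------------+
 *                               | PSP footer (PSP_FOOTER_BYTES)  |
 *    ... + inst_const_bytes --> +--------------------------------+
 *                               | bss_data (window 2)            |
 *                               +--------------------------------+
 */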
115
116 /**
117  * DOC: overview
118  *
119  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
121  * requests into DC requests, and DC responses into DRM responses.
122  *
123  * The root control structure is &struct amdgpu_display_manager.
124  */
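
/*
 * Throughout this file the DRM device is reached via the adev_to_drm() and
 * drm_to_adev() accessors rather than open-coded member accesses. A minimal
 * sketch of what these inline functions look like, assuming the amdgpu_device
 * still carries a drm_device pointer (the authoritative definitions live in
 * amdgpu.h and may differ):
 *
 *	static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
 *	{
 *		return adev->ddev;
 *	}
 *
 *	static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
 *	{
 *		return ddev->dev_private;
 *	}
 */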
125
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
129
130 /*
131  * Initializes drm_device display-related structures, based on the information
132  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
133  * drm_encoder and drm_mode_config.
134  *
135  * Returns 0 on success
136  */
137 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
138 /* removes and deallocates the drm structures, created by the above function */
139 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
140
141 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
142                                 struct drm_plane *plane,
143                                 unsigned long possible_crtcs,
144                                 const struct dc_plane_cap *plane_cap);
145 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
146                                struct drm_plane *plane,
147                                uint32_t link_index);
148 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
149                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
150                                     uint32_t link_index,
151                                     struct amdgpu_encoder *amdgpu_encoder);
152 static int amdgpu_dm_encoder_init(struct drm_device *dev,
153                                   struct amdgpu_encoder *aencoder,
154                                   uint32_t link_index);
155
156 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
157
158 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
159                                    struct drm_atomic_state *state,
160                                    bool nonblock);
161
162 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
163
164 static int amdgpu_dm_atomic_check(struct drm_device *dev,
165                                   struct drm_atomic_state *state);
166
167 static void handle_cursor_update(struct drm_plane *plane,
168                                  struct drm_plane_state *old_plane_state);
169
170 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
171 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
172 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
173 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
174 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
175
176 /*
177  * dm_vblank_get_counter
178  *
179  * @brief
180  * Get counter for number of vertical blanks
181  *
182  * @param
183  * struct amdgpu_device *adev - [in] desired amdgpu device
184  * int crtc - [in] index of the CRTC to get the counter from
185  *
186  * @return
187  * Counter for vertical blanks
188  */
189 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
190 {
191         if (crtc >= adev->mode_info.num_crtc)
192                 return 0;
193         else {
194                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
195                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
196                                 acrtc->base.state);
197
198
199                 if (acrtc_state->stream == NULL) {
200                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
201                                   crtc);
202                         return 0;
203                 }
204
205                 return dc_stream_get_vblank_counter(acrtc_state->stream);
206         }
207 }
208
209 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
210                                   u32 *vbl, u32 *position)
211 {
212         uint32_t v_blank_start, v_blank_end, h_position, v_position;
213
214         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
215                 return -EINVAL;
216         else {
217                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
218                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
219                                                 acrtc->base.state);
220
221                 if (acrtc_state->stream == NULL) {
222                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
223                                   crtc);
224                         return 0;
225                 }
226
227                 /*
228                  * TODO rework base driver to use values directly.
229                  * for now parse it back into reg-format
230                  */
231                 dc_stream_get_scanoutpos(acrtc_state->stream,
232                                          &v_blank_start,
233                                          &v_blank_end,
234                                          &h_position,
235                                          &v_position);
236
237                 *position = v_position | (h_position << 16);
238                 *vbl = v_blank_start | (v_blank_end << 16);
239         }
240
241         return 0;
242 }
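
/*
 * Editor's sketch: callers can unpack the reg-format values produced by
 * dm_crtc_get_scanoutpos() above as follows (illustration only):
 *
 *	u32 h_position = position >> 16;
 *	u32 v_position = position & 0xffff;
 *	u32 v_blank_end = vbl >> 16;
 *	u32 v_blank_start = vbl & 0xffff;
 */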
243
244 static bool dm_is_idle(void *handle)
245 {
246         /* XXX todo */
247         return true;
248 }
249
250 static int dm_wait_for_idle(void *handle)
251 {
252         /* XXX todo */
253         return 0;
254 }
255
256 static bool dm_check_soft_reset(void *handle)
257 {
258         return false;
259 }
260
261 static int dm_soft_reset(void *handle)
262 {
263         /* XXX todo */
264         return 0;
265 }
266
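/**
 * get_crtc_by_otg_inst() - Find the CRTC driven by a given OTG instance
 * @adev: desired amdgpu device
 * @otg_inst: output timing generator instance the interrupt fired for
 *
 * Falls back to the first CRTC (with a warning) when @otg_inst is -1.
 *
 * Return: the matching &struct amdgpu_crtc, or NULL if none is found.
 */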
267 static struct amdgpu_crtc *
268 get_crtc_by_otg_inst(struct amdgpu_device *adev,
269                      int otg_inst)
270 {
271         struct drm_device *dev = adev_to_drm(adev);
272         struct drm_crtc *crtc;
273         struct amdgpu_crtc *amdgpu_crtc;
274
275         if (otg_inst == -1) {
276                 WARN_ON(1);
277                 return adev->mode_info.crtcs[0];
278         }
279
280         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
281                 amdgpu_crtc = to_amdgpu_crtc(crtc);
282
283                 if (amdgpu_crtc->otg_inst == otg_inst)
284                         return amdgpu_crtc;
285         }
286
287         return NULL;
288 }
289
290 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
291 {
292         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
293                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
294 }
295
296 /**
297  * dm_pflip_high_irq() - Handle pageflip interrupt
298  * @interrupt_params: interrupt parameters, carrying the amdgpu device and IRQ source
299  *
300  * Handles the pageflip interrupt by notifying all interested parties
301  * that the pageflip has been completed.
302  */
303 static void dm_pflip_high_irq(void *interrupt_params)
304 {
305         struct amdgpu_crtc *amdgpu_crtc;
306         struct common_irq_params *irq_params = interrupt_params;
307         struct amdgpu_device *adev = irq_params->adev;
308         unsigned long flags;
309         struct drm_pending_vblank_event *e;
310         struct dm_crtc_state *acrtc_state;
311         uint32_t vpos, hpos, v_blank_start, v_blank_end;
312         bool vrr_active;
313
314         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
315
316         /* IRQ could occur when in initial stage */
317         /* TODO work and BO cleanup */
318         if (amdgpu_crtc == NULL) {
319                 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
320                 return;
321         }
322
323         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
324
325         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
326                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
327                                                  amdgpu_crtc->pflip_status,
328                                                  AMDGPU_FLIP_SUBMITTED,
329                                                  amdgpu_crtc->crtc_id,
330                                                  amdgpu_crtc);
331                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
332                 return;
333         }
334
335         /* page flip completed. */
336         e = amdgpu_crtc->event;
337         amdgpu_crtc->event = NULL;
338
339         /* A flip completing without an armed event is a driver bug. */
340         WARN_ON(!e);
341
342         acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
343         vrr_active = amdgpu_dm_vrr_active(acrtc_state);
344
345         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
346         if (!vrr_active ||
347             !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
348                                       &v_blank_end, &hpos, &vpos) ||
349             (vpos < v_blank_start)) {
350                 /* Update to correct count and vblank timestamp if racing with
351                  * vblank irq. This also updates to the correct vblank timestamp
352                  * even in VRR mode, as scanout is past the front-porch atm.
353                  */
354                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
355
356                 /* Wake up userspace by sending the pageflip event with proper
357                  * count and timestamp of vblank of flip completion.
358                  */
359                 if (e) {
360                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
361
362                         /* Event sent, so done with vblank for this flip */
363                         drm_crtc_vblank_put(&amdgpu_crtc->base);
364                 }
365         } else if (e) {
366                 /* VRR active and inside front-porch: vblank count and
367                  * timestamp for pageflip event will only be up to date after
368                  * drm_crtc_handle_vblank() has been executed from late vblank
369                  * irq handler after start of back-porch (vline 0). We queue the
370                  * pageflip event for send-out by drm_crtc_handle_vblank() with
371                  * updated timestamp and count, once it runs after us.
372                  *
373                  * We need to open-code this instead of using the helper
374                  * drm_crtc_arm_vblank_event(), as that helper would
375                  * call drm_crtc_accurate_vblank_count(), which we must
376                  * not call in VRR mode while we are in front-porch!
377                  */
378
379                 /* sequence will be replaced by real count during send-out. */
380                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
381                 e->pipe = amdgpu_crtc->crtc_id;
382
383                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
384                 e = NULL;
385         }
386
387         /* Keep track of vblank of this flip for flip throttling. We use the
388          * cooked hw counter, as that one is incremented at the start of the
389          * vblank in which the pageflip completed, so last_flip_vblank is the
390          * forbidden count for queueing new pageflips if vsync + VRR is enabled.
391          */
392         amdgpu_crtc->last_flip_vblank =
393                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
394
395         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
396         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
397
398         DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
399                          amdgpu_crtc->crtc_id, amdgpu_crtc,
400                          vrr_active, (int) !e);
401 }
402
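/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: interrupt parameters, carrying the amdgpu device and
 *                    IRQ source
 *
 * In VRR mode, core vblank handling is deferred to this handler, which runs
 * after the end of the front-porch so that vblank timestamps are valid. It
 * also performs BTR processing for pre-DCE12 (pre-AMDGPU_FAMILY_AI) ASICs.
 */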
403 static void dm_vupdate_high_irq(void *interrupt_params)
404 {
405         struct common_irq_params *irq_params = interrupt_params;
406         struct amdgpu_device *adev = irq_params->adev;
407         struct amdgpu_crtc *acrtc;
408         struct dm_crtc_state *acrtc_state;
409         unsigned long flags;
410
411         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
412
413         if (acrtc) {
414                 acrtc_state = to_dm_crtc_state(acrtc->base.state);
415
416                 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
417                               acrtc->crtc_id,
418                               amdgpu_dm_vrr_active(acrtc_state));
419
420                 /* Core vblank handling is done here after the end of front-porch
421                  * in vrr mode, as vblank timestamping only gives valid results
422                  * once scanout has passed the front-porch. This also delivers
423                  * page-flip completion events that have been queued to us if a
424                  * pageflip happened inside the front-porch.
425                  */
426                 if (amdgpu_dm_vrr_active(acrtc_state)) {
427                         drm_crtc_handle_vblank(&acrtc->base);
428
429                         /* BTR processing for pre-DCE12 ASICs */
430                         if (acrtc_state->stream &&
431                             adev->family < AMDGPU_FAMILY_AI) {
432                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
433                                 mod_freesync_handle_v_update(
434                                     adev->dm.freesync_module,
435                                     acrtc_state->stream,
436                                     &acrtc_state->vrr_params);
437
438                                 dc_stream_adjust_vmin_vmax(
439                                     adev->dm.dc,
440                                     acrtc_state->stream,
441                                     &acrtc_state->vrr_params.adjust);
442                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
443                         }
444                 }
445         }
446 }
447
448 /**
449  * dm_crtc_high_irq() - Handles CRTC interrupt
450  * @interrupt_params: used for determining the CRTC instance
451  *
452  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
453  * event handler.
454  */
455 static void dm_crtc_high_irq(void *interrupt_params)
456 {
457         struct common_irq_params *irq_params = interrupt_params;
458         struct amdgpu_device *adev = irq_params->adev;
459         struct amdgpu_crtc *acrtc;
460         struct dm_crtc_state *acrtc_state;
461         unsigned long flags;
462
463         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
464         if (!acrtc)
465                 return;
466
467         acrtc_state = to_dm_crtc_state(acrtc->base.state);
468
469         DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
470                          amdgpu_dm_vrr_active(acrtc_state),
471                          acrtc_state->active_planes);
472
473         /*
474          * Core vblank handling at start of front-porch is only possible
475          * in non-vrr mode, as only then does vblank timestamping give
476          * valid results while scanout is in the front-porch. Otherwise
477          * defer it to dm_vupdate_high_irq after end of front-porch.
478          */
479         if (!amdgpu_dm_vrr_active(acrtc_state))
480                 drm_crtc_handle_vblank(&acrtc->base);
481
482         /*
483          * The following must happen at the start of vblank, for crc
484          * computation and below-the-range btr support in vrr mode.
485          */
486         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
487
488         /* BTR updates need to happen before VUPDATE on Vega and above. */
489         if (adev->family < AMDGPU_FAMILY_AI)
490                 return;
491
492         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
493
494         if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
495             acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
496                 mod_freesync_handle_v_update(adev->dm.freesync_module,
497                                              acrtc_state->stream,
498                                              &acrtc_state->vrr_params);
499
500                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
501                                            &acrtc_state->vrr_params.adjust);
502         }
503
504         /*
505          * If there aren't any active_planes then DCH HUBP may be clock-gated.
506          * In that case, pageflip completion interrupts won't fire and pageflip
507          * completion events won't get delivered. Prevent this by sending
508          * pending pageflip events from here if a flip is still pending.
509          *
510          * If any planes are enabled, use dm_pflip_high_irq() instead, to
511          * avoid race conditions between flip programming and completion,
512          * which could cause too early flip completion events.
513          */
514         if (adev->family >= AMDGPU_FAMILY_RV &&
515             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
516             acrtc_state->active_planes == 0) {
517                 if (acrtc->event) {
518                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
519                         acrtc->event = NULL;
520                         drm_crtc_vblank_put(&acrtc->base);
521                 }
522                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
523         }
524
525         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
526 }
527
528 static int dm_set_clockgating_state(void *handle,
529                   enum amd_clockgating_state state)
530 {
531         return 0;
532 }
533
534 static int dm_set_powergating_state(void *handle,
535                   enum amd_powergating_state state)
536 {
537         return 0;
538 }
539
540 /* Prototypes of private functions */
541 static int dm_early_init(void *handle);
542
543 /* Allocate memory for FBC compressed data */
544 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
545 {
546         struct drm_device *dev = connector->dev;
547         struct amdgpu_device *adev = drm_to_adev(dev);
548         struct dm_comressor_info *compressor = &adev->dm.compressor;
549         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
550         struct drm_display_mode *mode;
551         unsigned long max_size = 0;
552
553         if (adev->dm.dc->fbc_compressor == NULL)
554                 return;
555
556         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
557                 return;
558
559         if (compressor->bo_ptr)
560                 return;
561
562
563         list_for_each_entry(mode, &connector->modes, head) {
564                 if (max_size < mode->htotal * mode->vtotal)
565                         max_size = mode->htotal * mode->vtotal;
566         }
567
568         if (max_size) {
569                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
570                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
571                             &compressor->gpu_addr, &compressor->cpu_addr);
572
573                 if (r) {
574                         DRM_ERROR("DM: Failed to initialize FBC\n");
575                 } else {
576                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
577                         DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
578                 }
579
580         }
581
582 }
583
584 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
585                                           int pipe, bool *enabled,
586                                           unsigned char *buf, int max_bytes)
587 {
588         struct drm_device *dev = dev_get_drvdata(kdev);
589         struct amdgpu_device *adev = drm_to_adev(dev);
590         struct drm_connector *connector;
591         struct drm_connector_list_iter conn_iter;
592         struct amdgpu_dm_connector *aconnector;
593         int ret = 0;
594
595         *enabled = false;
596
597         mutex_lock(&adev->dm.audio_lock);
598
599         drm_connector_list_iter_begin(dev, &conn_iter);
600         drm_for_each_connector_iter(connector, &conn_iter) {
601                 aconnector = to_amdgpu_dm_connector(connector);
602                 if (aconnector->audio_inst != port)
603                         continue;
604
605                 *enabled = true;
606                 ret = drm_eld_size(connector->eld);
607                 memcpy(buf, connector->eld, min(max_bytes, ret));
608
609                 break;
610         }
611         drm_connector_list_iter_end(&conn_iter);
612
613         mutex_unlock(&adev->dm.audio_lock);
614
615         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
616
617         return ret;
618 }
619
620 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
621         .get_eld = amdgpu_dm_audio_component_get_eld,
622 };
623
624 static int amdgpu_dm_audio_component_bind(struct device *kdev,
625                                        struct device *hda_kdev, void *data)
626 {
627         struct drm_device *dev = dev_get_drvdata(kdev);
628         struct amdgpu_device *adev = drm_to_adev(dev);
629         struct drm_audio_component *acomp = data;
630
631         acomp->ops = &amdgpu_dm_audio_component_ops;
632         acomp->dev = kdev;
633         adev->dm.audio_component = acomp;
634
635         return 0;
636 }
637
638 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
639                                           struct device *hda_kdev, void *data)
640 {
641         struct drm_device *dev = dev_get_drvdata(kdev);
642         struct amdgpu_device *adev = drm_to_adev(dev);
643         struct drm_audio_component *acomp = data;
644
645         acomp->ops = NULL;
646         acomp->dev = NULL;
647         adev->dm.audio_component = NULL;
648 }
649
650 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
651         .bind   = amdgpu_dm_audio_component_bind,
652         .unbind = amdgpu_dm_audio_component_unbind,
653 };
654
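/**
 * amdgpu_dm_audio_init() - Register the DM audio component
 * @adev: desired amdgpu device
 *
 * Initializes the audio pin state from the DC resource pool and registers
 * the audio component so the HDA driver can query ELDs. A no-op when audio
 * is disabled via the amdgpu_audio module parameter.
 *
 * Return: 0 on success, negative errno if component registration fails.
 */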
655 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
656 {
657         int i, ret;
658
659         if (!amdgpu_audio)
660                 return 0;
661
662         adev->mode_info.audio.enabled = true;
663
664         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
665
666         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
667                 adev->mode_info.audio.pin[i].channels = -1;
668                 adev->mode_info.audio.pin[i].rate = -1;
669                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
670                 adev->mode_info.audio.pin[i].status_bits = 0;
671                 adev->mode_info.audio.pin[i].category_code = 0;
672                 adev->mode_info.audio.pin[i].connected = false;
673                 adev->mode_info.audio.pin[i].id =
674                         adev->dm.dc->res_pool->audios[i]->inst;
675                 adev->mode_info.audio.pin[i].offset = 0;
676         }
677
678         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
679         if (ret < 0)
680                 return ret;
681
682         adev->dm.audio_registered = true;
683
684         return 0;
685 }
686
687 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
688 {
689         if (!amdgpu_audio)
690                 return;
691
692         if (!adev->mode_info.audio.enabled)
693                 return;
694
695         if (adev->dm.audio_registered) {
696                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
697                 adev->dm.audio_registered = false;
698         }
699
700         /* TODO: Disable audio? */
701
702         adev->mode_info.audio.enabled = false;
703 }
704
705 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
706 {
707         struct drm_audio_component *acomp = adev->dm.audio_component;
708
709         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
710                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
711
712                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
713                                                  pin, -1);
714         }
715 }
716
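/**
 * dm_dmub_hw_init() - Initialize DMUB hardware
 * @adev: desired amdgpu device
 *
 * Copies the firmware code, BSS data and VBIOS into the DMUB framebuffer
 * windows, clears the mailbox, tracebuffer and firmware-state regions, and
 * hands the resulting layout to the DMUB service for hardware init. Returns
 * 0 without doing anything on ASICs that lack DMUB support.
 *
 * Return: 0 on success, negative errno on failure.
 */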
717 static int dm_dmub_hw_init(struct amdgpu_device *adev)
718 {
719         const struct dmcub_firmware_header_v1_0 *hdr;
720         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
721         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
722         const struct firmware *dmub_fw = adev->dm.dmub_fw;
723         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
724         struct abm *abm = adev->dm.dc->res_pool->abm;
725         struct dmub_srv_hw_params hw_params;
726         enum dmub_status status;
727         const unsigned char *fw_inst_const, *fw_bss_data;
728         uint32_t i, fw_inst_const_size, fw_bss_data_size;
729         bool has_hw_support;
730
731         if (!dmub_srv)
732                 /* DMUB isn't supported on the ASIC. */
733                 return 0;
734
735         if (!fb_info) {
736                 DRM_ERROR("No framebuffer info for DMUB service.\n");
737                 return -EINVAL;
738         }
739
740         if (!dmub_fw) {
741                 /* Firmware required for DMUB support. */
742                 DRM_ERROR("No firmware provided for DMUB.\n");
743                 return -EINVAL;
744         }
745
746         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
747         if (status != DMUB_STATUS_OK) {
748                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
749                 return -EINVAL;
750         }
751
752         if (!has_hw_support) {
753                 DRM_INFO("DMUB unsupported on ASIC\n");
754                 return 0;
755         }
756
757         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
758
759         fw_inst_const = dmub_fw->data +
760                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
761                         PSP_HEADER_BYTES;
762
763         fw_bss_data = dmub_fw->data +
764                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
765                       le32_to_cpu(hdr->inst_const_bytes);
766
767         /* Copy firmware and bios info into FB memory. */
768         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
769                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
770
771         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
772
773         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
774          * amdgpu_ucode_init_single_fw will load dmub firmware
775          * fw_inst_const part to cw0; otherwise, the firmware back door load
776          * will be done by dm_dmub_hw_init
777          */
778         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
779                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
780                                 fw_inst_const_size);
781         }
782
783         if (fw_bss_data_size)
784                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
785                        fw_bss_data, fw_bss_data_size);
786
787         /* Copy firmware bios info into FB memory. */
788         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
789                adev->bios_size);
790
791         /* Reset regions that need to be reset. */
792         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
793                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
794
795         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
796                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
797
798         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
799                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
800
801         /* Initialize hardware. */
802         memset(&hw_params, 0, sizeof(hw_params));
803         hw_params.fb_base = adev->gmc.fb_start;
804         hw_params.fb_offset = adev->gmc.aper_base;
805
806         /* backdoor load firmware and trigger dmub running */
807         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
808                 hw_params.load_inst_const = true;
809
810         if (dmcu)
811                 hw_params.psp_version = dmcu->psp_version;
812
813         for (i = 0; i < fb_info->num_fb; ++i)
814                 hw_params.fb[i] = &fb_info->fb[i];
815
816         status = dmub_srv_hw_init(dmub_srv, &hw_params);
817         if (status != DMUB_STATUS_OK) {
818                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
819                 return -EINVAL;
820         }
821
822         /* Wait for firmware load to finish. */
823         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
824         if (status != DMUB_STATUS_OK)
825                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
826
827         /* Init DMCU and ABM if available. */
828         if (dmcu && abm) {
829                 dmcu->funcs->dmcu_init(dmcu);
830                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
831         }
832
833         adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
834         if (!adev->dm.dc->ctx->dmub_srv) {
835                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
836                 return -ENOMEM;
837         }
838
839         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
840                  adev->dm.dmcub_fw_version);
841
842         return 0;
843 }
844
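/**
 * amdgpu_dm_init() - Bring up the display manager
 * @adev: desired amdgpu device
 *
 * Creates Display Core with parameters gathered from the base driver, then
 * initializes IRQ handling, DMUB, freesync and (optionally) HDCP support,
 * and finally builds the DRM-side structures via
 * amdgpu_dm_initialize_drm_device().
 *
 * Return: 0 on success, -EINVAL on failure (after amdgpu_dm_fini() cleanup).
 */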
845 static int amdgpu_dm_init(struct amdgpu_device *adev)
846 {
847         struct dc_init_data init_data;
848 #ifdef CONFIG_DRM_AMD_DC_HDCP
849         struct dc_callback_init init_params;
850 #endif
851         int r;
852
853         adev->dm.ddev = adev_to_drm(adev);
854         adev->dm.adev = adev;
855
856         /* Zero all the fields */
857         memset(&init_data, 0, sizeof(init_data));
858 #ifdef CONFIG_DRM_AMD_DC_HDCP
859         memset(&init_params, 0, sizeof(init_params));
860 #endif
861
862         mutex_init(&adev->dm.dc_lock);
863         mutex_init(&adev->dm.audio_lock);
864
865         if (amdgpu_dm_irq_init(adev)) {
866                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
867                 goto error;
868         }
869
870         init_data.asic_id.chip_family = adev->family;
871
872         init_data.asic_id.pci_revision_id = adev->pdev->revision;
873         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
874
875         init_data.asic_id.vram_width = adev->gmc.vram_width;
876         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
877         init_data.asic_id.atombios_base_address =
878                 adev->mode_info.atom_context->bios;
879
880         init_data.driver = adev;
881
882         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
883
884         if (!adev->dm.cgs_device) {
885                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
886                 goto error;
887         }
888
889         init_data.cgs_device = adev->dm.cgs_device;
890
891         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
892
893         switch (adev->asic_type) {
894         case CHIP_CARRIZO:
895         case CHIP_STONEY:
896         case CHIP_RAVEN:
897         case CHIP_RENOIR:
898                 init_data.flags.gpu_vm_support = true;
899                 break;
900         default:
901                 break;
902         }
903
904         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
905                 init_data.flags.fbc_support = true;
906
907         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
908                 init_data.flags.multi_mon_pp_mclk_switch = true;
909
910         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
911                 init_data.flags.disable_fractional_pwm = true;
912
913         init_data.flags.power_down_display_on_boot = true;
914
915         init_data.soc_bounding_box = adev->dm.soc_bounding_box;
916
917         /* Display Core create. */
918         adev->dm.dc = dc_create(&init_data);
919
920         if (adev->dm.dc) {
921                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
922         } else {
923                 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
924                 goto error;
925         }
926
927         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
928                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
929                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
930         }
931
932         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
933                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
934
935         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
936                 adev->dm.dc->debug.disable_stutter = true;
937
938         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
939                 adev->dm.dc->debug.disable_dsc = true;
940
941         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
942                 adev->dm.dc->debug.disable_clock_gate = true;
943
944         r = dm_dmub_hw_init(adev);
945         if (r) {
946                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
947                 goto error;
948         }
949
950         dc_hardware_init(adev->dm.dc);
951
952         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
953         if (!adev->dm.freesync_module) {
954                 DRM_ERROR(
955                 "amdgpu: failed to initialize freesync_module.\n");
956         } else
957                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
958                                 adev->dm.freesync_module);
959
960         amdgpu_dm_init_color_mod();
961
962 #ifdef CONFIG_DRM_AMD_DC_HDCP
963         if (adev->asic_type >= CHIP_RAVEN) {
964                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
965
966                 if (!adev->dm.hdcp_workqueue)
967                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
968                 else
969                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
970
971                 dc_init_callbacks(adev->dm.dc, &init_params);
972         }
973 #endif
974         if (amdgpu_dm_initialize_drm_device(adev)) {
975                 DRM_ERROR(
976                 "amdgpu: failed to initialize sw for display support.\n");
977                 goto error;
978         }
979
980         /* Update the actual used number of crtc */
981         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
982
983         /* create fake encoders for MST */
984         dm_dp_create_fake_mst_encoders(adev);
985
986         /* TODO: Add_display_info? */
987
988         /* TODO use dynamic cursor width */
989         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
990         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
991
992         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
993                 DRM_ERROR(
994                 "amdgpu: failed to initialize sw for display support.\n");
995                 goto error;
996         }
997
998         DRM_DEBUG_DRIVER("KMS initialized.\n");
999
1000         return 0;
1001 error:
1002         amdgpu_dm_fini(adev);
1003
1004         return -EINVAL;
1005 }
1006
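/**
 * amdgpu_dm_fini() - Tear down the display manager
 * @adev: desired amdgpu device
 *
 * Reverses amdgpu_dm_init(): destroys the DRM-side structures, the HDCP
 * workqueue, the DMUB service objects and Display Core. Also runs on the
 * amdgpu_dm_init() error path, so each step must tolerate partially
 * initialized state.
 */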
1007 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1008 {
1009         int i;
1010
1011         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1012                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1013         }
1014
1015         amdgpu_dm_audio_fini(adev);
1016
1017         amdgpu_dm_destroy_drm_device(&adev->dm);
1018
1019 #ifdef CONFIG_DRM_AMD_DC_HDCP
1020         if (adev->dm.hdcp_workqueue) {
1021                 hdcp_destroy(adev->dm.hdcp_workqueue);
1022                 adev->dm.hdcp_workqueue = NULL;
1023         }
1024
1025         if (adev->dm.dc)
1026                 dc_deinit_callbacks(adev->dm.dc);
1027 #endif
1028         if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1029                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1030                 adev->dm.dc->ctx->dmub_srv = NULL;
1031         }
1032
1033         if (adev->dm.dmub_bo)
1034                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1035                                       &adev->dm.dmub_bo_gpu_addr,
1036                                       &adev->dm.dmub_bo_cpu_addr);
1037
1038         /* DC Destroy TODO: Replace destroy DAL */
1039         if (adev->dm.dc)
1040                 dc_destroy(&adev->dm.dc);
1041         /*
1042          * TODO: pageflip, vblank interrupt
1043          *
1044          * amdgpu_dm_irq_fini(adev);
1045          */
1046
1047         if (adev->dm.cgs_device) {
1048                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1049                 adev->dm.cgs_device = NULL;
1050         }
1051         if (adev->dm.freesync_module) {
1052                 mod_freesync_destroy(adev->dm.freesync_module);
1053                 adev->dm.freesync_module = NULL;
1054         }
1055
1056         mutex_destroy(&adev->dm.audio_lock);
1057         mutex_destroy(&adev->dm.dc_lock);
1058
1059         return;
1060 }
1061
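/**
 * load_dmcu_fw() - Request DMCU firmware on ASICs that need it
 * @adev: desired amdgpu device
 *
 * Most ASICs either have no DMCU or do not need its firmware loaded here
 * and simply return 0. For Raven variants (Picasso/Raven2) and Navi12 the
 * firmware is requested and registered with the PSP loader. A missing
 * firmware file is not an error, since DMCU firmware is optional.
 *
 * Return: 0 on success or when no firmware is required, negative errno
 * otherwise.
 */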
1062 static int load_dmcu_fw(struct amdgpu_device *adev)
1063 {
1064         const char *fw_name_dmcu = NULL;
1065         int r;
1066         const struct dmcu_firmware_header_v1_0 *hdr;
1067
1068         switch (adev->asic_type) {
1069 #if defined(CONFIG_DRM_AMD_DC_SI)
1070         case CHIP_TAHITI:
1071         case CHIP_PITCAIRN:
1072         case CHIP_VERDE:
1073         case CHIP_OLAND:
1074 #endif
1075         case CHIP_BONAIRE:
1076         case CHIP_HAWAII:
1077         case CHIP_KAVERI:
1078         case CHIP_KABINI:
1079         case CHIP_MULLINS:
1080         case CHIP_TONGA:
1081         case CHIP_FIJI:
1082         case CHIP_CARRIZO:
1083         case CHIP_STONEY:
1084         case CHIP_POLARIS11:
1085         case CHIP_POLARIS10:
1086         case CHIP_POLARIS12:
1087         case CHIP_VEGAM:
1088         case CHIP_VEGA10:
1089         case CHIP_VEGA12:
1090         case CHIP_VEGA20:
1091         case CHIP_NAVI10:
1092         case CHIP_NAVI14:
1093         case CHIP_RENOIR:
1094 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1095         case CHIP_SIENNA_CICHLID:
1096         case CHIP_NAVY_FLOUNDER:
1097 #endif
1098                 return 0;
1099         case CHIP_NAVI12:
1100                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1101                 break;
1102         case CHIP_RAVEN:
1103                 /* Only Picasso and Raven2 variants carry DMCU firmware. */
1104                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1105                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1106                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1107                 else
1108                         return 0;
1109                 break;
1110         default:
1111                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1112                 return -EINVAL;
1113         }
1114
1115         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1116                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1117                 return 0;
1118         }
1119
1120         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1121         if (r == -ENOENT) {
1122                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1123                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1124                 adev->dm.fw_dmcu = NULL;
1125                 return 0;
1126         }
1127         if (r) {
1128                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1129                         fw_name_dmcu);
1130                 return r;
1131         }
1132
1133         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1134         if (r) {
1135                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1136                         fw_name_dmcu);
1137                 release_firmware(adev->dm.fw_dmcu);
1138                 adev->dm.fw_dmcu = NULL;
1139                 return r;
1140         }
1141
1142         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1143         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1144         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1145         adev->firmware.fw_size +=
1146                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1147
1148         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1149         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1150         adev->firmware.fw_size +=
1151                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1152
1153         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1154
1155         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1156
1157         return 0;
1158 }
1159
1160 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1161 {
1162         struct amdgpu_device *adev = ctx;
1163
1164         return dm_read_reg(adev->dm.dc->ctx, address);
1165 }
1166
1167 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1168                                      uint32_t value)
1169 {
1170         struct amdgpu_device *adev = ctx;
1171
1172         return dm_write_reg(adev->dm.dc->ctx, address, value);
1173 }
1174
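/**
 * dm_dmub_sw_init() - Software-side DMUB setup
 * @adev: desired amdgpu device
 *
 * Requests and validates the per-ASIC DMUB firmware, creates the DMUB
 * service, sizes its memory regions from the firmware header, allocates a
 * backing buffer in VRAM and derives the per-window framebuffer info that
 * dm_dmub_hw_init() consumes later.
 *
 * Return: 0 on success (including ASICs without DMUB), negative errno on
 * allocation or service-creation failure.
 */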
1175 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1176 {
1177         struct dmub_srv_create_params create_params;
1178         struct dmub_srv_region_params region_params;
1179         struct dmub_srv_region_info region_info;
1180         struct dmub_srv_fb_params fb_params;
1181         struct dmub_srv_fb_info *fb_info;
1182         struct dmub_srv *dmub_srv;
1183         const struct dmcub_firmware_header_v1_0 *hdr;
1184         const char *fw_name_dmub;
1185         enum dmub_asic dmub_asic;
1186         enum dmub_status status;
1187         int r;
1188
1189         switch (adev->asic_type) {
1190         case CHIP_RENOIR:
1191                 dmub_asic = DMUB_ASIC_DCN21;
1192                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1193                 break;
1194 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1195         case CHIP_SIENNA_CICHLID:
1196                 dmub_asic = DMUB_ASIC_DCN30;
1197                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1198                 break;
1199         case CHIP_NAVY_FLOUNDER:
1200                 dmub_asic = DMUB_ASIC_DCN30;
1201                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1202                 break;
1203 #endif
1204
1205         default:
1206                 /* ASIC doesn't support DMUB. */
1207                 return 0;
1208         }
1209
1210         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1211         if (r) {
1212                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1213                 return 0;
1214         }
1215
1216         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1217         if (r) {
1218                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1219                 return 0;
1220         }
1221
1222         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1223
1224         /* Read the firmware version up front so the log below reports it. */
1225         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1226         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1227                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1228                         AMDGPU_UCODE_ID_DMCUB;
1229                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1230                         adev->dm.dmub_fw;
1231                 adev->firmware.fw_size +=
1232                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1233
1234                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1235                          adev->dm.dmcub_fw_version);
1236         }
1237
1238         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1239         dmub_srv = adev->dm.dmub_srv;
1240
1241         if (!dmub_srv) {
1242                 DRM_ERROR("Failed to allocate DMUB service!\n");
1243                 return -ENOMEM;
1244         }
1245
1246         memset(&create_params, 0, sizeof(create_params));
1247         create_params.user_ctx = adev;
1248         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1249         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1250         create_params.asic = dmub_asic;
1251
1252         /* Create the DMUB service. */
1253         status = dmub_srv_create(dmub_srv, &create_params);
1254         if (status != DMUB_STATUS_OK) {
1255                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1256                 return -EINVAL;
1257         }
1258
1259         /* Calculate the size of all the regions for the DMUB service. */
1260         memset(&region_params, 0, sizeof(region_params));
1261
1262         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1263                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1264         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1265         region_params.vbios_size = adev->bios_size;
1266         region_params.fw_bss_data = region_params.bss_data_size ?
1267                 adev->dm.dmub_fw->data +
1268                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1269                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1270         region_params.fw_inst_const =
1271                 adev->dm.dmub_fw->data +
1272                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1273                 PSP_HEADER_BYTES;
1274
1275         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1276                                            &region_info);
1277
1278         if (status != DMUB_STATUS_OK) {
1279                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1280                 return -EINVAL;
1281         }
1282
1283         /*
1284          * Allocate a framebuffer based on the total size of all the regions.
1285          * TODO: Move this into GART.
1286          */
1287         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1288                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1289                                     &adev->dm.dmub_bo_gpu_addr,
1290                                     &adev->dm.dmub_bo_cpu_addr);
1291         if (r)
1292                 return r;
1293
1294         /* Rebase the regions on the framebuffer address. */
1295         memset(&fb_params, 0, sizeof(fb_params));
1296         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1297         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1298         fb_params.region_info = &region_info;
1299
1300         adev->dm.dmub_fb_info =
1301                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1302         fb_info = adev->dm.dmub_fb_info;
1303
1304         if (!fb_info) {
1305                 DRM_ERROR(
1306                         "Failed to allocate framebuffer info for DMUB service!\n");
1307                 return -ENOMEM;
1308         }
1309
1310         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1311         if (status != DMUB_STATUS_OK) {
1312                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1313                 return -EINVAL;
1314         }
1315
1316         return 0;
1317 }
1318
1319 static int dm_sw_init(void *handle)
1320 {
1321         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1322         int r;
1323
1324         r = dm_dmub_sw_init(adev);
1325         if (r)
1326                 return r;
1327
1328         return load_dmcu_fw(adev);
1329 }
1330
1331 static int dm_sw_fini(void *handle)
1332 {
1333         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1334
1335         kfree(adev->dm.dmub_fb_info);
1336         adev->dm.dmub_fb_info = NULL;
1337
1338         if (adev->dm.dmub_srv) {
1339                 dmub_srv_destroy(adev->dm.dmub_srv);
1340                 adev->dm.dmub_srv = NULL;
1341         }
1342
1343         release_firmware(adev->dm.dmub_fw);
1344         adev->dm.dmub_fw = NULL;
1345
1346         release_firmware(adev->dm.fw_dmcu);
1347         adev->dm.fw_dmcu = NULL;
1348
1349         return 0;
1350 }
1351
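/**
 * detect_mst_link_for_all_connectors() - Start MST topology management
 * @dev: DRM device
 *
 * Walks the connector list and enables MST on every connector that is an
 * MST branch with a usable AUX channel, falling back to single-stream mode
 * on failure.
 *
 * Return: 0 on success, negative errno if starting MST failed.
 */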
1352 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1353 {
1354         struct amdgpu_dm_connector *aconnector;
1355         struct drm_connector *connector;
1356         struct drm_connector_list_iter iter;
1357         int ret = 0;
1358
1359         drm_connector_list_iter_begin(dev, &iter);
1360         drm_for_each_connector_iter(connector, &iter) {
1361                 aconnector = to_amdgpu_dm_connector(connector);
1362                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1363                     aconnector->mst_mgr.aux) {
1364                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1365                                          aconnector,
1366                                          aconnector->base.base.id);
1367
1368                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1369                         if (ret < 0) {
1370                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1371                                 aconnector->dc_link->type =
1372                                         dc_connection_single;
1373                                 break;
1374                         }
1375                 }
1376         }
1377         drm_connector_list_iter_end(&iter);
1378
1379         return ret;
1380 }
1381
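/**
 * dm_late_init() - Program ABM backlight IRAM and detect MST links
 * @handle: amdgpu device, passed as void pointer per the IP block interface
 *
 * Loads the backlight ramping parameters and a linear transfer LUT into the
 * DMCU IRAM, or into the DMUB ABM config when ABM runs on DMCUB, then kicks
 * off MST link detection for all connectors.
 *
 * Return: 0 on success, negative errno on failure.
 */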
1382 static int dm_late_init(void *handle)
1383 {
1384         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1385
1386         struct dmcu_iram_parameters params;
1387         unsigned int linear_lut[16];
1388         int i;
1389         struct dmcu *dmcu = NULL;
1390         bool ret = true;
1391
1392         if (!adev->dm.fw_dmcu)
1393                 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1394
1395         dmcu = adev->dm.dc->res_pool->dmcu;
1396
1397         for (i = 0; i < 16; i++)
1398                 linear_lut[i] = 0xFFFF * i / 15;
1399
1400         params.set = 0;
1401         params.backlight_ramping_start = 0xCCCC;
1402         params.backlight_ramping_reduction = 0xCCCCCCCC;
1403         params.backlight_lut_array_size = 16;
1404         params.backlight_lut_array = linear_lut;
1405
1406         /* Min backlight level after ABM reduction. Don't allow below 1%:
1407          * 0xFFFF * 0.01 = 0x28F
1408          */
1409         params.min_abm_backlight = 0x28F;
1410
1411         /* In the case where abm is implemented on dmcub,
1412          * dmcu object will be null.
1413          * ABM 2.4 and up are implemented on dmcub.
1414          */
1415         if (dmcu)
1416                 ret = dmcu_load_iram(dmcu, params);
1417         else if (adev->dm.dc->ctx->dmub_srv)
1418                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1419
1420         if (!ret)
1421                 return -EINVAL;
1422
1423         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1424 }
1425
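/**
 * s3_handle_mst() - Suspend or resume all MST topology managers around S3
 * @dev: DRM device
 * @suspend: true on the way into suspend, false on resume
 *
 * On resume, managers that fail to come back are switched out of MST mode
 * and a hotplug event is generated so userspace can re-probe the links.
 */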
1426 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1427 {
1428         struct amdgpu_dm_connector *aconnector;
1429         struct drm_connector *connector;
1430         struct drm_connector_list_iter iter;
1431         struct drm_dp_mst_topology_mgr *mgr;
1432         int ret;
1433         bool need_hotplug = false;
1434
1435         drm_connector_list_iter_begin(dev, &iter);
1436         drm_for_each_connector_iter(connector, &iter) {
1437                 aconnector = to_amdgpu_dm_connector(connector);
1438                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1439                     aconnector->mst_port)
1440                         continue;
1441
1442                 mgr = &aconnector->mst_mgr;
1443
1444                 if (suspend) {
1445                         drm_dp_mst_topology_mgr_suspend(mgr);
1446                 } else {
1447                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1448                         if (ret < 0) {
1449                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1450                                 need_hotplug = true;
1451                         }
1452                 }
1453         }
1454         drm_connector_list_iter_end(&iter);
1455
1456         if (need_hotplug)
1457                 drm_kms_helper_hotplug_event(dev);
1458 }
1459
1460 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1461 {
1462         struct smu_context *smu = &adev->smu;
1463         int ret = 0;
1464
1465         if (!is_support_sw_smu(adev))
1466                 return 0;
1467
1468         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1469          * depends on the Windows driver dc implementation.
1470          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
1471          * settings should be passed to smu during boot up and on resume from
1472          * s3.
1473          * Boot up: dc calculates the dcn watermark clock settings within
1474          * dc_create / dcn20_resource_construct, then calls the pplib
1475          * functions below to pass the settings to smu:
1476          * smu_set_watermarks_for_clock_ranges
1477          * smu_set_watermarks_table
1478          * navi10_set_watermarks_table
1479          * smu_write_watermarks_table
1480          *
1481          * For Renoir, the clock settings of the dcn watermarks are also fixed
1482          * values. dc has implemented a different flow for the Windows driver:
1483          * dc_hardware_init / dc_set_power_state
1484          * dcn10_init_hw
1485          * notify_wm_ranges
1486          * set_wm_ranges
1487          * -- Linux
1488          * smu_set_watermarks_for_clock_ranges
1489          * renoir_set_watermarks_table
1490          * smu_write_watermarks_table
1491          *
1492          * For Linux,
1493          * dc_hardware_init -> amdgpu_dm_init
1494          * dc_set_power_state --> dm_resume
1495          *
1496          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
1497          */
1498         switch (adev->asic_type) {
1499         case CHIP_NAVI10:
1500         case CHIP_NAVI14:
1501         case CHIP_NAVI12:
1502                 break;
1503         default:
1504                 return 0;
1505         }
1506
1507         ret = smu_write_watermarks_table(smu);
1508         if (ret) {
1509                 DRM_ERROR("Failed to update WMTABLE!\n");
1510                 return ret;
1511         }
1512
1513         return 0;
1514 }
1515
1516 /**
1517  * dm_hw_init() - Initialize DC device
1518  * @handle: The base driver device containing the amdgpu_dm device.
1519  *
1520  * Initialize the &struct amdgpu_display_manager device. This involves calling
1521  * the initializers of each DM component, then populating the struct with them.
1522  *
1523  * Although the function implies hardware initialization, both hardware and
1524  * software are initialized here. Splitting them out to their relevant init
1525  * hooks is a future TODO item.
1526  *
1527  * Some notable things that are initialized here:
1528  *
1529  * - Display Core, both software and hardware
1530  * - DC modules that we need (freesync and color management)
1531  * - DRM software states
1532  * - Interrupt sources and handlers
1533  * - Vblank support
1534  * - Debug FS entries, if enabled
1535  */
1536 static int dm_hw_init(void *handle)
1537 {
1538         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1539         /* Create DAL display manager */
1540         amdgpu_dm_init(adev);
1541         amdgpu_dm_hpd_init(adev);
1542
1543         return 0;
1544 }
1545
1546 /**
1547  * dm_hw_fini() - Teardown DC device
1548  * @handle: The base driver device containing the amdgpu_dm device.
1549  *
1550  * Teardown components within &struct amdgpu_display_manager that require
1551  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1552  * were loaded. Also flush IRQ workqueues and disable them.
1553  */
1554 static int dm_hw_fini(void *handle)
1555 {
1556         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1557
1558         amdgpu_dm_hpd_fini(adev);
1559
1560         amdgpu_dm_irq_fini(adev);
1561         amdgpu_dm_fini(adev);
1562         return 0;
1563 }
1564
1565
1566 static int dm_enable_vblank(struct drm_crtc *crtc);
1567 static void dm_disable_vblank(struct drm_crtc *crtc);
1568
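/*
 * dm_gpureset_toggle_interrupts() - Toggle display interrupts around reset.
 *
 * Enables or disables the pageflip and vblank interrupts of every CRTC that
 * drives a stream with active planes in @state. Used to quiesce the display
 * hardware before a GPU reset and to bring it back up afterwards.
 */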
1569 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1570                                  struct dc_state *state, bool enable)
1571 {
1572         enum dc_irq_source irq_source;
1573         struct amdgpu_crtc *acrtc;
1574         int rc = -EBUSY;
1575         int i = 0;
1576
1577         for (i = 0; i < state->stream_count; i++) {
1578                 acrtc = get_crtc_by_otg_inst(
1579                                 adev, state->stream_status[i].primary_otg_inst);
1580
1581                 if (acrtc && state->stream_status[i].plane_count != 0) {
1582                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1583                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1584                         DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1585                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1586                         if (rc)
1587                                 DRM_WARN("Failed to %s pflip interrupts\n",
1588                                          enable ? "enable" : "disable");
1589
1590                         if (enable) {
1591                                 rc = dm_enable_vblank(&acrtc->base);
1592                                 if (rc)
1593                                         DRM_WARN("Failed to enable vblank interrupts\n");
1594                         } else {
1595                                 dm_disable_vblank(&acrtc->base);
1596                         }
1597
1598                 }
1599         }
1600
1601 }
1602
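/*
 * amdgpu_dm_commit_zero_streams() - Commit a DC state with no streams.
 *
 * Copies the current DC state, strips all planes and streams from the copy,
 * validates it and commits it, effectively blanking the display hardware.
 * Used on suspend while a GPU reset is in progress.
 */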
1603 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1604 {
1605         struct dc_state *context = NULL;
1606         enum dc_status res = DC_ERROR_UNEXPECTED;
1607         int i;
1608         struct dc_stream_state *del_streams[MAX_PIPES];
1609         int del_streams_count = 0;
1610
1611         memset(del_streams, 0, sizeof(del_streams));
1612
1613         context = dc_create_state(dc);
1614         if (context == NULL)
1615                 goto context_alloc_fail;
1616
1617         dc_resource_state_copy_construct_current(dc, context);
1618
1619         /* First, remove all streams from the context */
1620         for (i = 0; i < context->stream_count; i++) {
1621                 struct dc_stream_state *stream = context->streams[i];
1622
1623                 del_streams[del_streams_count++] = stream;
1624         }
1625
1626         /* Remove all planes for removed streams and then remove the streams */
1627         for (i = 0; i < del_streams_count; i++) {
1628                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1629                         res = DC_FAIL_DETACH_SURFACES;
1630                         goto fail;
1631                 }
1632
1633                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1634                 if (res != DC_OK)
1635                         goto fail;
1636         }
1637
1638
1639         res = dc_validate_global_state(dc, context, false);
1640
1641         if (res != DC_OK) {
1642                 DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1643                 goto fail;
1644         }
1645
1646         res = dc_commit_state(dc, context);
1647
1648 fail:
1649         dc_release_state(context);
1650
1651 context_alloc_fail:
1652         return res;
1653 }
1654
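/*
 * dm_suspend() - DM hook for the IP block suspend stage.
 *
 * In the GPU reset path, caches the current DC state, disables display
 * interrupts and commits a zero-stream state; note that the dc_lock taken
 * here is released in dm_resume(). For a regular S3 suspend, caches the
 * atomic state, suspends MST, disables interrupts and puts DC into D3.
 */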
1655 static int dm_suspend(void *handle)
1656 {
1657         struct amdgpu_device *adev = handle;
1658         struct amdgpu_display_manager *dm = &adev->dm;
1659         int ret = 0;
1660
1661         if (amdgpu_in_reset(adev)) {
1662                 mutex_lock(&dm->dc_lock);
1663                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1664
1665                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1666
1667                 amdgpu_dm_commit_zero_streams(dm->dc);
1668
1669                 amdgpu_dm_irq_suspend(adev);
1670
1671                 return ret;
1672         }
1673
1674         WARN_ON(adev->dm.cached_state);
1675         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1676
1677         s3_handle_mst(adev_to_drm(adev), true);
1678
1679         amdgpu_dm_irq_suspend(adev);
1680
1681
1682         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1683
1684         return 0;
1685 }
1686
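/*
 * Returns the first connector in @state whose new connector state points at
 * @crtc, or NULL if no such connector exists.
 */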
1687 static struct amdgpu_dm_connector *
1688 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1689                                              struct drm_crtc *crtc)
1690 {
1691         uint32_t i;
1692         struct drm_connector_state *new_con_state;
1693         struct drm_connector *connector;
1694         struct drm_crtc *crtc_from_state;
1695
1696         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1697                 crtc_from_state = new_con_state->crtc;
1698
1699                 if (crtc_from_state == crtc)
1700                         return to_amdgpu_dm_connector(connector);
1701         }
1702
1703         return NULL;
1704 }
1705
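/*
 * emulated_link_detect() - Fake link detection for forced connectors.
 *
 * Creates a sink matching the connector's signal type and reads the EDID
 * through the local DDC path, without touching the physical link. Used when
 * a connector is forced on but no physical sink can be detected.
 */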
1706 static void emulated_link_detect(struct dc_link *link)
1707 {
1708         struct dc_sink_init_data sink_init_data = { 0 };
1709         struct display_sink_capability sink_caps = { 0 };
1710         enum dc_edid_status edid_status;
1711         struct dc_context *dc_ctx = link->ctx;
1712         struct dc_sink *sink = NULL;
1713         struct dc_sink *prev_sink = NULL;
1714
1715         link->type = dc_connection_none;
1716         prev_sink = link->local_sink;
1717
1718         if (prev_sink != NULL)
1719                 dc_sink_retain(prev_sink);
1720
1721         switch (link->connector_signal) {
1722         case SIGNAL_TYPE_HDMI_TYPE_A: {
1723                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1725                 break;
1726         }
1727
1728         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1729                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1731                 break;
1732         }
1733
1734         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1735                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1737                 break;
1738         }
1739
1740         case SIGNAL_TYPE_LVDS: {
1741                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1742                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1743                 break;
1744         }
1745
1746         case SIGNAL_TYPE_EDP: {
1747                 sink_caps.transaction_type =
1748                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1749                 sink_caps.signal = SIGNAL_TYPE_EDP;
1750                 break;
1751         }
1752
1753         case SIGNAL_TYPE_DISPLAY_PORT: {
1754                 sink_caps.transaction_type =
1755                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1756                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1757                 break;
1758         }
1759
1760         default:
1761                 DC_ERROR("Invalid connector type! signal:%d\n",
1762                         link->connector_signal);
1763                 return;
1764         }
1765
1766         sink_init_data.link = link;
1767         sink_init_data.sink_signal = sink_caps.signal;
1768
1769         sink = dc_sink_create(&sink_init_data);
1770         if (!sink) {
1771                 DC_ERROR("Failed to create sink!\n");
1772                 return;
1773         }
1774
1775         /* dc_sink_create returns a new reference */
1776         link->local_sink = sink;
1777
1778         edid_status = dm_helpers_read_local_edid(
1779                         link->ctx,
1780                         link,
1781                         sink);
1782
1783         if (edid_status != EDID_OK)
1784                 DC_ERROR("Failed to read EDID\n");
1785
1786 }
1787
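/*
 * dm_gpureset_commit_state() - Replay a cached DC state after GPU reset.
 *
 * Issues a commit for every stream in @dc_state with the force_full_update
 * flag set on each of its surfaces, so the hardware is fully reprogrammed.
 */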
1788 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1789                                      struct amdgpu_display_manager *dm)
1790 {
1791         struct {
1792                 struct dc_surface_update surface_updates[MAX_SURFACES];
1793                 struct dc_plane_info plane_infos[MAX_SURFACES];
1794                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1795                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1796                 struct dc_stream_update stream_update;
1797         } *bundle;
1798         int k, m;
1799
1800         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1801
1802         if (!bundle) {
1803                 dm_error("Failed to allocate update bundle\n");
1804                 goto cleanup;
1805         }
1806
1807         for (k = 0; k < dc_state->stream_count; k++) {
1808                 bundle->stream_update.stream = dc_state->streams[k];
1809
1810                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1811                         bundle->surface_updates[m].surface =
1812                                 dc_state->stream_status->plane_states[m];
1813                         bundle->surface_updates[m].surface->force_full_update =
1814                                 true;
1815                 }
1816                 dc_commit_updates_for_stream(
1817                         dm->dc, bundle->surface_updates,
1818                         dc_state->stream_status->plane_count,
1819                         dc_state->streams[k], &bundle->stream_update, dc_state);
1820         }
1821
1822 cleanup:
1823         kfree(bundle);
1824
1825         return;
1826 }
1827
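/*
 * dm_resume() - DM hook for the IP block resume stage.
 *
 * When resuming from GPU reset, re-initializes DMUB and replays the DC state
 * cached by dm_suspend(), releasing the dc_lock taken there. For a regular
 * S3 resume, rebuilds the DC state, powers DC back up, restores MST,
 * re-detects every connector and restores the cached atomic state.
 */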
1828 static int dm_resume(void *handle)
1829 {
1830         struct amdgpu_device *adev = handle;
1831         struct drm_device *ddev = adev_to_drm(adev);
1832         struct amdgpu_display_manager *dm = &adev->dm;
1833         struct amdgpu_dm_connector *aconnector;
1834         struct drm_connector *connector;
1835         struct drm_connector_list_iter iter;
1836         struct drm_crtc *crtc;
1837         struct drm_crtc_state *new_crtc_state;
1838         struct dm_crtc_state *dm_new_crtc_state;
1839         struct drm_plane *plane;
1840         struct drm_plane_state *new_plane_state;
1841         struct dm_plane_state *dm_new_plane_state;
1842         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1843         enum dc_connection_type new_connection_type = dc_connection_none;
1844         struct dc_state *dc_state;
1845         int i, r, j;
1846
1847         if (amdgpu_in_reset(adev)) {
1848                 dc_state = dm->cached_dc_state;
1849
1850                 r = dm_dmub_hw_init(adev);
1851                 if (r)
1852                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1853
1854                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1855                 dc_resume(dm->dc);
1856
1857                 amdgpu_dm_irq_resume_early(adev);
1858
1859                 for (i = 0; i < dc_state->stream_count; i++) {
1860                         dc_state->streams[i]->mode_changed = true;
1861                         for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1862                                 dc_state->stream_status->plane_states[j]->update_flags.raw
1863                                         = 0xffffffff;
1864                         }
1865                 }
1866
1867                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1868
1869                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1870
1871                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1872
1873                 dc_release_state(dm->cached_dc_state);
1874                 dm->cached_dc_state = NULL;
1875
1876                 amdgpu_dm_irq_resume_late(adev);
1877
1878                 mutex_unlock(&dm->dc_lock);
1879
1880                 return 0;
1881         }
1882         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1883         dc_release_state(dm_state->context);
1884         dm_state->context = dc_create_state(dm->dc);
1885         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1886         dc_resource_state_construct(dm->dc, dm_state->context);
1887
1888         /* Before powering on DC we need to re-initialize DMUB. */
1889         r = dm_dmub_hw_init(adev);
1890         if (r)
1891                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1892
1893         /* power on hardware */
1894         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1895
1896         /* program HPD filter */
1897         dc_resume(dm->dc);
1898
1899         /*
1900          * early enable HPD Rx IRQ, should be done before set mode as short
1901          * pulse interrupts are used for MST
1902          */
1903         amdgpu_dm_irq_resume_early(adev);
1904
1905         /* On resume we need to rewrite the MSTM control bits to enable MST */
1906         s3_handle_mst(ddev, false);
1907
1908         /* Do detection */
1909         drm_connector_list_iter_begin(ddev, &iter);
1910         drm_for_each_connector_iter(connector, &iter) {
1911                 aconnector = to_amdgpu_dm_connector(connector);
1912
1913                 /*
1914                  * This is the case when traversing through already created
1915                  * MST connectors; they should be skipped.
1916                  */
1917                 if (aconnector->mst_port)
1918                         continue;
1919
1920                 mutex_lock(&aconnector->hpd_lock);
1921                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1922                         DRM_ERROR("KMS: Failed to detect connector\n");
1923
1924                 if (aconnector->base.force && new_connection_type == dc_connection_none)
1925                         emulated_link_detect(aconnector->dc_link);
1926                 else
1927                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1928
1929                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1930                         aconnector->fake_enable = false;
1931
1932                 if (aconnector->dc_sink)
1933                         dc_sink_release(aconnector->dc_sink);
1934                 aconnector->dc_sink = NULL;
1935                 amdgpu_dm_update_connector_after_detect(aconnector);
1936                 mutex_unlock(&aconnector->hpd_lock);
1937         }
1938         drm_connector_list_iter_end(&iter);
1939
1940         /* Force mode set in atomic commit */
1941         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1942                 new_crtc_state->active_changed = true;
1943
1944         /*
1945          * atomic_check is expected to create the dc states. We need to release
1946          * them here, since they were duplicated as part of the suspend
1947          * procedure.
1948          */
1949         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1950                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1951                 if (dm_new_crtc_state->stream) {
1952                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1953                         dc_stream_release(dm_new_crtc_state->stream);
1954                         dm_new_crtc_state->stream = NULL;
1955                 }
1956         }
1957
1958         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1959                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1960                 if (dm_new_plane_state->dc_state) {
1961                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1962                         dc_plane_state_release(dm_new_plane_state->dc_state);
1963                         dm_new_plane_state->dc_state = NULL;
1964                 }
1965         }
1966
1967         drm_atomic_helper_resume(ddev, dm->cached_state);
1968
1969         dm->cached_state = NULL;
1970
1971         amdgpu_dm_irq_resume_late(adev);
1972
1973         amdgpu_dm_smu_write_watermarks_table(adev);
1974
1975         return 0;
1976 }
1977
1978 /**
1979  * DOC: DM Lifecycle
1980  *
1981  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1982  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1983  * the base driver's device list to be initialized and torn down accordingly.
1984  *
1985  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1986  */
1987
1988 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1989         .name = "dm",
1990         .early_init = dm_early_init,
1991         .late_init = dm_late_init,
1992         .sw_init = dm_sw_init,
1993         .sw_fini = dm_sw_fini,
1994         .hw_init = dm_hw_init,
1995         .hw_fini = dm_hw_fini,
1996         .suspend = dm_suspend,
1997         .resume = dm_resume,
1998         .is_idle = dm_is_idle,
1999         .wait_for_idle = dm_wait_for_idle,
2000         .check_soft_reset = dm_check_soft_reset,
2001         .soft_reset = dm_soft_reset,
2002         .set_clockgating_state = dm_set_clockgating_state,
2003         .set_powergating_state = dm_set_powergating_state,
2004 };
2005
2006 const struct amdgpu_ip_block_version dm_ip_block = {
2008         .type = AMD_IP_BLOCK_TYPE_DCE,
2009         .major = 1,
2010         .minor = 0,
2011         .rev = 0,
2012         .funcs = &amdgpu_dm_funcs,
2013 };
2014
2015
2016 /**
2017  * DOC: atomic
2018  *
2019  * *WIP*
2020  */
2021
2022 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2023         .fb_create = amdgpu_display_user_framebuffer_create,
2024         .output_poll_changed = drm_fb_helper_output_poll_changed,
2025         .atomic_check = amdgpu_dm_atomic_check,
2026         .atomic_commit = amdgpu_dm_atomic_commit,
2027 };
2028
2029 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2030         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2031 };
2032
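/*
 * update_connector_ext_caps() - Derive eDP AUX backlight capabilities.
 *
 * Reads the sink's extended backlight caps from the link and converts the
 * HDR metadata max/min content light levels into AUX max/min input signal
 * values using the CTA-861-G luminance formula described below.
 */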
2033 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2034 {
2035         u32 max_cll, min_cll, max, min, q, r;
2036         struct amdgpu_dm_backlight_caps *caps;
2037         struct amdgpu_display_manager *dm;
2038         struct drm_connector *conn_base;
2039         struct amdgpu_device *adev;
2040         struct dc_link *link = NULL;
2041         static const u8 pre_computed_values[] = {
2042                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2043                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2044
2045         if (!aconnector || !aconnector->dc_link)
2046                 return;
2047
2048         link = aconnector->dc_link;
2049         if (link->connector_signal != SIGNAL_TYPE_EDP)
2050                 return;
2051
2052         conn_base = &aconnector->base;
2053         adev = drm_to_adev(conn_base->dev);
2054         dm = &adev->dm;
2055         caps = &dm->backlight_caps;
2056         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2057         caps->aux_support = false;
2058         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2059         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2060
2061         if (caps->ext_caps->bits.oled == 1 ||
2062             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2063             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2064                 caps->aux_support = true;
2065
2066         /* From the specification (CTA-861-G), for calculating the maximum
2067          * luminance we need to use:
2068          *      Luminance = 50*2**(CV/32)
2069          * where CV is a one-byte value.
2070          * Calculating this expression would require floating point
2071          * precision; to avoid that complexity, we take advantage of the fact
2072          * that CV is divided by a constant. From Euclid's division algorithm,
2073          * we know that CV can be written as CV = 32*q + r. Replacing CV in
2074          * the luminance expression gives 50*(2**q)*(2**(r/32)), so we only
2075          * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
2076          * the values we used the following Ruby line:
2077          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2078          * The results of the above expression can be verified against
2079          * pre_computed_values.
2080          */
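        /* Example: max_cll = 65 gives q = 2 and r = 1, so
         * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204.
         */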
2081         q = max_cll >> 5;
2082         r = max_cll % 32;
2083         max = (1 << q) * pre_computed_values[r];
2084
2085         // min luminance: maxLum * (CV/255)^2 / 100
2086         q = DIV_ROUND_CLOSEST(min_cll, 255);
2087         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2088
2089         caps->aux_max_input_signal = max;
2090         caps->aux_min_input_signal = min;
2091 }
2092
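/*
 * amdgpu_dm_update_connector_after_detect() - Sync DRM state with a dc_link.
 *
 * Updates the DRM connector (sink, EDID property, modes, FreeSync and CEC
 * state) after link detection, handling emulated, MST and physical sinks as
 * well as disconnects.
 */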
2093 void amdgpu_dm_update_connector_after_detect(
2094                 struct amdgpu_dm_connector *aconnector)
2095 {
2096         struct drm_connector *connector = &aconnector->base;
2097         struct drm_device *dev = connector->dev;
2098         struct dc_sink *sink;
2099
2100         /* MST handled by drm_mst framework */
2101         if (aconnector->mst_mgr.mst_state)
2102                 return;
2103
2104
2105         sink = aconnector->dc_link->local_sink;
2106         if (sink)
2107                 dc_sink_retain(sink);
2108
2109         /*
2110          * EDID mgmt connectors get their first update only in the mode_valid
2111          * hook; the connector sink is then set to either a fake or a physical
2112          * sink depending on the link status. Skip if already done during boot.
2113          */
2114         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2115                         && aconnector->dc_em_sink) {
2116
2117                 /*
2118                  * For headless S3 resume, use the emulated sink (dc_em_sink)
2119                  * to fake a stream, because on resume connector->sink is NULL.
2120                  */
2121                 mutex_lock(&dev->mode_config.mutex);
2122
2123                 if (sink) {
2124                         if (aconnector->dc_sink) {
2125                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2126                                 /*
2127                                  * The retain and release below bump up the
2128                                  * sink's refcount because the link no longer
2129                                  * points to it after disconnect; otherwise the
2130                                  * next crtc-to-connector reshuffle by the UMD
2131                                  * would trigger an unwanted dc_sink release.
2132                                  */
2132                                 dc_sink_release(aconnector->dc_sink);
2133                         }
2134                         aconnector->dc_sink = sink;
2135                         dc_sink_retain(aconnector->dc_sink);
2136                         amdgpu_dm_update_freesync_caps(connector,
2137                                         aconnector->edid);
2138                 } else {
2139                         amdgpu_dm_update_freesync_caps(connector, NULL);
2140                         if (!aconnector->dc_sink) {
2141                                 aconnector->dc_sink = aconnector->dc_em_sink;
2142                                 dc_sink_retain(aconnector->dc_sink);
2143                         }
2144                 }
2145
2146                 mutex_unlock(&dev->mode_config.mutex);
2147
2148                 if (sink)
2149                         dc_sink_release(sink);
2150                 return;
2151         }
2152
2153         /*
2154          * TODO: temporary guard while looking for a proper fix:
2155          * if this sink is an MST sink, we should not do anything.
2156          */
2157         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2158                 dc_sink_release(sink);
2159                 return;
2160         }
2161
2162         if (aconnector->dc_sink == sink) {
2163                 /*
2164                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2165                  * Do nothing!!
2166                  */
2167                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2168                                 aconnector->connector_id);
2169                 if (sink)
2170                         dc_sink_release(sink);
2171                 return;
2172         }
2173
2174         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2175                 aconnector->connector_id, aconnector->dc_sink, sink);
2176
2177         mutex_lock(&dev->mode_config.mutex);
2178
2179         /*
2180          * 1. Update status of the drm connector
2181          * 2. Send an event and let userspace tell us what to do
2182          */
2183         if (sink) {
2184                 /*
2185                  * TODO: check if we still need the S3 mode update workaround.
2186                  * If yes, put it here.
2187                  */
2188                 if (aconnector->dc_sink)
2189                         amdgpu_dm_update_freesync_caps(connector, NULL);
2190
2191                 aconnector->dc_sink = sink;
2192                 dc_sink_retain(aconnector->dc_sink);
2193                 if (sink->dc_edid.length == 0) {
2194                         aconnector->edid = NULL;
2195                         if (aconnector->dc_link->aux_mode) {
2196                                 drm_dp_cec_unset_edid(
2197                                         &aconnector->dm_dp_aux.aux);
2198                         }
2199                 } else {
2200                         aconnector->edid =
2201                                 (struct edid *)sink->dc_edid.raw_edid;
2202
2203                         drm_connector_update_edid_property(connector,
2204                                                            aconnector->edid);
2205                         drm_add_edid_modes(connector, aconnector->edid);
2206
2207                         if (aconnector->dc_link->aux_mode)
2208                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2209                                                     aconnector->edid);
2210                 }
2211
2212                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2213                 update_connector_ext_caps(aconnector);
2214         } else {
2215                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2216                 amdgpu_dm_update_freesync_caps(connector, NULL);
2217                 drm_connector_update_edid_property(connector, NULL);
2218                 aconnector->num_modes = 0;
2219                 dc_sink_release(aconnector->dc_sink);
2220                 aconnector->dc_sink = NULL;
2221                 aconnector->edid = NULL;
2222 #ifdef CONFIG_DRM_AMD_DC_HDCP
2223                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2224                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2225                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2226 #endif
2227         }
2228
2229         mutex_unlock(&dev->mode_config.mutex);
2230
2231         if (sink)
2232                 dc_sink_release(sink);
2233 }
2234
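/*
 * handle_hpd_irq() - Handler for HPD (long pulse) interrupts.
 *
 * Re-runs link detection (emulated for forced connectors), updates the DRM
 * connector state and, for non-forced connectors, sends a hotplug event so
 * userspace re-probes the display.
 */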
2235 static void handle_hpd_irq(void *param)
2236 {
2237         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2238         struct drm_connector *connector = &aconnector->base;
2239         struct drm_device *dev = connector->dev;
2240         enum dc_connection_type new_connection_type = dc_connection_none;
2241 #ifdef CONFIG_DRM_AMD_DC_HDCP
2242         struct amdgpu_device *adev = drm_to_adev(dev);
2243 #endif
2244
2245         /*
2246          * In case of failure or MST, no need to update the connector status
2247          * or notify the OS, since (in the MST case) MST does this in its own context.
2248          */
2249         mutex_lock(&aconnector->hpd_lock);
2250
2251 #ifdef CONFIG_DRM_AMD_DC_HDCP
2252         if (adev->dm.hdcp_workqueue)
2253                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2254 #endif
2255         if (aconnector->fake_enable)
2256                 aconnector->fake_enable = false;
2257
2258         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2259                 DRM_ERROR("KMS: Failed to detect connector\n");
2260
2261         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2262                 emulated_link_detect(aconnector->dc_link);
2263
2264
2265                 drm_modeset_lock_all(dev);
2266                 dm_restore_drm_connector_state(dev, connector);
2267                 drm_modeset_unlock_all(dev);
2268
2269                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2270                         drm_kms_helper_hotplug_event(dev);
2271
2272         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2273                 amdgpu_dm_update_connector_after_detect(aconnector);
2274
2275
2276                 drm_modeset_lock_all(dev);
2277                 dm_restore_drm_connector_state(dev, connector);
2278                 drm_modeset_unlock_all(dev);
2279
2280                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2281                         drm_kms_helper_hotplug_event(dev);
2282         }
2283         mutex_unlock(&aconnector->hpd_lock);
2284
2285 }
2286
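/*
 * dm_handle_hpd_rx_irq() - Service MST sideband messages via ESI.
 *
 * Reads the sink's ESI (or legacy sink-count/IRQ-vector) DPCD registers,
 * lets the MST manager handle any pending IRQ, acknowledges it back to the
 * sink and repeats until no IRQ is pending or a retry limit is reached.
 */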
2287 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2288 {
2289         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2290         uint8_t dret;
2291         bool new_irq_handled = false;
2292         int dpcd_addr;
2293         int dpcd_bytes_to_read;
2294
2295         const int max_process_count = 30;
2296         int process_count = 0;
2297
2298         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2299
2300         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2301                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2302                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2303                 dpcd_addr = DP_SINK_COUNT;
2304         } else {
2305                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2306                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2307                 dpcd_addr = DP_SINK_COUNT_ESI;
2308         }
2309
2310         dret = drm_dp_dpcd_read(
2311                 &aconnector->dm_dp_aux.aux,
2312                 dpcd_addr,
2313                 esi,
2314                 dpcd_bytes_to_read);
2315
2316         while (dret == dpcd_bytes_to_read &&
2317                 process_count < max_process_count) {
2318                 uint8_t retry;
2319                 dret = 0;
2320
2321                 process_count++;
2322
2323                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2324                 /* handle HPD short pulse irq */
2325                 if (aconnector->mst_mgr.mst_state)
2326                         drm_dp_mst_hpd_irq(
2327                                 &aconnector->mst_mgr,
2328                                 esi,
2329                                 &new_irq_handled);
2330
2331                 if (new_irq_handled) {
2332                         /* ACK at DPCD to notify the downstream device */
2333                         const int ack_dpcd_bytes_to_write =
2334                                 dpcd_bytes_to_read - 1;
2335
2336                         for (retry = 0; retry < 3; retry++) {
2337                                 uint8_t wret;
2338
2339                                 wret = drm_dp_dpcd_write(
2340                                         &aconnector->dm_dp_aux.aux,
2341                                         dpcd_addr + 1,
2342                                         &esi[1],
2343                                         ack_dpcd_bytes_to_write);
2344                                 if (wret == ack_dpcd_bytes_to_write)
2345                                         break;
2346                         }
2347
2348                         /* check if there is new irq to be handled */
2349                         dret = drm_dp_dpcd_read(
2350                                 &aconnector->dm_dp_aux.aux,
2351                                 dpcd_addr,
2352                                 esi,
2353                                 dpcd_bytes_to_read);
2354
2355                         new_irq_handled = false;
2356                 } else {
2357                         break;
2358                 }
2359         }
2360
2361         if (process_count == max_process_count)
2362                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2363 }
2364
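/*
 * handle_hpd_rx_irq() - Handler for HPD RX (short pulse) interrupts.
 *
 * Lets DC process the short pulse; on a downstream port status change,
 * re-runs detection and notifies userspace. Also forwards HDCP CP_IRQs,
 * services MST sideband traffic and kicks the DP CEC helper.
 */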
2365 static void handle_hpd_rx_irq(void *param)
2366 {
2367         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2368         struct drm_connector *connector = &aconnector->base;
2369         struct drm_device *dev = connector->dev;
2370         struct dc_link *dc_link = aconnector->dc_link;
2371         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2372         enum dc_connection_type new_connection_type = dc_connection_none;
2373 #ifdef CONFIG_DRM_AMD_DC_HDCP
2374         union hpd_irq_data hpd_irq_data;
2375         struct amdgpu_device *adev = drm_to_adev(dev);
2376
2377         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2378 #endif
2379
2380         /*
2381          * TODO: Temporarily hold a mutex so the HPD interrupt does not
2382          * cause a GPIO conflict; once the i2c helper is implemented,
2383          * this mutex should be retired.
2384          */
2385         if (dc_link->type != dc_connection_mst_branch)
2386                 mutex_lock(&aconnector->hpd_lock);
2387
2388
2389 #ifdef CONFIG_DRM_AMD_DC_HDCP
2390         if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2391 #else
2392         if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2393 #endif
2394                         !is_mst_root_connector) {
2395                 /* Downstream Port status changed. */
2396                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2397                         DRM_ERROR("KMS: Failed to detect connector\n");
2398
2399                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2400                         emulated_link_detect(dc_link);
2401
2402                         if (aconnector->fake_enable)
2403                                 aconnector->fake_enable = false;
2404
2405                         amdgpu_dm_update_connector_after_detect(aconnector);
2406
2407
2408                         drm_modeset_lock_all(dev);
2409                         dm_restore_drm_connector_state(dev, connector);
2410                         drm_modeset_unlock_all(dev);
2411
2412                         drm_kms_helper_hotplug_event(dev);
2413                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2414
2415                         if (aconnector->fake_enable)
2416                                 aconnector->fake_enable = false;
2417
2418                         amdgpu_dm_update_connector_after_detect(aconnector);
2419
2420
2421                         drm_modeset_lock_all(dev);
2422                         dm_restore_drm_connector_state(dev, connector);
2423                         drm_modeset_unlock_all(dev);
2424
2425                         drm_kms_helper_hotplug_event(dev);
2426                 }
2427         }
2428 #ifdef CONFIG_DRM_AMD_DC_HDCP
2429         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2430                 if (adev->dm.hdcp_workqueue)
2431                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2432         }
2433 #endif
2434         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2435             (dc_link->type == dc_connection_mst_branch))
2436                 dm_handle_hpd_rx_irq(aconnector);
2437
2438         if (dc_link->type != dc_connection_mst_branch) {
2439                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2440                 mutex_unlock(&aconnector->hpd_lock);
2441         }
2442 }
2443
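/*
 * register_hpd_handlers() - Hook up the HPD and HPD RX interrupt callbacks.
 *
 * Registers handle_hpd_irq() and handle_hpd_rx_irq() with the DM interrupt
 * manager for every connector whose link exposes the respective IRQ source.
 */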
2444 static void register_hpd_handlers(struct amdgpu_device *adev)
2445 {
2446         struct drm_device *dev = adev_to_drm(adev);
2447         struct drm_connector *connector;
2448         struct amdgpu_dm_connector *aconnector;
2449         const struct dc_link *dc_link;
2450         struct dc_interrupt_params int_params = {0};
2451
2452         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2453         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2454
2455         list_for_each_entry(connector,
2456                         &dev->mode_config.connector_list, head) {
2457
2458                 aconnector = to_amdgpu_dm_connector(connector);
2459                 dc_link = aconnector->dc_link;
2460
2461                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2462                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2463                         int_params.irq_source = dc_link->irq_source_hpd;
2464
2465                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2466                                         handle_hpd_irq,
2467                                         (void *) aconnector);
2468                 }
2469
2470                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2471
2472                         /* Also register for DP short pulse (hpd_rx). */
2473                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2474                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2475
2476                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2477                                         handle_hpd_rx_irq,
2478                                         (void *) aconnector);
2479                 }
2480         }
2481 }
2482
2483 #if defined(CONFIG_DRM_AMD_DC_SI)
2484 /* Register IRQ sources and initialize IRQ callbacks */
2485 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2486 {
2487         struct dc *dc = adev->dm.dc;
2488         struct common_irq_params *c_irq_params;
2489         struct dc_interrupt_params int_params = {0};
2490         int r;
2491         int i;
2492         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2493
2494         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2495         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2496
2497         /*
2498          * Actions of amdgpu_irq_add_id():
2499          * 1. Register a set() function with base driver.
2500          *    Base driver will call set() function to enable/disable an
2501          *    interrupt in DC hardware.
2502          * 2. Register amdgpu_dm_irq_handler().
2503          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2504          *    coming from DC hardware. amdgpu_dm_irq_handler() will re-direct
2505          *    the interrupt to DC for acknowledging and handling.
2506          */
2507
2508         /* Use VBLANK interrupt */
2509         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2510                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2511                 if (r) {
2512                         DRM_ERROR("Failed to add crtc irq id!\n");
2513                         return r;
2514                 }
2515
2516                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2517                 int_params.irq_source =
2518                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2519
2520                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2521
2522                 c_irq_params->adev = adev;
2523                 c_irq_params->irq_src = int_params.irq_source;
2524
2525                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2526                                 dm_crtc_high_irq, c_irq_params);
2527         }
2528
2529         /* Use GRPH_PFLIP interrupt */
2530         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2531                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2532                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2533                 if (r) {
2534                         DRM_ERROR("Failed to add page flip irq id!\n");
2535                         return r;
2536                 }
2537
2538                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2539                 int_params.irq_source =
2540                         dc_interrupt_to_irq_source(dc, i, 0);
2541
2542                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2543
2544                 c_irq_params->adev = adev;
2545                 c_irq_params->irq_src = int_params.irq_source;
2546
2547                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2548                                 dm_pflip_high_irq, c_irq_params);
2549
2550         }
2551
2552         /* HPD */
2553         r = amdgpu_irq_add_id(adev, client_id,
2554                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2555         if (r) {
2556                 DRM_ERROR("Failed to add hpd irq id!\n");
2557                 return r;
2558         }
2559
2560         register_hpd_handlers(adev);
2561
2562         return 0;
2563 }
2564 #endif
2565
2566 /* Register IRQ sources and initialize IRQ callbacks */
2567 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2568 {
2569         struct dc *dc = adev->dm.dc;
2570         struct common_irq_params *c_irq_params;
2571         struct dc_interrupt_params int_params = {0};
2572         int r;
2573         int i;
2574         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2575
2576         if (adev->asic_type >= CHIP_VEGA10)
2577                 client_id = SOC15_IH_CLIENTID_DCE;
2578
2579         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2580         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2581
2582         /*
2583          * Actions of amdgpu_irq_add_id():
2584          * 1. Register a set() function with base driver.
2585          *    Base driver will call set() function to enable/disable an
2586          *    interrupt in DC hardware.
2587          * 2. Register amdgpu_dm_irq_handler().
2588          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2589          *    coming from DC hardware. amdgpu_dm_irq_handler() will re-direct
2590          *    the interrupt to DC for acknowledging and handling.
2591          */
2592
2593         /* Use VBLANK interrupt */
2594         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2595                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2596                 if (r) {
2597                         DRM_ERROR("Failed to add crtc irq id!\n");
2598                         return r;
2599                 }
2600
2601                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2602                 int_params.irq_source =
2603                         dc_interrupt_to_irq_source(dc, i, 0);
2604
2605                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2606
2607                 c_irq_params->adev = adev;
2608                 c_irq_params->irq_src = int_params.irq_source;
2609
2610                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2611                                 dm_crtc_high_irq, c_irq_params);
2612         }
2613
2614         /* Use VUPDATE interrupt */
2615         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2616                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2617                 if (r) {
2618                         DRM_ERROR("Failed to add vupdate irq id!\n");
2619                         return r;
2620                 }
2621
2622                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2623                 int_params.irq_source =
2624                         dc_interrupt_to_irq_source(dc, i, 0);
2625
2626                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2627
2628                 c_irq_params->adev = adev;
2629                 c_irq_params->irq_src = int_params.irq_source;
2630
2631                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2632                                 dm_vupdate_high_irq, c_irq_params);
2633         }
2634
2635         /* Use GRPH_PFLIP interrupt */
2636         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2637                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2638                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2639                 if (r) {
2640                         DRM_ERROR("Failed to add page flip irq id!\n");
2641                         return r;
2642                 }
2643
2644                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2645                 int_params.irq_source =
2646                         dc_interrupt_to_irq_source(dc, i, 0);
2647
2648                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2649
2650                 c_irq_params->adev = adev;
2651                 c_irq_params->irq_src = int_params.irq_source;
2652
2653                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2654                                 dm_pflip_high_irq, c_irq_params);
2655
2656         }
2657
2658         /* HPD */
2659         r = amdgpu_irq_add_id(adev, client_id,
2660                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2661         if (r) {
2662                 DRM_ERROR("Failed to add hpd irq id!\n");
2663                 return r;
2664         }
2665
2666         register_hpd_handlers(adev);
2667
2668         return 0;
2669 }
2670
2671 #if defined(CONFIG_DRM_AMD_DC_DCN)
2672 /* Register IRQ sources and initialize IRQ callbacks */
2673 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2674 {
2675         struct dc *dc = adev->dm.dc;
2676         struct common_irq_params *c_irq_params;
2677         struct dc_interrupt_params int_params = {0};
2678         int r;
2679         int i;
2680
2681         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2682         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2683
2684         /*
2685          * Actions of amdgpu_irq_add_id():
2686          * 1. Register a set() function with base driver.
2687          *    Base driver will call set() function to enable/disable an
2688          *    interrupt in DC hardware.
2689          * 2. Register amdgpu_dm_irq_handler().
2690          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2691          *    coming from DC hardware.
2692          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2693          *    for acknowledging and handling.
2694          */
2695
2696         /* Use VSTARTUP interrupt */
2697         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2698                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2699                         i++) {
2700                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2701
2702                 if (r) {
2703                         DRM_ERROR("Failed to add crtc irq id!\n");
2704                         return r;
2705                 }
2706
2707                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2708                 int_params.irq_source =
2709                         dc_interrupt_to_irq_source(dc, i, 0);
2710
2711                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2712
2713                 c_irq_params->adev = adev;
2714                 c_irq_params->irq_src = int_params.irq_source;
2715
2716                 amdgpu_dm_irq_register_interrupt(
2717                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2718         }
2719
2720         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2721          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2722          * to trigger at end of each vblank, regardless of state of the lock,
2723          * matching DCE behaviour.
2724          */
2725         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2726              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2727              i++) {
2728                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2729
2730                 if (r) {
2731                         DRM_ERROR("Failed to add vupdate irq id!\n");
2732                         return r;
2733                 }
2734
2735                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2736                 int_params.irq_source =
2737                         dc_interrupt_to_irq_source(dc, i, 0);
2738
2739                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2740
2741                 c_irq_params->adev = adev;
2742                 c_irq_params->irq_src = int_params.irq_source;
2743
2744                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2745                                 dm_vupdate_high_irq, c_irq_params);
2746         }
2747
2748         /* Use GRPH_PFLIP interrupt */
2749         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2750                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2751                         i++) {
2752                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2753                 if (r) {
2754                         DRM_ERROR("Failed to add page flip irq id!\n");
2755                         return r;
2756                 }
2757
2758                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2759                 int_params.irq_source =
2760                         dc_interrupt_to_irq_source(dc, i, 0);
2761
2762                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2763
2764                 c_irq_params->adev = adev;
2765                 c_irq_params->irq_src = int_params.irq_source;
2766
2767                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2768                                 dm_pflip_high_irq, c_irq_params);
2769
2770         }
2771
2772         /* HPD */
2773         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2774                         &adev->hpd_irq);
2775         if (r) {
2776                 DRM_ERROR("Failed to add hpd irq id!\n");
2777                 return r;
2778         }
2779
2780         register_hpd_handlers(adev);
2781
2782         return 0;
2783 }
2784 #endif
2785
2786 /*
2787  * Acquires the lock for the atomic state object and returns
2788  * the new atomic state.
2789  *
2790  * This should only be called during atomic check.
2791  */
2792 static int dm_atomic_get_state(struct drm_atomic_state *state,
2793                                struct dm_atomic_state **dm_state)
2794 {
2795         struct drm_device *dev = state->dev;
2796         struct amdgpu_device *adev = drm_to_adev(dev);
2797         struct amdgpu_display_manager *dm = &adev->dm;
2798         struct drm_private_state *priv_state;
2799
2800         if (*dm_state)
2801                 return 0;
2802
2803         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2804         if (IS_ERR(priv_state))
2805                 return PTR_ERR(priv_state);
2806
2807         *dm_state = to_dm_atomic_state(priv_state);
2808
2809         return 0;
2810 }
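/*
 * Hypothetical caller sketch, not part of the driver: code running during
 * atomic check would acquire the DM private state like this.
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context is now the DC state for this commit
 */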
2811
2812 static struct dm_atomic_state *
2813 dm_atomic_get_new_state(struct drm_atomic_state *state)
2814 {
2815         struct drm_device *dev = state->dev;
2816         struct amdgpu_device *adev = drm_to_adev(dev);
2817         struct amdgpu_display_manager *dm = &adev->dm;
2818         struct drm_private_obj *obj;
2819         struct drm_private_state *new_obj_state;
2820         int i;
2821
2822         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2823                 if (obj->funcs == dm->atomic_obj.funcs)
2824                         return to_dm_atomic_state(new_obj_state);
2825         }
2826
2827         return NULL;
2828 }
2829
2830 static struct drm_private_state *
2831 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2832 {
2833         struct dm_atomic_state *old_state, *new_state;
2834
2835         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2836         if (!new_state)
2837                 return NULL;
2838
2839         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2840
2841         old_state = to_dm_atomic_state(obj->state);
2842
2843         if (old_state && old_state->context)
2844                 new_state->context = dc_copy_state(old_state->context);
2845
2846         if (!new_state->context) {
2847                 kfree(new_state);
2848                 return NULL;
2849         }
2850
2851         return &new_state->base;
2852 }
2853
2854 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2855                                     struct drm_private_state *state)
2856 {
2857         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2858
2859         if (dm_state && dm_state->context)
2860                 dc_release_state(dm_state->context);
2861
2862         kfree(dm_state);
2863 }
2864
2865 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2866         .atomic_duplicate_state = dm_atomic_duplicate_state,
2867         .atomic_destroy_state = dm_atomic_destroy_state,
2868 };
2869
2870 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2871 {
2872         struct dm_atomic_state *state;
2873         int r;
2874
2875         adev->mode_info.mode_config_initialized = true;
2876
2877         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2878         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2879
2880         adev_to_drm(adev)->mode_config.max_width = 16384;
2881         adev_to_drm(adev)->mode_config.max_height = 16384;
2882
2883         adev_to_drm(adev)->mode_config.preferred_depth = 24;
2884         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2885         /* indicates support for immediate flip */
2886         adev_to_drm(adev)->mode_config.async_page_flip = true;
2887
2888         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2889
2890         state = kzalloc(sizeof(*state), GFP_KERNEL);
2891         if (!state)
2892                 return -ENOMEM;
2893
2894         state->context = dc_create_state(adev->dm.dc);
2895         if (!state->context) {
2896                 kfree(state);
2897                 return -ENOMEM;
2898         }
2899
2900         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2901
2902         drm_atomic_private_obj_init(adev_to_drm(adev),
2903                                     &adev->dm.atomic_obj,
2904                                     &state->base,
2905                                     &dm_atomic_state_funcs);
2906
2907         r = amdgpu_display_modeset_create_props(adev);
2908         if (r)
2909                 return r;
2910
2911         r = amdgpu_dm_audio_init(adev);
2912         if (r)
2913                 return r;
2914
2915         return 0;
2916 }
2917
2918 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2919 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2920 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2921
2922 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2923         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2924
2925 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2926 {
2927 #if defined(CONFIG_ACPI)
2928         struct amdgpu_dm_backlight_caps caps;
2929
2930         memset(&caps, 0, sizeof(caps));
2931
2932         if (dm->backlight_caps.caps_valid)
2933                 return;
2934
2935         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2936         if (caps.caps_valid) {
2937                 dm->backlight_caps.caps_valid = true;
2938                 if (caps.aux_support)
2939                         return;
2940                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2941                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2942         } else {
2943                 dm->backlight_caps.min_input_signal =
2944                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2945                 dm->backlight_caps.max_input_signal =
2946                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2947         }
2948 #else
2949         if (dm->backlight_caps.aux_support)
2950                 return;
2951
2952         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2953         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2954 #endif
2955 }
2956
2957 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2958 {
2959         bool rc;
2960
2961         if (!link)
2962                 return 1;
2963
2964         rc = dc_link_set_backlight_level_nits(link, true, brightness,
2965                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2966
2967         return rc ? 0 : 1;
2968 }
2969
2970 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
2971                                 unsigned *min, unsigned *max)
2972 {
2973         if (!caps)
2974                 return 0;
2975
2976         if (caps->aux_support) {
2977                 // Firmware limits are in nits, DC API wants millinits.
2978                 *max = 1000 * caps->aux_max_input_signal;
2979                 *min = 1000 * caps->aux_min_input_signal;
2980         } else {
2981                 // Firmware limits are 8-bit, PWM control is 16-bit.
2982                 *max = 0x101 * caps->max_input_signal;
2983                 *min = 0x101 * caps->min_input_signal;
2984         }
2985         return 1;
2986 }
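/*
 * Worked numbers, illustrative only: 0x101 = 257 maps the 8-bit firmware
 * limits onto the full 16-bit PWM range exactly, since 255 * 0x101 = 65535.
 * With the defaults defined above, min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535.
 */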
2987
2988 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
2989                                         uint32_t brightness)
2990 {
2991         unsigned min, max;
2992
2993         if (!get_brightness_range(caps, &min, &max))
2994                 return brightness;
2995
2996         // Rescale 0..255 to min..max
2997         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
2998                                        AMDGPU_MAX_BL_LEVEL);
2999 }
3000
3001 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3002                                       uint32_t brightness)
3003 {
3004         unsigned min, max;
3005
3006         if (!get_brightness_range(caps, &min, &max))
3007                 return brightness;
3008
3009         if (brightness < min)
3010                 return 0;
3011         // Rescale min..max to 0..255
3012         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3013                                  max - min);
3014 }
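/*
 * Worked round trip, illustrative only, using the PWM range from the
 * example above (min = 3084, max = 65535, so max - min = 62451):
 *
 *	convert_brightness_from_user(caps, 128)
 *		= 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432
 *	convert_brightness_to_user(caps, 34432)
 *		= DIV_ROUND_CLOSEST(255 * (34432 - 3084), 62451) = 128
 *
 * so a brightness value written through sysfs survives the round trip.
 */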
3015
3016 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3017 {
3018         struct amdgpu_display_manager *dm = bl_get_data(bd);
3019         struct amdgpu_dm_backlight_caps caps;
3020         struct dc_link *link = NULL;
3021         u32 brightness;
3022         bool rc;
3023
3024         amdgpu_dm_update_backlight_caps(dm);
3025         caps = dm->backlight_caps;
3026
3027         link = (struct dc_link *)dm->backlight_link;
3028
3029         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3030         // Change brightness based on AUX property
3031         if (caps.aux_support)
3032                 return set_backlight_via_aux(link, brightness);
3033
3034         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3035
3036         return rc ? 0 : 1;
3037 }
3038
3039 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3040 {
3041         struct amdgpu_display_manager *dm = bl_get_data(bd);
3042         int ret = dc_link_get_backlight_level(dm->backlight_link);
3043
3044         if (ret == DC_ERROR_UNEXPECTED)
3045                 return bd->props.brightness;
3046         return convert_brightness_to_user(&dm->backlight_caps, ret);
3047 }
3048
3049 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3050         .options = BL_CORE_SUSPENDRESUME,
3051         .get_brightness = amdgpu_dm_backlight_get_brightness,
3052         .update_status  = amdgpu_dm_backlight_update_status,
3053 };
3054
3055 static void
3056 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3057 {
3058         char bl_name[16];
3059         struct backlight_properties props = { 0 };
3060
3061         amdgpu_dm_update_backlight_caps(dm);
3062
3063         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3064         props.brightness = AMDGPU_MAX_BL_LEVEL;
3065         props.type = BACKLIGHT_RAW;
3066
3067         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3068                  adev_to_drm(dm->adev)->primary->index);
3069
3070         dm->backlight_dev = backlight_device_register(bl_name,
3071                                                       adev_to_drm(dm->adev)->dev,
3072                                                       dm,
3073                                                       &amdgpu_dm_backlight_ops,
3074                                                       &props);
3075
3076         if (IS_ERR(dm->backlight_dev))
3077                 DRM_ERROR("DM: Backlight registration failed!\n");
3078         else
3079                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3080 }
3081
3082 #endif
3083
3084 static int initialize_plane(struct amdgpu_display_manager *dm,
3085                             struct amdgpu_mode_info *mode_info, int plane_id,
3086                             enum drm_plane_type plane_type,
3087                             const struct dc_plane_cap *plane_cap)
3088 {
3089         struct drm_plane *plane;
3090         unsigned long possible_crtcs;
3091         int ret = 0;
3092
3093         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3094         if (!plane) {
3095                 DRM_ERROR("KMS: Failed to allocate plane\n");
3096                 return -ENOMEM;
3097         }
3098         plane->type = plane_type;
3099
3100         /*
3101          * HACK: IGT tests expect that the primary plane for a CRTC
3102          * can only have one possible CRTC. Only expose support for
3103          * any CRTC on planes that are not going to be used as a primary
3104          * plane for a CRTC, such as overlay or underlay planes.
3105          */
3106         possible_crtcs = 1 << plane_id;
3107         if (plane_id >= dm->dc->caps.max_streams)
3108                 possible_crtcs = 0xff;
3109
3110         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3111
3112         if (ret) {
3113                 DRM_ERROR("KMS: Failed to initialize plane\n");
3114                 kfree(plane);
3115                 return ret;
3116         }
3117
3118         if (mode_info)
3119                 mode_info->planes[plane_id] = plane;
3120
3121         return ret;
3122 }
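/*
 * possible_crtcs worked example, illustrative only: with max_streams = 4,
 * primary planes are tied to exactly one CRTC each (plane 0 -> 0x1,
 * plane 1 -> 0x2, plane 2 -> 0x4, plane 3 -> 0x8), while an overlay plane
 * created with plane_id >= 4 gets 0xff and may be placed on any CRTC.
 */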
3123
3124
3125 static void register_backlight_device(struct amdgpu_display_manager *dm,
3126                                       struct dc_link *link)
3127 {
3128 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3129         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3130
3131         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3132             link->type != dc_connection_none) {
3133                 /*
3134                  * Even if registration fails, we should continue with
3135                  * DM initialization, because not having backlight control
3136                  * is better than a black screen.
3137                  */
3138                 amdgpu_dm_register_backlight_device(dm);
3139
3140                 if (dm->backlight_dev)
3141                         dm->backlight_link = link;
3142         }
3143 #endif
3144 }
3145
3146
3147 /*
3148  * In this architecture, the association
3149  * connector -> encoder -> crtc
3150  * is not really required. The crtc and connector will hold the
3151  * display_index as an abstraction to use with the DAL component.
3152  *
3153  * Returns 0 on success
3154  */
3155 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3156 {
3157         struct amdgpu_display_manager *dm = &adev->dm;
3158         int32_t i;
3159         struct amdgpu_dm_connector *aconnector = NULL;
3160         struct amdgpu_encoder *aencoder = NULL;
3161         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3162         uint32_t link_cnt;
3163         int32_t primary_planes;
3164         enum dc_connection_type new_connection_type = dc_connection_none;
3165         const struct dc_plane_cap *plane;
3166
3167         link_cnt = dm->dc->caps.max_links;
3168         if (amdgpu_dm_mode_config_init(dm->adev)) {
3169                 DRM_ERROR("DM: Failed to initialize mode config\n");
3170                 return -EINVAL;
3171         }
3172
3173         /* There is one primary plane per CRTC */
3174         primary_planes = dm->dc->caps.max_streams;
3175         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3176
3177         /*
3178          * Initialize primary planes, implicit planes for legacy IOCTLs.
3179          * Order is reversed to match iteration order in atomic check.
3180          */
3181         for (i = (primary_planes - 1); i >= 0; i--) {
3182                 plane = &dm->dc->caps.planes[i];
3183
3184                 if (initialize_plane(dm, mode_info, i,
3185                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3186                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3187                         goto fail;
3188                 }
3189         }
3190
3191         /*
3192          * Initialize overlay planes, index starting after primary planes.
3193          * These planes have a higher DRM index than the primary planes since
3194          * they should be considered as having a higher z-order.
3195          * Order is reversed to match iteration order in atomic check.
3196          *
3197          * Only support DCN for now, and only expose one so we don't encourage
3198          * userspace to use up all the pipes.
3199          */
3200         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3201                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3202
3203                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3204                         continue;
3205
3206                 if (!plane->blends_with_above || !plane->blends_with_below)
3207                         continue;
3208
3209                 if (!plane->pixel_format_support.argb8888)
3210                         continue;
3211
3212                 if (initialize_plane(dm, NULL, primary_planes + i,
3213                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3214                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3215                         goto fail;
3216                 }
3217
3218                 /* Only create one overlay plane. */
3219                 break;
3220         }
3221
3222         for (i = 0; i < dm->dc->caps.max_streams; i++)
3223                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3224                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3225                         goto fail;
3226                 }
3227
3228         dm->display_indexes_num = dm->dc->caps.max_streams;
3229
3230         /* Loop over all connectors on the board */
3231         for (i = 0; i < link_cnt; i++) {
3232                 struct dc_link *link = NULL;
3233
3234                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3235                         DRM_ERROR(
3236                                 "KMS: Cannot support more than %d display indexes\n",
3237                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3238                         continue;
3239                 }
3240
3241                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3242                 if (!aconnector)
3243                         goto fail;
3244
3245                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3246                 if (!aencoder)
3247                         goto fail;
3248
3249                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3250                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3251                         goto fail;
3252                 }
3253
3254                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3255                         DRM_ERROR("KMS: Failed to initialize connector\n");
3256                         goto fail;
3257                 }
3258
3259                 link = dc_get_link_at_index(dm->dc, i);
3260
3261                 if (!dc_link_detect_sink(link, &new_connection_type))
3262                         DRM_ERROR("KMS: Failed to detect connector\n");
3263
3264                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3265                         emulated_link_detect(link);
3266                         amdgpu_dm_update_connector_after_detect(aconnector);
3267
3268                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3269                         amdgpu_dm_update_connector_after_detect(aconnector);
3270                         register_backlight_device(dm, link);
3271                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3272                                 amdgpu_dm_set_psr_caps(link);
3273                 }
3274
3275
3276         }
3277
3278         /* Software is initialized. Now we can register interrupt handlers. */
3279         switch (adev->asic_type) {
3280 #if defined(CONFIG_DRM_AMD_DC_SI)
3281         case CHIP_TAHITI:
3282         case CHIP_PITCAIRN:
3283         case CHIP_VERDE:
3284         case CHIP_OLAND:
3285                 if (dce60_register_irq_handlers(dm->adev)) {
3286                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3287                         goto fail;
3288                 }
3289                 break;
3290 #endif
3291         case CHIP_BONAIRE:
3292         case CHIP_HAWAII:
3293         case CHIP_KAVERI:
3294         case CHIP_KABINI:
3295         case CHIP_MULLINS:
3296         case CHIP_TONGA:
3297         case CHIP_FIJI:
3298         case CHIP_CARRIZO:
3299         case CHIP_STONEY:
3300         case CHIP_POLARIS11:
3301         case CHIP_POLARIS10:
3302         case CHIP_POLARIS12:
3303         case CHIP_VEGAM:
3304         case CHIP_VEGA10:
3305         case CHIP_VEGA12:
3306         case CHIP_VEGA20:
3307                 if (dce110_register_irq_handlers(dm->adev)) {
3308                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3309                         goto fail;
3310                 }
3311                 break;
3312 #if defined(CONFIG_DRM_AMD_DC_DCN)
3313         case CHIP_RAVEN:
3314         case CHIP_NAVI12:
3315         case CHIP_NAVI10:
3316         case CHIP_NAVI14:
3317         case CHIP_RENOIR:
3318 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3319         case CHIP_SIENNA_CICHLID:
3320         case CHIP_NAVY_FLOUNDER:
3321 #endif
3322                 if (dcn10_register_irq_handlers(dm->adev)) {
3323                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3324                         goto fail;
3325                 }
3326                 break;
3327 #endif
3328         default:
3329                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3330                 goto fail;
3331         }
3332
3333         /* No userspace support. */
3334         dm->dc->debug.disable_tri_buf = true;
3335
3336         return 0;
3337 fail:
3338         kfree(aencoder);
3339         kfree(aconnector);
3340
3341         return -EINVAL;
3342 }
3343
3344 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3345 {
3346         drm_mode_config_cleanup(dm->ddev);
3347         drm_atomic_private_obj_fini(&dm->atomic_obj);
3349 }
3350
3351 /******************************************************************************
3352  * amdgpu_display_funcs functions
3353  *****************************************************************************/
3354
3355 /*
3356  * dm_bandwidth_update - program display watermarks
3357  *
3358  * @adev: amdgpu_device pointer
3359  *
3360  * Calculate and program the display watermarks and line buffer allocation.
3361  */
3362 static void dm_bandwidth_update(struct amdgpu_device *adev)
3363 {
3364         /* TODO: implement later */
3365 }
3366
3367 static const struct amdgpu_display_funcs dm_display_funcs = {
3368         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3369         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3370         .backlight_set_level = NULL, /* never called for DC */
3371         .backlight_get_level = NULL, /* never called for DC */
3372         .hpd_sense = NULL,/* called unconditionally */
3373         .hpd_set_polarity = NULL, /* called unconditionally */
3374         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3375         .page_flip_get_scanoutpos =
3376                 dm_crtc_get_scanoutpos,/* called unconditionally */
3377         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3378         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3379 };
3380
3381 #if defined(CONFIG_DEBUG_KERNEL_DC)
3382
3383 static ssize_t s3_debug_store(struct device *device,
3384                               struct device_attribute *attr,
3385                               const char *buf,
3386                               size_t count)
3387 {
3388         int ret;
3389         int s3_state;
3390         struct drm_device *drm_dev = dev_get_drvdata(device);
3391         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3392
3393         ret = kstrtoint(buf, 0, &s3_state);
3394
3395         if (ret == 0) {
3396                 if (s3_state) {
3397                         dm_resume(adev);
3398                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3399                 } else
3400                         dm_suspend(adev);
3401         }
3402
3403         return ret == 0 ? count : 0;
3404 }
3405
3406 DEVICE_ATTR_WO(s3_debug);
3407
3408 #endif
3409
3410 static int dm_early_init(void *handle)
3411 {
3412         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3413
3414         switch (adev->asic_type) {
3415 #if defined(CONFIG_DRM_AMD_DC_SI)
3416         case CHIP_TAHITI:
3417         case CHIP_PITCAIRN:
3418         case CHIP_VERDE:
3419                 adev->mode_info.num_crtc = 6;
3420                 adev->mode_info.num_hpd = 6;
3421                 adev->mode_info.num_dig = 6;
3422                 break;
3423         case CHIP_OLAND:
3424                 adev->mode_info.num_crtc = 2;
3425                 adev->mode_info.num_hpd = 2;
3426                 adev->mode_info.num_dig = 2;
3427                 break;
3428 #endif
3429         case CHIP_BONAIRE:
3430         case CHIP_HAWAII:
3431                 adev->mode_info.num_crtc = 6;
3432                 adev->mode_info.num_hpd = 6;
3433                 adev->mode_info.num_dig = 6;
3434                 break;
3435         case CHIP_KAVERI:
3436                 adev->mode_info.num_crtc = 4;
3437                 adev->mode_info.num_hpd = 6;
3438                 adev->mode_info.num_dig = 7;
3439                 break;
3440         case CHIP_KABINI:
3441         case CHIP_MULLINS:
3442                 adev->mode_info.num_crtc = 2;
3443                 adev->mode_info.num_hpd = 6;
3444                 adev->mode_info.num_dig = 6;
3445                 break;
3446         case CHIP_FIJI:
3447         case CHIP_TONGA:
3448                 adev->mode_info.num_crtc = 6;
3449                 adev->mode_info.num_hpd = 6;
3450                 adev->mode_info.num_dig = 7;
3451                 break;
3452         case CHIP_CARRIZO:
3453                 adev->mode_info.num_crtc = 3;
3454                 adev->mode_info.num_hpd = 6;
3455                 adev->mode_info.num_dig = 9;
3456                 break;
3457         case CHIP_STONEY:
3458                 adev->mode_info.num_crtc = 2;
3459                 adev->mode_info.num_hpd = 6;
3460                 adev->mode_info.num_dig = 9;
3461                 break;
3462         case CHIP_POLARIS11:
3463         case CHIP_POLARIS12:
3464                 adev->mode_info.num_crtc = 5;
3465                 adev->mode_info.num_hpd = 5;
3466                 adev->mode_info.num_dig = 5;
3467                 break;
3468         case CHIP_POLARIS10:
3469         case CHIP_VEGAM:
3470                 adev->mode_info.num_crtc = 6;
3471                 adev->mode_info.num_hpd = 6;
3472                 adev->mode_info.num_dig = 6;
3473                 break;
3474         case CHIP_VEGA10:
3475         case CHIP_VEGA12:
3476         case CHIP_VEGA20:
3477                 adev->mode_info.num_crtc = 6;
3478                 adev->mode_info.num_hpd = 6;
3479                 adev->mode_info.num_dig = 6;
3480                 break;
3481 #if defined(CONFIG_DRM_AMD_DC_DCN)
3482         case CHIP_RAVEN:
3483                 adev->mode_info.num_crtc = 4;
3484                 adev->mode_info.num_hpd = 4;
3485                 adev->mode_info.num_dig = 4;
3486                 break;
3487 #endif
3488         case CHIP_NAVI10:
3489         case CHIP_NAVI12:
3490 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3491         case CHIP_SIENNA_CICHLID:
3492         case CHIP_NAVY_FLOUNDER:
3493 #endif
3494                 adev->mode_info.num_crtc = 6;
3495                 adev->mode_info.num_hpd = 6;
3496                 adev->mode_info.num_dig = 6;
3497                 break;
3498         case CHIP_NAVI14:
3499                 adev->mode_info.num_crtc = 5;
3500                 adev->mode_info.num_hpd = 5;
3501                 adev->mode_info.num_dig = 5;
3502                 break;
3503         case CHIP_RENOIR:
3504                 adev->mode_info.num_crtc = 4;
3505                 adev->mode_info.num_hpd = 4;
3506                 adev->mode_info.num_dig = 4;
3507                 break;
3508         default:
3509                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3510                 return -EINVAL;
3511         }
3512
3513         amdgpu_dm_set_irq_funcs(adev);
3514
3515         if (adev->mode_info.funcs == NULL)
3516                 adev->mode_info.funcs = &dm_display_funcs;
3517
3518         /*
3519          * Note: Do NOT change adev->audio_endpt_rreg and
3520          * adev->audio_endpt_wreg because they are initialised in
3521          * amdgpu_device_init()
3522          */
3523 #if defined(CONFIG_DEBUG_KERNEL_DC)
3524         device_create_file(
3525                 adev_to_drm(adev)->dev,
3526                 &dev_attr_s3_debug);
3527 #endif
3528
3529         return 0;
3530 }
3531
3532 static bool modeset_required(struct drm_crtc_state *crtc_state,
3533                              struct dc_stream_state *new_stream,
3534                              struct dc_stream_state *old_stream)
3535 {
3536         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3537 }
3538
3539 static bool modereset_required(struct drm_crtc_state *crtc_state)
3540 {
3541         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3542 }
3543
3544 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3545 {
3546         drm_encoder_cleanup(encoder);
3547         kfree(encoder);
3548 }
3549
3550 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3551         .destroy = amdgpu_dm_encoder_destroy,
3552 };
3553
3554
3555 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3556                                 struct dc_scaling_info *scaling_info)
3557 {
3558         int scale_w, scale_h;
3559
3560         memset(scaling_info, 0, sizeof(*scaling_info));
3561
3562         /* Source is 16.16 fixed point, but we ignore the fractional part for now... */
3563         scaling_info->src_rect.x = state->src_x >> 16;
3564         scaling_info->src_rect.y = state->src_y >> 16;
3565
3566         scaling_info->src_rect.width = state->src_w >> 16;
3567         if (scaling_info->src_rect.width == 0)
3568                 return -EINVAL;
3569
3570         scaling_info->src_rect.height = state->src_h >> 16;
3571         if (scaling_info->src_rect.height == 0)
3572                 return -EINVAL;
3573
3574         scaling_info->dst_rect.x = state->crtc_x;
3575         scaling_info->dst_rect.y = state->crtc_y;
3576
3577         if (state->crtc_w == 0)
3578                 return -EINVAL;
3579
3580         scaling_info->dst_rect.width = state->crtc_w;
3581
3582         if (state->crtc_h == 0)
3583                 return -EINVAL;
3584
3585         scaling_info->dst_rect.height = state->crtc_h;
3586
3587         /* DRM doesn't specify clipping on destination output. */
3588         scaling_info->clip_rect = scaling_info->dst_rect;
3589
3590         /* TODO: Validate scaling per-format with DC plane caps */
3591         scale_w = scaling_info->dst_rect.width * 1000 /
3592                   scaling_info->src_rect.width;
3593
3594         if (scale_w < 250 || scale_w > 16000)
3595                 return -EINVAL;
3596
3597         scale_h = scaling_info->dst_rect.height * 1000 /
3598                   scaling_info->src_rect.height;
3599
3600         if (scale_h < 250 || scale_h > 16000)
3601                 return -EINVAL;
3602
3603         /*
3604          * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3605          * assume reasonable defaults based on the format.
3606          */
3607
3608         return 0;
3609 }
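/*
 * Illustrative numbers, not driver code: DRM source coordinates are 16.16
 * fixed point, so src_w = 1920 << 16 yields src_rect.width = 1920. Scaling
 * that source to a 960-wide destination gives
 * scale_w = 960 * 1000 / 1920 = 500, i.e. 0.5x, inside the accepted
 * 250..16000 (0.25x..16x) window checked above.
 */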
3610
3611 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3612                        uint64_t *tiling_flags, bool *tmz_surface)
3613 {
3614         struct amdgpu_bo *rbo;
3615         int r;
3616
3617         if (!amdgpu_fb) {
3618                 *tiling_flags = 0;
3619                 *tmz_surface = false;
3620                 return 0;
3621         }
3622
3623         rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3624         r = amdgpu_bo_reserve(rbo, false);
3625
3626         if (unlikely(r)) {
3627                 /* Don't show error message when returning -ERESTARTSYS */
3628                 if (r != -ERESTARTSYS)
3629                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3630                 return r;
3631         }
3632
3633         if (tiling_flags)
3634                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3635
3636         if (tmz_surface)
3637                 *tmz_surface = amdgpu_bo_encrypted(rbo);
3638
3639         amdgpu_bo_unreserve(rbo);
3640
3641         return r;
3642 }
3643
3644 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3645 {
3646         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3647
3648         return offset ? (address + offset * 256) : 0;
3649 }
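/*
 * Worked example, illustrative only: DCC_OFFSET_256B is stored in units of
 * 256 bytes, so an offset field of 4096 places the DCC metadata at
 * address + 4096 * 256 = address + 1 MiB, while an offset of 0 means the
 * surface carries no DCC metadata and 0 is returned.
 */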
3650
3651 static int
3652 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3653                           const struct amdgpu_framebuffer *afb,
3654                           const enum surface_pixel_format format,
3655                           const enum dc_rotation_angle rotation,
3656                           const struct plane_size *plane_size,
3657                           const union dc_tiling_info *tiling_info,
3658                           const uint64_t info,
3659                           struct dc_plane_dcc_param *dcc,
3660                           struct dc_plane_address *address,
3661                           bool force_disable_dcc)
3662 {
3663         struct dc *dc = adev->dm.dc;
3664         struct dc_dcc_surface_param input;
3665         struct dc_surface_dcc_cap output;
3666         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3667         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3668         uint64_t dcc_address;
3669
3670         memset(&input, 0, sizeof(input));
3671         memset(&output, 0, sizeof(output));
3672
3673         if (force_disable_dcc)
3674                 return 0;
3675
3676         if (!offset)
3677                 return 0;
3678
3679         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3680                 return 0;
3681
3682         if (!dc->cap_funcs.get_dcc_compression_cap)
3683                 return -EINVAL;
3684
3685         input.format = format;
3686         input.surface_size.width = plane_size->surface_size.width;
3687         input.surface_size.height = plane_size->surface_size.height;
3688         input.swizzle_mode = tiling_info->gfx9.swizzle;
3689
3690         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3691                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3692         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3693                 input.scan = SCAN_DIRECTION_VERTICAL;
3694
3695         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3696                 return -EINVAL;
3697
3698         if (!output.capable)
3699                 return -EINVAL;
3700
3701         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3702                 return -EINVAL;
3703
3704         dcc->enable = 1;
3705         dcc->meta_pitch =
3706                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3707         dcc->independent_64b_blks = i64b;
3708
3709         dcc_address = get_dcc_address(afb->address, info);
3710         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3711         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3712
3713         return 0;
3714 }
3715
3716 static int
3717 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3718                              const struct amdgpu_framebuffer *afb,
3719                              const enum surface_pixel_format format,
3720                              const enum dc_rotation_angle rotation,
3721                              const uint64_t tiling_flags,
3722                              union dc_tiling_info *tiling_info,
3723                              struct plane_size *plane_size,
3724                              struct dc_plane_dcc_param *dcc,
3725                              struct dc_plane_address *address,
3726                              bool tmz_surface,
3727                              bool force_disable_dcc)
3728 {
3729         const struct drm_framebuffer *fb = &afb->base;
3730         int ret;
3731
3732         memset(tiling_info, 0, sizeof(*tiling_info));
3733         memset(plane_size, 0, sizeof(*plane_size));
3734         memset(dcc, 0, sizeof(*dcc));
3735         memset(address, 0, sizeof(*address));
3736
3737         address->tmz_surface = tmz_surface;
3738
3739         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3740                 plane_size->surface_size.x = 0;
3741                 plane_size->surface_size.y = 0;
3742                 plane_size->surface_size.width = fb->width;
3743                 plane_size->surface_size.height = fb->height;
3744                 plane_size->surface_pitch =
3745                         fb->pitches[0] / fb->format->cpp[0];
3746
3747                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3748                 address->grph.addr.low_part = lower_32_bits(afb->address);
3749                 address->grph.addr.high_part = upper_32_bits(afb->address);
3750         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3751                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3752
3753                 plane_size->surface_size.x = 0;
3754                 plane_size->surface_size.y = 0;
3755                 plane_size->surface_size.width = fb->width;
3756                 plane_size->surface_size.height = fb->height;
3757                 plane_size->surface_pitch =
3758                         fb->pitches[0] / fb->format->cpp[0];
3759
3760                 plane_size->chroma_size.x = 0;
3761                 plane_size->chroma_size.y = 0;
3762                 /* TODO: set these based on surface format */
3763                 plane_size->chroma_size.width = fb->width / 2;
3764                 plane_size->chroma_size.height = fb->height / 2;
3765
3766                 plane_size->chroma_pitch =
3767                         fb->pitches[1] / fb->format->cpp[1];
3768
3769                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3770                 address->video_progressive.luma_addr.low_part =
3771                         lower_32_bits(afb->address);
3772                 address->video_progressive.luma_addr.high_part =
3773                         upper_32_bits(afb->address);
3774                 address->video_progressive.chroma_addr.low_part =
3775                         lower_32_bits(chroma_addr);
3776                 address->video_progressive.chroma_addr.high_part =
3777                         upper_32_bits(chroma_addr);
3778         }
3779
3780         /* Fill GFX8 params */
3781         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3782                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3783
3784                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3785                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3786                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3787                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3788                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3789
3790                 /* XXX fix me for VI */
3791                 tiling_info->gfx8.num_banks = num_banks;
3792                 tiling_info->gfx8.array_mode =
3793                                 DC_ARRAY_2D_TILED_THIN1;
3794                 tiling_info->gfx8.tile_split = tile_split;
3795                 tiling_info->gfx8.bank_width = bankw;
3796                 tiling_info->gfx8.bank_height = bankh;
3797                 tiling_info->gfx8.tile_aspect = mtaspect;
3798                 tiling_info->gfx8.tile_mode =
3799                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3800         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3801                         == DC_ARRAY_1D_TILED_THIN1) {
3802                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3803         }
3804
3805         tiling_info->gfx8.pipe_config =
3806                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3807
3808         if (adev->asic_type == CHIP_VEGA10 ||
3809             adev->asic_type == CHIP_VEGA12 ||
3810             adev->asic_type == CHIP_VEGA20 ||
3811             adev->asic_type == CHIP_NAVI10 ||
3812             adev->asic_type == CHIP_NAVI14 ||
3813             adev->asic_type == CHIP_NAVI12 ||
3814 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3815                 adev->asic_type == CHIP_SIENNA_CICHLID ||
3816                 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3817 #endif
3818             adev->asic_type == CHIP_RENOIR ||
3819             adev->asic_type == CHIP_RAVEN) {
3820                 /* Fill GFX9 params */
3821                 tiling_info->gfx9.num_pipes =
3822                         adev->gfx.config.gb_addr_config_fields.num_pipes;
3823                 tiling_info->gfx9.num_banks =
3824                         adev->gfx.config.gb_addr_config_fields.num_banks;
3825                 tiling_info->gfx9.pipe_interleave =
3826                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3827                 tiling_info->gfx9.num_shader_engines =
3828                         adev->gfx.config.gb_addr_config_fields.num_se;
3829                 tiling_info->gfx9.max_compressed_frags =
3830                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3831                 tiling_info->gfx9.num_rb_per_se =
3832                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3833                 tiling_info->gfx9.swizzle =
3834                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3835                 tiling_info->gfx9.shaderEnable = 1;
3836
3837 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3838                 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3839                     adev->asic_type == CHIP_NAVY_FLOUNDER)
3840                         tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3841 #endif
3842                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3843                                                 plane_size, tiling_info,
3844                                                 tiling_flags, dcc, address,
3845                                                 force_disable_dcc);
3846                 if (ret)
3847                         return ret;
3848         }
3849
3850         return 0;
3851 }
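/*
 * Illustrative note: AMDGPU_TILING_GET() is a plain shift-and-mask over the
 * 64-bit tiling_flags word, e.g. the GFX9 swizzle mode read above expands to
 * roughly
 *
 *	(tiling_flags >> AMDGPU_TILING_SWIZZLE_MODE_SHIFT) &
 *		AMDGPU_TILING_SWIZZLE_MODE_MASK
 *
 * so the bankw/bankh/tile_split/swizzle reads here are cheap bitfield
 * extractions, not register accesses.
 */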
3852
3853 static void
3854 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3855                                bool *per_pixel_alpha, bool *global_alpha,
3856                                int *global_alpha_value)
3857 {
3858         *per_pixel_alpha = false;
3859         *global_alpha = false;
3860         *global_alpha_value = 0xff;
3861
3862         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3863                 return;
3864
3865         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3866                 static const uint32_t alpha_formats[] = {
3867                         DRM_FORMAT_ARGB8888,
3868                         DRM_FORMAT_RGBA8888,
3869                         DRM_FORMAT_ABGR8888,
3870                 };
3871                 uint32_t format = plane_state->fb->format->format;
3872                 unsigned int i;
3873
3874                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3875                         if (format == alpha_formats[i]) {
3876                                 *per_pixel_alpha = true;
3877                                 break;
3878                         }
3879                 }
3880         }
3881
3882         if (plane_state->alpha < 0xffff) {
3883                 *global_alpha = true;
3884                 *global_alpha_value = plane_state->alpha >> 8;
3885         }
3886 }
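/*
 * Illustrative numbers: the DRM plane "alpha" property is 16 bit, so a
 * half-transparent overlay with alpha = 0x8080 is reduced to the 8-bit
 * global_alpha_value 0x80, while the opaque default 0xffff leaves
 * *global_alpha false and the value at 0xff.
 */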
3887
3888 static int
3889 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3890                             const enum surface_pixel_format format,
3891                             enum dc_color_space *color_space)
3892 {
3893         bool full_range;
3894
3895         *color_space = COLOR_SPACE_SRGB;
3896
3897         /* DRM color properties only affect non-RGB formats. */
3898         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3899                 return 0;
3900
3901         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3902
3903         switch (plane_state->color_encoding) {
3904         case DRM_COLOR_YCBCR_BT601:
3905                 if (full_range)
3906                         *color_space = COLOR_SPACE_YCBCR601;
3907                 else
3908                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3909                 break;
3910
3911         case DRM_COLOR_YCBCR_BT709:
3912                 if (full_range)
3913                         *color_space = COLOR_SPACE_YCBCR709;
3914                 else
3915                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3916                 break;
3917
3918         case DRM_COLOR_YCBCR_BT2020:
3919                 if (full_range)
3920                         *color_space = COLOR_SPACE_2020_YCBCR;
3921                 else
3922                         return -EINVAL;
3923                 break;
3924
3925         default:
3926                 return -EINVAL;
3927         }
3928
3929         return 0;
3930 }
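/*
 * Illustrative mapping, not driver code: an NV12 plane with
 * color_encoding = DRM_COLOR_YCBCR_BT709 and
 * color_range = DRM_COLOR_YCBCR_LIMITED_RANGE resolves to
 * COLOR_SPACE_YCBCR709_LIMITED, while any RGB format keeps the
 * COLOR_SPACE_SRGB default regardless of the plane properties.
 */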
3931
3932 static int
3933 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3934                             const struct drm_plane_state *plane_state,
3935                             const uint64_t tiling_flags,
3936                             struct dc_plane_info *plane_info,
3937                             struct dc_plane_address *address,
3938                             bool tmz_surface,
3939                             bool force_disable_dcc)
3940 {
3941         const struct drm_framebuffer *fb = plane_state->fb;
3942         const struct amdgpu_framebuffer *afb =
3943                 to_amdgpu_framebuffer(plane_state->fb);
3944         struct drm_format_name_buf format_name;
3945         int ret;
3946
3947         memset(plane_info, 0, sizeof(*plane_info));
3948
3949         switch (fb->format->format) {
3950         case DRM_FORMAT_C8:
3951                 plane_info->format =
3952                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3953                 break;
3954         case DRM_FORMAT_RGB565:
3955                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3956                 break;
3957         case DRM_FORMAT_XRGB8888:
3958         case DRM_FORMAT_ARGB8888:
3959                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3960                 break;
3961         case DRM_FORMAT_XRGB2101010:
3962         case DRM_FORMAT_ARGB2101010:
3963                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3964                 break;
3965         case DRM_FORMAT_XBGR2101010:
3966         case DRM_FORMAT_ABGR2101010:
3967                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3968                 break;
3969         case DRM_FORMAT_XBGR8888:
3970         case DRM_FORMAT_ABGR8888:
3971                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3972                 break;
3973         case DRM_FORMAT_NV21:
3974                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3975                 break;
3976         case DRM_FORMAT_NV12:
3977                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3978                 break;
3979         case DRM_FORMAT_P010:
3980                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3981                 break;
3982         case DRM_FORMAT_XRGB16161616F:
3983         case DRM_FORMAT_ARGB16161616F:
3984                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3985                 break;
3986         case DRM_FORMAT_XBGR16161616F:
3987         case DRM_FORMAT_ABGR16161616F:
3988                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3989                 break;
3990         default:
3991                 DRM_ERROR(
3992                         "Unsupported screen format %s\n",
3993                         drm_get_format_name(fb->format->format, &format_name));
3994                 return -EINVAL;
3995         }
3996
3997         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3998         case DRM_MODE_ROTATE_0:
3999                 plane_info->rotation = ROTATION_ANGLE_0;
4000                 break;
4001         case DRM_MODE_ROTATE_90:
4002                 plane_info->rotation = ROTATION_ANGLE_90;
4003                 break;
4004         case DRM_MODE_ROTATE_180:
4005                 plane_info->rotation = ROTATION_ANGLE_180;
4006                 break;
4007         case DRM_MODE_ROTATE_270:
4008                 plane_info->rotation = ROTATION_ANGLE_270;
4009                 break;
4010         default:
4011                 plane_info->rotation = ROTATION_ANGLE_0;
4012                 break;
4013         }
4014
4015         plane_info->visible = true;
4016         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4017
4018         plane_info->layer_index = 0;
4019
4020         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4021                                           &plane_info->color_space);
4022         if (ret)
4023                 return ret;
4024
4025         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4026                                            plane_info->rotation, tiling_flags,
4027                                            &plane_info->tiling_info,
4028                                            &plane_info->plane_size,
4029                                            &plane_info->dcc, address, tmz_surface,
4030                                            force_disable_dcc);
4031         if (ret)
4032                 return ret;
4033
4034         fill_blending_from_plane_state(
4035                 plane_state, &plane_info->per_pixel_alpha,
4036                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4037
4038         return 0;
4039 }
4040
4041 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4042                                     struct dc_plane_state *dc_plane_state,
4043                                     struct drm_plane_state *plane_state,
4044                                     struct drm_crtc_state *crtc_state)
4045 {
4046         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4047         struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4048         struct dc_scaling_info scaling_info;
4049         struct dc_plane_info plane_info;
4050         int ret;
4051         bool force_disable_dcc = false;
4052
4053         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4054         if (ret)
4055                 return ret;
4056
4057         dc_plane_state->src_rect = scaling_info.src_rect;
4058         dc_plane_state->dst_rect = scaling_info.dst_rect;
4059         dc_plane_state->clip_rect = scaling_info.clip_rect;
4060         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4061
4062         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4063         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4064                                           dm_plane_state->tiling_flags,
4065                                           &plane_info,
4066                                           &dc_plane_state->address,
4067                                           dm_plane_state->tmz_surface,
4068                                           force_disable_dcc);
4069         if (ret)
4070                 return ret;
4071
4072         dc_plane_state->format = plane_info.format;
4073         dc_plane_state->color_space = plane_info.color_space;
4075         dc_plane_state->plane_size = plane_info.plane_size;
4076         dc_plane_state->rotation = plane_info.rotation;
4077         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4078         dc_plane_state->stereo_format = plane_info.stereo_format;
4079         dc_plane_state->tiling_info = plane_info.tiling_info;
4080         dc_plane_state->visible = plane_info.visible;
4081         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4082         dc_plane_state->global_alpha = plane_info.global_alpha;
4083         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4084         dc_plane_state->dcc = plane_info.dcc;
4085         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4086
4087         /*
4088          * Always set input transfer function, since plane state is refreshed
4089          * every time.
4090          */
4091         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4092         if (ret)
4093                 return ret;
4094
4095         return 0;
4096 }
4097
4098 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4099                                            const struct dm_connector_state *dm_state,
4100                                            struct dc_stream_state *stream)
4101 {
4102         enum amdgpu_rmx_type rmx_type;
4103
4104         struct rect src = { 0 }; /* viewport in composition space */
4105         struct rect dst = { 0 }; /* stream addressable area */
4106
4107         /* no mode. nothing to be done */
4108         if (!mode)
4109                 return;
4110
4111         /* Full screen scaling by default */
4112         src.width = mode->hdisplay;
4113         src.height = mode->vdisplay;
4114         dst.width = stream->timing.h_addressable;
4115         dst.height = stream->timing.v_addressable;
4116
4117         if (dm_state) {
4118                 rmx_type = dm_state->scaling;
4119                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4120                         if (src.width * dst.height <
4121                                         src.height * dst.width) {
4122                                 /* height needs less upscaling/more downscaling */
4123                                 dst.width = src.width *
4124                                                 dst.height / src.height;
4125                         } else {
4126                                 /* width needs less upscaling/more downscaling */
4127                                 dst.height = src.height *
4128                                                 dst.width / src.width;
4129                         }
4130                 } else if (rmx_type == RMX_CENTER) {
4131                         dst = src;
4132                 }
4133
4134                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4135                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4136
4137                 if (dm_state->underscan_enable) {
4138                         dst.x += dm_state->underscan_hborder / 2;
4139                         dst.y += dm_state->underscan_vborder / 2;
4140                         dst.width -= dm_state->underscan_hborder;
4141                         dst.height -= dm_state->underscan_vborder;
4142                 }
4143         }
4144
4145         stream->src = src;
4146         stream->dst = dst;
4147
4148         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4149                         dst.x, dst.y, dst.width, dst.height);
4150
4151 }
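/*
 * Worked example, illustrative only: a 1280x1024 source on a 1920x1080
 * stream with RMX_ASPECT compares 1280 * 1080 = 1382400 against
 * 1024 * 1920 = 1966080; the height needs less upscaling, so the width is
 * limited to dst.width = 1280 * 1080 / 1024 = 1350 and the image is
 * centered at dst.x = (1920 - 1350) / 2 = 285, dst.y = 0, pillarboxing
 * the output.
 */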
4152
4153 static enum dc_color_depth
4154 convert_color_depth_from_display_info(const struct drm_connector *connector,
4155                                       bool is_y420, int requested_bpc)
4156 {
4157         uint8_t bpc;
4158
4159         if (is_y420) {
4160                 bpc = 8;
4161
4162                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4163                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4164                         bpc = 16;
4165                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4166                         bpc = 12;
4167                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4168                         bpc = 10;
4169         } else {
4170                 bpc = (uint8_t)connector->display_info.bpc;
4171                 /* Assume 8 bpc by default if no bpc is specified. */
4172                 bpc = bpc ? bpc : 8;
4173         }
4174
4175         if (requested_bpc > 0) {
4176                 /*
4177                  * Cap display bpc based on the user requested value.
4178                  *
4179                  * The value for state->max_bpc may not be correctly updated
4180                  * depending on when the connector gets added to the state
4181                  * or if this was called outside of atomic check, so it
4182                  * can't be used directly.
4183                  */
4184                 bpc = min_t(u8, bpc, requested_bpc);
4185
4186                 /* Round down to the nearest even number. */
4187                 bpc = bpc - (bpc & 1);
4188         }
4189
4190         switch (bpc) {
4191         case 0:
4192                 /*
4193                  * Temporary workaround: DRM doesn't parse color depth for
4194                  * EDID revisions before 1.4.
4195                  * TODO: Fix EDID parsing.
4196                  */
4197                 return COLOR_DEPTH_888;
4198         case 6:
4199                 return COLOR_DEPTH_666;
4200         case 8:
4201                 return COLOR_DEPTH_888;
4202         case 10:
4203                 return COLOR_DEPTH_101010;
4204         case 12:
4205                 return COLOR_DEPTH_121212;
4206         case 14:
4207                 return COLOR_DEPTH_141414;
4208         case 16:
4209                 return COLOR_DEPTH_161616;
4210         default:
4211                 return COLOR_DEPTH_UNDEFINED;
4212         }
4213 }
4214
4215 static enum dc_aspect_ratio
4216 get_aspect_ratio(const struct drm_display_mode *mode_in)
4217 {
4218         /* 1-1 mapping, since both enums follow the HDMI spec. */
4219         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4220 }
4221
4222 static enum dc_color_space
4223 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4224 {
4225         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4226
4227         switch (dc_crtc_timing->pixel_encoding) {
4228         case PIXEL_ENCODING_YCBCR422:
4229         case PIXEL_ENCODING_YCBCR444:
4230         case PIXEL_ENCODING_YCBCR420:
4231         {
4232                 /*
4233                  * 27030 kHz is the separation point between HDTV and SDTV
4234                  * according to the HDMI spec; we use YCbCr709 and YCbCr601
4235                  * respectively.
4236                  */
4237                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4238                         if (dc_crtc_timing->flags.Y_ONLY)
4239                                 color_space =
4240                                         COLOR_SPACE_YCBCR709_LIMITED;
4241                         else
4242                                 color_space = COLOR_SPACE_YCBCR709;
4243                 } else {
4244                         if (dc_crtc_timing->flags.Y_ONLY)
4245                                 color_space =
4246                                         COLOR_SPACE_YCBCR601_LIMITED;
4247                         else
4248                                 color_space = COLOR_SPACE_YCBCR601;
4249                 }
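                /*
                 * For illustration: 1080p60 has pix_clk_100hz = 1485000
                 * (148.5 MHz), which is above 270300 and thus YCbCr709,
                 * while 480p at 27.0 MHz (270000) falls below and gets
                 * YCbCr601.
                 */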
4250
4251         }
4252         break;
4253         case PIXEL_ENCODING_RGB:
4254                 color_space = COLOR_SPACE_SRGB;
4255                 break;
4256
4257         default:
4258                 WARN_ON(1);
4259                 break;
4260         }
4261
4262         return color_space;
4263 }
4264
4265 static bool adjust_colour_depth_from_display_info(
4266         struct dc_crtc_timing *timing_out,
4267         const struct drm_display_info *info)
4268 {
4269         enum dc_color_depth depth = timing_out->display_color_depth;
4270         int normalized_clk;
4271         do {
4272                 normalized_clk = timing_out->pix_clk_100hz / 10;
4273                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4274                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4275                         normalized_clk /= 2;
4276                 /* Adjust the pixel clock per the HDMI spec based on the colour depth */
4277                 switch (depth) {
4278                 case COLOR_DEPTH_888:
4279                         break;
4280                 case COLOR_DEPTH_101010:
4281                         normalized_clk = (normalized_clk * 30) / 24;
4282                         break;
4283                 case COLOR_DEPTH_121212:
4284                         normalized_clk = (normalized_clk * 36) / 24;
4285                         break;
4286                 case COLOR_DEPTH_161616:
4287                         normalized_clk = (normalized_clk * 48) / 24;
4288                         break;
4289                 default:
4290                         /* The above depths are the only ones valid for HDMI. */
4291                         return false;
4292                 }
4293                 if (normalized_clk <= info->max_tmds_clock) {
4294                         timing_out->display_color_depth = depth;
4295                         return true;
4296                 }
4297         } while (--depth > COLOR_DEPTH_666);
4298         return false;
4299 }
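
/*
 * Worked example for the loop above (hypothetical sink): 4K60 RGB has a
 * normalized clock of 594000 (594 MHz). COLOR_DEPTH_121212 scales that by
 * 36/24 to 891000 and COLOR_DEPTH_101010 by 30/24 to 742500; with a
 * max_tmds_clock of 600000 both fail, so the loop settles on COLOR_DEPTH_888.
 */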
4300
4301 static void fill_stream_properties_from_drm_display_mode(
4302         struct dc_stream_state *stream,
4303         const struct drm_display_mode *mode_in,
4304         const struct drm_connector *connector,
4305         const struct drm_connector_state *connector_state,
4306         const struct dc_stream_state *old_stream,
4307         int requested_bpc)
4308 {
4309         struct dc_crtc_timing *timing_out = &stream->timing;
4310         const struct drm_display_info *info = &connector->display_info;
4311         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4312         struct hdmi_vendor_infoframe hv_frame;
4313         struct hdmi_avi_infoframe avi_frame;
4314
4315         memset(&hv_frame, 0, sizeof(hv_frame));
4316         memset(&avi_frame, 0, sizeof(avi_frame));
4317
4318         timing_out->h_border_left = 0;
4319         timing_out->h_border_right = 0;
4320         timing_out->v_border_top = 0;
4321         timing_out->v_border_bottom = 0;
4322         /* TODO: un-hardcode */
4323         if (drm_mode_is_420_only(info, mode_in)
4324                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4325                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4326         else if (drm_mode_is_420_also(info, mode_in)
4327                         && aconnector->force_yuv420_output)
4328                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4329         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4330                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4331                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4332         else
4333                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4334
4335         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4336         timing_out->display_color_depth = convert_color_depth_from_display_info(
4337                 connector,
4338                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4339                 requested_bpc);
4340         timing_out->scan_type = SCANNING_TYPE_NODATA;
4341         timing_out->hdmi_vic = 0;
4342
4343         if (old_stream) {
4344                 timing_out->vic = old_stream->timing.vic;
4345                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4346                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4347         } else {
4348                 timing_out->vic = drm_match_cea_mode(mode_in);
4349                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4350                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4351                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4352                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4353         }
4354
4355         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4356                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4357                 timing_out->vic = avi_frame.video_code;
4358                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4359                 timing_out->hdmi_vic = hv_frame.vic;
4360         }
4361
4362         timing_out->h_addressable = mode_in->crtc_hdisplay;
4363         timing_out->h_total = mode_in->crtc_htotal;
4364         timing_out->h_sync_width =
4365                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4366         timing_out->h_front_porch =
4367                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4368         timing_out->v_total = mode_in->crtc_vtotal;
4369         timing_out->v_addressable = mode_in->crtc_vdisplay;
4370         timing_out->v_front_porch =
4371                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4372         timing_out->v_sync_width =
4373                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4374         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4375         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
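
        /*
         * For reference, with the standard CEA 1080p60 mode (VIC 16):
         * crtc_hdisplay = 1920, crtc_hsync_start = 2008, crtc_hsync_end =
         * 2052 and crtc_htotal = 2200 give h_front_porch = 88 and
         * h_sync_width = 44; crtc_clock = 148500 kHz gives
         * pix_clk_100hz = 1485000.
         */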
4376
4377         stream->output_color_space = get_output_color_space(timing_out);
4378
4379         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4380         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4381         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4382                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4383                     drm_mode_is_420_also(info, mode_in) &&
4384                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4385                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4386                         adjust_colour_depth_from_display_info(timing_out, info);
4387                 }
4388         }
4389 }
4390
4391 static void fill_audio_info(struct audio_info *audio_info,
4392                             const struct drm_connector *drm_connector,
4393                             const struct dc_sink *dc_sink)
4394 {
4395         int i = 0;
4396         int cea_revision = 0;
4397         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4398
4399         audio_info->manufacture_id = edid_caps->manufacturer_id;
4400         audio_info->product_id = edid_caps->product_id;
4401
4402         cea_revision = drm_connector->display_info.cea_rev;
4403
4404         strscpy(audio_info->display_name,
4405                 edid_caps->display_name,
4406                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4407
4408         if (cea_revision >= 3) {
4409                 audio_info->mode_count = edid_caps->audio_mode_count;
4410
4411                 for (i = 0; i < audio_info->mode_count; ++i) {
4412                         audio_info->modes[i].format_code =
4413                                         (enum audio_format_code)
4414                                         (edid_caps->audio_modes[i].format_code);
4415                         audio_info->modes[i].channel_count =
4416                                         edid_caps->audio_modes[i].channel_count;
4417                         audio_info->modes[i].sample_rates.all =
4418                                         edid_caps->audio_modes[i].sample_rate;
4419                         audio_info->modes[i].sample_size =
4420                                         edid_caps->audio_modes[i].sample_size;
4421                 }
4422         }
4423
4424         audio_info->flags.all = edid_caps->speaker_flags;
4425
4426         /* TODO: We only check for progressive mode; check for interlaced mode too */
4427         if (drm_connector->latency_present[0]) {
4428                 audio_info->video_latency = drm_connector->video_latency[0];
4429                 audio_info->audio_latency = drm_connector->audio_latency[0];
4430         }
4431
4432         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4433
4434 }
4435
4436 static void
4437 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4438                                       struct drm_display_mode *dst_mode)
4439 {
4440         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4441         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4442         dst_mode->crtc_clock = src_mode->crtc_clock;
4443         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4444         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4445         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4446         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4447         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4448         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4449         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4450         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4451         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4452         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4453         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4454 }
4455
4456 static void
4457 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4458                                         const struct drm_display_mode *native_mode,
4459                                         bool scale_enabled)
4460 {
4461         if (scale_enabled) {
4462                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4463         } else if (native_mode->clock == drm_mode->clock &&
4464                         native_mode->htotal == drm_mode->htotal &&
4465                         native_mode->vtotal == drm_mode->vtotal) {
4466                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4467         } else {
4468                 /* neither scaling nor an amdgpu-inserted mode; no need to patch */
4469         }
4470 }
4471
4472 static struct dc_sink *
4473 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4474 {
4475         struct dc_sink_init_data sink_init_data = { 0 };
4476         struct dc_sink *sink = NULL;
4477         sink_init_data.link = aconnector->dc_link;
4478         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4479
4480         sink = dc_sink_create(&sink_init_data);
4481         if (!sink) {
4482                 DRM_ERROR("Failed to create sink!\n");
4483                 return NULL;
4484         }
4485         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4486
4487         return sink;
4488 }
4489
4490 static void set_multisync_trigger_params(
4491                 struct dc_stream_state *stream)
4492 {
4493         if (stream->triggered_crtc_reset.enabled) {
4494                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4495                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4496         }
4497 }
4498
4499 static void set_master_stream(struct dc_stream_state *stream_set[],
4500                               int stream_count)
4501 {
4502         int j, highest_rfr = 0, master_stream = 0;
4503
4504         for (j = 0;  j < stream_count; j++) {
4505                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4506                         int refresh_rate = 0;
4507
4508                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
4509                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
4510                         if (refresh_rate > highest_rfr) {
4511                                 highest_rfr = refresh_rate;
4512                                 master_stream = j;
4513                         }
4514                 }
4515         }
4516         for (j = 0;  j < stream_count; j++) {
4517                 if (stream_set[j])
4518                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4519         }
4520 }
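
/*
 * Sketch of the refresh-rate math above with round numbers: a 1080p60
 * stream has pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125,
 * so refresh_rate = (1485000 * 100) / (2200 * 1125) = 60.
 */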
4521
4522 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4523 {
4524         int i = 0;
4525
4526         if (context->stream_count < 2)
4527                 return;
4528         for (i = 0; i < context->stream_count ; i++) {
4529                 if (!context->streams[i])
4530                         continue;
4531                 /*
4532                  * TODO: add a function to read AMD VSDB bits and set
4533                  * crtc_sync_master.multi_sync_enabled flag.
4534                  * For now it's set to false.
4535                  */
4536                 set_multisync_trigger_params(context->streams[i]);
4537         }
4538         set_master_stream(context->streams, context->stream_count);
4539 }
4540
4541 static struct dc_stream_state *
4542 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4543                        const struct drm_display_mode *drm_mode,
4544                        const struct dm_connector_state *dm_state,
4545                        const struct dc_stream_state *old_stream,
4546                        int requested_bpc)
4547 {
4548         struct drm_display_mode *preferred_mode = NULL;
4549         struct drm_connector *drm_connector;
4550         const struct drm_connector_state *con_state =
4551                 dm_state ? &dm_state->base : NULL;
4552         struct dc_stream_state *stream = NULL;
4553         struct drm_display_mode mode = *drm_mode;
4554         bool native_mode_found = false;
4555         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4556         int mode_refresh;
4557         int preferred_refresh = 0;
4558 #if defined(CONFIG_DRM_AMD_DC_DCN)
4559         struct dsc_dec_dpcd_caps dsc_caps;
4560 #endif
4561         uint32_t link_bandwidth_kbps;
4562
4563         struct dc_sink *sink = NULL;
4564         if (aconnector == NULL) {
4565                 DRM_ERROR("aconnector is NULL!\n");
4566                 return stream;
4567         }
4568
4569         drm_connector = &aconnector->base;
4570
4571         if (!aconnector->dc_sink) {
4572                 sink = create_fake_sink(aconnector);
4573                 if (!sink)
4574                         return stream;
4575         } else {
4576                 sink = aconnector->dc_sink;
4577                 dc_sink_retain(sink);
4578         }
4579
4580         stream = dc_create_stream_for_sink(sink);
4581
4582         if (stream == NULL) {
4583                 DRM_ERROR("Failed to create stream for sink!\n");
4584                 goto finish;
4585         }
4586
4587         stream->dm_stream_context = aconnector;
4588
4589         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4590                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4591
4592         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4593                 /* Search for preferred mode */
4594                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4595                         native_mode_found = true;
4596                         break;
4597                 }
4598         }
4599         if (!native_mode_found)
4600                 preferred_mode = list_first_entry_or_null(
4601                                 &aconnector->base.modes,
4602                                 struct drm_display_mode,
4603                                 head);
4604
4605         mode_refresh = drm_mode_vrefresh(&mode);
4606
4607         if (preferred_mode == NULL) {
4608                 /*
4609                  * This may not be an error: the use case is when we have no
4610                  * usermode calls to reset and set the mode upon hotplug. In this
4611                  * case, we call set mode ourselves to restore the previous mode,
4612                  * and the mode list may not be filled in time.
4613                  */
4614                 DRM_DEBUG_DRIVER("No preferred mode found\n");
4615         } else {
4616                 decide_crtc_timing_for_drm_display_mode(
4617                                 &mode, preferred_mode,
4618                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4619                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4620         }
4621
4622         if (!dm_state)
4623                 drm_mode_set_crtcinfo(&mode, 0);
4624
4625         /*
4626          * If scaling is enabled and the refresh rate didn't change,
4627          * we copy the VIC and polarities of the old timings.
4628          */
4629         if (!scale || mode_refresh != preferred_refresh)
4630                 fill_stream_properties_from_drm_display_mode(stream,
4631                         &mode, &aconnector->base, con_state, NULL, requested_bpc);
4632         else
4633                 fill_stream_properties_from_drm_display_mode(stream,
4634                         &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4635
4636         stream->timing.flags.DSC = 0;
4637
4638         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4639 #if defined(CONFIG_DRM_AMD_DC_DCN)
4640                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4641                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4642                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4643                                       &dsc_caps);
4644 #endif
4645                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4646                                                              dc_link_get_link_cap(aconnector->dc_link));
4647
4648 #if defined(CONFIG_DRM_AMD_DC_DCN)
4649                 if (dsc_caps.is_dsc_supported) {
4650                         /* Set DSC policy according to dsc_clock_en */
4651                         dc_dsc_policy_set_enable_dsc_when_not_needed(aconnector->dsc_settings.dsc_clock_en);
4652
4653                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4654                                                   &dsc_caps,
4655                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4656                                                   link_bandwidth_kbps,
4657                                                   &stream->timing,
4658                                                   &stream->timing.dsc_cfg))
4659                                 stream->timing.flags.DSC = 1;
4660                         /* Overwrite the stream flag if DSC is enabled through debugfs */
4661                         if (aconnector->dsc_settings.dsc_clock_en)
4662                                 stream->timing.flags.DSC = 1;
4663
4664                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
4665                                 stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
4666                                                                         aconnector->dsc_settings.dsc_slice_width);
4667
4668                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
4669                                 stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
4670                                                                         aconnector->dsc_settings.dsc_slice_height);
4671
4672                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4673                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4674                 }
4675 #endif
4676         }
4677
4678         update_stream_scaling_settings(&mode, dm_state, stream);
4679
4680         fill_audio_info(
4681                 &stream->audio_info,
4682                 drm_connector,
4683                 sink);
4684
4685         update_stream_signal(stream, sink);
4686
4687         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4688                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4689         if (stream->link->psr_settings.psr_feature_enabled) {
4690                 /*
4691                  * Should decide whether the stream supports VSC SDP
4692                  * colorimetry capability before building the VSC info packet.
4693                  */
4694                 stream->use_vsc_sdp_for_colorimetry = false;
4695                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4696                         stream->use_vsc_sdp_for_colorimetry =
4697                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4698                 } else {
4699                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4700                                 stream->use_vsc_sdp_for_colorimetry = true;
4701                 }
4702                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4703         }
4704 finish:
4705         dc_sink_release(sink);
4706
4707         return stream;
4708 }
4709
4710 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4711 {
4712         drm_crtc_cleanup(crtc);
4713         kfree(crtc);
4714 }
4715
4716 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4717                                   struct drm_crtc_state *state)
4718 {
4719         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4720
4721         /* TODO: Destroy dc_stream objects once the stream object is flattened */
4722         if (cur->stream)
4723                 dc_stream_release(cur->stream);
4724
4725
4726         __drm_atomic_helper_crtc_destroy_state(state);
4727
4728
4729         kfree(state);
4730 }
4731
4732 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4733 {
4734         struct dm_crtc_state *state;
4735
4736         if (crtc->state)
4737                 dm_crtc_destroy_state(crtc, crtc->state);
4738
4739         state = kzalloc(sizeof(*state), GFP_KERNEL);
4740         if (WARN_ON(!state))
4741                 return;
4742
4743         crtc->state = &state->base;
4744         crtc->state->crtc = crtc;
4745
4746 }
4747
4748 static struct drm_crtc_state *
4749 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4750 {
4751         struct dm_crtc_state *state, *cur;
4752
4753         if (WARN_ON(!crtc->state))
4754                 return NULL;
4755
4756         cur = to_dm_crtc_state(crtc->state);
4757
4758         state = kzalloc(sizeof(*state), GFP_KERNEL);
4759         if (!state)
4760                 return NULL;
4761
4762         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4763
4764         if (cur->stream) {
4765                 state->stream = cur->stream;
4766                 dc_stream_retain(state->stream);
4767         }
4768
4769         state->active_planes = cur->active_planes;
4770         state->vrr_params = cur->vrr_params;
4771         state->vrr_infopacket = cur->vrr_infopacket;
4772         state->abm_level = cur->abm_level;
4773         state->vrr_supported = cur->vrr_supported;
4774         state->freesync_config = cur->freesync_config;
4775         state->crc_src = cur->crc_src;
4776         state->cm_has_degamma = cur->cm_has_degamma;
4777         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4778
4779         /* TODO: Duplicate dc_stream after the stream object is flattened */
4780
4781         return &state->base;
4782 }
4783
4784 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4785 {
4786         enum dc_irq_source irq_source;
4787         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4788         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4789         int rc;
4790
4791         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
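
        /*
         * The addition above works because the per-type DC IRQ sources are
         * laid out consecutively per OTG; e.g. otg_inst = 1 presumably
         * selects the second OTG's VUPDATE source.
         */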
4792
4793         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4794
4795         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4796                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4797         return rc;
4798 }
4799
4800 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4801 {
4802         enum dc_irq_source irq_source;
4803         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4804         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4805         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4806         int rc = 0;
4807
4808         if (enable) {
4809                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4810                 if (amdgpu_dm_vrr_active(acrtc_state))
4811                         rc = dm_set_vupdate_irq(crtc, true);
4812         } else {
4813                 /* vblank irq off -> vupdate irq off */
4814                 rc = dm_set_vupdate_irq(crtc, false);
4815         }
4816
4817         if (rc)
4818                 return rc;
4819
4820         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4821         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4822 }
4823
4824 static int dm_enable_vblank(struct drm_crtc *crtc)
4825 {
4826         return dm_set_vblank(crtc, true);
4827 }
4828
4829 static void dm_disable_vblank(struct drm_crtc *crtc)
4830 {
4831         dm_set_vblank(crtc, false);
4832 }
4833
4834 /* Implemented only the options currently available for the driver */
4835 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4836         .reset = dm_crtc_reset_state,
4837         .destroy = amdgpu_dm_crtc_destroy,
4838         .gamma_set = drm_atomic_helper_legacy_gamma_set,
4839         .set_config = drm_atomic_helper_set_config,
4840         .page_flip = drm_atomic_helper_page_flip,
4841         .atomic_duplicate_state = dm_crtc_duplicate_state,
4842         .atomic_destroy_state = dm_crtc_destroy_state,
4843         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4844         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4845         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4846         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4847         .enable_vblank = dm_enable_vblank,
4848         .disable_vblank = dm_disable_vblank,
4849         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4850 };
4851
4852 static enum drm_connector_status
4853 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4854 {
4855         bool connected;
4856         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4857
4858         /*
4859          * Notes:
4860          * 1. This interface is NOT called in context of HPD irq.
4861          * 2. This interface *is called* in the context of a user-mode ioctl,
4862          * which makes it a bad place for *any* MST-related activity.
4863          */
4864
4865         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4866             !aconnector->fake_enable)
4867                 connected = (aconnector->dc_sink != NULL);
4868         else
4869                 connected = (aconnector->base.force == DRM_FORCE_ON);
4870
4871         return (connected ? connector_status_connected :
4872                         connector_status_disconnected);
4873 }
4874
4875 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4876                                             struct drm_connector_state *connector_state,
4877                                             struct drm_property *property,
4878                                             uint64_t val)
4879 {
4880         struct drm_device *dev = connector->dev;
4881         struct amdgpu_device *adev = drm_to_adev(dev);
4882         struct dm_connector_state *dm_old_state =
4883                 to_dm_connector_state(connector->state);
4884         struct dm_connector_state *dm_new_state =
4885                 to_dm_connector_state(connector_state);
4886
4887         int ret = -EINVAL;
4888
4889         if (property == dev->mode_config.scaling_mode_property) {
4890                 enum amdgpu_rmx_type rmx_type;
4891
4892                 switch (val) {
4893                 case DRM_MODE_SCALE_CENTER:
4894                         rmx_type = RMX_CENTER;
4895                         break;
4896                 case DRM_MODE_SCALE_ASPECT:
4897                         rmx_type = RMX_ASPECT;
4898                         break;
4899                 case DRM_MODE_SCALE_FULLSCREEN:
4900                         rmx_type = RMX_FULL;
4901                         break;
4902                 case DRM_MODE_SCALE_NONE:
4903                 default:
4904                         rmx_type = RMX_OFF;
4905                         break;
4906                 }
4907
4908                 if (dm_old_state->scaling == rmx_type)
4909                         return 0;
4910
4911                 dm_new_state->scaling = rmx_type;
4912                 ret = 0;
4913         } else if (property == adev->mode_info.underscan_hborder_property) {
4914                 dm_new_state->underscan_hborder = val;
4915                 ret = 0;
4916         } else if (property == adev->mode_info.underscan_vborder_property) {
4917                 dm_new_state->underscan_vborder = val;
4918                 ret = 0;
4919         } else if (property == adev->mode_info.underscan_property) {
4920                 dm_new_state->underscan_enable = val;
4921                 ret = 0;
4922         } else if (property == adev->mode_info.abm_level_property) {
4923                 dm_new_state->abm_level = val;
4924                 ret = 0;
4925         }
4926
4927         return ret;
4928 }
4929
4930 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4931                                             const struct drm_connector_state *state,
4932                                             struct drm_property *property,
4933                                             uint64_t *val)
4934 {
4935         struct drm_device *dev = connector->dev;
4936         struct amdgpu_device *adev = drm_to_adev(dev);
4937         struct dm_connector_state *dm_state =
4938                 to_dm_connector_state(state);
4939         int ret = -EINVAL;
4940
4941         if (property == dev->mode_config.scaling_mode_property) {
4942                 switch (dm_state->scaling) {
4943                 case RMX_CENTER:
4944                         *val = DRM_MODE_SCALE_CENTER;
4945                         break;
4946                 case RMX_ASPECT:
4947                         *val = DRM_MODE_SCALE_ASPECT;
4948                         break;
4949                 case RMX_FULL:
4950                         *val = DRM_MODE_SCALE_FULLSCREEN;
4951                         break;
4952                 case RMX_OFF:
4953                 default:
4954                         *val = DRM_MODE_SCALE_NONE;
4955                         break;
4956                 }
4957                 ret = 0;
4958         } else if (property == adev->mode_info.underscan_hborder_property) {
4959                 *val = dm_state->underscan_hborder;
4960                 ret = 0;
4961         } else if (property == adev->mode_info.underscan_vborder_property) {
4962                 *val = dm_state->underscan_vborder;
4963                 ret = 0;
4964         } else if (property == adev->mode_info.underscan_property) {
4965                 *val = dm_state->underscan_enable;
4966                 ret = 0;
4967         } else if (property == adev->mode_info.abm_level_property) {
4968                 *val = dm_state->abm_level;
4969                 ret = 0;
4970         }
4971
4972         return ret;
4973 }
4974
4975 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4976 {
4977         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4978
4979         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4980 }
4981
4982 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4983 {
4984         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4985         const struct dc_link *link = aconnector->dc_link;
4986         struct amdgpu_device *adev = drm_to_adev(connector->dev);
4987         struct amdgpu_display_manager *dm = &adev->dm;
4988
4989 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4990         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4991
4992         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4993             link->type != dc_connection_none &&
4994             dm->backlight_dev) {
4995                 backlight_device_unregister(dm->backlight_dev);
4996                 dm->backlight_dev = NULL;
4997         }
4998 #endif
4999
5000         if (aconnector->dc_em_sink)
5001                 dc_sink_release(aconnector->dc_em_sink);
5002         aconnector->dc_em_sink = NULL;
5003         if (aconnector->dc_sink)
5004                 dc_sink_release(aconnector->dc_sink);
5005         aconnector->dc_sink = NULL;
5006
5007         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5008         drm_connector_unregister(connector);
5009         drm_connector_cleanup(connector);
5010         if (aconnector->i2c) {
5011                 i2c_del_adapter(&aconnector->i2c->base);
5012                 kfree(aconnector->i2c);
5013         }
5014         kfree(aconnector->dm_dp_aux.aux.name);
5015
5016         kfree(connector);
5017 }
5018
5019 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5020 {
5021         struct dm_connector_state *state =
5022                 to_dm_connector_state(connector->state);
5023
5024         if (connector->state)
5025                 __drm_atomic_helper_connector_destroy_state(connector->state);
5026
5027         kfree(state);
5028
5029         state = kzalloc(sizeof(*state), GFP_KERNEL);
5030
5031         if (state) {
5032                 state->scaling = RMX_OFF;
5033                 state->underscan_enable = false;
5034                 state->underscan_hborder = 0;
5035                 state->underscan_vborder = 0;
5036                 state->base.max_requested_bpc = 8;
5037                 state->vcpi_slots = 0;
5038                 state->pbn = 0;
5039                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5040                         state->abm_level = amdgpu_dm_abm_level;
5041
5042                 __drm_atomic_helper_connector_reset(connector, &state->base);
5043         }
5044 }
5045
5046 struct drm_connector_state *
5047 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5048 {
5049         struct dm_connector_state *state =
5050                 to_dm_connector_state(connector->state);
5051
5052         struct dm_connector_state *new_state =
5053                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5054
5055         if (!new_state)
5056                 return NULL;
5057
5058         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5059
5060         new_state->freesync_capable = state->freesync_capable;
5061         new_state->abm_level = state->abm_level;
5062         new_state->scaling = state->scaling;
5063         new_state->underscan_enable = state->underscan_enable;
5064         new_state->underscan_hborder = state->underscan_hborder;
5065         new_state->underscan_vborder = state->underscan_vborder;
5066         new_state->vcpi_slots = state->vcpi_slots;
5067         new_state->pbn = state->pbn;
5068         return &new_state->base;
5069 }
5070
5071 static int
5072 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5073 {
5074         struct amdgpu_dm_connector *amdgpu_dm_connector =
5075                 to_amdgpu_dm_connector(connector);
5076         int r;
5077
5078         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5079             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5080                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5081                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5082                 if (r)
5083                         return r;
5084         }
5085
5086 #if defined(CONFIG_DEBUG_FS)
5087         connector_debugfs_init(amdgpu_dm_connector);
5088 #endif
5089
5090         return 0;
5091 }
5092
5093 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5094         .reset = amdgpu_dm_connector_funcs_reset,
5095         .detect = amdgpu_dm_connector_detect,
5096         .fill_modes = drm_helper_probe_single_connector_modes,
5097         .destroy = amdgpu_dm_connector_destroy,
5098         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5099         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5100         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5101         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5102         .late_register = amdgpu_dm_connector_late_register,
5103         .early_unregister = amdgpu_dm_connector_unregister
5104 };
5105
5106 static int get_modes(struct drm_connector *connector)
5107 {
5108         return amdgpu_dm_connector_get_modes(connector);
5109 }
5110
5111 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5112 {
5113         struct dc_sink_init_data init_params = {
5114                         .link = aconnector->dc_link,
5115                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5116         };
5117         struct edid *edid;
5118
5119         if (!aconnector->base.edid_blob_ptr) {
5120                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5121                                 aconnector->base.name);
5122
5123                 aconnector->base.force = DRM_FORCE_OFF;
5124                 aconnector->base.override_edid = false;
5125                 return;
5126         }
5127
5128         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5129
5130         aconnector->edid = edid;
5131
5132         aconnector->dc_em_sink = dc_link_add_remote_sink(
5133                 aconnector->dc_link,
5134                 (uint8_t *)edid,
5135                 (edid->extensions + 1) * EDID_LENGTH,
5136                 &init_params);
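
        /*
         * EDID_LENGTH is 128 bytes, so e.g. an EDID with one CEA extension
         * block is handed to DC as (1 + 1) * 128 = 256 bytes.
         */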
5137
5138         if (aconnector->base.force == DRM_FORCE_ON) {
5139                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5140                 aconnector->dc_link->local_sink :
5141                 aconnector->dc_em_sink;
5142                 dc_sink_retain(aconnector->dc_sink);
5143         }
5144 }
5145
5146 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5147 {
5148         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5149
5150         /*
5151          * In case of a headless boot with force-on for a DP managed connector,
5152          * those settings have to be != 0 to get an initial modeset.
5153          */
5154         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5155                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5156                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5157         }
5158
5159
5160         aconnector->base.override_edid = true;
5161         create_eml_sink(aconnector);
5162 }
5163
5164 static struct dc_stream_state *
5165 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5166                                 const struct drm_display_mode *drm_mode,
5167                                 const struct dm_connector_state *dm_state,
5168                                 const struct dc_stream_state *old_stream)
5169 {
5170         struct drm_connector *connector = &aconnector->base;
5171         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5172         struct dc_stream_state *stream;
5173         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5174         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5175         enum dc_status dc_result = DC_OK;
5176
5177         do {
5178                 stream = create_stream_for_sink(aconnector, drm_mode,
5179                                                 dm_state, old_stream,
5180                                                 requested_bpc);
5181                 if (stream == NULL) {
5182                         DRM_ERROR("Failed to create stream for sink!\n");
5183                         break;
5184                 }
5185
5186                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5187
5188                 if (dc_result != DC_OK) {
5189                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5190                                       drm_mode->hdisplay,
5191                                       drm_mode->vdisplay,
5192                                       drm_mode->clock,
5193                                       dc_result,
5194                                       dc_status_to_str(dc_result));
5195
5196                         dc_stream_release(stream);
5197                         stream = NULL;
5198                         requested_bpc -= 2; /* lower bpc to retry validation */
5199                 }
5200
5201         } while (stream == NULL && requested_bpc >= 6);
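
        /*
         * Illustration: with max_requested_bpc = 10 the loop above tries
         * the stream at 10 bpc, then 8, then 6, and gives up once
         * requested_bpc drops below 6 without passing DC validation.
         */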
5202
5203         return stream;
5204 }
5205
5206 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5207                                    struct drm_display_mode *mode)
5208 {
5209         int result = MODE_ERROR;
5210         struct dc_sink *dc_sink;
5211         /* TODO: Unhardcode stream count */
5212         struct dc_stream_state *stream;
5213         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5214
5215         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5216                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5217                 return result;
5218
5219         /*
5220          * Only run this the first time mode_valid is called, to initialize
5221          * EDID mgmt.
5222          */
5223         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5224                 !aconnector->dc_em_sink)
5225                 handle_edid_mgmt(aconnector);
5226
5227         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5228
5229         if (dc_sink == NULL) {
5230                 DRM_ERROR("dc_sink is NULL!\n");
5231                 goto fail;
5232         }
5233
5234         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5235         if (stream) {
5236                 dc_stream_release(stream);
5237                 result = MODE_OK;
5238         }
5239
5240 fail:
5241         /* TODO: error handling */
5242         return result;
5243 }
5244
5245 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5246                                 struct dc_info_packet *out)
5247 {
5248         struct hdmi_drm_infoframe frame;
5249         unsigned char buf[30]; /* 26 + 4 */
5250         ssize_t len;
5251         int ret, i;
5252
5253         memset(out, 0, sizeof(*out));
5254
5255         if (!state->hdr_output_metadata)
5256                 return 0;
5257
5258         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5259         if (ret)
5260                 return ret;
5261
5262         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5263         if (len < 0)
5264                 return (int)len;
5265
5266         /* Static metadata is a fixed 26 bytes + 4 byte header. */
5267         if (len != 30)
5268                 return -EINVAL;
5269
5270         /* Prepare the infopacket for DC. */
5271         switch (state->connector->connector_type) {
5272         case DRM_MODE_CONNECTOR_HDMIA:
5273                 out->hb0 = 0x87; /* type */
5274                 out->hb1 = 0x01; /* version */
5275                 out->hb2 = 0x1A; /* length */
5276                 out->sb[0] = buf[3]; /* checksum */
5277                 i = 1;
5278                 break;
5279
5280         case DRM_MODE_CONNECTOR_DisplayPort:
5281         case DRM_MODE_CONNECTOR_eDP:
5282                 out->hb0 = 0x00; /* sdp id, zero */
5283                 out->hb1 = 0x87; /* type */
5284                 out->hb2 = 0x1D; /* payload len - 1 */
5285                 out->hb3 = (0x13 << 2); /* sdp version */
5286                 out->sb[0] = 0x01; /* version */
5287                 out->sb[1] = 0x1A; /* length */
5288                 i = 2;
5289                 break;
5290
5291         default:
5292                 return -EINVAL;
5293         }
5294
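        /*
         * In both layouts above, i points at the first free payload byte,
         * so the memcpy below places the 26 bytes of static metadata
         * (buf[4..29]) directly behind the per-protocol header.
         */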
5295         memcpy(&out->sb[i], &buf[4], 26);
5296         out->valid = true;
5297
5298         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5299                        sizeof(out->sb), false);
5300
5301         return 0;
5302 }
5303
5304 static bool
5305 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5306                           const struct drm_connector_state *new_state)
5307 {
5308         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5309         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5310
5311         if (old_blob != new_blob) {
5312                 if (old_blob && new_blob &&
5313                     old_blob->length == new_blob->length)
5314                         return memcmp(old_blob->data, new_blob->data,
5315                                       old_blob->length);
5316
5317                 return true;
5318         }
5319
5320         return false;
5321 }
5322
5323 static int
5324 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5325                                  struct drm_atomic_state *state)
5326 {
5327         struct drm_connector_state *new_con_state =
5328                 drm_atomic_get_new_connector_state(state, conn);
5329         struct drm_connector_state *old_con_state =
5330                 drm_atomic_get_old_connector_state(state, conn);
5331         struct drm_crtc *crtc = new_con_state->crtc;
5332         struct drm_crtc_state *new_crtc_state;
5333         int ret;
5334
5335         if (!crtc)
5336                 return 0;
5337
5338         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5339                 struct dc_info_packet hdr_infopacket;
5340
5341                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5342                 if (ret)
5343                         return ret;
5344
5345                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5346                 if (IS_ERR(new_crtc_state))
5347                         return PTR_ERR(new_crtc_state);
5348
5349                 /*
5350                  * DC considers the stream backends changed if the
5351                  * static metadata changes. Forcing the modeset also
5352                  * gives a simple way for userspace to switch from
5353                  * 8bpc to 10bpc when setting the metadata to enter
5354                  * or exit HDR.
5355                  *
5356                  * Changing the static metadata after it's been
5357                  * set is permissible, however. So only force a
5358                  * modeset if we're entering or exiting HDR.
5359                  */
5360                 new_crtc_state->mode_changed =
5361                         !old_con_state->hdr_output_metadata ||
5362                         !new_con_state->hdr_output_metadata;
5363         }
5364
5365         return 0;
5366 }
5367
5368 static const struct drm_connector_helper_funcs
5369 amdgpu_dm_connector_helper_funcs = {
5370         /*
5371          * If hotplugging a second, bigger display in FB console mode, bigger
5372          * resolution modes will be filtered by drm_mode_validate_size() and
5373          * will be missing after the user starts lightdm. So we need to renew
5374          * the modes list in the get_modes callback, not just return the count.
5375          */
5376         .get_modes = get_modes,
5377         .mode_valid = amdgpu_dm_connector_mode_valid,
5378         .atomic_check = amdgpu_dm_connector_atomic_check,
5379 };
5380
5381 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5382 {
5383 }
5384
5385 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5386 {
5387         struct drm_device *dev = new_crtc_state->crtc->dev;
5388         struct drm_plane *plane;
5389
5390         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5391                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5392                         return true;
5393         }
5394
5395         return false;
5396 }
5397
5398 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5399 {
5400         struct drm_atomic_state *state = new_crtc_state->state;
5401         struct drm_plane *plane;
5402         int num_active = 0;
5403
5404         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5405                 struct drm_plane_state *new_plane_state;
5406
5407                 /* Cursor planes are "fake". */
5408                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5409                         continue;
5410
5411                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5412
5413                 if (!new_plane_state) {
5414                         /*
5415                          * The plane is enabled on the CRTC and hasn't changed
5416                          * state. This means that it previously passed
5417                          * validation and is therefore enabled.
5418                          */
5419                         num_active += 1;
5420                         continue;
5421                 }
5422
5423                 /* We need a framebuffer to be considered enabled. */
5424                 num_active += (new_plane_state->fb != NULL);
5425         }
5426
5427         return num_active;
5428 }
5429
5430 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5431                                          struct drm_crtc_state *new_crtc_state)
5432 {
5433         struct dm_crtc_state *dm_new_crtc_state =
5434                 to_dm_crtc_state(new_crtc_state);
5435
5436         dm_new_crtc_state->active_planes = 0;
5437
5438         if (!dm_new_crtc_state->stream)
5439                 return;
5440
5441         dm_new_crtc_state->active_planes =
5442                 count_crtc_active_planes(new_crtc_state);
5443 }
5444
5445 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5446                                        struct drm_crtc_state *state)
5447 {
5448         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5449         struct dc *dc = adev->dm.dc;
5450         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5451         int ret = -EINVAL;
5452
5453         dm_update_crtc_active_planes(crtc, state);
5454
5455         if (unlikely(!dm_crtc_state->stream &&
5456                      modeset_required(state, NULL, dm_crtc_state->stream))) {
5457                 WARN_ON(1);
5458                 return ret;
5459         }
5460
5461         /* In some use cases, like reset, no stream is attached */
5462         if (!dm_crtc_state->stream)
5463                 return 0;
5464
5465         /*
5466          * We want at least one hardware plane enabled to use
5467          * the stream with a cursor enabled.
5468          */
5469         if (state->enable && state->active &&
5470             does_crtc_have_active_cursor(state) &&
5471             dm_crtc_state->active_planes == 0)
5472                 return -EINVAL;
5473
5474         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5475                 return 0;
5476
5477         return ret;
5478 }
5479
5480 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5481                                       const struct drm_display_mode *mode,
5482                                       struct drm_display_mode *adjusted_mode)
5483 {
5484         return true;
5485 }
5486
5487 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5488         .disable = dm_crtc_helper_disable,
5489         .atomic_check = dm_crtc_helper_atomic_check,
5490         .mode_fixup = dm_crtc_helper_mode_fixup,
5491         .get_scanout_position = amdgpu_crtc_get_scanout_position,
5492 };
5493
5494 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5495 {
5496
5497 }
5498
5499 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5500 {
5501         switch (display_color_depth) {
5502         case COLOR_DEPTH_666:
5503                 return 6;
5504         case COLOR_DEPTH_888:
5505                 return 8;
5506         case COLOR_DEPTH_101010:
5507                 return 10;
5508         case COLOR_DEPTH_121212:
5509                 return 12;
5510         case COLOR_DEPTH_141414:
5511                 return 14;
5512         case COLOR_DEPTH_161616:
5513                 return 16;
5514         default:
5515                 break;
5516         }
5517         return 0;
5518 }
5519
5520 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5521                                           struct drm_crtc_state *crtc_state,
5522                                           struct drm_connector_state *conn_state)
5523 {
5524         struct drm_atomic_state *state = crtc_state->state;
5525         struct drm_connector *connector = conn_state->connector;
5526         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5527         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5528         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5529         struct drm_dp_mst_topology_mgr *mst_mgr;
5530         struct drm_dp_mst_port *mst_port;
5531         enum dc_color_depth color_depth;
5532         int clock, bpp = 0;
5533         bool is_y420 = false;
5534
5535         if (!aconnector->port || !aconnector->dc_sink)
5536                 return 0;
5537
5538         mst_port = aconnector->port;
5539         mst_mgr = &aconnector->mst_port->mst_mgr;
5540
5541         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5542                 return 0;
5543
5544         if (!state->duplicated) {
5545                 int max_bpc = conn_state->max_requested_bpc;
5546                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5547                                 aconnector->force_yuv420_output;
5548                 color_depth = convert_color_depth_from_display_info(connector,
5549                                                                     is_y420,
5550                                                                     max_bpc);
5551                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5552                 clock = adjusted_mode->clock;
5553                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5554         }
5555         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5556                                                                            mst_mgr,
5557                                                                            mst_port,
5558                                                                            dm_new_connector_state->pbn,
5559                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
5560         if (dm_new_connector_state->vcpi_slots < 0) {
5561                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5562                 return dm_new_connector_state->vcpi_slots;
5563         }
5564         return 0;
5565 }
5566
5567 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5568         .disable = dm_encoder_helper_disable,
5569         .atomic_check = dm_encoder_helper_atomic_check
5570 };
5571
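/*
 * When DSC is active on an MST stream, bandwidth is consumed at the
 * compressed rate, so the PBN and VCPI allocation made during the encoder
 * atomic check must be redone from the DSC bits_per_pixel rather than the
 * uncompressed stream bpp. That is what the helper below does.
 */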
5572 #if defined(CONFIG_DRM_AMD_DC_DCN)
5573 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5574                                             struct dc_state *dc_state)
5575 {
5576         struct dc_stream_state *stream = NULL;
5577         struct drm_connector *connector;
5578         struct drm_connector_state *new_con_state, *old_con_state;
5579         struct amdgpu_dm_connector *aconnector;
5580         struct dm_connector_state *dm_conn_state;
5581         int i, j, clock, bpp;
5582         int vcpi, pbn_div, pbn = 0;
5583
5584         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5585
5586                 aconnector = to_amdgpu_dm_connector(connector);
5587
5588                 if (!aconnector->port)
5589                         continue;
5590
5591                 if (!new_con_state || !new_con_state->crtc)
5592                         continue;
5593
5594                 dm_conn_state = to_dm_connector_state(new_con_state);
5595
5596                 for (j = 0; j < dc_state->stream_count; j++) {
5597                         stream = dc_state->streams[j];
5598                         if (!stream)
5599                                 continue;
5600
5601                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5602                                 break;
5603
5604                         stream = NULL;
5605                 }
5606
5607                 if (!stream)
5608                         continue;
5609
5610                 if (stream->timing.flags.DSC != 1) {
5611                         drm_dp_mst_atomic_enable_dsc(state,
5612                                                      aconnector->port,
5613                                                      dm_conn_state->pbn,
5614                                                      0,
5615                                                      false);
5616                         continue;
5617                 }
5618
5619                 pbn_div = dm_mst_get_pbn_divider(stream->link);
5620                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5621                 clock = stream->timing.pix_clk_100hz / 10;
5622                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5623                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5624                                                     aconnector->port,
5625                                                     pbn, pbn_div,
5626                                                     true);
5627                 if (vcpi < 0)
5628                         return vcpi;
5629
5630                 dm_conn_state->pbn = pbn;
5631                 dm_conn_state->vcpi_slots = vcpi;
5632         }
5633         return 0;
5634 }
5635 #endif
5636
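/*
 * Plane state management: .reset allocates a fresh dm_plane_state,
 * .atomic_duplicate_state copies it for the next commit, and
 * .atomic_destroy_state drops it again. The embedded dc_plane_state is
 * reference counted, so duplication retains it and destruction releases it.
 */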
5637 static void dm_drm_plane_reset(struct drm_plane *plane)
5638 {
5639         struct dm_plane_state *amdgpu_state = NULL;
5640
5641         if (plane->state)
5642                 plane->funcs->atomic_destroy_state(plane, plane->state);
5643
5644         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5645         WARN_ON(amdgpu_state == NULL);
5646
5647         if (amdgpu_state)
5648                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5649 }
5650
5651 static struct drm_plane_state *
5652 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5653 {
5654         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5655
5656         old_dm_plane_state = to_dm_plane_state(plane->state);
5657         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5658         if (!dm_plane_state)
5659                 return NULL;
5660
5661         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5662
5663         if (old_dm_plane_state->dc_state) {
5664                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5665                 dc_plane_state_retain(dm_plane_state->dc_state);
5666         }
5667
5668         /* Framebuffer hasn't been updated yet, so retain old flags. */
5669         dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5670         dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5671
5672         return &dm_plane_state->base;
5673 }
5674
5675 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5676                                 struct drm_plane_state *state)
5677 {
5678         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5679
5680         if (dm_plane_state->dc_state)
5681                 dc_plane_state_release(dm_plane_state->dc_state);
5682
5683         drm_atomic_helper_plane_destroy_state(plane, state);
5684 }
5685
5686 static const struct drm_plane_funcs dm_plane_funcs = {
5687         .update_plane   = drm_atomic_helper_update_plane,
5688         .disable_plane  = drm_atomic_helper_disable_plane,
5689         .destroy        = drm_primary_helper_destroy,
5690         .reset = dm_drm_plane_reset,
5691         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5692         .atomic_destroy_state = dm_drm_plane_destroy_state,
5693 };
5694
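/*
 * prepare_fb pins the framebuffer BO in a display-capable domain and binds
 * it into GART so that afb->address holds a valid GPU address before the
 * commit programs the hardware; cleanup_fb drops the pin again once the
 * plane has moved off this framebuffer.
 */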
5695 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5696                                       struct drm_plane_state *new_state)
5697 {
5698         struct amdgpu_framebuffer *afb;
5699         struct drm_gem_object *obj;
5700         struct amdgpu_device *adev;
5701         struct amdgpu_bo *rbo;
5702         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5703         struct list_head list;
5704         struct ttm_validate_buffer tv;
5705         struct ww_acquire_ctx ticket;
5706         uint32_t domain;
5707         int r;
5708
5709         if (!new_state->fb) {
5710                 DRM_DEBUG_DRIVER("No FB bound\n");
5711                 return 0;
5712         }
5713
5714         afb = to_amdgpu_framebuffer(new_state->fb);
5715         obj = new_state->fb->obj[0];
5716         rbo = gem_to_amdgpu_bo(obj);
5717         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5718         INIT_LIST_HEAD(&list);
5719
5720         tv.bo = &rbo->tbo;
5721         tv.num_shared = 1;
5722         list_add(&tv.head, &list);
5723
5724         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5725         if (r) {
5726                 dev_err(adev->dev, "failed to reserve buffer object (%d)\n", r);
5727                 return r;
5728         }
5729
5730         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5731                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5732         else
5733                 domain = AMDGPU_GEM_DOMAIN_VRAM;
5734
5735         r = amdgpu_bo_pin(rbo, domain);
5736         if (unlikely(r != 0)) {
5737                 if (r != -ERESTARTSYS)
5738                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5739                 ttm_eu_backoff_reservation(&ticket, &list);
5740                 return r;
5741         }
5742
5743         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5744         if (unlikely(r != 0)) {
5745                 amdgpu_bo_unpin(rbo);
5746                 ttm_eu_backoff_reservation(&ticket, &list);
5747                 DRM_ERROR("%p bind failed\n", rbo);
5748                 return r;
5749         }
5750
5751         ttm_eu_backoff_reservation(&ticket, &list);
5752
5753         afb->address = amdgpu_bo_gpu_offset(rbo);
5754
5755         amdgpu_bo_ref(rbo);
5756
5757         /*
5758          * We don't do surface updates on planes that have been newly created,
5759          * but we also don't have the afb->address during atomic check.
5760          *
5761          * Fill in buffer attributes depending on the address here, but only on
5762          * newly created planes since they're not being used by DC yet and this
5763          * won't modify global state.
5764          */
5765         dm_plane_state_old = to_dm_plane_state(plane->state);
5766         dm_plane_state_new = to_dm_plane_state(new_state);
5767
5768         if (dm_plane_state_new->dc_state &&
5769             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5770                 struct dc_plane_state *plane_state =
5771                         dm_plane_state_new->dc_state;
5772                 bool force_disable_dcc = !plane_state->dcc.enable;
5773
5774                 fill_plane_buffer_attributes(
5775                         adev, afb, plane_state->format, plane_state->rotation,
5776                         dm_plane_state_new->tiling_flags,
5777                         &plane_state->tiling_info, &plane_state->plane_size,
5778                         &plane_state->dcc, &plane_state->address,
5779                         dm_plane_state_new->tmz_surface, force_disable_dcc);
5780         }
5781
5782         return 0;
5783 }
5784
5785 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5786                                        struct drm_plane_state *old_state)
5787 {
5788         struct amdgpu_bo *rbo;
5789         int r;
5790
5791         if (!old_state->fb)
5792                 return;
5793
5794         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5795         r = amdgpu_bo_reserve(rbo, false);
5796         if (unlikely(r)) {
5797                 DRM_ERROR("failed to reserve rbo before unpin\n");
5798                 return;
5799         }
5800
5801         amdgpu_bo_unpin(rbo);
5802         amdgpu_bo_unreserve(rbo);
5803         amdgpu_bo_unref(&rbo);
5804 }
5805
5806 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5807                                        struct drm_crtc_state *new_crtc_state)
5808 {
5809         int max_downscale = 0;
5810         int max_upscale = INT_MAX;
5811
5812         /* TODO: These should be checked against DC plane caps */
5813         return drm_atomic_helper_check_plane_state(
5814                 state, new_crtc_state, max_downscale, max_upscale, true, true);
5815 }
5816
5817 static int dm_plane_atomic_check(struct drm_plane *plane,
5818                                  struct drm_plane_state *state)
5819 {
5820         struct amdgpu_device *adev = drm_to_adev(plane->dev);
5821         struct dc *dc = adev->dm.dc;
5822         struct dm_plane_state *dm_plane_state;
5823         struct dc_scaling_info scaling_info;
5824         struct drm_crtc_state *new_crtc_state;
5825         int ret;
5826
5827         dm_plane_state = to_dm_plane_state(state);
5828
5829         if (!dm_plane_state->dc_state)
5830                 return 0;
5831
5832         new_crtc_state =
5833                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5834         if (!new_crtc_state)
5835                 return -EINVAL;
5836
5837         ret = dm_plane_helper_check_state(state, new_crtc_state);
5838         if (ret)
5839                 return ret;
5840
5841         ret = fill_dc_scaling_info(state, &scaling_info);
5842         if (ret)
5843                 return ret;
5844
5845         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5846                 return 0;
5847
5848         return -EINVAL;
5849 }
5850
5851 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5852                                        struct drm_plane_state *new_plane_state)
5853 {
5854         /* Only support async updates on cursor planes. */
5855         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5856                 return -EINVAL;
5857
5858         return 0;
5859 }
5860
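/*
 * Async (cursor) updates bypass the normal atomic commit: the new source
 * and destination rectangles are copied straight into the current plane
 * state and the cursor is programmed immediately via handle_cursor_update().
 */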
5861 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5862                                          struct drm_plane_state *new_state)
5863 {
5864         struct drm_plane_state *old_state =
5865                 drm_atomic_get_old_plane_state(new_state->state, plane);
5866
5867         swap(plane->state->fb, new_state->fb);
5868
5869         plane->state->src_x = new_state->src_x;
5870         plane->state->src_y = new_state->src_y;
5871         plane->state->src_w = new_state->src_w;
5872         plane->state->src_h = new_state->src_h;
5873         plane->state->crtc_x = new_state->crtc_x;
5874         plane->state->crtc_y = new_state->crtc_y;
5875         plane->state->crtc_w = new_state->crtc_w;
5876         plane->state->crtc_h = new_state->crtc_h;
5877
5878         handle_cursor_update(plane, old_state);
5879 }
5880
5881 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5882         .prepare_fb = dm_plane_helper_prepare_fb,
5883         .cleanup_fb = dm_plane_helper_cleanup_fb,
5884         .atomic_check = dm_plane_atomic_check,
5885         .atomic_async_check = dm_plane_atomic_async_check,
5886         .atomic_async_update = dm_plane_atomic_async_update
5887 };
5888
5889 /*
5890  * TODO: these are currently initialized to rgb formats only.
5891  * For future use cases we should either initialize them dynamically based on
5892  * plane capabilities, or initialize this array to all formats, so internal drm
5893  * check will succeed, and let DC implement proper check
5894  */
5895 static const uint32_t rgb_formats[] = {
5896         DRM_FORMAT_XRGB8888,
5897         DRM_FORMAT_ARGB8888,
5898         DRM_FORMAT_RGBA8888,
5899         DRM_FORMAT_XRGB2101010,
5900         DRM_FORMAT_XBGR2101010,
5901         DRM_FORMAT_ARGB2101010,
5902         DRM_FORMAT_ABGR2101010,
5903         DRM_FORMAT_XBGR8888,
5904         DRM_FORMAT_ABGR8888,
5905         DRM_FORMAT_RGB565,
5906 };
5907
5908 static const uint32_t overlay_formats[] = {
5909         DRM_FORMAT_XRGB8888,
5910         DRM_FORMAT_ARGB8888,
5911         DRM_FORMAT_RGBA8888,
5912         DRM_FORMAT_XBGR8888,
5913         DRM_FORMAT_ABGR8888,
5914         DRM_FORMAT_RGB565
5915 };
5916
5917 static const u32 cursor_formats[] = {
5918         DRM_FORMAT_ARGB8888
5919 };
5920
5921 static int get_plane_formats(const struct drm_plane *plane,
5922                              const struct dc_plane_cap *plane_cap,
5923                              uint32_t *formats, int max_formats)
5924 {
5925         int i, num_formats = 0;
5926
5927         /*
5928          * TODO: Query support for each group of formats directly from
5929          * DC plane caps. This will require adding more formats to the
5930          * caps list.
5931          */
5932
5933         switch (plane->type) {
5934         case DRM_PLANE_TYPE_PRIMARY:
5935                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5936                         if (num_formats >= max_formats)
5937                                 break;
5938
5939                         formats[num_formats++] = rgb_formats[i];
5940                 }
5941
5942                 if (plane_cap && plane_cap->pixel_format_support.nv12)
5943                         formats[num_formats++] = DRM_FORMAT_NV12;
5944                 if (plane_cap && plane_cap->pixel_format_support.p010)
5945                         formats[num_formats++] = DRM_FORMAT_P010;
5946                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5947                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5948                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5949                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5950                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5951                 }
5952                 break;
5953
5954         case DRM_PLANE_TYPE_OVERLAY:
5955                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5956                         if (num_formats >= max_formats)
5957                                 break;
5958
5959                         formats[num_formats++] = overlay_formats[i];
5960                 }
5961                 break;
5962
5963         case DRM_PLANE_TYPE_CURSOR:
5964                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5965                         if (num_formats >= max_formats)
5966                                 break;
5967
5968                         formats[num_formats++] = cursor_formats[i];
5969                 }
5970                 break;
5971         }
5972
5973         return num_formats;
5974 }
5975
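/*
 * Note on buffer sizing: the rgb/overlay/cursor loops in get_plane_formats()
 * honor max_formats, but the cap-dependent NV12/P010/FP16 additions do not,
 * so the formats[] array below must keep enough headroom for them (at most
 * 10 RGB + 6 extra entries against the 32 available).
 */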
5976 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5977                                 struct drm_plane *plane,
5978                                 unsigned long possible_crtcs,
5979                                 const struct dc_plane_cap *plane_cap)
5980 {
5981         uint32_t formats[32];
5982         int num_formats;
5983         int res = -EPERM;
5984         unsigned int supported_rotations;
5985
5986         num_formats = get_plane_formats(plane, plane_cap, formats,
5987                                         ARRAY_SIZE(formats));
5988
5989         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
5990                                        &dm_plane_funcs, formats, num_formats,
5991                                        NULL, plane->type, NULL);
5992         if (res)
5993                 return res;
5994
5995         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5996             plane_cap && plane_cap->per_pixel_alpha) {
5997                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5998                                           BIT(DRM_MODE_BLEND_PREMULTI);
5999
6000                 drm_plane_create_alpha_property(plane);
6001                 drm_plane_create_blend_mode_property(plane, blend_caps);
6002         }
6003
6004         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6005             plane_cap &&
6006             (plane_cap->pixel_format_support.nv12 ||
6007              plane_cap->pixel_format_support.p010)) {
6008                 /* This only affects YUV formats. */
6009                 drm_plane_create_color_properties(
6010                         plane,
6011                         BIT(DRM_COLOR_YCBCR_BT601) |
6012                         BIT(DRM_COLOR_YCBCR_BT709) |
6013                         BIT(DRM_COLOR_YCBCR_BT2020),
6014                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6015                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6016                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6017         }
6018
6019         supported_rotations =
6020                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6021                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6022
6023         if (dm->adev->asic_type >= CHIP_BONAIRE)
6024                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6025                                                    supported_rotations);
6026
6027         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6028
6029         /* Create (reset) the plane state */
6030         if (plane->funcs->reset)
6031                 plane->funcs->reset(plane);
6032
6033         return 0;
6034 }
6035
6036 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6037                                struct drm_plane *plane,
6038                                uint32_t crtc_index)
6039 {
6040         struct amdgpu_crtc *acrtc = NULL;
6041         struct drm_plane *cursor_plane;
6042
6043         int res = -ENOMEM;
6044
6045         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6046         if (!cursor_plane)
6047                 goto fail;
6048
6049         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6050         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
         if (res)
                 goto fail;
6051
6052         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6053         if (!acrtc)
6054                 goto fail;
6055
6056         res = drm_crtc_init_with_planes(
6057                         dm->ddev,
6058                         &acrtc->base,
6059                         plane,
6060                         cursor_plane,
6061                         &amdgpu_dm_crtc_funcs, NULL);
6062
6063         if (res)
6064                 goto fail;
6065
6066         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6067
6068         /* Create (reset) the plane state */
6069         if (acrtc->base.funcs->reset)
6070                 acrtc->base.funcs->reset(&acrtc->base);
6071
6072         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6073         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6074
6075         acrtc->crtc_id = crtc_index;
6076         acrtc->base.enabled = false;
6077         acrtc->otg_inst = -1;
6078
6079         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6080         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6081                                    true, MAX_COLOR_LUT_ENTRIES);
6082         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6083
6084         return 0;
6085
6086 fail:
6087         kfree(acrtc);
6088         kfree(cursor_plane);
6089         return res;
6090 }
6091
6092
6093 static int to_drm_connector_type(enum signal_type st)
6094 {
6095         switch (st) {
6096         case SIGNAL_TYPE_HDMI_TYPE_A:
6097                 return DRM_MODE_CONNECTOR_HDMIA;
6098         case SIGNAL_TYPE_EDP:
6099                 return DRM_MODE_CONNECTOR_eDP;
6100         case SIGNAL_TYPE_LVDS:
6101                 return DRM_MODE_CONNECTOR_LVDS;
6102         case SIGNAL_TYPE_RGB:
6103                 return DRM_MODE_CONNECTOR_VGA;
6104         case SIGNAL_TYPE_DISPLAY_PORT:
6105         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6106                 return DRM_MODE_CONNECTOR_DisplayPort;
6107         case SIGNAL_TYPE_DVI_DUAL_LINK:
6108         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6109                 return DRM_MODE_CONNECTOR_DVID;
6110         case SIGNAL_TYPE_VIRTUAL:
6111                 return DRM_MODE_CONNECTOR_VIRTUAL;
6112
6113         default:
6114                 return DRM_MODE_CONNECTOR_Unknown;
6115         }
6116 }
6117
6118 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6119 {
6120         struct drm_encoder *encoder;
6121
6122         /* There is only one encoder per connector */
6123         drm_connector_for_each_possible_encoder(connector, encoder)
6124                 return encoder;
6125
6126         return NULL;
6127 }
6128
6129 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6130 {
6131         struct drm_encoder *encoder;
6132         struct amdgpu_encoder *amdgpu_encoder;
6133
6134         encoder = amdgpu_dm_connector_to_encoder(connector);
6135
6136         if (encoder == NULL)
6137                 return;
6138
6139         amdgpu_encoder = to_amdgpu_encoder(encoder);
6140
6141         amdgpu_encoder->native_mode.clock = 0;
6142
6143         if (!list_empty(&connector->probed_modes)) {
6144                 struct drm_display_mode *preferred_mode = NULL;
6145
6146                 list_for_each_entry(preferred_mode,
6147                                     &connector->probed_modes,
6148                                     head) {
6149                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6150                                 amdgpu_encoder->native_mode = *preferred_mode;
6151
6152                         break;
6153                 }
6154
6155         }
6156 }
6157
6158 static struct drm_display_mode *
6159 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6160                              char *name,
6161                              int hdisplay, int vdisplay)
6162 {
6163         struct drm_device *dev = encoder->dev;
6164         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6165         struct drm_display_mode *mode = NULL;
6166         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6167
6168         mode = drm_mode_duplicate(dev, native_mode);
6169
6170         if (mode == NULL)
6171                 return NULL;
6172
6173         mode->hdisplay = hdisplay;
6174         mode->vdisplay = vdisplay;
6175         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6176         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6177
6178         return mode;
6179
6180 }
6181
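/*
 * In addition to the EDID-reported modes, expose a fixed list of common
 * modes. Only modes strictly smaller than the native mode are added, and
 * anything already present in probed_modes is skipped to avoid duplicates.
 */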
6182 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6183                                                  struct drm_connector *connector)
6184 {
6185         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6186         struct drm_display_mode *mode = NULL;
6187         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6188         struct amdgpu_dm_connector *amdgpu_dm_connector =
6189                                 to_amdgpu_dm_connector(connector);
6190         int i;
6191         int n;
6192         struct mode_size {
6193                 char name[DRM_DISPLAY_MODE_LEN];
6194                 int w;
6195                 int h;
6196         } common_modes[] = {
6197                 {  "640x480",  640,  480},
6198                 {  "800x600",  800,  600},
6199                 { "1024x768", 1024,  768},
6200                 { "1280x720", 1280,  720},
6201                 { "1280x800", 1280,  800},
6202                 {"1280x1024", 1280, 1024},
6203                 { "1440x900", 1440,  900},
6204                 {"1680x1050", 1680, 1050},
6205                 {"1600x1200", 1600, 1200},
6206                 {"1920x1080", 1920, 1080},
6207                 {"1920x1200", 1920, 1200}
6208         };
6209
6210         n = ARRAY_SIZE(common_modes);
6211
6212         for (i = 0; i < n; i++) {
6213                 struct drm_display_mode *curmode = NULL;
6214                 bool mode_existed = false;
6215
6216                 if (common_modes[i].w > native_mode->hdisplay ||
6217                     common_modes[i].h > native_mode->vdisplay ||
6218                    (common_modes[i].w == native_mode->hdisplay &&
6219                     common_modes[i].h == native_mode->vdisplay))
6220                         continue;
6221
6222                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6223                         if (common_modes[i].w == curmode->hdisplay &&
6224                             common_modes[i].h == curmode->vdisplay) {
6225                                 mode_existed = true;
6226                                 break;
6227                         }
6228                 }
6229
6230                 if (mode_existed)
6231                         continue;
6232
6233                 mode = amdgpu_dm_create_common_mode(encoder,
6234                                 common_modes[i].name, common_modes[i].w,
6235                                 common_modes[i].h);
6236                 drm_mode_probed_add(connector, mode);
6237                 amdgpu_dm_connector->num_modes++;
6238         }
6239 }
6240
6241 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6242                                               struct edid *edid)
6243 {
6244         struct amdgpu_dm_connector *amdgpu_dm_connector =
6245                         to_amdgpu_dm_connector(connector);
6246
6247         if (edid) {
6248                 /* empty probed_modes */
6249                 INIT_LIST_HEAD(&connector->probed_modes);
6250                 amdgpu_dm_connector->num_modes =
6251                                 drm_add_edid_modes(connector, edid);
6252
6253                 /* Sort the probed modes before calling
6254                  * amdgpu_dm_get_native_mode(), since an EDID can have
6255                  * more than one preferred mode. Modes later in the
6256                  * probed list may be preferred at a higher resolution:
6257                  * for example, a 3840x2160 preferred timing in the base
6258                  * EDID can be followed by a 4096x2160 preferred
6259                  * resolution in a DID extension block.
6260                  */
6261                 drm_mode_sort(&connector->probed_modes);
6262                 amdgpu_dm_get_native_mode(connector);
6263         } else {
6264                 amdgpu_dm_connector->num_modes = 0;
6265         }
6266 }
6267
6268 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6269 {
6270         struct amdgpu_dm_connector *amdgpu_dm_connector =
6271                         to_amdgpu_dm_connector(connector);
6272         struct drm_encoder *encoder;
6273         struct edid *edid = amdgpu_dm_connector->edid;
6274
6275         encoder = amdgpu_dm_connector_to_encoder(connector);
6276
6277         if (!edid || !drm_edid_is_valid(edid)) {
6278                 amdgpu_dm_connector->num_modes =
6279                                 drm_add_modes_noedid(connector, 640, 480);
6280         } else {
6281                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6282                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6283         }
6284         amdgpu_dm_fbc_init(connector);
6285
6286         return amdgpu_dm_connector->num_modes;
6287 }
6288
6289 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6290                                      struct amdgpu_dm_connector *aconnector,
6291                                      int connector_type,
6292                                      struct dc_link *link,
6293                                      int link_index)
6294 {
6295         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6296
6297         /*
6298          * Some of the properties below require access to state, like bpc.
6299          * Allocate some default initial connector state with our reset helper.
6300          */
6301         if (aconnector->base.funcs->reset)
6302                 aconnector->base.funcs->reset(&aconnector->base);
6303
6304         aconnector->connector_id = link_index;
6305         aconnector->dc_link = link;
6306         aconnector->base.interlace_allowed = false;
6307         aconnector->base.doublescan_allowed = false;
6308         aconnector->base.stereo_allowed = false;
6309         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6310         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6311         aconnector->audio_inst = -1;
6312         mutex_init(&aconnector->hpd_lock);
6313
6314         /*
6315          * Configure HPD hot plug support: the connector->polled default
6316          * value is 0, which means HPD hot plug is not supported.
6317          */
6318         switch (connector_type) {
6319         case DRM_MODE_CONNECTOR_HDMIA:
6320                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6321                 aconnector->base.ycbcr_420_allowed =
6322                         link->link_enc->features.hdmi_ycbcr420_supported;
6323                 break;
6324         case DRM_MODE_CONNECTOR_DisplayPort:
6325                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6326                 aconnector->base.ycbcr_420_allowed =
6327                         link->link_enc->features.dp_ycbcr420_supported;
6328                 break;
6329         case DRM_MODE_CONNECTOR_DVID:
6330                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6331                 break;
6332         default:
6333                 break;
6334         }
6335
6336         drm_object_attach_property(&aconnector->base.base,
6337                                 dm->ddev->mode_config.scaling_mode_property,
6338                                 DRM_MODE_SCALE_NONE);
6339
6340         drm_object_attach_property(&aconnector->base.base,
6341                                 adev->mode_info.underscan_property,
6342                                 UNDERSCAN_OFF);
6343         drm_object_attach_property(&aconnector->base.base,
6344                                 adev->mode_info.underscan_hborder_property,
6345                                 0);
6346         drm_object_attach_property(&aconnector->base.base,
6347                                 adev->mode_info.underscan_vborder_property,
6348                                 0);
6349
6350         if (!aconnector->mst_port)
6351                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6352
6353         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6354         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6355         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6356
6357         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6358             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6359                 drm_object_attach_property(&aconnector->base.base,
6360                                 adev->mode_info.abm_level_property, 0);
6361         }
6362
6363         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6364             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6365             connector_type == DRM_MODE_CONNECTOR_eDP) {
6366                 drm_object_attach_property(
6367                         &aconnector->base.base,
6368                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
6369
6370                 if (!aconnector->mst_port)
6371                         drm_connector_attach_vrr_capable_property(&aconnector->base);
6372
6373 #ifdef CONFIG_DRM_AMD_DC_HDCP
6374                 if (adev->dm.hdcp_workqueue)
6375                         drm_connector_attach_content_protection_property(&aconnector->base, true);
6376 #endif
6377         }
6378 }
6379
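/*
 * Translate a Linux i2c transfer into a DC i2c_command: each i2c_msg maps to
 * one payload, and dc_submit_i2c() executes the whole chain on the DDC line.
 * Returning num on success matches the i2c core's convention of returning
 * the number of messages transferred.
 */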
6380 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6381                               struct i2c_msg *msgs, int num)
6382 {
6383         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6384         struct ddc_service *ddc_service = i2c->ddc_service;
6385         struct i2c_command cmd;
6386         int i;
6387         int result = -EIO;
6388
6389         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6390
6391         if (!cmd.payloads)
6392                 return result;
6393
6394         cmd.number_of_payloads = num;
6395         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6396         cmd.speed = 100;
6397
6398         for (i = 0; i < num; i++) {
6399                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6400                 cmd.payloads[i].address = msgs[i].addr;
6401                 cmd.payloads[i].length = msgs[i].len;
6402                 cmd.payloads[i].data = msgs[i].buf;
6403         }
6404
6405         if (dc_submit_i2c(
6406                         ddc_service->ctx->dc,
6407                         ddc_service->ddc_pin->hw_info.ddc_channel,
6408                         &cmd))
6409                 result = num;
6410
6411         kfree(cmd.payloads);
6412         return result;
6413 }
6414
6415 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6416 {
6417         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6418 }
6419
6420 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6421         .master_xfer = amdgpu_dm_i2c_xfer,
6422         .functionality = amdgpu_dm_i2c_func,
6423 };
6424
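/*
 * Wrap a DC ddc_service in a Linux i2c_adapter so generic i2c users (EDID
 * probing, userspace DDC tools) reach the link's DDC line through DC. The
 * *res out-parameter is not written by this function.
 */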
6425 static struct amdgpu_i2c_adapter *
6426 create_i2c(struct ddc_service *ddc_service,
6427            int link_index,
6428            int *res)
6429 {
6430         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6431         struct amdgpu_i2c_adapter *i2c;
6432
6433         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6434         if (!i2c)
6435                 return NULL;
6436         i2c->base.owner = THIS_MODULE;
6437         i2c->base.class = I2C_CLASS_DDC;
6438         i2c->base.dev.parent = &adev->pdev->dev;
6439         i2c->base.algo = &amdgpu_dm_i2c_algo;
6440         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6441         i2c_set_adapdata(&i2c->base, i2c);
6442         i2c->ddc_service = ddc_service;
6443         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6444
6445         return i2c;
6446 }
6447
6448
6449 /*
6450  * Note: this function assumes that dc_link_detect() was called for the
6451  * dc_link which will be represented by this aconnector.
6452  */
6453 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6454                                     struct amdgpu_dm_connector *aconnector,
6455                                     uint32_t link_index,
6456                                     struct amdgpu_encoder *aencoder)
6457 {
6458         int res = 0;
6459         int connector_type;
6460         struct dc *dc = dm->dc;
6461         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6462         struct amdgpu_i2c_adapter *i2c;
6463
6464         link->priv = aconnector;
6465
6466         DRM_DEBUG_DRIVER("%s()\n", __func__);
6467
6468         i2c = create_i2c(link->ddc, link->link_index, &res);
6469         if (!i2c) {
6470                 DRM_ERROR("Failed to create i2c adapter data\n");
6471                 return -ENOMEM;
6472         }
6473
6474         aconnector->i2c = i2c;
6475         res = i2c_add_adapter(&i2c->base);
6476
6477         if (res) {
6478                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6479                 goto out_free;
6480         }
6481
6482         connector_type = to_drm_connector_type(link->connector_signal);
6483
6484         res = drm_connector_init_with_ddc(
6485                         dm->ddev,
6486                         &aconnector->base,
6487                         &amdgpu_dm_connector_funcs,
6488                         connector_type,
6489                         &i2c->base);
6490
6491         if (res) {
6492                 DRM_ERROR("connector_init failed\n");
6493                 aconnector->connector_id = -1;
6494                 goto out_free;
6495         }
6496
6497         drm_connector_helper_add(
6498                         &aconnector->base,
6499                         &amdgpu_dm_connector_helper_funcs);
6500
6501         amdgpu_dm_connector_init_helper(
6502                 dm,
6503                 aconnector,
6504                 connector_type,
6505                 link,
6506                 link_index);
6507
6508         drm_connector_attach_encoder(
6509                 &aconnector->base, &aencoder->base);
6510
6511         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6512                 || connector_type == DRM_MODE_CONNECTOR_eDP)
6513                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6514
6515 out_free:
6516         if (res) {
6517                 kfree(i2c);
6518                 aconnector->i2c = NULL;
6519         }
6520         return res;
6521 }
6522
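/*
 * Any encoder can be routed to any CRTC, so the possible_crtcs mask is just
 * the low num_crtc bits set (e.g. 4 CRTCs -> 0xf), clamped at the 6 CRTCs
 * the display hardware supports.
 */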
6523 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6524 {
6525         switch (adev->mode_info.num_crtc) {
6526         case 1:
6527                 return 0x1;
6528         case 2:
6529                 return 0x3;
6530         case 3:
6531                 return 0x7;
6532         case 4:
6533                 return 0xf;
6534         case 5:
6535                 return 0x1f;
6536         case 6:
6537         default:
6538                 return 0x3f;
6539         }
6540 }
6541
6542 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6543                                   struct amdgpu_encoder *aencoder,
6544                                   uint32_t link_index)
6545 {
6546         struct amdgpu_device *adev = drm_to_adev(dev);
6547
6548         int res = drm_encoder_init(dev,
6549                                    &aencoder->base,
6550                                    &amdgpu_dm_encoder_funcs,
6551                                    DRM_MODE_ENCODER_TMDS,
6552                                    NULL);
6553
6554         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6555
6556         if (!res)
6557                 aencoder->encoder_id = link_index;
6558         else
6559                 aencoder->encoder_id = -1;
6560
6561         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6562
6563         return res;
6564 }
6565
6566 static void manage_dm_interrupts(struct amdgpu_device *adev,
6567                                  struct amdgpu_crtc *acrtc,
6568                                  bool enable)
6569 {
6570         /*
6571          * We have no guarantee that the frontend index maps to the same
6572          * backend index - some even map to more than one.
6573          *
6574          * TODO: Use a different interrupt or check DC itself for the mapping.
6575          */
6576         int irq_type =
6577                 amdgpu_display_crtc_idx_to_irq_type(
6578                         adev,
6579                         acrtc->crtc_id);
6580
6581         if (enable) {
6582                 drm_crtc_vblank_on(&acrtc->base);
6583                 amdgpu_irq_get(
6584                         adev,
6585                         &adev->pageflip_irq,
6586                         irq_type);
6587         } else {
6588
6589                 amdgpu_irq_put(
6590                         adev,
6591                         &adev->pageflip_irq,
6592                         irq_type);
6593                 drm_crtc_vblank_off(&acrtc->base);
6594         }
6595 }
6596
6597 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6598                                       struct amdgpu_crtc *acrtc)
6599 {
6600         int irq_type =
6601                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6602
6603         /*
6604          * This reads the current state for the IRQ and forcibly reapplies
6605          * the setting to hardware.
6606          */
6607         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6608 }
6609
6610 static bool
6611 is_scaling_state_different(const struct dm_connector_state *dm_state,
6612                            const struct dm_connector_state *old_dm_state)
6613 {
6614         if (dm_state->scaling != old_dm_state->scaling)
6615                 return true;
6616         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6617                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6618                         return true;
6619         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6620                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6621                         return true;
6622         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6623                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6624                 return true;
6625         return false;
6626 }
6627
6628 #ifdef CONFIG_DRM_AMD_DC_HDCP
6629 static bool is_content_protection_different(struct drm_connector_state *state,
6630                                             const struct drm_connector_state *old_state,
6631                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6632 {
6633         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6634
6635         if (old_state->hdcp_content_type != state->hdcp_content_type &&
6636             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6637                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6638                 return true;
6639         }
6640
6641         /* CP is being re-enabled, so ignore this transition */
6642         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6643             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6644                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6645                 return false;
6646         }
6647
6648         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6649         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6650             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6651                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6652
6653         /* Check that something is actually connected and enabled; otherwise
6654          * we would start HDCP with no active sink (hot-plug, headless S3, DPMS).
6655          */
6656         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6657             aconnector->dc_sink != NULL)
6658                 return true;
6659
6660         if (old_state->content_protection == state->content_protection)
6661                 return false;
6662
6663         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6664                 return true;
6665
6666         return false;
6667 }
6668
6669 #endif
6670 static void remove_stream(struct amdgpu_device *adev,
6671                           struct amdgpu_crtc *acrtc,
6672                           struct dc_stream_state *stream)
6673 {
6674         /* This is the update-mode case: the stream is being removed. */
6675
6676         acrtc->otg_inst = -1;
6677         acrtc->enabled = false;
6678 }
6679
6680 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6681                                struct dc_cursor_position *position)
6682 {
6683         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6684         int x, y;
6685         int xorigin = 0, yorigin = 0;
6686
6687         position->enable = false;
6688         position->x = 0;
6689         position->y = 0;
6690
6691         if (!crtc || !plane->state->fb)
6692                 return 0;
6693
6694         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6695             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6696                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6697                           __func__,
6698                           plane->state->crtc_w,
6699                           plane->state->crtc_h);
6700                 return -EINVAL;
6701         }
6702
6703         x = plane->state->crtc_x;
6704         y = plane->state->crtc_y;
6705
6706         if (x <= -amdgpu_crtc->max_cursor_width ||
6707             y <= -amdgpu_crtc->max_cursor_height)
6708                 return 0;
6709
6710         if (x < 0) {
6711                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6712                 x = 0;
6713         }
6714         if (y < 0) {
6715                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6716                 y = 0;
6717         }
6718         position->enable = true;
6719         position->translate_by_source = true;
6720         position->x = x;
6721         position->y = y;
6722         position->x_hotspot = xorigin;
6723         position->y_hotspot = yorigin;
6724
6725         return 0;
6726 }
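
/*
 * Example of the hotspot clamping above: a 64x64 cursor at crtc_x = -16
 * yields x = 0 with x_hotspot = 16, i.e. scanout starts 16 pixels into the
 * cursor image, so the cursor appears to slide smoothly off the left edge.
 */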
6727
6728 static void handle_cursor_update(struct drm_plane *plane,
6729                                  struct drm_plane_state *old_plane_state)
6730 {
6731         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6732         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6733         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6734         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6735         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6736         uint64_t address = afb ? afb->address : 0;
6737         struct dc_cursor_position position;
6738         struct dc_cursor_attributes attributes;
6739         int ret;
6740
6741         if (!plane->state->fb && !old_plane_state->fb)
6742                 return;
6743
6744         DRM_DEBUG_DRIVER("%s: crtc_id=%d with cursor size %dx%d\n",
6745                          __func__,
6746                          amdgpu_crtc->crtc_id,
6747                          plane->state->crtc_w,
6748                          plane->state->crtc_h);
6749
6750         ret = get_cursor_position(plane, crtc, &position);
6751         if (ret)
6752                 return;
6753
6754         if (!position.enable) {
6755                 /* turn off cursor */
6756                 if (crtc_state && crtc_state->stream) {
6757                         mutex_lock(&adev->dm.dc_lock);
6758                         dc_stream_set_cursor_position(crtc_state->stream,
6759                                                       &position);
6760                         mutex_unlock(&adev->dm.dc_lock);
6761                 }
6762                 return;
6763         }
6764
6765         amdgpu_crtc->cursor_width = plane->state->crtc_w;
6766         amdgpu_crtc->cursor_height = plane->state->crtc_h;
6767
6768         memset(&attributes, 0, sizeof(attributes));
6769         attributes.address.high_part = upper_32_bits(address);
6770         attributes.address.low_part  = lower_32_bits(address);
6771         attributes.width             = plane->state->crtc_w;
6772         attributes.height            = plane->state->crtc_h;
6773         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6774         attributes.rotation_angle    = 0;
6775         attributes.attribute_flags.value = 0;
6776
6777         attributes.pitch = attributes.width;
6778
6779         if (crtc_state->stream) {
6780                 mutex_lock(&adev->dm.dc_lock);
6781                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6782                                                          &attributes))
6783                         DRM_ERROR("DC failed to set cursor attributes\n");
6784
6785                 if (!dc_stream_set_cursor_position(crtc_state->stream,
6786                                                    &position))
6787                         DRM_ERROR("DC failed to set cursor position\n");
6788                 mutex_unlock(&adev->dm.dc_lock);
6789         }
6790 }
6791
6792 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6793 {
6794
6795         assert_spin_locked(&acrtc->base.dev->event_lock);
6796         WARN_ON(acrtc->event);
6797
6798         acrtc->event = acrtc->base.state->event;
6799
6800         /* Set the flip status */
6801         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6802
6803         /* Mark this event as consumed */
6804         acrtc->base.state->event = NULL;
6805
6806         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6807                                                  acrtc->crtc_id);
6808 }
6809
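/*
 * Runs in the page-flip path while a new surface is being applied: rebuild
 * the VRR parameters and infopacket for the stream, and record in the CRTC
 * state whether the timing adjustment or the infopacket actually changed so
 * the commit tail knows what to re-program.
 */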
6810 static void update_freesync_state_on_stream(
6811         struct amdgpu_display_manager *dm,
6812         struct dm_crtc_state *new_crtc_state,
6813         struct dc_stream_state *new_stream,
6814         struct dc_plane_state *surface,
6815         u32 flip_timestamp_in_us)
6816 {
6817         struct mod_vrr_params vrr_params;
6818         struct dc_info_packet vrr_infopacket = {0};
6819         struct amdgpu_device *adev = dm->adev;
6820         unsigned long flags;
6821
6822         if (!new_stream)
6823                 return;
6824
6825         /*
6826          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6827          * For now it's sufficient to just guard against these conditions.
6828          */
6829
6830         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6831                 return;
6832
6833         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6834         vrr_params = new_crtc_state->vrr_params;
6835
6836         if (surface) {
6837                 mod_freesync_handle_preflip(
6838                         dm->freesync_module,
6839                         surface,
6840                         new_stream,
6841                         flip_timestamp_in_us,
6842                         &vrr_params);
6843
6844                 if (adev->family < AMDGPU_FAMILY_AI &&
6845                     amdgpu_dm_vrr_active(new_crtc_state)) {
6846                         mod_freesync_handle_v_update(dm->freesync_module,
6847                                                      new_stream, &vrr_params);
6848
6849                         /* Need to call this before the frame ends. */
6850                         dc_stream_adjust_vmin_vmax(dm->dc,
6851                                                    new_crtc_state->stream,
6852                                                    &vrr_params.adjust);
6853                 }
6854         }
6855
6856         mod_freesync_build_vrr_infopacket(
6857                 dm->freesync_module,
6858                 new_stream,
6859                 &vrr_params,
6860                 PACKET_TYPE_VRR,
6861                 TRANSFER_FUNC_UNKNOWN,
6862                 &vrr_infopacket);
6863
6864         new_crtc_state->freesync_timing_changed |=
6865                 (memcmp(&new_crtc_state->vrr_params.adjust,
6866                         &vrr_params.adjust,
6867                         sizeof(vrr_params.adjust)) != 0);
6868
6869         new_crtc_state->freesync_vrr_info_changed |=
6870                 (memcmp(&new_crtc_state->vrr_infopacket,
6871                         &vrr_infopacket,
6872                         sizeof(vrr_infopacket)) != 0);
6873
6874         new_crtc_state->vrr_params = vrr_params;
6875         new_crtc_state->vrr_infopacket = vrr_infopacket;
6876
6877         new_stream->adjust = new_crtc_state->vrr_params.adjust;
6878         new_stream->vrr_infopacket = vrr_infopacket;
6879
6880         if (new_crtc_state->freesync_vrr_info_changed)
6881                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
6882                               new_crtc_state->base.crtc->base.id,
6883                               (int)new_crtc_state->base.vrr_enabled,
6884                               (int)vrr_params.state);
6885
6886         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6887 }
6888
6889 static void pre_update_freesync_state_on_stream(
6890         struct amdgpu_display_manager *dm,
6891         struct dm_crtc_state *new_crtc_state)
6892 {
6893         struct dc_stream_state *new_stream = new_crtc_state->stream;
6894         struct mod_vrr_params vrr_params;
6895         struct mod_freesync_config config = new_crtc_state->freesync_config;
6896         struct amdgpu_device *adev = dm->adev;
6897         unsigned long flags;
6898
6899         if (!new_stream)
6900                 return;
6901
6902         /*
6903          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6904          * For now it's sufficient to just guard against these conditions.
6905          */
6906         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6907                 return;
6908
6909         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6910         vrr_params = new_crtc_state->vrr_params;
6911
6912         if (new_crtc_state->vrr_supported &&
6913             config.min_refresh_in_uhz &&
6914             config.max_refresh_in_uhz) {
6915                 config.state = new_crtc_state->base.vrr_enabled ?
6916                         VRR_STATE_ACTIVE_VARIABLE :
6917                         VRR_STATE_INACTIVE;
6918         } else {
6919                 config.state = VRR_STATE_UNSUPPORTED;
6920         }
6921
6922         mod_freesync_build_vrr_params(dm->freesync_module,
6923                                       new_stream,
6924                                       &config, &vrr_params);
6925
6926         new_crtc_state->freesync_timing_changed |=
6927                 (memcmp(&new_crtc_state->vrr_params.adjust,
6928                         &vrr_params.adjust,
6929                         sizeof(vrr_params.adjust)) != 0);
6930
6931         new_crtc_state->vrr_params = vrr_params;
6932         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6933 }
6934
6935 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6936                                             struct dm_crtc_state *new_state)
6937 {
6938         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6939         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6940
6941         if (!old_vrr_active && new_vrr_active) {
6942                 /* Transition VRR inactive -> active:
6943                  * While VRR is active, we must not disable vblank irq, as a
6944                  * re-enable after a disable would compute bogus vblank/pflip
6945                  * timestamps if it happened inside the display front porch.
6946                  *
6947                  * We also need vupdate irq for the actual core vblank handling
6948                  * at end of vblank.
6949                  */
6950                 dm_set_vupdate_irq(new_state->base.crtc, true);
6951                 drm_crtc_vblank_get(new_state->base.crtc);
6952                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6953                                  __func__, new_state->base.crtc->base.id);
6954         } else if (old_vrr_active && !new_vrr_active) {
6955                 /* Transition VRR active -> inactive:
6956                  * Allow vblank irq disable again for fixed refresh rate.
6957                  */
6958                 dm_set_vupdate_irq(new_state->base.crtc, false);
6959                 drm_crtc_vblank_put(new_state->base.crtc);
6960                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6961                                  __func__, new_state->base.crtc->base.id);
6962         }
6963 }
6964
6965 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6966 {
6967         struct drm_plane *plane;
6968         struct drm_plane_state *old_plane_state, *new_plane_state;
6969         int i;
6970
6971         /*
6972          * TODO: Make this per-stream so we don't issue redundant updates for
6973          * commits with multiple streams.
6974          */
6975         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6976                                        new_plane_state, i)
6977                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6978                         handle_cursor_update(plane, old_plane_state);
6979 }
6980
6981 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6982                                     struct dc_state *dc_state,
6983                                     struct drm_device *dev,
6984                                     struct amdgpu_display_manager *dm,
6985                                     struct drm_crtc *pcrtc,
6986                                     bool wait_for_vblank)
6987 {
6988         uint32_t i;
6989         uint64_t timestamp_ns;
6990         struct drm_plane *plane;
6991         struct drm_plane_state *old_plane_state, *new_plane_state;
6992         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6993         struct drm_crtc_state *new_pcrtc_state =
6994                         drm_atomic_get_new_crtc_state(state, pcrtc);
6995         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6996         struct dm_crtc_state *dm_old_crtc_state =
6997                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6998         int planes_count = 0, vpos, hpos;
6999         long r;
7000         unsigned long flags;
7001         struct amdgpu_bo *abo;
7002         uint32_t target_vblank, last_flip_vblank;
7003         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7004         bool pflip_present = false;
7005         struct {
7006                 struct dc_surface_update surface_updates[MAX_SURFACES];
7007                 struct dc_plane_info plane_infos[MAX_SURFACES];
7008                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7009                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7010                 struct dc_stream_update stream_update;
7011         } *bundle;
7012
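	/*
	 * The update bundle carries MAX_SURFACES entries of several per-plane
	 * structures, which would make for a very large stack frame, so it is
	 * allocated from the heap instead.
	 */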
7013         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7014
7015         if (!bundle) {
7016                 dm_error("Failed to allocate update bundle\n");
7017                 goto cleanup;
7018         }
7019
7020         /*
7021          * Disable the cursor first if we're disabling all the planes.
7022          * It'll remain on the screen after the planes are re-enabled
7023          * if we don't.
7024          */
7025         if (acrtc_state->active_planes == 0)
7026                 amdgpu_dm_commit_cursors(state);
7027
7028         /* update planes when needed */
7029         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7030                 struct drm_crtc *crtc = new_plane_state->crtc;
7031                 struct drm_crtc_state *new_crtc_state;
7032                 struct drm_framebuffer *fb = new_plane_state->fb;
7033                 bool plane_needs_flip;
7034                 struct dc_plane_state *dc_plane;
7035                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7036
7037                 /* Cursor plane is handled after stream updates */
7038                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7039                         continue;
7040
7041                 if (!fb || !crtc || pcrtc != crtc)
7042                         continue;
7043
7044                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7045                 if (!new_crtc_state->active)
7046                         continue;
7047
7048                 dc_plane = dm_new_plane_state->dc_state;
7049
7050                 bundle->surface_updates[planes_count].surface = dc_plane;
7051                 if (new_pcrtc_state->color_mgmt_changed) {
7052                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7053                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7054                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7055                 }
7056
7057                 fill_dc_scaling_info(new_plane_state,
7058                                      &bundle->scaling_infos[planes_count]);
7059
7060                 bundle->surface_updates[planes_count].scaling_info =
7061                         &bundle->scaling_infos[planes_count];
7062
7063                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7064
7065                 pflip_present = pflip_present || plane_needs_flip;
7066
7067                 if (!plane_needs_flip) {
7068                         planes_count += 1;
7069                         continue;
7070                 }
7071
7072                 abo = gem_to_amdgpu_bo(fb->obj[0]);
7073
7074                 /*
7075                  * Wait for all fences on this FB. Do a limited wait to avoid
7076                  * deadlock during GPU reset, when this fence will not signal
7077                  * but we hold the reservation lock for the BO.
7078                  */
7079                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7080                                                         false,
7081                                                         msecs_to_jiffies(5000));
7082                 if (unlikely(r <= 0))
7083                         DRM_ERROR("Waiting for fences timed out!");
7084
7085                 fill_dc_plane_info_and_addr(
7086                         dm->adev, new_plane_state,
7087                         dm_new_plane_state->tiling_flags,
7088                         &bundle->plane_infos[planes_count],
7089                         &bundle->flip_addrs[planes_count].address,
7090                         dm_new_plane_state->tmz_surface, false);
7091
7092                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7093                                  new_plane_state->plane->index,
7094                                  bundle->plane_infos[planes_count].dcc.enable);
7095
7096                 bundle->surface_updates[planes_count].plane_info =
7097                         &bundle->plane_infos[planes_count];
7098
7099                 /*
7100                  * Only allow immediate flips for fast updates that don't
7101                  * change FB pitch, DCC state, rotation or mirroring.
7102                  */
7103                 bundle->flip_addrs[planes_count].flip_immediate =
7104                         crtc->state->async_flip &&
7105                         acrtc_state->update_type == UPDATE_TYPE_FAST;
7106
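		/*
		 * Record the flip time in microseconds. div_u64() is used for
		 * the ns -> us conversion because plain 64-bit division is not
		 * available on all 32-bit targets.
		 */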
7107                 timestamp_ns = ktime_get_ns();
7108                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7109                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7110                 bundle->surface_updates[planes_count].surface = dc_plane;
7111
7112                 if (!bundle->surface_updates[planes_count].surface) {
7113                         DRM_ERROR("No surface for CRTC: id=%d\n",
7114                                         acrtc_attach->crtc_id);
7115                         continue;
7116                 }
7117
7118                 if (plane == pcrtc->primary)
7119                         update_freesync_state_on_stream(
7120                                 dm,
7121                                 acrtc_state,
7122                                 acrtc_state->stream,
7123                                 dc_plane,
7124                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7125
7126                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7127                                  __func__,
7128                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7129                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7130
7131                 planes_count += 1;
7132
7133         }
7134
7135         if (pflip_present) {
7136                 if (!vrr_active) {
7137                         /* Use old throttling in non-vrr fixed refresh rate mode
7138                          * to keep flip scheduling based on target vblank counts
7139                          * working in a backwards compatible way, e.g., for
7140                          * clients using the GLX_OML_sync_control extension or
7141                          * DRI3/Present extension with defined target_msc.
7142                          */
7143                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7144                 } else {
7146                         /* For variable refresh rate mode only:
7147                          * Get vblank of last completed flip to avoid > 1 vrr
7148                          * flips per video frame by use of throttling, but allow
7149                          * flip programming anywhere in the possibly large
7150                          * variable vrr vblank interval for fine-grained flip
7151                          * timing control and more opportunity to avoid stutter
7152                          * on late submission of flips.
7153                          */
7154                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7155                         last_flip_vblank = acrtc_attach->last_flip_vblank;
7156                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7157                 }
7158
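		/*
		 * wait_for_vblank is a bool here, so the target is either the
		 * vblank of the last completed flip (+0) or the one right
		 * after it (+1).
		 */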
7159                 target_vblank = last_flip_vblank + wait_for_vblank;
7160
7161                 /*
7162                  * Wait until we're out of the vertical blank period before the one
7163                  * targeted by the flip
7164                  */
7165                 while ((acrtc_attach->enabled &&
7166                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7167                                                             0, &vpos, &hpos, NULL,
7168                                                             NULL, &pcrtc->hwmode)
7169                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7170                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7171                         (int)(target_vblank -
7172                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7173                         usleep_range(1000, 1100);
7174                 }
7175
7176                 /**
7177                  * Prepare the flip event for the pageflip interrupt to handle.
7178                  *
7179                  * This only works in the case where we've already turned on the
7180                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
7181                  * from 0 -> n planes we have to skip a hardware-generated event
7182                  * and rely on sending it from software.
7183                  */
7184                 if (acrtc_attach->base.state->event &&
7185                     acrtc_state->active_planes > 0) {
7186                         drm_crtc_vblank_get(pcrtc);
7187
7188                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7189
7190                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7191                         prepare_flip_isr(acrtc_attach);
7192
7193                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7194                 }
7195
7196                 if (acrtc_state->stream) {
7197                         if (acrtc_state->freesync_vrr_info_changed)
7198                                 bundle->stream_update.vrr_infopacket =
7199                                         &acrtc_state->stream->vrr_infopacket;
7200                 }
7201         }
7202
7203         /* Update the planes if changed or disable if we don't have any. */
7204         if ((planes_count || acrtc_state->active_planes == 0) &&
7205                 acrtc_state->stream) {
7206                 bundle->stream_update.stream = acrtc_state->stream;
7207                 if (new_pcrtc_state->mode_changed) {
7208                         bundle->stream_update.src = acrtc_state->stream->src;
7209                         bundle->stream_update.dst = acrtc_state->stream->dst;
7210                 }
7211
7212                 if (new_pcrtc_state->color_mgmt_changed) {
7213                         /*
7214                          * TODO: This isn't fully correct since we've actually
7215                          * already modified the stream in place.
7216                          */
7217                         bundle->stream_update.gamut_remap =
7218                                 &acrtc_state->stream->gamut_remap_matrix;
7219                         bundle->stream_update.output_csc_transform =
7220                                 &acrtc_state->stream->csc_color_matrix;
7221                         bundle->stream_update.out_transfer_func =
7222                                 acrtc_state->stream->out_transfer_func;
7223                 }
7224
7225                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7226                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7227                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7228
7229                 /*
7230                  * If FreeSync state on the stream has changed then we need to
7231                  * re-adjust the min/max bounds now that DC doesn't handle this
7232                  * as part of commit.
7233                  */
7234                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7235                     amdgpu_dm_vrr_active(acrtc_state)) {
7236                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7237                         dc_stream_adjust_vmin_vmax(
7238                                 dm->dc, acrtc_state->stream,
7239                                 &acrtc_state->vrr_params.adjust);
7240                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7241                 }
7242                 mutex_lock(&dm->dc_lock);
7243                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7244                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7245                         amdgpu_dm_psr_disable(acrtc_state->stream);
7246
7247                 dc_commit_updates_for_stream(dm->dc,
7248                                                      bundle->surface_updates,
7249                                                      planes_count,
7250                                                      acrtc_state->stream,
7251                                                      &bundle->stream_update,
7252                                                      dc_state);
7253
7254                 /**
7255                  * Enable or disable the interrupts on the backend.
7256                  *
7257                  * Most pipes are put into power gating when unused.
7258                  *
7259                  * When power gating is enabled on a pipe we lose the
7260                  * interrupt enablement state when power gating is disabled.
7261                  *
7262                  * So we need to update the IRQ control state in hardware
7263                  * whenever the pipe turns on (since it could be previously
7264                  * power gated) or off (since some pipes can't be power gated
7265                  * on some ASICs).
7266                  */
7267                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7268                         dm_update_pflip_irq_state(drm_to_adev(dev),
7269                                                   acrtc_attach);
7270
7271                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7272                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7273                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7274                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7275                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7276                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7277                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7278                         amdgpu_dm_psr_enable(acrtc_state->stream);
7279                 }
7280
7281                 mutex_unlock(&dm->dc_lock);
7282         }
7283
7284         /*
7285          * Update cursor state *after* programming all the planes.
7286          * This avoids redundant programming in the case where we're going
7287          * to be disabling a single plane - those pipes are being disabled.
7288          */
7289         if (acrtc_state->active_planes)
7290                 amdgpu_dm_commit_cursors(state);
7291
7292 cleanup:
7293         kfree(bundle);
7294 }
7295
7296 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7297                                    struct drm_atomic_state *state)
7298 {
7299         struct amdgpu_device *adev = drm_to_adev(dev);
7300         struct amdgpu_dm_connector *aconnector;
7301         struct drm_connector *connector;
7302         struct drm_connector_state *old_con_state, *new_con_state;
7303         struct drm_crtc_state *new_crtc_state;
7304         struct dm_crtc_state *new_dm_crtc_state;
7305         const struct dc_stream_status *status;
7306         int i, inst;
7307
7308         /* Notify device removals. */
7309         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7310                 if (old_con_state->crtc != new_con_state->crtc) {
7311                         /* CRTC changes require notification. */
7312                         goto notify;
7313                 }
7314
7315                 if (!new_con_state->crtc)
7316                         continue;
7317
7318                 new_crtc_state = drm_atomic_get_new_crtc_state(
7319                         state, new_con_state->crtc);
7320
7321                 if (!new_crtc_state)
7322                         continue;
7323
7324                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7325                         continue;
7326
7327         notify:
7328                 aconnector = to_amdgpu_dm_connector(connector);
7329
7330                 mutex_lock(&adev->dm.audio_lock);
7331                 inst = aconnector->audio_inst;
7332                 aconnector->audio_inst = -1;
7333                 mutex_unlock(&adev->dm.audio_lock);
7334
7335                 amdgpu_dm_audio_eld_notify(adev, inst);
7336         }
7337
7338         /* Notify audio device additions. */
7339         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7340                 if (!new_con_state->crtc)
7341                         continue;
7342
7343                 new_crtc_state = drm_atomic_get_new_crtc_state(
7344                         state, new_con_state->crtc);
7345
7346                 if (!new_crtc_state)
7347                         continue;
7348
7349                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7350                         continue;
7351
7352                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7353                 if (!new_dm_crtc_state->stream)
7354                         continue;
7355
7356                 status = dc_stream_get_status(new_dm_crtc_state->stream);
7357                 if (!status)
7358                         continue;
7359
7360                 aconnector = to_amdgpu_dm_connector(connector);
7361
7362                 mutex_lock(&adev->dm.audio_lock);
7363                 inst = status->audio_inst;
7364                 aconnector->audio_inst = inst;
7365                 mutex_unlock(&adev->dm.audio_lock);
7366
7367                 amdgpu_dm_audio_eld_notify(adev, inst);
7368         }
7369 }
7370
7371 /*
7372  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7373  * @crtc_state: the DRM CRTC state
7374  * @stream_state: the DC stream state.
7375  *
7376  * Copy the mirrored transient state flags from DRM to DC. This is used to bring
7377  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7378  */
7379 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7380                                                 struct dc_stream_state *stream_state)
7381 {
7382         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7383 }
7384
7385 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7386                                    struct drm_atomic_state *state,
7387                                    bool nonblock)
7388 {
7389         struct drm_crtc *crtc;
7390         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7391         struct amdgpu_device *adev = drm_to_adev(dev);
7392         int i;
7393
7394         /*
7395          * We evade vblank and pflip interrupts on CRTCs that are undergoing
7396          * a modeset, being disabled, or have no active planes.
7397          *
7398          * It's done in atomic commit rather than commit tail for now since
7399          * some of these interrupt handlers access the current CRTC state and
7400          * potentially the stream pointer itself.
7401          *
7402          * Since the atomic state is swapped within atomic commit and not within
7403          * commit tail, this would lead to the new state (that hasn't been
7404          * committed yet) being accessed from within the handlers.
7405          *
7406          * TODO: Fix this so we can do this in commit tail and not have to block
7407          * in atomic check.
7408          */
7409         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7410                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7411
7412                 if (old_crtc_state->active &&
7413                     (!new_crtc_state->active ||
7414                      drm_atomic_crtc_needs_modeset(new_crtc_state)))
7415                         manage_dm_interrupts(adev, acrtc, false);
7416         }
7417         /*
7418          * Add a check here for SoCs that support a hardware cursor plane,
7419          * to unset legacy_cursor_update.
7420          */
7421
7422         return drm_atomic_helper_commit(dev, state, nonblock);
7423
7424         /*TODO Handle EINTR, reenable IRQ*/
7425 }
7426
7427 /**
7428  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7429  * @state: The atomic state to commit
7430  *
7431  * This will tell DC to commit the constructed DC state from atomic_check,
7432  * programming the hardware. Any failures here implies a hardware failure, since
7433  * programming the hardware. Any failure here implies a hardware failure, since
7434  */
7435 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7436 {
7437         struct drm_device *dev = state->dev;
7438         struct amdgpu_device *adev = drm_to_adev(dev);
7439         struct amdgpu_display_manager *dm = &adev->dm;
7440         struct dm_atomic_state *dm_state;
7441         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7442         uint32_t i, j;
7443         struct drm_crtc *crtc;
7444         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7445         unsigned long flags;
7446         bool wait_for_vblank = true;
7447         struct drm_connector *connector;
7448         struct drm_connector_state *old_con_state, *new_con_state;
7449         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7450         int crtc_disable_count = 0;
7451         bool mode_set_reset_required = false;
7452
7453         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7454
7455         dm_state = dm_atomic_get_new_state(state);
7456         if (dm_state && dm_state->context) {
7457                 dc_state = dm_state->context;
7458         } else {
7459                 /* No state changes, retain current state. */
7460                 dc_state_temp = dc_create_state(dm->dc);
7461                 ASSERT(dc_state_temp);
7462                 dc_state = dc_state_temp;
7463                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7464         }
7465
7466         /* update changed items */
7467         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7468                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7469
7470                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7471                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7472
7473                 DRM_DEBUG_DRIVER(
7474                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7475                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7476                         "connectors_changed:%d\n",
7477                         acrtc->crtc_id,
7478                         new_crtc_state->enable,
7479                         new_crtc_state->active,
7480                         new_crtc_state->planes_changed,
7481                         new_crtc_state->mode_changed,
7482                         new_crtc_state->active_changed,
7483                         new_crtc_state->connectors_changed);
7484
7485                 /* Copy all transient state flags into dc state */
7486                 if (dm_new_crtc_state->stream) {
7487                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7488                                                             dm_new_crtc_state->stream);
7489                 }
7490
7491                 /* Handle the headless hotplug case, updating new_state and
7492                  * aconnector as needed.
7493                  */
7494
7495                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7496
7497                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7498
7499                         if (!dm_new_crtc_state->stream) {
7500                                 /*
7501                                  * This could happen because of issues with
7502                                  * the delivery of userspace notifications.
7503                                  * In that case userspace tries to set a mode
7504                                  * on a display that is in fact disconnected;
7505                                  * dc_sink is NULL on the aconnector here.
7506                                  * We expect a mode reset to come soon.
7507                                  *
7508                                  * This can also happen when an unplug occurs
7509                                  * during the resume sequence.
7510                                  *
7511                                  * In this case, we want to pretend we still
7512                                  * have a sink to keep the pipe running so that
7513                                  * hw state is consistent with the sw state
7514                                  */
7515                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7516                                                 __func__, acrtc->base.base.id);
7517                                 continue;
7518                         }
7519
7520                         if (dm_old_crtc_state->stream)
7521                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7522
7523                         pm_runtime_get_noresume(dev->dev);
7524
7525                         acrtc->enabled = true;
7526                         acrtc->hw_mode = new_crtc_state->mode;
7527                         crtc->hwmode = new_crtc_state->mode;
7528                         mode_set_reset_required = true;
7529                 } else if (modereset_required(new_crtc_state)) {
7530                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7531                         /* i.e. reset mode */
7532                         if (dm_old_crtc_state->stream)
7533                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7534                         mode_set_reset_required = true;
7535                 }
7536         } /* for_each_crtc_in_state() */
7537
7538         if (dc_state) {
7539                 /* If there was a mode set or reset, disable eDP PSR. */
7540                 if (mode_set_reset_required)
7541                         amdgpu_dm_psr_disable_all(dm);
7542
7543                 dm_enable_per_frame_crtc_master_sync(dc_state);
7544                 mutex_lock(&dm->dc_lock);
7545                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7546                 mutex_unlock(&dm->dc_lock);
7547         }
7548
7549         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7550                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7551
7552                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7553
7554                 if (dm_new_crtc_state->stream != NULL) {
7555                         const struct dc_stream_status *status =
7556                                         dc_stream_get_status(dm_new_crtc_state->stream);
7557
7558                         if (!status)
7559                                 status = dc_stream_get_status_from_state(dc_state,
7560                                                                          dm_new_crtc_state->stream);
7561
7562                         if (!status)
7563                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7564                         else
7565                                 acrtc->otg_inst = status->primary_otg_inst;
7566                 }
7567         }
7568 #ifdef CONFIG_DRM_AMD_DC_HDCP
7569         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7570                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7571                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7572                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7573
7574                 new_crtc_state = NULL;
7575
7576                 if (acrtc)
7577                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7578
7579                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7580
7581                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7582                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7583                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7584                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7585                         continue;
7586                 }
7587
7588                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7589                         hdcp_update_display(
7590                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7591                                 new_con_state->hdcp_content_type,
7592                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7593                                                                                                          : false);
7594         }
7595 #endif
7596
7597         /* Handle connector state changes */
7598         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7599                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7600                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7601                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7602                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7603                 struct dc_stream_update stream_update;
7604                 struct dc_info_packet hdr_packet;
7605                 struct dc_stream_status *status = NULL;
7606                 bool abm_changed, hdr_changed, scaling_changed;
7607
7608                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7609                 memset(&stream_update, 0, sizeof(stream_update));
7610
7611                 if (acrtc) {
7612                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7613                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7614                 }
7615
7616                 /* Skip any modesets/resets */
7617                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7618                         continue;
7619
7620                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7621                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7622
7623                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7624                                                              dm_old_con_state);
7625
7626                 abm_changed = dm_new_crtc_state->abm_level !=
7627                               dm_old_crtc_state->abm_level;
7628
7629                 hdr_changed =
7630                         is_hdr_metadata_different(old_con_state, new_con_state);
7631
7632                 if (!scaling_changed && !abm_changed && !hdr_changed)
7633                         continue;
7634
7635                 stream_update.stream = dm_new_crtc_state->stream;
7636                 if (scaling_changed) {
7637                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7638                                         dm_new_con_state, dm_new_crtc_state->stream);
7639
7640                         stream_update.src = dm_new_crtc_state->stream->src;
7641                         stream_update.dst = dm_new_crtc_state->stream->dst;
7642                 }
7643
7644                 if (abm_changed) {
7645                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7646
7647                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7648                 }
7649
7650                 if (hdr_changed) {
7651                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7652                         stream_update.hdr_static_metadata = &hdr_packet;
7653                 }
7654
7655                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7656                 WARN_ON(!status);
7657                 WARN_ON(!status->plane_count);
7658
7659                 /*
7660                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7661                  * Here we create an empty update on each plane.
7662                  * To fix this, DC should permit updating only stream properties.
7663                  */
7664                 for (j = 0; j < status->plane_count; j++)
7665                         dummy_updates[j].surface = status->plane_states[0];
7666
7667
7668                 mutex_lock(&dm->dc_lock);
7669                 dc_commit_updates_for_stream(dm->dc,
7670                                                      dummy_updates,
7671                                                      status->plane_count,
7672                                                      dm_new_crtc_state->stream,
7673                                                      &stream_update,
7674                                                      dc_state);
7675                 mutex_unlock(&dm->dc_lock);
7676         }
7677
7678         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7679         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7680                                       new_crtc_state, i) {
7681                 if (old_crtc_state->active && !new_crtc_state->active)
7682                         crtc_disable_count++;
7683
7684                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7685                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7686
7687                 /* Update freesync active state. */
7688                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7689
7690                 /* Handle vrr on->off / off->on transitions */
7691                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7692                                                 dm_new_crtc_state);
7693         }
7694
7695         /**
7696          * Enable interrupts for CRTCs that are newly enabled or went through
7697          * a modeset. It was intentionally deferred until after the front end
7698          * state was modified to wait until the OTG was on and so the IRQ
7699          * handlers didn't access stale or invalid state.
7700          */
7701         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7702                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7703
7704                 if (new_crtc_state->active &&
7705                     (!old_crtc_state->active ||
7706                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7707                         manage_dm_interrupts(adev, acrtc, true);
7708 #ifdef CONFIG_DEBUG_FS
7709                         /**
7710                          * Frontend may have changed so reapply the CRC capture
7711                          * settings for the stream.
7712                          */
7713                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7714
7715                         if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7716                                 amdgpu_dm_crtc_configure_crc_source(
7717                                         crtc, dm_new_crtc_state,
7718                                         dm_new_crtc_state->crc_src);
7719                         }
7720 #endif
7721                 }
7722         }
7723
7724         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7725                 if (new_crtc_state->async_flip)
7726                         wait_for_vblank = false;
7727
7728         /* Update planes when needed, per CRTC. */
7729         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7730                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7731
7732                 if (dm_new_crtc_state->stream)
7733                         amdgpu_dm_commit_planes(state, dc_state, dev,
7734                                                 dm, crtc, wait_for_vblank);
7735         }
7736
7737         /* Update audio instances for each connector. */
7738         amdgpu_dm_commit_audio(dev, state);
7739
7740         /*
7741          * Send a vblank event for each event not handled by a flip, and
7742          * mark the event consumed for drm_atomic_helper_commit_hw_done().
7743          */
7744         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7745         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7746
7747                 if (new_crtc_state->event)
7748                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7749
7750                 new_crtc_state->event = NULL;
7751         }
7752         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7753
7754         /* Signal HW programming completion */
7755         drm_atomic_helper_commit_hw_done(state);
7756
7757         if (wait_for_vblank)
7758                 drm_atomic_helper_wait_for_flip_done(dev, state);
7759
7760         drm_atomic_helper_cleanup_planes(dev, state);
7761
7762         /*
7763          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7764          * so we can put the GPU into runtime suspend if we're not driving any
7765          * displays anymore
7766          */
7767         for (i = 0; i < crtc_disable_count; i++)
7768                 pm_runtime_put_autosuspend(dev->dev);
7769         pm_runtime_mark_last_busy(dev->dev);
7770
7771         if (dc_state_temp)
7772                 dc_release_state(dc_state_temp);
7773 }
7774
7775
7776 static int dm_force_atomic_commit(struct drm_connector *connector)
7777 {
7778         int ret = 0;
7779         struct drm_device *ddev = connector->dev;
7780         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7781         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7782         struct drm_plane *plane = disconnected_acrtc->base.primary;
7783         struct drm_connector_state *conn_state;
7784         struct drm_crtc_state *crtc_state;
7785         struct drm_plane_state *plane_state;
7786
7787         if (!state)
7788                 return -ENOMEM;
7789
7790         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7791
7792         /* Construct an atomic state to restore previous display setting */
7793
7794         /*
7795          * Attach connectors to drm_atomic_state
7796          */
7797         conn_state = drm_atomic_get_connector_state(state, connector);
7798
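	/*
	 * PTR_ERR_OR_ZERO() yields 0 for a valid pointer and the negative
	 * errno carried by an ERR_PTR() otherwise.
	 */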
7799         ret = PTR_ERR_OR_ZERO(conn_state);
7800         if (ret)
7801                 goto err;
7802
7803         /* Attach crtc to drm_atomic_state*/
7804         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7805
7806         ret = PTR_ERR_OR_ZERO(crtc_state);
7807         if (ret)
7808                 goto err;
7809
7810         /* force a restore */
7811         crtc_state->mode_changed = true;
7812
7813         /* Attach plane to drm_atomic_state */
7814         plane_state = drm_atomic_get_plane_state(state, plane);
7815
7816         ret = PTR_ERR_OR_ZERO(plane_state);
7817         if (ret)
7818                 goto err;
7819
7820
7821         /* Call commit internally with the state we just constructed */
7822         ret = drm_atomic_commit(state);
7823         if (!ret)
7824                 return 0;
7825
7826 err:
7827         DRM_ERROR("Restoring old state failed with %i\n", ret);
7828         drm_atomic_state_put(state);
7829
7830         return ret;
7831 }
7832
7833 /*
7834  * This function handles all cases where a mode set does not come upon hotplug.
7835  * This includes when a display is unplugged and then plugged back into the
7836  * same port, and when running without usermode desktop manager support.
7837  */
7838 void dm_restore_drm_connector_state(struct drm_device *dev,
7839                                     struct drm_connector *connector)
7840 {
7841         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7842         struct amdgpu_crtc *disconnected_acrtc;
7843         struct dm_crtc_state *acrtc_state;
7844
7845         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7846                 return;
7847
7848         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7849         if (!disconnected_acrtc)
7850                 return;
7851
7852         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7853         if (!acrtc_state->stream)
7854                 return;
7855
7856         /*
7857          * If the previous sink was not released and differs from the current
7858          * one, we deduce that we cannot rely on a usermode call to turn the
7859          * display on, so we do it here.
7860          */
7861         if (acrtc_state->stream->sink != aconnector->dc_sink)
7862                 dm_force_atomic_commit(&aconnector->base);
7863 }
7864
7865 /*
7866  * Grabs all modesetting locks to serialize against any blocking commits,
7867  * and waits for completion of all nonblocking commits.
7868  */
7869 static int do_aquire_global_lock(struct drm_device *dev,
7870                                  struct drm_atomic_state *state)
7871 {
7872         struct drm_crtc *crtc;
7873         struct drm_crtc_commit *commit;
7874         long ret;
7875
7876         /*
7877          * Adding all modeset locks to acquire_ctx ensures
7878          * that when the framework releases it, the extra
7879          * locks we are taking here will get released too.
7880          */
7881         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7882         if (ret)
7883                 return ret;
7884
7885         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7886                 spin_lock(&crtc->commit_lock);
7887                 commit = list_first_entry_or_null(&crtc->commit_list,
7888                                 struct drm_crtc_commit, commit_entry);
7889                 if (commit)
7890                         drm_crtc_commit_get(commit);
7891                 spin_unlock(&crtc->commit_lock);
7892
7893                 if (!commit)
7894                         continue;
7895
7896                 /*
7897                  * Make sure all pending HW programming has completed and
7898                  * all page flips are done.
7899                  */
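		/*
		 * wait_for_completion_interruptible_timeout() returns the
		 * remaining jiffies (> 0) on success, 0 on timeout, or
		 * -ERESTARTSYS if interrupted, hence the checks below.
		 */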
7900                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7901
7902                 if (ret > 0)
7903                         ret = wait_for_completion_interruptible_timeout(
7904                                         &commit->flip_done, 10*HZ);
7905
7906                 if (ret == 0)
7907                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7908                                   "timed out\n", crtc->base.id, crtc->name);
7909
7910                 drm_crtc_commit_put(commit);
7911         }
7912
7913         return ret < 0 ? ret : 0;
7914 }
7915
7916 static void get_freesync_config_for_crtc(
7917         struct dm_crtc_state *new_crtc_state,
7918         struct dm_connector_state *new_con_state)
7919 {
7920         struct mod_freesync_config config = {0};
7921         struct amdgpu_dm_connector *aconnector =
7922                         to_amdgpu_dm_connector(new_con_state->base.connector);
7923         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7924         int vrefresh = drm_mode_vrefresh(mode);
7925
7926         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7927                                         vrefresh >= aconnector->min_vfreq &&
7928                                         vrefresh <= aconnector->max_vfreq;
7929
7930         if (new_crtc_state->vrr_supported) {
7931                 new_crtc_state->stream->ignore_msa_timing_param = true;
7932                 config.state = new_crtc_state->base.vrr_enabled ?
7933                                 VRR_STATE_ACTIVE_VARIABLE :
7934                                 VRR_STATE_INACTIVE;
7935                 config.min_refresh_in_uhz =
7936                                 aconnector->min_vfreq * 1000000;
7937                 config.max_refresh_in_uhz =
7938                                 aconnector->max_vfreq * 1000000;
7939                 config.vsif_supported = true;
7940                 config.btr = true;
7941         }
7942
7943         new_crtc_state->freesync_config = config;
7944 }
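/*
 * Worked example with illustrative numbers, not taken from any real panel:
 * a FreeSync display reporting min_vfreq = 48 and max_vfreq = 144 yields
 * config.min_refresh_in_uhz = 48000000 and config.max_refresh_in_uhz =
 * 144000000, and a 60 Hz mode satisfies 48 <= 60 <= 144, so vrr_supported
 * is set for the CRTC.
 */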
7945
7946 static void reset_freesync_config_for_crtc(
7947         struct dm_crtc_state *new_crtc_state)
7948 {
7949         new_crtc_state->vrr_supported = false;
7950
7951         memset(&new_crtc_state->vrr_params, 0,
7952                sizeof(new_crtc_state->vrr_params));
7953         memset(&new_crtc_state->vrr_infopacket, 0,
7954                sizeof(new_crtc_state->vrr_infopacket));
7955 }
7956
7957 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7958                                 struct drm_atomic_state *state,
7959                                 struct drm_crtc *crtc,
7960                                 struct drm_crtc_state *old_crtc_state,
7961                                 struct drm_crtc_state *new_crtc_state,
7962                                 bool enable,
7963                                 bool *lock_and_validation_needed)
7964 {
7965         struct dm_atomic_state *dm_state = NULL;
7966         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7967         struct dc_stream_state *new_stream;
7968         int ret = 0;
7969
7970         /*
7971          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7972          * update changed items
7973          */
7974         struct amdgpu_crtc *acrtc = NULL;
7975         struct amdgpu_dm_connector *aconnector = NULL;
7976         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7977         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7978
7979         new_stream = NULL;
7980
7981         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7982         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7983         acrtc = to_amdgpu_crtc(crtc);
7984         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7985
7986         /* TODO This hack should go away */
7987         if (aconnector && enable) {
7988                 /* Make sure fake sink is created in plug-in scenario */
7989                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7990                                                             &aconnector->base);
7991                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7992                                                             &aconnector->base);
7993
7994                 if (IS_ERR(drm_new_conn_state)) {
7995                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7996                         goto fail;
7997                 }
7998
7999                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8000                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8001
8002                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8003                         goto skip_modeset;
8004
8005                 new_stream = create_validate_stream_for_sink(aconnector,
8006                                                              &new_crtc_state->mode,
8007                                                              dm_new_conn_state,
8008                                                              dm_old_crtc_state->stream);
8009
8010                 /*
8011                  * We can have no stream on ACTION_SET if a display
8012                  * was disconnected during S3. In this case it is not an
8013                  * error: the OS will be updated after detection and
8014                  * will do the right thing on the next atomic commit.
8015                  */
8016
8017                 if (!new_stream) {
8018                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8019                                         __func__, acrtc->base.base.id);
8020                         ret = -ENOMEM;
8021                         goto fail;
8022                 }
8023
8024                 /*
8025                  * TODO: Check VSDB bits to decide whether this should
8026                  * be enabled or not.
8027                  */
8028                 new_stream->triggered_crtc_reset.enabled =
8029                         dm->force_timing_sync;
8030
8031                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8032
8033                 ret = fill_hdr_info_packet(drm_new_conn_state,
8034                                            &new_stream->hdr_static_metadata);
8035                 if (ret)
8036                         goto fail;
8037
8038                 /*
8039                  * If we already removed the old stream from the context
8040                  * (and set the new stream to NULL) then we can't reuse
8041                  * the old stream even if the stream and scaling are unchanged.
8042                  * We'll hit the BUG_ON and get a black screen.
8043                  *
8044                  * TODO: Refactor this function to allow this check to work
8045                  * in all conditions.
8046                  */
8047                 if (dm_new_crtc_state->stream &&
8048                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8049                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8050                         new_crtc_state->mode_changed = false;
8051                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8052                                          new_crtc_state->mode_changed);
8053                 }
8054         }
8055
8056         /* mode_changed flag may get updated above, need to check again */
8057         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8058                 goto skip_modeset;
8059
8060         DRM_DEBUG_DRIVER(
8061                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8062                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8063                 "connectors_changed:%d\n",
8064                 acrtc->crtc_id,
8065                 new_crtc_state->enable,
8066                 new_crtc_state->active,
8067                 new_crtc_state->planes_changed,
8068                 new_crtc_state->mode_changed,
8069                 new_crtc_state->active_changed,
8070                 new_crtc_state->connectors_changed);
8071
8072         /* Remove stream for any changed/disabled CRTC */
8073         if (!enable) {
8075                 if (!dm_old_crtc_state->stream)
8076                         goto skip_modeset;
8077
8078                 ret = dm_atomic_get_state(state, &dm_state);
8079                 if (ret)
8080                         goto fail;
8081
8082                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8083                                 crtc->base.id);
8084
8085                 /* i.e. reset mode */
8086                 if (dc_remove_stream_from_ctx(
8087                                 dm->dc,
8088                                 dm_state->context,
8089                                 dm_old_crtc_state->stream) != DC_OK) {
8090                         ret = -EINVAL;
8091                         goto fail;
8092                 }
8093
8094                 dc_stream_release(dm_old_crtc_state->stream);
8095                 dm_new_crtc_state->stream = NULL;
8096
8097                 reset_freesync_config_for_crtc(dm_new_crtc_state);
8098
8099                 *lock_and_validation_needed = true;
8100
8101         } else { /* Add stream for any updated/enabled CRTC */
8102                 /*
8103                  * Quick fix to prevent a NULL pointer dereference on new_stream
8104                  * when newly added MST connectors are not found in the existing
8105                  * crtc_state in chained (daisy-chain) mode.
8106                  * TODO: dig out the root cause of this.
8107                  */
8107                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8108                         goto skip_modeset;
8109
8110                 if (modereset_required(new_crtc_state))
8111                         goto skip_modeset;
8112
8113                 if (modeset_required(new_crtc_state, new_stream,
8114                                      dm_old_crtc_state->stream)) {
8116                         WARN_ON(dm_new_crtc_state->stream);
8117
8118                         ret = dm_atomic_get_state(state, &dm_state);
8119                         if (ret)
8120                                 goto fail;
8121
8122                         dm_new_crtc_state->stream = new_stream;
8123
8124                         dc_stream_retain(new_stream);
8125
8126                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8127                                                 crtc->base.id);
8128
8129                         if (dc_add_stream_to_ctx(
8130                                         dm->dc,
8131                                         dm_state->context,
8132                                         dm_new_crtc_state->stream) != DC_OK) {
8133                                 ret = -EINVAL;
8134                                 goto fail;
8135                         }
8136
8137                         *lock_and_validation_needed = true;
8138                 }
8139         }
8140
8141 skip_modeset:
8142         /* Release extra reference */
8143         if (new_stream)
8144                 dc_stream_release(new_stream);
8145
8146         /*
8147          * We want to do dc stream updates that do not require a
8148          * full modeset below.
8149          */
8150         if (!(enable && aconnector && new_crtc_state->active))
8151                 return 0;
8152         /*
8153          * Given the above conditions, the dc state cannot be NULL because:
8154          * 1. We're in the process of enabling CRTCs (the stream has just
8155          *    been added to the dc context, or is already in the context),
8156          * 2. the CRTC has a valid connector attached, and
8157          * 3. the CRTC is currently active and enabled.
8158          * => The dc stream state currently exists.
8159          */
8160         BUG_ON(dm_new_crtc_state->stream == NULL);
8161
8162         /* Scaling or underscan settings */
8163         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8164                 update_stream_scaling_settings(
8165                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8166
8167         /* ABM settings */
8168         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8169
8170         /*
8171          * Color management settings. We also update color properties
8172          * when a modeset is needed, to ensure it gets reprogrammed.
8173          */
8174         if (dm_new_crtc_state->base.color_mgmt_changed ||
8175             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8176                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8177                 if (ret)
8178                         goto fail;
8179         }
8180
8181         /* Update Freesync settings. */
8182         get_freesync_config_for_crtc(dm_new_crtc_state,
8183                                      dm_new_conn_state);
8184
8185         return ret;
8186
8187 fail:
8188         if (new_stream)
8189                 dc_stream_release(new_stream);
8190         return ret;
8191 }
8192
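     /*
      * should_reset_plane() - decide whether a plane must be torn down and
      * re-created for this commit. DC cannot update scaling, rotation,
      * blending, alpha, colorspace, pixel format or tiling/DCC in place, so
      * any such change on this plane - or on another non-cursor plane that
      * shares the same CRTC - forces a reset of all planes on the stream.
      */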
8193 static bool should_reset_plane(struct drm_atomic_state *state,
8194                                struct drm_plane *plane,
8195                                struct drm_plane_state *old_plane_state,
8196                                struct drm_plane_state *new_plane_state)
8197 {
8198         struct drm_plane *other;
8199         struct drm_plane_state *old_other_state, *new_other_state;
8200         struct drm_crtc_state *new_crtc_state;
8201         int i;
8202
8203         /*
8204          * TODO: Remove this hack once the checks below are sufficient
8205          * to determine when we need to reset all the planes on the
8206          * stream.
8207          */
8208         if (state->allow_modeset)
8209                 return true;
8210
8211         /* Exit early if we know that we're adding or removing the plane. */
8212         if (old_plane_state->crtc != new_plane_state->crtc)
8213                 return true;
8214
8215         /* old crtc == new_crtc == NULL, plane not in context. */
8216         if (!new_plane_state->crtc)
8217                 return false;
8218
8219         new_crtc_state =
8220                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8221
8222         if (!new_crtc_state)
8223                 return true;
8224
8225         /* CRTC Degamma changes currently require us to recreate planes. */
8226         if (new_crtc_state->color_mgmt_changed)
8227                 return true;
8228
8229         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8230                 return true;
8231
8232         /*
8233          * If there are any new primary or overlay planes being added or
8234          * removed then the z-order can potentially change. To ensure
8235          * correct z-order and pipe acquisition the current DC architecture
8236          * requires us to remove and recreate all existing planes.
8237          *
8238          * TODO: Come up with a more elegant solution for this.
8239          */
8240         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8241                 struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8242
8243                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8244                         continue;
8245
8246                 if (old_other_state->crtc != new_plane_state->crtc &&
8247                     new_other_state->crtc != new_plane_state->crtc)
8248                         continue;
8249
8250                 if (old_other_state->crtc != new_other_state->crtc)
8251                         return true;
8252
8253                 /* Src/dst size and scaling updates. */
8254                 if (old_other_state->src_w != new_other_state->src_w ||
8255                     old_other_state->src_h != new_other_state->src_h ||
8256                     old_other_state->crtc_w != new_other_state->crtc_w ||
8257                     old_other_state->crtc_h != new_other_state->crtc_h)
8258                         return true;
8259
8260                 /* Rotation / mirroring updates. */
8261                 if (old_other_state->rotation != new_other_state->rotation)
8262                         return true;
8263
8264                 /* Blending updates. */
8265                 if (old_other_state->pixel_blend_mode !=
8266                     new_other_state->pixel_blend_mode)
8267                         return true;
8268
8269                 /* Alpha updates. */
8270                 if (old_other_state->alpha != new_other_state->alpha)
8271                         return true;
8272
8273                 /* Colorspace changes. */
8274                 if (old_other_state->color_range != new_other_state->color_range ||
8275                     old_other_state->color_encoding != new_other_state->color_encoding)
8276                         return true;
8277
8278                 /* Framebuffer checks fall at the end. */
8279                 if (!old_other_state->fb || !new_other_state->fb)
8280                         continue;
8281
8282                 /* Pixel format changes can require bandwidth updates. */
8283                 if (old_other_state->fb->format != new_other_state->fb->format)
8284                         return true;
8285
8286                 old_dm_plane_state = to_dm_plane_state(old_other_state);
8287                 new_dm_plane_state = to_dm_plane_state(new_other_state);
8288
8289                 /* Tiling and DCC changes also require bandwidth updates. */
8290                 if (old_dm_plane_state->tiling_flags !=
8291                     new_dm_plane_state->tiling_flags)
8292                         return true;
8293         }
8294
8295         return false;
8296 }
8297
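     /*
      * dm_update_plane_state() - bring the DC context in sync with the
      * requested DRM plane state for a single plane. In the disable pass
      * (!enable) planes that need a reset are removed from the DC context;
      * in the enable pass a fresh dc_plane_state is created, filled from
      * the DRM state and attached to the CRTC's stream. Sets
      * *lock_and_validation_needed whenever global validation is required.
      */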
8298 static int dm_update_plane_state(struct dc *dc,
8299                                  struct drm_atomic_state *state,
8300                                  struct drm_plane *plane,
8301                                  struct drm_plane_state *old_plane_state,
8302                                  struct drm_plane_state *new_plane_state,
8303                                  bool enable,
8304                                  bool *lock_and_validation_needed)
8305 {
8307         struct dm_atomic_state *dm_state = NULL;
8308         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8309         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8310         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8311         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8312         struct amdgpu_crtc *new_acrtc;
8313         bool needs_reset;
8314         int ret = 0;
8315
8317         new_plane_crtc = new_plane_state->crtc;
8318         old_plane_crtc = old_plane_state->crtc;
8319         dm_new_plane_state = to_dm_plane_state(new_plane_state);
8320         dm_old_plane_state = to_dm_plane_state(old_plane_state);
8321
8322         /* TODO: Implement a better atomic check for the cursor plane. */
8323         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8324                 if (!enable || !new_plane_crtc ||
8325                         drm_atomic_plane_disabling(plane->state, new_plane_state))
8326                         return 0;
8327
8328                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8329
8330                 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8331                         (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8332                         DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8333                                                          new_plane_state->crtc_w, new_plane_state->crtc_h);
8334                         return -EINVAL;
8335                 }
8336
8337                 return 0;
8338         }
8339
8340         needs_reset = should_reset_plane(state, plane, old_plane_state,
8341                                          new_plane_state);
8342
8343         /* Remove any changed/removed planes */
8344         if (!enable) {
8345                 if (!needs_reset)
8346                         return 0;
8347
8348                 if (!old_plane_crtc)
8349                         return 0;
8350
8351                 old_crtc_state = drm_atomic_get_old_crtc_state(
8352                                 state, old_plane_crtc);
8353                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8354
8355                 if (!dm_old_crtc_state->stream)
8356                         return 0;
8357
8358                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8359                                 plane->base.id, old_plane_crtc->base.id);
8360
8361                 ret = dm_atomic_get_state(state, &dm_state);
8362                 if (ret)
8363                         return ret;
8364
8365                 if (!dc_remove_plane_from_context(
8366                                 dc,
8367                                 dm_old_crtc_state->stream,
8368                                 dm_old_plane_state->dc_state,
8369                                 dm_state->context)) {
8371                         return -EINVAL;
8372                 }
8373
8375                 dc_plane_state_release(dm_old_plane_state->dc_state);
8376                 dm_new_plane_state->dc_state = NULL;
8377
8378                 *lock_and_validation_needed = true;
8379
8380         } else { /* Add new planes */
8381                 struct dc_plane_state *dc_new_plane_state;
8382
8383                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8384                         return 0;
8385
8386                 if (!new_plane_crtc)
8387                         return 0;
8388
8389                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8390                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8391
8392                 if (!dm_new_crtc_state->stream)
8393                         return 0;
8394
8395                 if (!needs_reset)
8396                         return 0;
8397
8398                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8399                 if (ret)
8400                         return ret;
8401
8402                 WARN_ON(dm_new_plane_state->dc_state);
8403
8404                 dc_new_plane_state = dc_create_plane_state(dc);
8405                 if (!dc_new_plane_state)
8406                         return -ENOMEM;
8407
8408                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8409                                 plane->base.id, new_plane_crtc->base.id);
8410
8411                 ret = fill_dc_plane_attributes(
8412                         drm_to_adev(new_plane_crtc->dev),
8413                         dc_new_plane_state,
8414                         new_plane_state,
8415                         new_crtc_state);
8416                 if (ret) {
8417                         dc_plane_state_release(dc_new_plane_state);
8418                         return ret;
8419                 }
8420
8421                 ret = dm_atomic_get_state(state, &dm_state);
8422                 if (ret) {
8423                         dc_plane_state_release(dc_new_plane_state);
8424                         return ret;
8425                 }
8426
8427                 /*
8428                  * Any atomic check errors that occur after this will
8429                  * not need a release. The plane state will be attached
8430                  * to the stream, and therefore part of the atomic
8431                  * state. It'll be released when the atomic state is
8432                  * cleaned.
8433                  */
8434                 if (!dc_add_plane_to_context(
8435                                 dc,
8436                                 dm_new_crtc_state->stream,
8437                                 dc_new_plane_state,
8438                                 dm_state->context)) {
8439
8440                         dc_plane_state_release(dc_new_plane_state);
8441                         return -EINVAL;
8442                 }
8443
8444                 dm_new_plane_state->dc_state = dc_new_plane_state;
8445
8446                 /* Tell DC to do a full surface update every time there
8447                  * is a plane change. Inefficient, but works for now.
8448                  */
8449                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8450
8451                 *lock_and_validation_needed = true;
8452         }
8453
8455         return ret;
8456 }
8457
8458 #if defined(CONFIG_DRM_AMD_DC_DCN)
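     /*
      * If the CRTC is driven through an MST connector, add every CRTC that
      * shares the same MST topology to the atomic state, so DSC bandwidth
      * can be recomputed for the topology as a whole.
      */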
8459 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8460 {
8461         struct drm_connector *connector;
8462         struct drm_connector_state *conn_state;
8463         struct amdgpu_dm_connector *aconnector = NULL;
8464         int i;
8465         for_each_new_connector_in_state(state, connector, conn_state, i) {
8466                 if (conn_state->crtc != crtc)
8467                         continue;
8468
8469                 aconnector = to_amdgpu_dm_connector(connector);
8470                 if (!aconnector->port || !aconnector->mst_port)
8471                         aconnector = NULL;
8472                 else
8473                         break;
8474         }
8475
8476         if (!aconnector)
8477                 return 0;
8478
8479         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8480 }
8481 #endif
8482
8483 /**
8484  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8485  * @dev: The DRM device
8486  * @state: The atomic state to commit
8487  *
8488  * Validate that the given atomic state is programmable by DC into hardware.
8489  * This involves constructing a &struct dc_state reflecting the new hardware
8490  * state we wish to commit, then querying DC to see if it is programmable. It's
8491  * important not to modify the existing DC state. Otherwise, atomic_check
8492  * may unexpectedly commit hardware changes.
8493  *
8494  * When validating the DC state, it's important that the right locks are
8495  * acquired. For the full update case, which removes/adds/updates streams on
8496  * one CRTC while flipping on another, acquiring the global lock guarantees
8497  * that any such full update commit will wait for completion of any
8498  * outstanding flip using DRM's synchronization events.
8499  *
8500  * Note that DM adds the affected connectors for all CRTCs in state, even when that
8501  * might not seem necessary. This is because DC stream creation requires the
8502  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8503  * be possible but non-trivial - a possible TODO item.
8504  *
8505  * Return: 0 on success, or a negative error code on failure.
8506  */
8507 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8508                                   struct drm_atomic_state *state)
8509 {
8510         struct amdgpu_device *adev = drm_to_adev(dev);
8511         struct dm_atomic_state *dm_state = NULL;
8512         struct dc *dc = adev->dm.dc;
8513         struct drm_connector *connector;
8514         struct drm_connector_state *old_con_state, *new_con_state;
8515         struct drm_crtc *crtc;
8516         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8517         struct drm_plane *plane;
8518         struct drm_plane_state *old_plane_state, *new_plane_state;
8519         enum dc_status status;
8520         int ret, i;
8521         bool lock_and_validation_needed = false;
8522
8523         ret = drm_atomic_helper_check_modeset(dev, state);
8524         if (ret)
8525                 goto fail;
8526
8527         /* Check connector changes */
8528         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8529                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8530                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8531
8532                 /* Skip connectors that are disabled or part of modeset already. */
8533                 if (!old_con_state->crtc && !new_con_state->crtc)
8534                         continue;
8535
8536                 if (!new_con_state->crtc)
8537                         continue;
8538
8539                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8540                 if (IS_ERR(new_crtc_state)) {
8541                         ret = PTR_ERR(new_crtc_state);
8542                         goto fail;
8543                 }
8544
8545                 if (dm_old_con_state->abm_level !=
8546                     dm_new_con_state->abm_level)
8547                         new_crtc_state->connectors_changed = true;
8548         }
8549
8550 #if defined(CONFIG_DRM_AMD_DC_DCN)
8551         if (adev->asic_type >= CHIP_NAVI10) {
8552                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8553                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8554                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8555                                 if (ret)
8556                                         goto fail;
8557                         }
8558                 }
8559         }
8560 #endif
8561         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8562                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8563                     !new_crtc_state->color_mgmt_changed &&
8564                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8565                         continue;
8566
8567                 if (!new_crtc_state->enable)
8568                         continue;
8569
8570                 ret = drm_atomic_add_affected_connectors(state, crtc);
8571                 if (ret)
8572                         goto fail;
8573
8574                 ret = drm_atomic_add_affected_planes(state, crtc);
8575                 if (ret)
8576                         goto fail;
8577         }
8578
8579         /*
8580          * Add all primary and overlay planes on the CRTC to the state
8581          * whenever a plane is enabled to maintain correct z-ordering
8582          * and to enable fast surface updates.
8583          */
8584         drm_for_each_crtc(crtc, dev) {
8585                 bool modified = false;
8586
8587                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8588                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8589                                 continue;
8590
8591                         if (new_plane_state->crtc == crtc ||
8592                             old_plane_state->crtc == crtc) {
8593                                 modified = true;
8594                                 break;
8595                         }
8596                 }
8597
8598                 if (!modified)
8599                         continue;
8600
8601                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8602                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8603                                 continue;
8604
8605                         new_plane_state =
8606                                 drm_atomic_get_plane_state(state, plane);
8607
8608                         if (IS_ERR(new_plane_state)) {
8609                                 ret = PTR_ERR(new_plane_state);
8610                                 goto fail;
8611                         }
8612                 }
8613         }
8614
8615         /* Prepass for updating tiling flags on new planes. */
8616         for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8617                 struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8618                 struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8619
8620                 ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8621                                   &new_dm_plane_state->tmz_surface);
8622                 if (ret)
8623                         goto fail;
8624         }
8625
8626         /* Remove existing planes if they are modified */
8627         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8628                 ret = dm_update_plane_state(dc, state, plane,
8629                                             old_plane_state,
8630                                             new_plane_state,
8631                                             false,
8632                                             &lock_and_validation_needed);
8633                 if (ret)
8634                         goto fail;
8635         }
8636
8637         /* Disable all crtcs which require disable */
8638         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8639                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8640                                            old_crtc_state,
8641                                            new_crtc_state,
8642                                            false,
8643                                            &lock_and_validation_needed);
8644                 if (ret)
8645                         goto fail;
8646         }
8647
8648         /* Enable all crtcs which require enable */
8649         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8650                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8651                                            old_crtc_state,
8652                                            new_crtc_state,
8653                                            true,
8654                                            &lock_and_validation_needed);
8655                 if (ret)
8656                         goto fail;
8657         }
8658
8659         /* Add new/modified planes */
8660         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8661                 ret = dm_update_plane_state(dc, state, plane,
8662                                             old_plane_state,
8663                                             new_plane_state,
8664                                             true,
8665                                             &lock_and_validation_needed);
8666                 if (ret)
8667                         goto fail;
8668         }
8669
8670         /* Run this here since we want to validate the streams we created */
8671         ret = drm_atomic_helper_check_planes(dev, state);
8672         if (ret)
8673                 goto fail;
8674
8675         if (state->legacy_cursor_update) {
8676                 /*
8677                  * This is a fast cursor update coming from the plane update
8678                  * helper, check if it can be done asynchronously for better
8679                  * performance.
8680                  */
8681                 state->async_update =
8682                         !drm_atomic_helper_async_check(dev, state);
8683
8684                 /*
8685                  * Skip the remaining global validation if this is an async
8686                  * update. Cursor updates can be done without affecting
8687                  * state or bandwidth calcs and this avoids the performance
8688                  * penalty of locking the private state object and
8689                  * allocating a new dc_state.
8690                  */
8691                 if (state->async_update)
8692                         return 0;
8693         }
8694
8695         /* Check scaling and underscan changes */
8696         /*
8697          * TODO: Scaling-change validation was removed because a new stream
8698          * cannot be committed into the context without causing a full reset.
8699          * Need to decide how to handle this.
8700          */
8700         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8701                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8702                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8703                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8704
8705                 /* Skip any modesets/resets */
8706                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8707                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8708                         continue;
8709
8710                 /* Skip anything that is not a scaling or underscan change */
8711                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8712                         continue;
8713
8714                 lock_and_validation_needed = true;
8715         }
8716
8717         /*
8718          * Streams and planes are reset when there are changes that affect
8719          * bandwidth. Anything that affects bandwidth needs to go through
8720          * DC global validation to ensure that the configuration can be applied
8721          * to hardware.
8722          *
8723          * We currently have to stall out here in atomic_check for outstanding
8724          * commits to finish in this case, because our IRQ handlers reference
8725          * DRM state directly - we can end up disabling interrupts too early
8726          * if we don't.
8727          *
8728          * TODO: Remove this stall and drop DM state private objects.
8729          */
8730         if (lock_and_validation_needed) {
8731                 ret = dm_atomic_get_state(state, &dm_state);
8732                 if (ret)
8733                         goto fail;
8734
8735                 ret = do_aquire_global_lock(dev, state);
8736                 if (ret)
8737                         goto fail;
8738
8739 #if defined(CONFIG_DRM_AMD_DC_DCN)
8740                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
                             ret = -EINVAL;
8741                         goto fail;
                     }
8742
8743                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8744                 if (ret)
8745                         goto fail;
8746 #endif
8747
8748                 /*
8749                  * Perform validation of MST topology in the state:
8750                  * We need to perform MST atomic check before calling
8751                  * dc_validate_global_state(), or there is a chance
8752                  * to get stuck in an infinite loop and hang eventually.
8753                  */
8754                 ret = drm_dp_mst_atomic_check(state);
8755                 if (ret)
8756                         goto fail;
8757                 status = dc_validate_global_state(dc, dm_state->context, false);
8758                 if (status != DC_OK) {
8759                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
8760                                        dc_status_to_str(status), status);
8761                         ret = -EINVAL;
8762                         goto fail;
8763                 }
8764         } else {
8765                 /*
8766                  * The commit is a fast update. Fast updates shouldn't change
8767                  * the DC context, affect global validation, and can have their
8768                  * commit work done in parallel with other commits not touching
8769                  * the same resource. If we have a new DC context as part of
8770                  * the DM atomic state from validation we need to free it and
8771                  * retain the existing one instead.
8772                  *
8773                  * Furthermore, since the DM atomic state only contains the DC
8774                  * context and can safely be annulled, we can free the state
8775                  * and clear the associated private object now to free
8776                  * some memory and avoid a possible use-after-free later.
8777                  */
8778
8779                 for (i = 0; i < state->num_private_objs; i++) {
8780                         struct drm_private_obj *obj = state->private_objs[i].ptr;
8781
8782                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
8783                                 int j = state->num_private_objs - 1;
8784
8785                                 dm_atomic_destroy_state(obj,
8786                                                 state->private_objs[i].state);
8787
8788                                 /*
8789                                  * If i is not at the end of the array, move the
8790                                  * last element into i's slot before truncating.
8791                                  */
8792                                 if (i != j)
8793                                         state->private_objs[i] =
8794                                                 state->private_objs[j];
8795
8796                                 state->private_objs[j].ptr = NULL;
8797                                 state->private_objs[j].state = NULL;
8798                                 state->private_objs[j].old_state = NULL;
8799                                 state->private_objs[j].new_state = NULL;
8800
8801                                 state->num_private_objs = j;
8802                                 break;
8803                         }
8804                 }
8805         }
8806
8807         /* Store the overall update type for use later in atomic check. */
8808         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8809                 struct dm_crtc_state *dm_new_crtc_state =
8810                         to_dm_crtc_state(new_crtc_state);
8811
8812                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
8813                                                          UPDATE_TYPE_FULL :
8814                                                          UPDATE_TYPE_FAST;
8815         }
8816
8817         /* Must be success (ret == 0) at this point */
8818         WARN_ON(ret);
8819         return ret;
8820
8821 fail:
8822         if (ret == -EDEADLK)
8823                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8824         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8825                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8826         else
8827                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8828
8829         return ret;
8830 }
8831
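     /*
      * Report whether the sink can ignore the MSA timing parameters: read
      * DP_DOWN_STREAM_PORT_COUNT from the DPCD and test the
      * DP_MSA_TIMING_PAR_IGNORED bit, a prerequisite for FreeSync over DP.
      */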
8832 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8833                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8834 {
8835         uint8_t dpcd_data;
8836         bool capable = false;
8837
8838         if (amdgpu_dm_connector->dc_link &&
8839                 dm_helpers_dp_read_dpcd(
8840                                 NULL,
8841                                 amdgpu_dm_connector->dc_link,
8842                                 DP_DOWN_STREAM_PORT_COUNT,
8843                                 &dpcd_data,
8844                                 sizeof(dpcd_data))) {
8845                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
8846         }
8847
8848         return capable;
8849 }
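
     /**
      * amdgpu_dm_update_freesync_caps - update the connector's FreeSync state
      * @connector: connector to inspect
      * @edid: EDID of the attached sink, or NULL if none
      *
      * Parse the EDID detailed monitor-range descriptor (only on DP/eDP sinks
      * that can ignore MSA timing parameters) to obtain the supported vertical
      * refresh range, then update the DM connector state and the DRM
      * "vrr_capable" property accordingly.
      */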
8850 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8851                                         struct edid *edid)
8852 {
8853         int i;
8854         bool edid_check_required;
8855         struct detailed_timing *timing;
8856         struct detailed_non_pixel *data;
8857         struct detailed_data_monitor_range *range;
8858         struct amdgpu_dm_connector *amdgpu_dm_connector =
8859                         to_amdgpu_dm_connector(connector);
8860         struct dm_connector_state *dm_con_state = NULL;
8861
8862         struct drm_device *dev = connector->dev;
8863         struct amdgpu_device *adev = drm_to_adev(dev);
8864         bool freesync_capable = false;
8865
8866         if (!connector->state) {
8867                 DRM_ERROR("%s - Connector has no state\n", __func__);
8868                 goto update;
8869         }
8870
8871         if (!edid) {
8872                 dm_con_state = to_dm_connector_state(connector->state);
8873
8874                 amdgpu_dm_connector->min_vfreq = 0;
8875                 amdgpu_dm_connector->max_vfreq = 0;
8876                 amdgpu_dm_connector->pixel_clock_mhz = 0;
8877
8878                 goto update;
8879         }
8880
8881         dm_con_state = to_dm_connector_state(connector->state);
8882
8883         edid_check_required = false;
8884         if (!amdgpu_dm_connector->dc_sink) {
8885                 DRM_ERROR("dc_sink is NULL, could not add the FreeSync module.\n");
8886                 goto update;
8887         }
8888         if (!adev->dm.freesync_module)
8889                 goto update;
8890         /*
8891          * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks only.
8892          */
8893         if (edid) {
8894                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8895                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8896                         edid_check_required = is_dp_capable_without_timing_msa(
8897                                                 adev->dm.dc,
8898                                                 amdgpu_dm_connector);
8899                 }
8900         }
8901         if (edid_check_required && (edid->version > 1 ||
8902            (edid->version == 1 && edid->revision > 1))) {
8903                 for (i = 0; i < 4; i++) {
8905                         timing  = &edid->detailed_timings[i];
8906                         data    = &timing->data.other_data;
8907                         range   = &data->data.range;
8908                         /*
8909                          * Check whether the monitor supports continuous frequency mode.
8910                          */
8911                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
8912                                 continue;
8913                         /*
8914                          * Check for the range-limits-only flag. If flags == 1,
8915                          * no additional timing information is provided.
8916                          * Default GTF, GTF secondary curve and CVT are not
8917                          * supported.
8918                          */
8919                         if (range->flags != 1)
8920                                 continue;
8921
8922                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8923                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8924                         amdgpu_dm_connector->pixel_clock_mhz =
8925                                 range->pixel_clock_mhz * 10;
8926                         break;
8927                 }
8928
8929                 if (amdgpu_dm_connector->max_vfreq -
8930                     amdgpu_dm_connector->min_vfreq > 10) {
8932                         freesync_capable = true;
8933                 }
8934         }
8935
8936 update:
8937         if (dm_con_state)
8938                 dm_con_state->freesync_capable = freesync_capable;
8939
8940         if (connector->vrr_capable_property)
8941                 drm_connector_set_vrr_capable_property(connector,
8942                                                        freesync_capable);
8943 }
8944
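     /*
      * Read the sink's PSR capability (DP_PSR_SUPPORT) from the DPCD on
      * connected eDP links and cache the reported PSR version; a version of
      * 0 marks the link as not PSR capable.
      */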
8945 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8946 {
8947         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8948
8949         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8950                 return;
8951         if (link->type == dc_connection_none)
8952                 return;
8953         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8954                                         dpcd_data, sizeof(dpcd_data))) {
8955                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8956
8957                 if (dpcd_data[0] == 0) {
8958                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8959                         link->psr_settings.psr_feature_enabled = false;
8960                 } else {
8961                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
8962                         link->psr_settings.psr_feature_enabled = true;
8963                 }
8964
8965                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8966         }
8967 }
8968
8969 /**
8970  * amdgpu_dm_link_setup_psr() - configure the PSR link
8971  * @stream: stream state
8972  *
8973  * Return: true on success
8974  */
8975 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8976 {
8977         struct dc_link *link = NULL;
8978         struct psr_config psr_config = {0};
8979         struct psr_context psr_context = {0};
8980         bool ret = false;
8981
8982         if (stream == NULL)
8983                 return false;
8984
8985         link = stream->link;
8986
8987         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8988
8989         if (psr_config.psr_version > 0) {
8990                 psr_config.psr_exit_link_training_required = 0x1;
8991                 psr_config.psr_frame_capture_indication_req = 0;
8992                 psr_config.psr_rfb_setup_time = 0x37;
8993                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8994                 psr_config.allow_smu_optimizations = 0x0;
8995
8996                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8998         }
8999         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9000
9001         return ret;
9002 }
9003
9004 /**
9005  * amdgpu_dm_psr_enable() - enable the PSR firmware
9006  * @stream: stream state
9007  *
9008  * Return: true on success
9009  */
9010 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9011 {
9012         struct dc_link *link = stream->link;
9013         unsigned int vsync_rate_hz = 0;
9014         struct dc_static_screen_params params = {0};
9015         /*
9016          * Calculate the number of static frames before generating an
9017          * interrupt to enter PSR; initialize with a failsafe of 2 frames.
9018          */
9019         unsigned int num_frames_static = 2;
9020
9021         DRM_DEBUG_DRIVER("Enabling psr...\n");
9022
9023         vsync_rate_hz = div64_u64(div64_u64((
9024                         stream->timing.pix_clk_100hz * 100),
9025                         stream->timing.v_total),
9026                         stream->timing.h_total);
9027
9028         /*
9029          * Round up: calculate the number of frames such that at least
9030          * 30 ms of time has passed.
9031          */
9032         if (vsync_rate_hz != 0) {
9033                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9034                 num_frames_static = (30000 / frame_time_microsec) + 1;
9035         }
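             /* e.g. at 60 Hz, frame_time is ~16666 us, so 30000 / 16666 + 1 = 2 frames */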
9036
9037         params.triggers.cursor_update = true;
9038         params.triggers.overlay_update = true;
9039         params.triggers.surface_update = true;
9040         params.num_frames = num_frames_static;
9041
9042         dc_stream_set_static_screen_params(link->ctx->dc,
9043                                            &stream, 1,
9044                                            &params);
9045
9046         return dc_link_set_psr_allow_active(link, true, false);
9047 }
9048
9049 /**
9050  * amdgpu_dm_psr_disable() - disable the PSR firmware
9051  * @stream: stream state
9052  *
9053  * Return: true on success
9054  */
9055 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9056 {
9058         DRM_DEBUG_DRIVER("Disabling psr...\n");
9059
9060         return dc_link_set_psr_allow_active(stream->link, false, true);
9061 }
9062
9063 /**
9064  * amdgpu_dm_psr_disable_all() - disable PSR f/w if PSR is enabled on any stream
9065  * @dm: display manager
9066  *
9067  * Return: true on success
9068  */
9069 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9070 {
9071         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9072         return dc_set_psr_allow_active(dm->dc, false);
9073 }
9074
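     /*
      * Apply the current force_timing_sync setting to every stream in the
      * DC state and retrigger per-frame CRTC master synchronization, all
      * under the dc_lock.
      */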
9075 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9076 {
9077         struct amdgpu_device *adev = drm_to_adev(dev);
9078         struct dc *dc = adev->dm.dc;
9079         int i;
9080
9081         mutex_lock(&adev->dm.dc_lock);
9082         if (dc->current_state) {
9083                 for (i = 0; i < dc->current_state->stream_count; ++i)
9084                         dc->current_state->streams[i]
9085                                 ->triggered_crtc_reset.enabled =
9086                                 adev->dm.force_timing_sync;
9087
9088                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9089                 dc_trigger_sync(dc, dc->current_state);
9090         }
9091         mutex_unlock(&adev->dm.dc_lock);
9092 }