drm/amd/display: Fix a list corruption
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (linux-2.6-microblaze.git)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc_state->stream);
        }
}

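/*
 * dm_crtc_get_scanoutpos() - Return the current scanout position and the
 * vblank window of the given CRTC, packed back into the register-style
 * format (low/high 16 bits) that the base driver expects.
 */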
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO: rework base driver to use values directly.
                 * For now, parse it back into reg-format.
                 */
                dc_stream_get_scanoutpos(acrtc_state->stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

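/*
 * Map an OTG (output timing generator) instance back to its amdgpu_crtc by
 * walking the DRM CRTC list. An otg_inst of -1 is invalid and falls back to
 * CRTC 0 with a warning.
 */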
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

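/*
 * Return true while VRR is active on the CRTC, whether the refresh rate is
 * actively variable or fixed.
 */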
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: the common IRQ parameters; used to look up the CRTC
 *                    whose pageflip completed
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        struct dm_crtc_state *acrtc_state;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED,
                                 amdgpu_crtc->crtc_id,
                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
        vrr_active = amdgpu_dm_vrr_active(acrtc_state);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at the start of this
         * vblank of pageflip completion, so last_flip_vblank is the forbidden
         * count for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

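/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * In VRR mode the end of front-porch is the first point where vblank
 * timestamps are valid, so core vblank handling (and BTR processing on
 * pre-DCE12 ASICs) is done here instead of in dm_crtc_high_irq().
 */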
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling is done here after the end of
                 * front-porch in VRR mode, as vblank timestamping only
                 * gives valid results once the front-porch has ended.
                 * This also delivers any page-flip completion events that
                 * were queued for us because the pageflip happened inside
                 * the front-porch.
                 */
                if (amdgpu_dm_vrr_active(acrtc_state)) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc_state->stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        acrtc_state = to_dm_crtc_state(acrtc->base.state);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      amdgpu_dm_vrr_active(acrtc_state),
                      acrtc_state->active_planes);

        /*
         * Core vblank handling at the start of front-porch is only possible
         * in non-VRR mode, as only then will vblank timestamping give valid
         * results while still in front-porch. Otherwise defer it to
         * dm_vupdate_high_irq() after the end of front-porch.
         */
        if (!amdgpu_dm_vrr_active(acrtc_state))
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * The following must happen at the start of vblank, for CRC
         * computation and below-the-range BTR support in VRR mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
            acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc_state->stream,
                                             &acrtc_state->vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
                                           &acrtc_state->vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then the DCN HUBP may be
         * clock-gated. In that case, pageflip completion interrupts won't
         * fire and pageflip completion events won't get delivered. Prevent
         * this by sending pending pageflip events from here if a flip is
         * still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc_state->active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_comressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

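/*
 * DRM audio component callback: look up the connector mapped to the given
 * audio pin, report whether it is enabled and copy its ELD into the buffer.
 */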
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

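/*
 * Register DM as a DRM audio component provider and set up one audio pin
 * per audio endpoint exposed by the DC resource pool.
 */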
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

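/*
 * Bring up the DMUB service: copy the firmware and VBIOS into the reserved
 * framebuffer windows, clear the mailbox, tracebuffer and firmware-state
 * regions, program the hardware parameters and wait for the firmware to
 * auto-load. A missing dmub_srv simply means the ASIC has no DMUB.
 */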
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load the dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init.
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                break;
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        init_data.soc_bounding_box = adev->dm.soc_bounding_box;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter =
                        !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module)
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++)
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        /* Guard against a NULL dc when we arrive here from the error path. */
        if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

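/*
 * Request and validate the external DMCU firmware on the ASICs that need
 * it (Raven/Picasso and Navi12) and register it for PSP loading; all other
 * supported ASICs get by without one and return 0 early.
 */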
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
#endif
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
                    ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        return dm_write_reg(adev->dm.dc->ctx, address, value);
}

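/*
 * Software-side DMUB setup: fetch and validate the DMUB firmware, create
 * the dmub_srv instance, size its memory regions and back them with a VRAM
 * buffer object plus the matching framebuffer info.
 */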
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        case CHIP_SIENNA_CICHLID:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
                break;
        case CHIP_NAVY_FLOUNDER:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
                break;
#endif
        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        /* Record the firmware version before it is logged below. */
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;

        if (!dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        /* Create the DMUB service. */
        status = dmub_srv_create(dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->bios_size;
        region_params.fw_bss_data = region_params.bss_data_size ?
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes) : NULL;
        region_params.fw_inst_const =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
        fb_info = adev->dm.dmub_fb_info;

        if (!fb_info) {
                DRM_ERROR(
                        "Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        return 0;
}

static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = dm_dmub_sw_init(adev);
        if (r)
                return r;

        return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        kfree(adev->dm.dmub_fb_info);
        adev->dm.dmub_fb_info = NULL;

        if (adev->dm.dmub_srv) {
                dmub_srv_destroy(adev->dm.dmub_srv);
                adev->dm.dmub_srv = NULL;
        }

        release_firmware(adev->dm.dmub_fw);
        adev->dm.dmub_fw = NULL;

        release_firmware(adev->dm.fw_dmcu);
        adev->dm.fw_dmcu = NULL;

        return 0;
}

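/*
 * Enable MST topology management on every connector detected as an MST
 * branch; on failure the link is demoted back to a single-stream (SST)
 * connection.
 */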
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        int ret = 0;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                    aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                         aconnector,
                                         aconnector->base.base.id);

                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        return ret;
}

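/*
 * Late init: program the backlight/ABM IRAM tables, either through the
 * DMCU or through DMUB on ASICs where ABM is implemented there, then kick
 * off MST link detection on all connectors.
 */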
static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = NULL;
        bool ret = true;

        if (!adev->dm.fw_dmcu)
                return detect_mst_link_for_all_connectors(adev_to_drm(adev));

        dmcu = adev->dm.dc->res_pool->dmcu;

        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        /* Min backlight level after ABM reduction; don't allow below 1%:
         * 0xFFFF * 0.01 = 0x28F
         */
        params.min_abm_backlight = 0x28F;

        /* In the case where ABM is implemented on dmcub,
         * the dmcu object will be NULL.
         * ABM 2.4 and up are implemented on dmcub.
         */
        if (dmcu)
                ret = dmcu_load_iram(dmcu, params);
        else if (adev->dm.dc->ctx->dmub_srv)
                ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

        if (!ret)
                return -EINVAL;

        return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

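/*
 * Suspend or resume the MST topology manager of every root MST connector
 * across S3. A manager that fails to resume has MST torn down on its link,
 * and a hotplug event is sent so userspace can re-probe the topology.
 */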
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_dp_mst_topology_mgr *mgr;
        int ret;
        bool need_hotplug = false;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_port)
                        continue;

                mgr = &aconnector->mst_mgr;

                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
                        ret = drm_dp_mst_topology_mgr_resume(mgr, true);
                        if (ret < 0) {
                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                                need_hotplug = true;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        if (need_hotplug)
                drm_kms_helper_hotplug_event(dev);
}

1460 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1461 {
1462         struct smu_context *smu = &adev->smu;
1463         int ret = 0;
1464
1465         if (!is_support_sw_smu(adev))
1466                 return 0;
1467
1468         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1469          * depends on the Windows driver dc implementation.
1470          * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1471          * should be passed to smu during boot up and resume from s3.
1472          * Boot up: dc calculates dcn watermark clock settings within dc_create,
1473          * dcn20_resource_construct,
1474          * then calls the pplib functions below to pass the settings to smu:
1475          * smu_set_watermarks_for_clock_ranges
1476          * smu_set_watermarks_table
1477          * navi10_set_watermarks_table
1478          * smu_write_watermarks_table
1479          *
1480          * For Renoir, clock settings of dcn watermarks are also fixed values.
1481          * dc has implemented a different flow for the Windows driver:
1482          * dc_hardware_init / dc_set_power_state
1483          * dcn10_init_hw
1484          * notify_wm_ranges
1485          * set_wm_ranges
1486          * -- Linux
1487          * smu_set_watermarks_for_clock_ranges
1488          * renoir_set_watermarks_table
1489          * smu_write_watermarks_table
1490          *
1491          * For Linux,
1492          * dc_hardware_init -> amdgpu_dm_init
1493          * dc_set_power_state --> dm_resume
1494          *
1495          * Therefore, this function applies to navi10/12/14 but not Renoir,
1496          * hence the ASIC check below.
1497          */
1498         switch (adev->asic_type) {
1499         case CHIP_NAVI10:
1500         case CHIP_NAVI14:
1501         case CHIP_NAVI12:
1502                 break;
1503         default:
1504                 return 0;
1505         }
1506
1507         ret = smu_write_watermarks_table(smu);
1508         if (ret) {
1509                 DRM_ERROR("Failed to update WMTABLE!\n");
1510                 return ret;
1511         }
1512
1513         return 0;
1514 }
1515
1516 /**
1517  * dm_hw_init() - Initialize DC device
1518  * @handle: The base driver device containing the amdgpu_dm device.
1519  *
1520  * Initialize the &struct amdgpu_display_manager device. This involves calling
1521  * the initializers of each DM component, then populating the struct with them.
1522  *
1523  * Although the function implies hardware initialization, both hardware and
1524  * software are initialized here. Splitting them out to their relevant init
1525  * hooks is a future TODO item.
1526  *
1527  * Some notable things that are initialized here:
1528  *
1529  * - Display Core, both software and hardware
1530  * - DC modules that we need (freesync and color management)
1531  * - DRM software states
1532  * - Interrupt sources and handlers
1533  * - Vblank support
1534  * - Debug FS entries, if enabled
1535  */
1536 static int dm_hw_init(void *handle)
1537 {
1538         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1539         /* Create DAL display manager */
1540         amdgpu_dm_init(adev);
1541         amdgpu_dm_hpd_init(adev);
1542
1543         return 0;
1544 }
1545
1546 /**
1547  * dm_hw_fini() - Teardown DC device
1548  * @handle: The base driver device containing the amdgpu_dm device.
1549  *
1550  * Teardown components within &struct amdgpu_display_manager that require
1551  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1552  * were loaded. Also flush IRQ workqueues and disable them.
1553  */
1554 static int dm_hw_fini(void *handle)
1555 {
1556         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1557
1558         amdgpu_dm_hpd_fini(adev);
1559
1560         amdgpu_dm_irq_fini(adev);
1561         amdgpu_dm_fini(adev);
1562         return 0;
1563 }
1564
1565
1566 static int dm_enable_vblank(struct drm_crtc *crtc);
1567 static void dm_disable_vblank(struct drm_crtc *crtc);
1568
1569 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1570                                  struct dc_state *state, bool enable)
1571 {
1572         enum dc_irq_source irq_source;
1573         struct amdgpu_crtc *acrtc;
1574         int rc = -EBUSY;
1575         int i = 0;
1576
1577         for (i = 0; i < state->stream_count; i++) {
1578                 acrtc = get_crtc_by_otg_inst(
1579                                 adev, state->stream_status[i].primary_otg_inst);
1580
1581                 if (acrtc && state->stream_status[i].plane_count != 0) {
1582                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1583                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1584                         DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1585                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1586                         if (rc)
1587                                 DRM_WARN("Failed to %s pflip interrupts\n",
1588                                          enable ? "enable" : "disable");
1589
1590                         if (enable) {
1591                                 rc = dm_enable_vblank(&acrtc->base);
1592                                 if (rc)
1593                                         DRM_WARN("Failed to enable vblank interrupts\n");
1594                         } else {
1595                                 dm_disable_vblank(&acrtc->base);
1596                         }
1597
1598                 }
1599         }
1600
1601 }
1602
1603 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1604 {
1605         struct dc_state *context = NULL;
1606         enum dc_status res = DC_ERROR_UNEXPECTED;
1607         int i;
1608         struct dc_stream_state *del_streams[MAX_PIPES];
1609         int del_streams_count = 0;
1610
1611         memset(del_streams, 0, sizeof(del_streams));
1612
1613         context = dc_create_state(dc);
1614         if (context == NULL)
1615                 goto context_alloc_fail;
1616
1617         dc_resource_state_copy_construct_current(dc, context);
1618
1619         /* First remove from context all streams */
1620         for (i = 0; i < context->stream_count; i++) {
1621                 struct dc_stream_state *stream = context->streams[i];
1622
1623                 del_streams[del_streams_count++] = stream;
1624         }
1625
1626         /* Remove all planes for removed streams and then remove the streams */
1627         for (i = 0; i < del_streams_count; i++) {
1628                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1629                         res = DC_FAIL_DETACH_SURFACES;
1630                         goto fail;
1631                 }
1632
1633                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1634                 if (res != DC_OK)
1635                         goto fail;
1636         }
1637
1638
1639         res = dc_validate_global_state(dc, context, false);
1640
1641         if (res != DC_OK) {
1642                 DRM_ERROR("%s: resource validation failed, dc_status:%d\n", __func__, res);
1643                 goto fail;
1644         }
1645
1646         res = dc_commit_state(dc, context);
1647
1648 fail:
1649         dc_release_state(context);
1650
1651 context_alloc_fail:
1652         return res;
1653 }
1654
1655 static int dm_suspend(void *handle)
1656 {
1657         struct amdgpu_device *adev = handle;
1658         struct amdgpu_display_manager *dm = &adev->dm;
1659         int ret = 0;
1660
1661         if (amdgpu_in_reset(adev)) {
1662                 mutex_lock(&dm->dc_lock);
1663                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1664
1665                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1666
1667                 amdgpu_dm_commit_zero_streams(dm->dc);
1668
1669                 amdgpu_dm_irq_suspend(adev);
1670
1671                 return ret;
1672         }
1673
1674         WARN_ON(adev->dm.cached_state);
1675         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1676
1677         s3_handle_mst(adev_to_drm(adev), true);
1678
1679         amdgpu_dm_irq_suspend(adev);
1680
1681
1682         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1683
1684         return 0;
1685 }
1686
1687 static struct amdgpu_dm_connector *
1688 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1689                                              struct drm_crtc *crtc)
1690 {
1691         uint32_t i;
1692         struct drm_connector_state *new_con_state;
1693         struct drm_connector *connector;
1694         struct drm_crtc *crtc_from_state;
1695
1696         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1697                 crtc_from_state = new_con_state->crtc;
1698
1699                 if (crtc_from_state == crtc)
1700                         return to_amdgpu_dm_connector(connector);
1701         }
1702
1703         return NULL;
1704 }
1705
1706 static void emulated_link_detect(struct dc_link *link)
1707 {
1708         struct dc_sink_init_data sink_init_data = { 0 };
1709         struct display_sink_capability sink_caps = { 0 };
1710         enum dc_edid_status edid_status;
1711         struct dc_context *dc_ctx = link->ctx;
1712         struct dc_sink *sink = NULL;
1713         struct dc_sink *prev_sink = NULL;
1714
1715         link->type = dc_connection_none;
1716         prev_sink = link->local_sink;
1717
1718         if (prev_sink != NULL)
1719                 dc_sink_retain(prev_sink);
1720
1721         switch (link->connector_signal) {
1722         case SIGNAL_TYPE_HDMI_TYPE_A: {
1723                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1725                 break;
1726         }
1727
1728         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1729                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1731                 break;
1732         }
1733
1734         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1735                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1737                 break;
1738         }
1739
1740         case SIGNAL_TYPE_LVDS: {
1741                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1742                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1743                 break;
1744         }
1745
1746         case SIGNAL_TYPE_EDP: {
1747                 sink_caps.transaction_type =
1748                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1749                 sink_caps.signal = SIGNAL_TYPE_EDP;
1750                 break;
1751         }
1752
1753         case SIGNAL_TYPE_DISPLAY_PORT: {
1754                 sink_caps.transaction_type =
1755                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1756                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1757                 break;
1758         }
1759
1760         default:
1761                 DC_ERROR("Invalid connector type! signal:%d\n",
1762                         link->connector_signal);
1763                 return;
1764         }
1765
1766         sink_init_data.link = link;
1767         sink_init_data.sink_signal = sink_caps.signal;
1768
1769         sink = dc_sink_create(&sink_init_data);
1770         if (!sink) {
1771                 DC_ERROR("Failed to create sink!\n");
1772                 return;
1773         }
1774
1775         /* dc_sink_create returns a new reference */
1776         link->local_sink = sink;
1777
1778         edid_status = dm_helpers_read_local_edid(
1779                         link->ctx,
1780                         link,
1781                         sink);
1782
1783         if (edid_status != EDID_OK)
1784                 DC_ERROR("Failed to read EDID\n");
1785
1786 }
1787
1788 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1789                                      struct amdgpu_display_manager *dm)
1790 {
1791         struct {
1792                 struct dc_surface_update surface_updates[MAX_SURFACES];
1793                 struct dc_plane_info plane_infos[MAX_SURFACES];
1794                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1795                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1796                 struct dc_stream_update stream_update;
1797         } *bundle;
1798         int k, m;
1799
1800         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1801
1802         if (!bundle) {
1803                 dm_error("Failed to allocate update bundle\n");
1804                 goto cleanup;
1805         }
1806
1807         for (k = 0; k < dc_state->stream_count; k++) {
1808                 bundle->stream_update.stream = dc_state->streams[k];
1809
1810                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1811                         bundle->surface_updates[m].surface =
1812                                 dc_state->stream_status[k].plane_states[m];
1813                         bundle->surface_updates[m].surface->force_full_update =
1814                                 true;
1815                 }
1816                 dc_commit_updates_for_stream(
1817                         dm->dc, bundle->surface_updates,
1818                         dc_state->stream_status[k].plane_count,
1819                         dc_state->streams[k], &bundle->stream_update, dc_state);
1820         }
1821
1822 cleanup:
1823         kfree(bundle);
1824
1825         return;
1826 }
1827
1828 static int dm_resume(void *handle)
1829 {
1830         struct amdgpu_device *adev = handle;
1831         struct drm_device *ddev = adev_to_drm(adev);
1832         struct amdgpu_display_manager *dm = &adev->dm;
1833         struct amdgpu_dm_connector *aconnector;
1834         struct drm_connector *connector;
1835         struct drm_connector_list_iter iter;
1836         struct drm_crtc *crtc;
1837         struct drm_crtc_state *new_crtc_state;
1838         struct dm_crtc_state *dm_new_crtc_state;
1839         struct drm_plane *plane;
1840         struct drm_plane_state *new_plane_state;
1841         struct dm_plane_state *dm_new_plane_state;
1842         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1843         enum dc_connection_type new_connection_type = dc_connection_none;
1844         struct dc_state *dc_state;
1845         int i, r, j;
1846
1847         if (amdgpu_in_reset(adev)) {
1848                 dc_state = dm->cached_dc_state;
1849
1850                 r = dm_dmub_hw_init(adev);
1851                 if (r)
1852                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1853
1854                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1855                 dc_resume(dm->dc);
1856
1857                 amdgpu_dm_irq_resume_early(adev);
1858
1859                 for (i = 0; i < dc_state->stream_count; i++) {
1860                         dc_state->streams[i]->mode_changed = true;
1861                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1862                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
1863                                         = 0xffffffff;
1864                         }
1865                 }
1866
1867                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1868
1869                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1870
1871                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1872
1873                 dc_release_state(dm->cached_dc_state);
1874                 dm->cached_dc_state = NULL;
1875
1876                 amdgpu_dm_irq_resume_late(adev);
1877
1878                 mutex_unlock(&dm->dc_lock);
1879
1880                 return 0;
1881         }
1882         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1883         dc_release_state(dm_state->context);
1884         dm_state->context = dc_create_state(dm->dc);
1885         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1886         dc_resource_state_construct(dm->dc, dm_state->context);
1887
1888         /* Before powering on DC we need to re-initialize DMUB. */
1889         r = dm_dmub_hw_init(adev);
1890         if (r)
1891                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1892
1893         /* power on hardware */
1894         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1895
1896         /* program HPD filter */
1897         dc_resume(dm->dc);
1898
1899         /*
1900          * Enable HPD Rx IRQ early; this should be done before setting the mode,
1901          * as short-pulse interrupts are used for MST.
1902          */
1903         amdgpu_dm_irq_resume_early(adev);
1904
1905         /* On resume we need to rewrite the MSTM control bits to enable MST */
1906         s3_handle_mst(ddev, false);
1907
1908         /* Do detection */
1909         drm_connector_list_iter_begin(ddev, &iter);
1910         drm_for_each_connector_iter(connector, &iter) {
1911                 aconnector = to_amdgpu_dm_connector(connector);
1912
1913                 /*
1914                  * This is the case when traversing through already created
1915                  * MST connectors; these should be skipped.
1916                  */
1917                 if (aconnector->mst_port)
1918                         continue;
1919
1920                 mutex_lock(&aconnector->hpd_lock);
1921                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1922                         DRM_ERROR("KMS: Failed to detect connector\n");
1923
1924                 if (aconnector->base.force && new_connection_type == dc_connection_none)
1925                         emulated_link_detect(aconnector->dc_link);
1926                 else
1927                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1928
1929                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1930                         aconnector->fake_enable = false;
1931
1932                 if (aconnector->dc_sink)
1933                         dc_sink_release(aconnector->dc_sink);
1934                 aconnector->dc_sink = NULL;
1935                 amdgpu_dm_update_connector_after_detect(aconnector);
1936                 mutex_unlock(&aconnector->hpd_lock);
1937         }
1938         drm_connector_list_iter_end(&iter);
1939
1940         /* Force mode set in atomic commit */
1941         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1942                 new_crtc_state->active_changed = true;
1943
1944         /*
1945          * atomic_check is expected to create the dc states. We need to release
1946          * them here, since they were duplicated as part of the suspend
1947          * procedure.
1948          */
1949         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1950                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1951                 if (dm_new_crtc_state->stream) {
1952                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1953                         dc_stream_release(dm_new_crtc_state->stream);
1954                         dm_new_crtc_state->stream = NULL;
1955                 }
1956         }
1957
1958         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1959                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1960                 if (dm_new_plane_state->dc_state) {
1961                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1962                         dc_plane_state_release(dm_new_plane_state->dc_state);
1963                         dm_new_plane_state->dc_state = NULL;
1964                 }
1965         }
1966
1967         drm_atomic_helper_resume(ddev, dm->cached_state);
1968
1969         dm->cached_state = NULL;
1970
1971         amdgpu_dm_irq_resume_late(adev);
1972
1973         amdgpu_dm_smu_write_watermarks_table(adev);
1974
1975         return 0;
1976 }
1977
1978 /**
1979  * DOC: DM Lifecycle
1980  *
1981  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1982  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1983  * the base driver's device list to be initialized and torn down accordingly.
1984  *
1985  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1986  */
1987
1988 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1989         .name = "dm",
1990         .early_init = dm_early_init,
1991         .late_init = dm_late_init,
1992         .sw_init = dm_sw_init,
1993         .sw_fini = dm_sw_fini,
1994         .hw_init = dm_hw_init,
1995         .hw_fini = dm_hw_fini,
1996         .suspend = dm_suspend,
1997         .resume = dm_resume,
1998         .is_idle = dm_is_idle,
1999         .wait_for_idle = dm_wait_for_idle,
2000         .check_soft_reset = dm_check_soft_reset,
2001         .soft_reset = dm_soft_reset,
2002         .set_clockgating_state = dm_set_clockgating_state,
2003         .set_powergating_state = dm_set_powergating_state,
2004 };
2005
2006 const struct amdgpu_ip_block_version dm_ip_block =
2007 {
2008         .type = AMD_IP_BLOCK_TYPE_DCE,
2009         .major = 1,
2010         .minor = 0,
2011         .rev = 0,
2012         .funcs = &amdgpu_dm_funcs,
2013 };
2014
2015
2016 /**
2017  * DOC: atomic
2018  *
2019  * *WIP*
2020  */
2021
2022 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2023         .fb_create = amdgpu_display_user_framebuffer_create,
2024         .output_poll_changed = drm_fb_helper_output_poll_changed,
2025         .atomic_check = amdgpu_dm_atomic_check,
2026         .atomic_commit = amdgpu_dm_atomic_commit,
2027 };
2028
2029 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2030         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2031 };
2032
2033 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2034 {
2035         u32 max_cll, min_cll, max, min, q, r;
2036         struct amdgpu_dm_backlight_caps *caps;
2037         struct amdgpu_display_manager *dm;
2038         struct drm_connector *conn_base;
2039         struct amdgpu_device *adev;
2040         struct dc_link *link = NULL;
2041         static const u8 pre_computed_values[] = {
2042                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2043                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2044
2045         if (!aconnector || !aconnector->dc_link)
2046                 return;
2047
2048         link = aconnector->dc_link;
2049         if (link->connector_signal != SIGNAL_TYPE_EDP)
2050                 return;
2051
2052         conn_base = &aconnector->base;
2053         adev = drm_to_adev(conn_base->dev);
2054         dm = &adev->dm;
2055         caps = &dm->backlight_caps;
2056         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2057         caps->aux_support = false;
2058         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2059         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2060
2061         if (caps->ext_caps->bits.oled == 1 ||
2062             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2063             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2064                 caps->aux_support = true;
2065
2066         /* From the specification (CTA-861-G), the maximum luminance is
2067          * calculated as:
2068          *      Luminance = 50 * 2**(CV/32)
2069          * where CV is a one-byte value.
2070          * Evaluating this expression directly would require floating-point
2071          * precision; to avoid that complexity, we take advantage of the fact
2072          * that CV is divided by a constant. By Euclid's division algorithm,
2073          * CV can be written as CV = 32*q + r. Substituting this into the
2074          * Luminance expression gives 50 * (2**q) * (2**(r/32)), so we only
2075          * need to pre-compute 50 * 2**(r/32) for r in 0..31. The values were
2076          * generated with the following Ruby line:
2077          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2078          * The results of the above expression can be verified against
2079          * pre_computed_values.
2080          */
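        /* Worked example (illustrative): max_cll = 100 gives q = 100 >> 5 = 3
         * and r = 100 % 32 = 4, so max = (1 << 3) * pre_computed_values[4]
         * = 8 * 55 = 440, close to the exact 50 * 2**(100/32) ~= 436.
         */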
2081         q = max_cll >> 5;
2082         r = max_cll % 32;
2083         max = (1 << q) * pre_computed_values[r];
2084
2085         /* min luminance: maxLum * (CV/255)^2 / 100 */
2086         q = DIV_ROUND_CLOSEST(min_cll, 255);
2087         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2088
2089         caps->aux_max_input_signal = max;
2090         caps->aux_min_input_signal = min;
2091 }
2092
2093 void amdgpu_dm_update_connector_after_detect(
2094                 struct amdgpu_dm_connector *aconnector)
2095 {
2096         struct drm_connector *connector = &aconnector->base;
2097         struct drm_device *dev = connector->dev;
2098         struct dc_sink *sink;
2099
2100         /* MST handled by drm_mst framework */
2101         if (aconnector->mst_mgr.mst_state)
2102                 return;
2103
2104
2105         sink = aconnector->dc_link->local_sink;
2106         if (sink)
2107                 dc_sink_retain(sink);
2108
2109         /*
2110          * An EDID-managed connector gets its first update only in the mode_valid
2111          * hook; the connector sink is then set to either a fake or a physical
2112          * sink, depending on link status. Skip if already done during boot.
2113          */
2114         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2115                         && aconnector->dc_em_sink) {
2116
2117                 /*
2118                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2119                  * fake a stream, because connector->sink is set to NULL on resume.
2120                  */
2121                 mutex_lock(&dev->mode_config.mutex);
2122
2123                 if (sink) {
2124                         if (aconnector->dc_sink) {
2125                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2126                                 /*
2127                                  * The retain and release below bump up the sink's
2128                                  * refcount because the link no longer points to it
2129                                  * after disconnect; otherwise the next crtc-to-connector
2130                                  * reshuffle by UMD would hit an unwanted dc_sink release.
2131                                  */
2132                                 dc_sink_release(aconnector->dc_sink);
2133                         }
2134                         aconnector->dc_sink = sink;
2135                         dc_sink_retain(aconnector->dc_sink);
2136                         amdgpu_dm_update_freesync_caps(connector,
2137                                         aconnector->edid);
2138                 } else {
2139                         amdgpu_dm_update_freesync_caps(connector, NULL);
2140                         if (!aconnector->dc_sink) {
2141                                 aconnector->dc_sink = aconnector->dc_em_sink;
2142                                 dc_sink_retain(aconnector->dc_sink);
2143                         }
2144                 }
2145
2146                 mutex_unlock(&dev->mode_config.mutex);
2147
2148                 if (sink)
2149                         dc_sink_release(sink);
2150                 return;
2151         }
2152
2153         /*
2154          * TODO: temporary guard while looking for a proper fix.
2155          * If this sink is an MST sink, we should not do anything.
2156          */
2157         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2158                 dc_sink_release(sink);
2159                 return;
2160         }
2161
2162         if (aconnector->dc_sink == sink) {
2163                 /*
2164                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2165                  * Do nothing!!
2166                  */
2167                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2168                                 aconnector->connector_id);
2169                 if (sink)
2170                         dc_sink_release(sink);
2171                 return;
2172         }
2173
2174         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2175                 aconnector->connector_id, aconnector->dc_sink, sink);
2176
2177         mutex_lock(&dev->mode_config.mutex);
2178
2179         /*
2180          * 1. Update status of the drm connector
2181          * 2. Send an event and let userspace tell us what to do
2182          */
2183         if (sink) {
2184                 /*
2185                  * TODO: check if we still need the S3 mode update workaround.
2186                  * If yes, put it here.
2187                  */
2188                 if (aconnector->dc_sink)
2189                         amdgpu_dm_update_freesync_caps(connector, NULL);
2190
2191                 aconnector->dc_sink = sink;
2192                 dc_sink_retain(aconnector->dc_sink);
2193                 if (sink->dc_edid.length == 0) {
2194                         aconnector->edid = NULL;
2195                         if (aconnector->dc_link->aux_mode) {
2196                                 drm_dp_cec_unset_edid(
2197                                         &aconnector->dm_dp_aux.aux);
2198                         }
2199                 } else {
2200                         aconnector->edid =
2201                                 (struct edid *)sink->dc_edid.raw_edid;
2202
2203                         drm_connector_update_edid_property(connector,
2204                                                            aconnector->edid);
2205                         drm_add_edid_modes(connector, aconnector->edid);
2206
2207                         if (aconnector->dc_link->aux_mode)
2208                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2209                                                     aconnector->edid);
2210                 }
2211
2212                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2213                 update_connector_ext_caps(aconnector);
2214         } else {
2215                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2216                 amdgpu_dm_update_freesync_caps(connector, NULL);
2217                 drm_connector_update_edid_property(connector, NULL);
2218                 aconnector->num_modes = 0;
2219                 dc_sink_release(aconnector->dc_sink);
2220                 aconnector->dc_sink = NULL;
2221                 aconnector->edid = NULL;
2222 #ifdef CONFIG_DRM_AMD_DC_HDCP
2223                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2224                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2225                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2226 #endif
2227         }
2228
2229         mutex_unlock(&dev->mode_config.mutex);
2230
2231         if (sink)
2232                 dc_sink_release(sink);
2233 }
2234
2235 static void handle_hpd_irq(void *param)
2236 {
2237         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2238         struct drm_connector *connector = &aconnector->base;
2239         struct drm_device *dev = connector->dev;
2240         enum dc_connection_type new_connection_type = dc_connection_none;
2241 #ifdef CONFIG_DRM_AMD_DC_HDCP
2242         struct amdgpu_device *adev = drm_to_adev(dev);
2243 #endif
2244
2245         /*
2246          * In case of failure or MST there is no need to update the connector
2247          * status or notify the OS, since (in the MST case) MST does this in its own context.
2248          */
2249         mutex_lock(&aconnector->hpd_lock);
2250
2251 #ifdef CONFIG_DRM_AMD_DC_HDCP
2252         if (adev->dm.hdcp_workqueue)
2253                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2254 #endif
2255         if (aconnector->fake_enable)
2256                 aconnector->fake_enable = false;
2257
2258         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2259                 DRM_ERROR("KMS: Failed to detect connector\n");
2260
2261         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2262                 emulated_link_detect(aconnector->dc_link);
2263
2264
2265                 drm_modeset_lock_all(dev);
2266                 dm_restore_drm_connector_state(dev, connector);
2267                 drm_modeset_unlock_all(dev);
2268
2269                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2270                         drm_kms_helper_hotplug_event(dev);
2271
2272         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2273                 amdgpu_dm_update_connector_after_detect(aconnector);
2274
2275
2276                 drm_modeset_lock_all(dev);
2277                 dm_restore_drm_connector_state(dev, connector);
2278                 drm_modeset_unlock_all(dev);
2279
2280                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2281                         drm_kms_helper_hotplug_event(dev);
2282         }
2283         mutex_unlock(&aconnector->hpd_lock);
2284
2285 }
2286
2287 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2288 {
2289         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2290         uint8_t dret;
2291         bool new_irq_handled = false;
2292         int dpcd_addr;
2293         int dpcd_bytes_to_read;
2294
2295         const int max_process_count = 30;
2296         int process_count = 0;
2297
2298         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2299
2300         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2301                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2302                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2303                 dpcd_addr = DP_SINK_COUNT;
2304         } else {
2305                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2306                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2307                 dpcd_addr = DP_SINK_COUNT_ESI;
2308         }
2309
2310         dret = drm_dp_dpcd_read(
2311                 &aconnector->dm_dp_aux.aux,
2312                 dpcd_addr,
2313                 esi,
2314                 dpcd_bytes_to_read);
2315
2316         while (dret == dpcd_bytes_to_read &&
2317                 process_count < max_process_count) {
2318                 uint8_t retry;
2319                 dret = 0;
2320
2321                 process_count++;
2322
2323                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2324                 /* handle HPD short pulse irq */
2325                 if (aconnector->mst_mgr.mst_state)
2326                         drm_dp_mst_hpd_irq(
2327                                 &aconnector->mst_mgr,
2328                                 esi,
2329                                 &new_irq_handled);
2330
2331                 if (new_irq_handled) {
2332                         /* ACK at DPCD to notify downstream */
2333                         const int ack_dpcd_bytes_to_write =
2334                                 dpcd_bytes_to_read - 1;
2335
2336                         for (retry = 0; retry < 3; retry++) {
2337                                 uint8_t wret;
2338
2339                                 wret = drm_dp_dpcd_write(
2340                                         &aconnector->dm_dp_aux.aux,
2341                                         dpcd_addr + 1,
2342                                         &esi[1],
2343                                         ack_dpcd_bytes_to_write);
2344                                 if (wret == ack_dpcd_bytes_to_write)
2345                                         break;
2346                         }
2347
2348                         /* check if there is new irq to be handled */
2349                         dret = drm_dp_dpcd_read(
2350                                 &aconnector->dm_dp_aux.aux,
2351                                 dpcd_addr,
2352                                 esi,
2353                                 dpcd_bytes_to_read);
2354
2355                         new_irq_handled = false;
2356                 } else {
2357                         break;
2358                 }
2359         }
2360
2361         if (process_count == max_process_count)
2362                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2363 }
2364
2365 static void handle_hpd_rx_irq(void *param)
2366 {
2367         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2368         struct drm_connector *connector = &aconnector->base;
2369         struct drm_device *dev = connector->dev;
2370         struct dc_link *dc_link = aconnector->dc_link;
2371         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2372         enum dc_connection_type new_connection_type = dc_connection_none;
2373 #ifdef CONFIG_DRM_AMD_DC_HDCP
2374         union hpd_irq_data hpd_irq_data;
2375         struct amdgpu_device *adev = drm_to_adev(dev);
2376
2377         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2378 #endif
2379
2380         /*
2381          * TODO: Temporarily add a mutex to protect the HPD interrupt from GPIO
2382          * conflicts; once the i2c helper is implemented, this mutex should be
2383          * retired.
2384          */
2385         if (dc_link->type != dc_connection_mst_branch)
2386                 mutex_lock(&aconnector->hpd_lock);
2387
2388
2389 #ifdef CONFIG_DRM_AMD_DC_HDCP
2390         if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2391 #else
2392         if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2393 #endif
2394                         !is_mst_root_connector) {
2395                 /* Downstream Port status changed. */
2396                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2397                         DRM_ERROR("KMS: Failed to detect connector\n");
2398
2399                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2400                         emulated_link_detect(dc_link);
2401
2402                         if (aconnector->fake_enable)
2403                                 aconnector->fake_enable = false;
2404
2405                         amdgpu_dm_update_connector_after_detect(aconnector);
2406
2407
2408                         drm_modeset_lock_all(dev);
2409                         dm_restore_drm_connector_state(dev, connector);
2410                         drm_modeset_unlock_all(dev);
2411
2412                         drm_kms_helper_hotplug_event(dev);
2413                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2414
2415                         if (aconnector->fake_enable)
2416                                 aconnector->fake_enable = false;
2417
2418                         amdgpu_dm_update_connector_after_detect(aconnector);
2419
2420
2421                         drm_modeset_lock_all(dev);
2422                         dm_restore_drm_connector_state(dev, connector);
2423                         drm_modeset_unlock_all(dev);
2424
2425                         drm_kms_helper_hotplug_event(dev);
2426                 }
2427         }
2428 #ifdef CONFIG_DRM_AMD_DC_HDCP
2429         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2430                 if (adev->dm.hdcp_workqueue)
2431                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2432         }
2433 #endif
2434         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2435             (dc_link->type == dc_connection_mst_branch))
2436                 dm_handle_hpd_rx_irq(aconnector);
2437
2438         if (dc_link->type != dc_connection_mst_branch) {
2439                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2440                 mutex_unlock(&aconnector->hpd_lock);
2441         }
2442 }
2443
2444 static void register_hpd_handlers(struct amdgpu_device *adev)
2445 {
2446         struct drm_device *dev = adev_to_drm(adev);
2447         struct drm_connector *connector;
2448         struct amdgpu_dm_connector *aconnector;
2449         const struct dc_link *dc_link;
2450         struct dc_interrupt_params int_params = {0};
2451
2452         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2453         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2454
2455         list_for_each_entry(connector,
2456                         &dev->mode_config.connector_list, head) {
2457
2458                 aconnector = to_amdgpu_dm_connector(connector);
2459                 dc_link = aconnector->dc_link;
2460
2461                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2462                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2463                         int_params.irq_source = dc_link->irq_source_hpd;
2464
2465                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2466                                         handle_hpd_irq,
2467                                         (void *) aconnector);
2468                 }
2469
2470                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2471
2472                         /* Also register for DP short pulse (hpd_rx). */
2473                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2474                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2475
2476                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2477                                         handle_hpd_rx_irq,
2478                                         (void *) aconnector);
2479                 }
2480         }
2481 }
2482
2483 #if defined(CONFIG_DRM_AMD_DC_SI)
2484 /* Register IRQ sources and initialize IRQ callbacks */
2485 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2486 {
2487         struct dc *dc = adev->dm.dc;
2488         struct common_irq_params *c_irq_params;
2489         struct dc_interrupt_params int_params = {0};
2490         int r;
2491         int i;
2492         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2493
2494         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2495         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2496
2497         /*
2498          * Actions of amdgpu_irq_add_id():
2499          * 1. Register a set() function with base driver.
2500          *    Base driver will call set() function to enable/disable an
2501          *    interrupt in DC hardware.
2502          * 2. Register amdgpu_dm_irq_handler().
2503          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2504          *    coming from DC hardware.
2505          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2506          *    for acknowledging and handling. */
2507
2508         /* Use VBLANK interrupt */
2509         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2510                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2511                 if (r) {
2512                         DRM_ERROR("Failed to add crtc irq id!\n");
2513                         return r;
2514                 }
2515
2516                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2517                 int_params.irq_source =
2518                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2519
2520                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2521
2522                 c_irq_params->adev = adev;
2523                 c_irq_params->irq_src = int_params.irq_source;
2524
2525                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2526                                 dm_crtc_high_irq, c_irq_params);
2527         }
2528
2529         /* Use GRPH_PFLIP interrupt */
2530         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2531                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2532                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2533                 if (r) {
2534                         DRM_ERROR("Failed to add page flip irq id!\n");
2535                         return r;
2536                 }
2537
2538                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2539                 int_params.irq_source =
2540                         dc_interrupt_to_irq_source(dc, i, 0);
2541
2542                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2543
2544                 c_irq_params->adev = adev;
2545                 c_irq_params->irq_src = int_params.irq_source;
2546
2547                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2548                                 dm_pflip_high_irq, c_irq_params);
2549
2550         }
2551
2552         /* HPD */
2553         r = amdgpu_irq_add_id(adev, client_id,
2554                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2555         if (r) {
2556                 DRM_ERROR("Failed to add hpd irq id!\n");
2557                 return r;
2558         }
2559
2560         register_hpd_handlers(adev);
2561
2562         return 0;
2563 }
2564 #endif
2565
2566 /* Register IRQ sources and initialize IRQ callbacks */
2567 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2568 {
2569         struct dc *dc = adev->dm.dc;
2570         struct common_irq_params *c_irq_params;
2571         struct dc_interrupt_params int_params = {0};
2572         int r;
2573         int i;
2574         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2575
2576         if (adev->asic_type >= CHIP_VEGA10)
2577                 client_id = SOC15_IH_CLIENTID_DCE;
2578
2579         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2580         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2581
2582         /*
2583          * Actions of amdgpu_irq_add_id():
2584          * 1. Register a set() function with base driver.
2585          *    Base driver will call set() function to enable/disable an
2586          *    interrupt in DC hardware.
2587          * 2. Register amdgpu_dm_irq_handler().
2588          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2589          *    coming from DC hardware.
2590          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2591          *    for acknowledging and handling. */
2592
2593         /* Use VBLANK interrupt */
2594         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2595                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2596                 if (r) {
2597                         DRM_ERROR("Failed to add crtc irq id!\n");
2598                         return r;
2599                 }
2600
2601                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2602                 int_params.irq_source =
2603                         dc_interrupt_to_irq_source(dc, i, 0);
2604
2605                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2606
2607                 c_irq_params->adev = adev;
2608                 c_irq_params->irq_src = int_params.irq_source;
2609
2610                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2611                                 dm_crtc_high_irq, c_irq_params);
2612         }
2613
2614         /* Use VUPDATE interrupt */
2615         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2616                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2617                 if (r) {
2618                         DRM_ERROR("Failed to add vupdate irq id!\n");
2619                         return r;
2620                 }
2621
2622                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2623                 int_params.irq_source =
2624                         dc_interrupt_to_irq_source(dc, i, 0);
2625
2626                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2627
2628                 c_irq_params->adev = adev;
2629                 c_irq_params->irq_src = int_params.irq_source;
2630
2631                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2632                                 dm_vupdate_high_irq, c_irq_params);
2633         }
2634
2635         /* Use GRPH_PFLIP interrupt */
2636         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2637                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2638                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2639                 if (r) {
2640                         DRM_ERROR("Failed to add page flip irq id!\n");
2641                         return r;
2642                 }
2643
2644                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2645                 int_params.irq_source =
2646                         dc_interrupt_to_irq_source(dc, i, 0);
2647
2648                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2649
2650                 c_irq_params->adev = adev;
2651                 c_irq_params->irq_src = int_params.irq_source;
2652
2653                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2654                                 dm_pflip_high_irq, c_irq_params);
2655
2656         }
2657
2658         /* HPD */
2659         r = amdgpu_irq_add_id(adev, client_id,
2660                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2661         if (r) {
2662                 DRM_ERROR("Failed to add hpd irq id!\n");
2663                 return r;
2664         }
2665
2666         register_hpd_handlers(adev);
2667
2668         return 0;
2669 }
2670
2671 #if defined(CONFIG_DRM_AMD_DC_DCN)
2672 /* Register IRQ sources and initialize IRQ callbacks */
2673 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2674 {
2675         struct dc *dc = adev->dm.dc;
2676         struct common_irq_params *c_irq_params;
2677         struct dc_interrupt_params int_params = {0};
2678         int r;
2679         int i;
2680
2681         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2682         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2683
2684         /*
2685          * Actions of amdgpu_irq_add_id():
2686          * 1. Register a set() function with base driver.
2687          *    Base driver will call set() function to enable/disable an
2688          *    interrupt in DC hardware.
2689          * 2. Register amdgpu_dm_irq_handler().
2690          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2691          *    coming from DC hardware.
2692          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2693          *    for acknowledging and handling.
2694          */
2695
2696         /* Use VSTARTUP interrupt */
2697         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2698                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2699                         i++) {
2700                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2701
2702                 if (r) {
2703                         DRM_ERROR("Failed to add crtc irq id!\n");
2704                         return r;
2705                 }
2706
2707                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2708                 int_params.irq_source =
2709                         dc_interrupt_to_irq_source(dc, i, 0);
2710
2711                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2712
2713                 c_irq_params->adev = adev;
2714                 c_irq_params->irq_src = int_params.irq_source;
2715
2716                 amdgpu_dm_irq_register_interrupt(
2717                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2718         }
2719
2720         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2721          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2722          * to trigger at end of each vblank, regardless of state of the lock,
2723          * matching DCE behaviour.
2724          */
2725         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2726              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2727              i++) {
2728                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2729
2730                 if (r) {
2731                         DRM_ERROR("Failed to add vupdate irq id!\n");
2732                         return r;
2733                 }
2734
2735                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2736                 int_params.irq_source =
2737                         dc_interrupt_to_irq_source(dc, i, 0);
2738
2739                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2740
2741                 c_irq_params->adev = adev;
2742                 c_irq_params->irq_src = int_params.irq_source;
2743
2744                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2745                                 dm_vupdate_high_irq, c_irq_params);
2746         }
2747
2748         /* Use GRPH_PFLIP interrupt */
2749         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2750                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2751                         i++) {
2752                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2753                 if (r) {
2754                         DRM_ERROR("Failed to add page flip irq id!\n");
2755                         return r;
2756                 }
2757
2758                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2759                 int_params.irq_source =
2760                         dc_interrupt_to_irq_source(dc, i, 0);
2761
2762                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2763
2764                 c_irq_params->adev = adev;
2765                 c_irq_params->irq_src = int_params.irq_source;
2766
2767                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2768                                 dm_pflip_high_irq, c_irq_params);
2769
2770         }
2771
2772         /* HPD */
2773         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2774                         &adev->hpd_irq);
2775         if (r) {
2776                 DRM_ERROR("Failed to add hpd irq id!\n");
2777                 return r;
2778         }
2779
2780         register_hpd_handlers(adev);
2781
2782         return 0;
2783 }
2784 #endif
2785
2786 /*
2787  * Acquires the lock for the atomic state object and returns
2788  * the new atomic state.
2789  *
2790  * This should only be called during atomic check.
2791  */
2792 static int dm_atomic_get_state(struct drm_atomic_state *state,
2793                                struct dm_atomic_state **dm_state)
2794 {
2795         struct drm_device *dev = state->dev;
2796         struct amdgpu_device *adev = drm_to_adev(dev);
2797         struct amdgpu_display_manager *dm = &adev->dm;
2798         struct drm_private_state *priv_state;
2799
2800         if (*dm_state)
2801                 return 0;
2802
2803         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2804         if (IS_ERR(priv_state))
2805                 return PTR_ERR(priv_state);
2806
2807         *dm_state = to_dm_atomic_state(priv_state);
2808
2809         return 0;
2810 }
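/*
 * Typical usage during atomic check (a sketch): the caller keeps a local
 * that starts out NULL, so repeated calls after the first are no-ops:
 *
 *   struct dm_atomic_state *dm_state = NULL;
 *
 *   ret = dm_atomic_get_state(state, &dm_state);
 *   if (ret)
 *           return ret;
 *
 * On success the private object lock is held and dm_state->context can be
 * inspected or modified for the remainder of the check.
 */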
2811
2812 static struct dm_atomic_state *
2813 dm_atomic_get_new_state(struct drm_atomic_state *state)
2814 {
2815         struct drm_device *dev = state->dev;
2816         struct amdgpu_device *adev = drm_to_adev(dev);
2817         struct amdgpu_display_manager *dm = &adev->dm;
2818         struct drm_private_obj *obj;
2819         struct drm_private_state *new_obj_state;
2820         int i;
2821
2822         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2823                 if (obj->funcs == dm->atomic_obj.funcs)
2824                         return to_dm_atomic_state(new_obj_state);
2825         }
2826
2827         return NULL;
2828 }
2829
2830 static struct drm_private_state *
2831 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2832 {
2833         struct dm_atomic_state *old_state, *new_state;
2834
2835         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2836         if (!new_state)
2837                 return NULL;
2838
2839         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2840
2841         old_state = to_dm_atomic_state(obj->state);
2842
2843         if (old_state && old_state->context)
2844                 new_state->context = dc_copy_state(old_state->context);
2845
2846         if (!new_state->context) {
2847                 kfree(new_state);
2848                 return NULL;
2849         }
2850
2851         return &new_state->base;
2852 }
2853
2854 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2855                                     struct drm_private_state *state)
2856 {
2857         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2858
2859         if (dm_state && dm_state->context)
2860                 dc_release_state(dm_state->context);
2861
2862         kfree(dm_state);
2863 }
2864
2865 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2866         .atomic_duplicate_state = dm_atomic_duplicate_state,
2867         .atomic_destroy_state = dm_atomic_destroy_state,
2868 };
2869
2870 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2871 {
2872         struct dm_atomic_state *state;
2873         int r;
2874
2875         adev->mode_info.mode_config_initialized = true;
2876
2877         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2878         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2879
2880         adev_to_drm(adev)->mode_config.max_width = 16384;
2881         adev_to_drm(adev)->mode_config.max_height = 16384;
2882
2883         adev_to_drm(adev)->mode_config.preferred_depth = 24;
2884         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2885         /* indicates support for immediate flip */
2886         adev_to_drm(adev)->mode_config.async_page_flip = true;
2887
2888         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2889
2890         state = kzalloc(sizeof(*state), GFP_KERNEL);
2891         if (!state)
2892                 return -ENOMEM;
2893
2894         state->context = dc_create_state(adev->dm.dc);
2895         if (!state->context) {
2896                 kfree(state);
2897                 return -ENOMEM;
2898         }
2899
2900         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2901
2902         drm_atomic_private_obj_init(adev_to_drm(adev),
2903                                     &adev->dm.atomic_obj,
2904                                     &state->base,
2905                                     &dm_atomic_state_funcs);
2906
2907         r = amdgpu_display_modeset_create_props(adev);
2908         if (r) {
2909                 dc_release_state(state->context);
2910                 kfree(state);
2911                 return r;
2912         }
2913
2914         r = amdgpu_dm_audio_init(adev);
2915         if (r) {
2916                 dc_release_state(state->context);
2917                 kfree(state);
2918                 return r;
2919         }
2920
2921         return 0;
2922 }
2923
2924 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2925 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2926 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2927
2928 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2929         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2930
2931 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2932 {
2933 #if defined(CONFIG_ACPI)
2934         struct amdgpu_dm_backlight_caps caps;
2935
2936         memset(&caps, 0, sizeof(caps));
2937
2938         if (dm->backlight_caps.caps_valid)
2939                 return;
2940
2941         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2942         if (caps.caps_valid) {
2943                 dm->backlight_caps.caps_valid = true;
2944                 if (caps.aux_support)
2945                         return;
2946                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2947                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2948         } else {
2949                 dm->backlight_caps.min_input_signal =
2950                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2951                 dm->backlight_caps.max_input_signal =
2952                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2953         }
2954 #else
2955         if (dm->backlight_caps.aux_support)
2956                 return;
2957
2958         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2959         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2960 #endif
2961 }
2962
2963 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2964 {
2965         bool rc;
2966
2967         if (!link)
2968                 return 1;
2969
2970         rc = dc_link_set_backlight_level_nits(link, true, brightness,
2971                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2972
2973         return rc ? 0 : 1;
2974 }
2975
2976 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
2977                                 unsigned *min, unsigned *max)
2978 {
2979         if (!caps)
2980                 return 0;
2981
2982         if (caps->aux_support) {
2983                 // Firmware limits are in nits, DC API wants millinits.
2984                 *max = 1000 * caps->aux_max_input_signal;
2985                 *min = 1000 * caps->aux_min_input_signal;
2986         } else {
2987                 // Firmware limits are 8-bit, PWM control is 16-bit.
2988                 *max = 0x101 * caps->max_input_signal;
2989                 *min = 0x101 * caps->min_input_signal;
2990         }
2991         return 1;
2992 }
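/*
 * The 0x101 factor above maps the 8-bit firmware limits onto the 16-bit
 * PWM range exactly: 0x101 * 0xff == 0xffff, so a max_input_signal of 255
 * becomes 65535 and the default min_input_signal of 12 becomes 3084.
 */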
2993
2994 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
2995                                         uint32_t brightness)
2996 {
2997         unsigned min, max;
2998
2999         if (!get_brightness_range(caps, &min, &max))
3000                 return brightness;
3001
3002         // Rescale 0..255 to min..max
3003         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3004                                        AMDGPU_MAX_BL_LEVEL);
3005 }
3006
3007 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3008                                       uint32_t brightness)
3009 {
3010         unsigned min, max;
3011
3012         if (!get_brightness_range(caps, &min, &max))
3013                 return brightness;
3014
3015         if (brightness < min)
3016                 return 0;
3017         // Rescale min..max to 0..255
3018         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3019                                  max - min);
3020 }
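/*
 * Worked round trip with the PWM defaults (min_input_signal = 12,
 * max_input_signal = 255, i.e. min = 3084 and max = 65535):
 *
 *   convert_brightness_from_user(caps, 128)
 *           = 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432
 *   convert_brightness_to_user(caps, 34432) = 128
 *
 * so a user-visible brightness level survives the trip through the driver.
 */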
3021
3022 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3023 {
3024         struct amdgpu_display_manager *dm = bl_get_data(bd);
3025         struct amdgpu_dm_backlight_caps caps;
3026         struct dc_link *link = NULL;
3027         u32 brightness;
3028         bool rc;
3029
3030         amdgpu_dm_update_backlight_caps(dm);
3031         caps = dm->backlight_caps;
3032
3033         link = (struct dc_link *)dm->backlight_link;
3034
3035         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3036         // Change brightness based on AUX property
3037         if (caps.aux_support)
3038                 return set_backlight_via_aux(link, brightness);
3039
3040         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3041
3042         return rc ? 0 : 1;
3043 }
3044
3045 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3046 {
3047         struct amdgpu_display_manager *dm = bl_get_data(bd);
3048         int ret = dc_link_get_backlight_level(dm->backlight_link);
3049
3050         if (ret == DC_ERROR_UNEXPECTED)
3051                 return bd->props.brightness;
3052         return convert_brightness_to_user(&dm->backlight_caps, ret);
3053 }
3054
3055 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3056         .options = BL_CORE_SUSPENDRESUME,
3057         .get_brightness = amdgpu_dm_backlight_get_brightness,
3058         .update_status  = amdgpu_dm_backlight_update_status,
3059 };
3060
3061 static void
3062 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3063 {
3064         char bl_name[16];
3065         struct backlight_properties props = { 0 };
3066
3067         amdgpu_dm_update_backlight_caps(dm);
3068
3069         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3070         props.brightness = AMDGPU_MAX_BL_LEVEL;
3071         props.type = BACKLIGHT_RAW;
3072
3073         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3074                  adev_to_drm(dm->adev)->primary->index);
3075
3076         dm->backlight_dev = backlight_device_register(bl_name,
3077                                                       adev_to_drm(dm->adev)->dev,
3078                                                       dm,
3079                                                       &amdgpu_dm_backlight_ops,
3080                                                       &props);
3081
3082         if (IS_ERR(dm->backlight_dev))
3083                 DRM_ERROR("DM: Backlight registration failed!\n");
3084         else
3085                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3086 }
3087
3088 #endif
3089
3090 static int initialize_plane(struct amdgpu_display_manager *dm,
3091                             struct amdgpu_mode_info *mode_info, int plane_id,
3092                             enum drm_plane_type plane_type,
3093                             const struct dc_plane_cap *plane_cap)
3094 {
3095         struct drm_plane *plane;
3096         unsigned long possible_crtcs;
3097         int ret = 0;
3098
3099         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3100         if (!plane) {
3101                 DRM_ERROR("KMS: Failed to allocate plane\n");
3102                 return -ENOMEM;
3103         }
3104         plane->type = plane_type;
3105
3106         /*
3107          * HACK: IGT tests expect that the primary plane for a CRTC
3108          * can only have one possible CRTC. Only expose support for
3109          * all CRTCs on planes that will never be used as a primary
3110          * plane for a CRTC - i.e. overlay or underlay planes.
3111          */
3112         possible_crtcs = 1 << plane_id;
3113         if (plane_id >= dm->dc->caps.max_streams)
3114                 possible_crtcs = 0xff;
3115
3116         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3117
3118         if (ret) {
3119                 DRM_ERROR("KMS: Failed to initialize plane\n");
3120                 kfree(plane);
3121                 return ret;
3122         }
3123
3124         if (mode_info)
3125                 mode_info->planes[plane_id] = plane;
3126
3127         return ret;
3128 }
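/*
 * possible_crtcs is a bitmask with one bit per CRTC: a primary plane with
 * plane_id 2 is restricted to 1 << 2 == 0x4 (CRTC 2 only), while overlay
 * planes (plane_id >= max_streams) advertise 0xff, i.e. any of up to
 * eight CRTCs.
 */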
3129
3130
3131 static void register_backlight_device(struct amdgpu_display_manager *dm,
3132                                       struct dc_link *link)
3133 {
3134 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3135         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3136
3137         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3138             link->type != dc_connection_none) {
3139                 /*
3140                  * Even if registration fails, we should continue with
3141                  * DM initialization, because not having backlight control
3142                  * is better than a black screen.
3143                  */
3144                 amdgpu_dm_register_backlight_device(dm);
3145
3146                 if (dm->backlight_dev)
3147                         dm->backlight_link = link;
3148         }
3149 #endif
3150 }
3151
3152
3153 /*
3154  * In this architecture, the association
3155  * connector -> encoder -> crtc
3156  * is not really required. The crtc and connector will hold the
3157  * display_index as an abstraction to use with the DAL component.
3158  *
3159  * Returns 0 on success
3160  */
3161 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3162 {
3163         struct amdgpu_display_manager *dm = &adev->dm;
3164         int32_t i;
3165         struct amdgpu_dm_connector *aconnector = NULL;
3166         struct amdgpu_encoder *aencoder = NULL;
3167         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3168         uint32_t link_cnt;
3169         int32_t primary_planes;
3170         enum dc_connection_type new_connection_type = dc_connection_none;
3171         const struct dc_plane_cap *plane;
3172
3173         link_cnt = dm->dc->caps.max_links;
3174         if (amdgpu_dm_mode_config_init(dm->adev)) {
3175                 DRM_ERROR("DM: Failed to initialize mode config\n");
3176                 return -EINVAL;
3177         }
3178
3179         /* There is one primary plane per CRTC */
3180         primary_planes = dm->dc->caps.max_streams;
3181         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3182
3183         /*
3184          * Initialize primary planes, implicit planes for legacy IOCTLs.
3185          * Order is reversed to match iteration order in atomic check.
3186          */
3187         for (i = (primary_planes - 1); i >= 0; i--) {
3188                 plane = &dm->dc->caps.planes[i];
3189
3190                 if (initialize_plane(dm, mode_info, i,
3191                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3192                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3193                         goto fail;
3194                 }
3195         }
3196
3197         /*
3198          * Initialize overlay planes, index starting after primary planes.
3199          * These planes have a higher DRM index than the primary planes since
3200          * they should be considered as having a higher z-order.
3201          * Order is reversed to match iteration order in atomic check.
3202          *
3203          * Only support DCN for now, and only expose one so we don't encourage
3204          * userspace to use up all the pipes.
3205          */
3206         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3207                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3208
3209                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3210                         continue;
3211
3212                 if (!plane->blends_with_above || !plane->blends_with_below)
3213                         continue;
3214
3215                 if (!plane->pixel_format_support.argb8888)
3216                         continue;
3217
3218                 if (initialize_plane(dm, NULL, primary_planes + i,
3219                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3220                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3221                         goto fail;
3222                 }
3223
3224                 /* Only create one overlay plane. */
3225                 break;
3226         }
3227
3228         for (i = 0; i < dm->dc->caps.max_streams; i++)
3229                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3230                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3231                         goto fail;
3232                 }
3233
3234         dm->display_indexes_num = dm->dc->caps.max_streams;
3235
3236         /* loops over all connectors on the board */
3237         for (i = 0; i < link_cnt; i++) {
3238                 struct dc_link *link = NULL;
3239
3240                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3241                         DRM_ERROR(
3242                                 "KMS: Cannot support more than %d display indexes\n",
3243                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3244                         continue;
3245                 }
3246
3247                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3248                 if (!aconnector)
3249                         goto fail;
3250
3251                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3252                 if (!aencoder)
3253                         goto fail;
3254
3255                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3256                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3257                         goto fail;
3258                 }
3259
3260                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3261                         DRM_ERROR("KMS: Failed to initialize connector\n");
3262                         goto fail;
3263                 }
3264
3265                 link = dc_get_link_at_index(dm->dc, i);
3266
3267                 if (!dc_link_detect_sink(link, &new_connection_type))
3268                         DRM_ERROR("KMS: Failed to detect connector\n");
3269
3270                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3271                         emulated_link_detect(link);
3272                         amdgpu_dm_update_connector_after_detect(aconnector);
3273
3274                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3275                         amdgpu_dm_update_connector_after_detect(aconnector);
3276                         register_backlight_device(dm, link);
3277                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3278                                 amdgpu_dm_set_psr_caps(link);
3279                 }
3280
3281
3282         }
3283
3284         /* Software is initialized. Now we can register interrupt handlers. */
3285         switch (adev->asic_type) {
3286 #if defined(CONFIG_DRM_AMD_DC_SI)
3287         case CHIP_TAHITI:
3288         case CHIP_PITCAIRN:
3289         case CHIP_VERDE:
3290         case CHIP_OLAND:
3291                 if (dce60_register_irq_handlers(dm->adev)) {
3292                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3293                         goto fail;
3294                 }
3295                 break;
3296 #endif
3297         case CHIP_BONAIRE:
3298         case CHIP_HAWAII:
3299         case CHIP_KAVERI:
3300         case CHIP_KABINI:
3301         case CHIP_MULLINS:
3302         case CHIP_TONGA:
3303         case CHIP_FIJI:
3304         case CHIP_CARRIZO:
3305         case CHIP_STONEY:
3306         case CHIP_POLARIS11:
3307         case CHIP_POLARIS10:
3308         case CHIP_POLARIS12:
3309         case CHIP_VEGAM:
3310         case CHIP_VEGA10:
3311         case CHIP_VEGA12:
3312         case CHIP_VEGA20:
3313                 if (dce110_register_irq_handlers(dm->adev)) {
3314                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3315                         goto fail;
3316                 }
3317                 break;
3318 #if defined(CONFIG_DRM_AMD_DC_DCN)
3319         case CHIP_RAVEN:
3320         case CHIP_NAVI12:
3321         case CHIP_NAVI10:
3322         case CHIP_NAVI14:
3323         case CHIP_RENOIR:
3324 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3325         case CHIP_SIENNA_CICHLID:
3326         case CHIP_NAVY_FLOUNDER:
3327 #endif
3328                 if (dcn10_register_irq_handlers(dm->adev)) {
3329                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3330                         goto fail;
3331                 }
3332                 break;
3333 #endif
3334         default:
3335                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3336                 goto fail;
3337         }
3338
3339         /* No userspace support. */
3340         dm->dc->debug.disable_tri_buf = true;
3341
3342         return 0;
3343 fail:
3344         kfree(aencoder);
3345         kfree(aconnector);
3346
3347         return -EINVAL;
3348 }
3349
3350 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3351 {
3352         drm_mode_config_cleanup(dm->ddev);
3353         drm_atomic_private_obj_fini(&dm->atomic_obj);
3354         return;
3355 }
3356
3357 /******************************************************************************
3358  * amdgpu_display_funcs functions
3359  *****************************************************************************/
3360
3361 /*
3362  * dm_bandwidth_update - program display watermarks
3363  *
3364  * @adev: amdgpu_device pointer
3365  *
3366  * Calculate and program the display watermarks and line buffer allocation.
3367  */
3368 static void dm_bandwidth_update(struct amdgpu_device *adev)
3369 {
3370         /* TODO: implement later */
3371 }
3372
3373 static const struct amdgpu_display_funcs dm_display_funcs = {
3374         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3375         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3376         .backlight_set_level = NULL, /* never called for DC */
3377         .backlight_get_level = NULL, /* never called for DC */
3378         .hpd_sense = NULL,/* called unconditionally */
3379         .hpd_set_polarity = NULL, /* called unconditionally */
3380         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3381         .page_flip_get_scanoutpos =
3382                 dm_crtc_get_scanoutpos,/* called unconditionally */
3383         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3384         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3385 };
3386
3387 #if defined(CONFIG_DEBUG_KERNEL_DC)
3388
3389 static ssize_t s3_debug_store(struct device *device,
3390                               struct device_attribute *attr,
3391                               const char *buf,
3392                               size_t count)
3393 {
3394         int ret;
3395         int s3_state;
3396         struct drm_device *drm_dev = dev_get_drvdata(device);
3397         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3398
3399         ret = kstrtoint(buf, 0, &s3_state);
3400
3401         if (ret == 0) {
3402                 if (s3_state) {
3403                         dm_resume(adev);
3404                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3405                 } else
3406                         dm_suspend(adev);
3407         }
3408
3409         return ret == 0 ? count : 0;
3410 }
3411
3412 DEVICE_ATTR_WO(s3_debug);
3413
3414 #endif
3415
3416 static int dm_early_init(void *handle)
3417 {
3418         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3419
3420         switch (adev->asic_type) {
3421 #if defined(CONFIG_DRM_AMD_DC_SI)
3422         case CHIP_TAHITI:
3423         case CHIP_PITCAIRN:
3424         case CHIP_VERDE:
3425                 adev->mode_info.num_crtc = 6;
3426                 adev->mode_info.num_hpd = 6;
3427                 adev->mode_info.num_dig = 6;
3428                 break;
3429         case CHIP_OLAND:
3430                 adev->mode_info.num_crtc = 2;
3431                 adev->mode_info.num_hpd = 2;
3432                 adev->mode_info.num_dig = 2;
3433                 break;
3434 #endif
3435         case CHIP_BONAIRE:
3436         case CHIP_HAWAII:
3437                 adev->mode_info.num_crtc = 6;
3438                 adev->mode_info.num_hpd = 6;
3439                 adev->mode_info.num_dig = 6;
3440                 break;
3441         case CHIP_KAVERI:
3442                 adev->mode_info.num_crtc = 4;
3443                 adev->mode_info.num_hpd = 6;
3444                 adev->mode_info.num_dig = 7;
3445                 break;
3446         case CHIP_KABINI:
3447         case CHIP_MULLINS:
3448                 adev->mode_info.num_crtc = 2;
3449                 adev->mode_info.num_hpd = 6;
3450                 adev->mode_info.num_dig = 6;
3451                 break;
3452         case CHIP_FIJI:
3453         case CHIP_TONGA:
3454                 adev->mode_info.num_crtc = 6;
3455                 adev->mode_info.num_hpd = 6;
3456                 adev->mode_info.num_dig = 7;
3457                 break;
3458         case CHIP_CARRIZO:
3459                 adev->mode_info.num_crtc = 3;
3460                 adev->mode_info.num_hpd = 6;
3461                 adev->mode_info.num_dig = 9;
3462                 break;
3463         case CHIP_STONEY:
3464                 adev->mode_info.num_crtc = 2;
3465                 adev->mode_info.num_hpd = 6;
3466                 adev->mode_info.num_dig = 9;
3467                 break;
3468         case CHIP_POLARIS11:
3469         case CHIP_POLARIS12:
3470                 adev->mode_info.num_crtc = 5;
3471                 adev->mode_info.num_hpd = 5;
3472                 adev->mode_info.num_dig = 5;
3473                 break;
3474         case CHIP_POLARIS10:
3475         case CHIP_VEGAM:
3476                 adev->mode_info.num_crtc = 6;
3477                 adev->mode_info.num_hpd = 6;
3478                 adev->mode_info.num_dig = 6;
3479                 break;
3480         case CHIP_VEGA10:
3481         case CHIP_VEGA12:
3482         case CHIP_VEGA20:
3483                 adev->mode_info.num_crtc = 6;
3484                 adev->mode_info.num_hpd = 6;
3485                 adev->mode_info.num_dig = 6;
3486                 break;
3487 #if defined(CONFIG_DRM_AMD_DC_DCN)
3488         case CHIP_RAVEN:
3489                 adev->mode_info.num_crtc = 4;
3490                 adev->mode_info.num_hpd = 4;
3491                 adev->mode_info.num_dig = 4;
3492                 break;
3493 #endif
3494         case CHIP_NAVI10:
3495         case CHIP_NAVI12:
3496 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3497         case CHIP_SIENNA_CICHLID:
3498         case CHIP_NAVY_FLOUNDER:
3499 #endif
3500                 adev->mode_info.num_crtc = 6;
3501                 adev->mode_info.num_hpd = 6;
3502                 adev->mode_info.num_dig = 6;
3503                 break;
3504         case CHIP_NAVI14:
3505                 adev->mode_info.num_crtc = 5;
3506                 adev->mode_info.num_hpd = 5;
3507                 adev->mode_info.num_dig = 5;
3508                 break;
3509         case CHIP_RENOIR:
3510                 adev->mode_info.num_crtc = 4;
3511                 adev->mode_info.num_hpd = 4;
3512                 adev->mode_info.num_dig = 4;
3513                 break;
3514         default:
3515                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3516                 return -EINVAL;
3517         }
3518
3519         amdgpu_dm_set_irq_funcs(adev);
3520
3521         if (adev->mode_info.funcs == NULL)
3522                 adev->mode_info.funcs = &dm_display_funcs;
3523
3524         /*
3525          * Note: Do NOT change adev->audio_endpt_rreg and
3526          * adev->audio_endpt_wreg because they are initialised in
3527          * amdgpu_device_init()
3528          */
3529 #if defined(CONFIG_DEBUG_KERNEL_DC)
3530         device_create_file(
3531                 adev_to_drm(adev)->dev,
3532                 &dev_attr_s3_debug);
3533 #endif
3534
3535         return 0;
3536 }
3537
3538 static bool modeset_required(struct drm_crtc_state *crtc_state,
3539                              struct dc_stream_state *new_stream,
3540                              struct dc_stream_state *old_stream)
3541 {
3542         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3543 }
3544
3545 static bool modereset_required(struct drm_crtc_state *crtc_state)
3546 {
3547         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3548 }
3549
3550 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3551 {
3552         drm_encoder_cleanup(encoder);
3553         kfree(encoder);
3554 }
3555
3556 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3557         .destroy = amdgpu_dm_encoder_destroy,
3558 };
3559
3560
3561 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3562                                 struct dc_scaling_info *scaling_info)
3563 {
3564         int scale_w, scale_h;
3565
3566         memset(scaling_info, 0, sizeof(*scaling_info));
3567
3568         /* Source is in 16.16 fixed point; ignore the fractional part for now. */
3569         scaling_info->src_rect.x = state->src_x >> 16;
3570         scaling_info->src_rect.y = state->src_y >> 16;
3571
3572         scaling_info->src_rect.width = state->src_w >> 16;
3573         if (scaling_info->src_rect.width == 0)
3574                 return -EINVAL;
3575
3576         scaling_info->src_rect.height = state->src_h >> 16;
3577         if (scaling_info->src_rect.height == 0)
3578                 return -EINVAL;
3579
3580         scaling_info->dst_rect.x = state->crtc_x;
3581         scaling_info->dst_rect.y = state->crtc_y;
3582
3583         if (state->crtc_w == 0)
3584                 return -EINVAL;
3585
3586         scaling_info->dst_rect.width = state->crtc_w;
3587
3588         if (state->crtc_h == 0)
3589                 return -EINVAL;
3590
3591         scaling_info->dst_rect.height = state->crtc_h;
3592
3593         /* DRM doesn't specify clipping on destination output. */
3594         scaling_info->clip_rect = scaling_info->dst_rect;
3595
3596         /* TODO: Validate scaling per-format with DC plane caps */
3597         scale_w = scaling_info->dst_rect.width * 1000 /
3598                   scaling_info->src_rect.width;
3599
3600         if (scale_w < 250 || scale_w > 16000)
3601                 return -EINVAL;
3602
3603         scale_h = scaling_info->dst_rect.height * 1000 /
3604                   scaling_info->src_rect.height;
3605
3606         if (scale_h < 250 || scale_h > 16000)
3607                 return -EINVAL;
3608
3609         /*
3610          * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3611          * assume reasonable defaults based on the format.
3612          */
3613
3614         return 0;
3615 }
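/*
 * Example of the scale limits above, with hypothetical numbers: a
 * 1920-wide source scanned out 960 wide gives
 * scale_w = 960 * 1000 / 1920 = 500 (0.5x, accepted); anything below 250
 * (0.25x) or above 16000 (16x) in either dimension fails with -EINVAL.
 */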
3616
3617 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3618                        uint64_t *tiling_flags, bool *tmz_surface)
3619 {
3620         struct amdgpu_bo *rbo;
3621         int r;
3622
3623         if (!amdgpu_fb) {
3624                 *tiling_flags = 0;
3625                 *tmz_surface = false;
3626                 return 0;
3627         }
3628
3629         rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3630         r = amdgpu_bo_reserve(rbo, false);
3631
3632         if (unlikely(r)) {
3633                 /* Don't show error message when returning -ERESTARTSYS */
3634                 if (r != -ERESTARTSYS)
3635                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3636                 return r;
3637         }
3638
3639         if (tiling_flags)
3640                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3641
3642         if (tmz_surface)
3643                 *tmz_surface = amdgpu_bo_encrypted(rbo);
3644
3645         amdgpu_bo_unreserve(rbo);
3646
3647         return r;
3648 }
3649
3650 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3651 {
3652         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3653
3654         return offset ? (address + offset * 256) : 0;
3655 }
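/*
 * DCC_OFFSET_256B is stored in 256-byte units, so an encoded offset of
 * 0x100 puts the DCC metadata at address + 0x10000, while an offset of 0
 * means the buffer carries no DCC metadata at all.
 */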
3656
3657 static int
3658 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3659                           const struct amdgpu_framebuffer *afb,
3660                           const enum surface_pixel_format format,
3661                           const enum dc_rotation_angle rotation,
3662                           const struct plane_size *plane_size,
3663                           const union dc_tiling_info *tiling_info,
3664                           const uint64_t info,
3665                           struct dc_plane_dcc_param *dcc,
3666                           struct dc_plane_address *address,
3667                           bool force_disable_dcc)
3668 {
3669         struct dc *dc = adev->dm.dc;
3670         struct dc_dcc_surface_param input;
3671         struct dc_surface_dcc_cap output;
3672         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3673         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3674         uint64_t dcc_address;
3675
3676         memset(&input, 0, sizeof(input));
3677         memset(&output, 0, sizeof(output));
3678
3679         if (force_disable_dcc)
3680                 return 0;
3681
3682         if (!offset)
3683                 return 0;
3684
3685         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3686                 return 0;
3687
3688         if (!dc->cap_funcs.get_dcc_compression_cap)
3689                 return -EINVAL;
3690
3691         input.format = format;
3692         input.surface_size.width = plane_size->surface_size.width;
3693         input.surface_size.height = plane_size->surface_size.height;
3694         input.swizzle_mode = tiling_info->gfx9.swizzle;
3695
3696         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3697                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3698         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3699                 input.scan = SCAN_DIRECTION_VERTICAL;
3700
3701         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3702                 return -EINVAL;
3703
3704         if (!output.capable)
3705                 return -EINVAL;
3706
3707         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3708                 return -EINVAL;
3709
3710         dcc->enable = 1;
3711         dcc->meta_pitch =
3712                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3713         dcc->independent_64b_blks = i64b;
3714
3715         dcc_address = get_dcc_address(afb->address, info);
3716         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3717         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3718
3719         return 0;
3720 }
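/*
 * Note that the independent-64B check above only rejects one direction of
 * mismatch: a surface allocated without DCC_INDEPENDENT_64B cannot be
 * scanned out when DC reports the hardware requires independent 64-byte
 * blocks for this format; the reverse combination is accepted.
 */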
3721
3722 static int
3723 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3724                              const struct amdgpu_framebuffer *afb,
3725                              const enum surface_pixel_format format,
3726                              const enum dc_rotation_angle rotation,
3727                              const uint64_t tiling_flags,
3728                              union dc_tiling_info *tiling_info,
3729                              struct plane_size *plane_size,
3730                              struct dc_plane_dcc_param *dcc,
3731                              struct dc_plane_address *address,
3732                              bool tmz_surface,
3733                              bool force_disable_dcc)
3734 {
3735         const struct drm_framebuffer *fb = &afb->base;
3736         int ret;
3737
3738         memset(tiling_info, 0, sizeof(*tiling_info));
3739         memset(plane_size, 0, sizeof(*plane_size));
3740         memset(dcc, 0, sizeof(*dcc));
3741         memset(address, 0, sizeof(*address));
3742
3743         address->tmz_surface = tmz_surface;
3744
3745         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3746                 plane_size->surface_size.x = 0;
3747                 plane_size->surface_size.y = 0;
3748                 plane_size->surface_size.width = fb->width;
3749                 plane_size->surface_size.height = fb->height;
3750                 plane_size->surface_pitch =
3751                         fb->pitches[0] / fb->format->cpp[0];
3752
3753                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3754                 address->grph.addr.low_part = lower_32_bits(afb->address);
3755                 address->grph.addr.high_part = upper_32_bits(afb->address);
3756         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3757                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3758
3759                 plane_size->surface_size.x = 0;
3760                 plane_size->surface_size.y = 0;
3761                 plane_size->surface_size.width = fb->width;
3762                 plane_size->surface_size.height = fb->height;
3763                 plane_size->surface_pitch =
3764                         fb->pitches[0] / fb->format->cpp[0];
3765
3766                 plane_size->chroma_size.x = 0;
3767                 plane_size->chroma_size.y = 0;
3768                 /* TODO: set these based on surface format */
3769                 plane_size->chroma_size.width = fb->width / 2;
3770                 plane_size->chroma_size.height = fb->height / 2;
3771
3772                 plane_size->chroma_pitch =
3773                         fb->pitches[1] / fb->format->cpp[1];
3774
3775                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3776                 address->video_progressive.luma_addr.low_part =
3777                         lower_32_bits(afb->address);
3778                 address->video_progressive.luma_addr.high_part =
3779                         upper_32_bits(afb->address);
3780                 address->video_progressive.chroma_addr.low_part =
3781                         lower_32_bits(chroma_addr);
3782                 address->video_progressive.chroma_addr.high_part =
3783                         upper_32_bits(chroma_addr);
3784         }
3785
3786         /* Fill GFX8 params */
3787         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3788                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3789
3790                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3791                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3792                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3793                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3794                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3795
3796                 /* XXX fix me for VI */
3797                 tiling_info->gfx8.num_banks = num_banks;
3798                 tiling_info->gfx8.array_mode =
3799                                 DC_ARRAY_2D_TILED_THIN1;
3800                 tiling_info->gfx8.tile_split = tile_split;
3801                 tiling_info->gfx8.bank_width = bankw;
3802                 tiling_info->gfx8.bank_height = bankh;
3803                 tiling_info->gfx8.tile_aspect = mtaspect;
3804                 tiling_info->gfx8.tile_mode =
3805                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3806         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3807                         == DC_ARRAY_1D_TILED_THIN1) {
3808                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3809         }
3810
3811         tiling_info->gfx8.pipe_config =
3812                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3813
3814         if (adev->asic_type == CHIP_VEGA10 ||
3815             adev->asic_type == CHIP_VEGA12 ||
3816             adev->asic_type == CHIP_VEGA20 ||
3817             adev->asic_type == CHIP_NAVI10 ||
3818             adev->asic_type == CHIP_NAVI14 ||
3819             adev->asic_type == CHIP_NAVI12 ||
3820 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3821                 adev->asic_type == CHIP_SIENNA_CICHLID ||
3822                 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3823 #endif
3824             adev->asic_type == CHIP_RENOIR ||
3825             adev->asic_type == CHIP_RAVEN) {
3826                 /* Fill GFX9 params */
3827                 tiling_info->gfx9.num_pipes =
3828                         adev->gfx.config.gb_addr_config_fields.num_pipes;
3829                 tiling_info->gfx9.num_banks =
3830                         adev->gfx.config.gb_addr_config_fields.num_banks;
3831                 tiling_info->gfx9.pipe_interleave =
3832                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3833                 tiling_info->gfx9.num_shader_engines =
3834                         adev->gfx.config.gb_addr_config_fields.num_se;
3835                 tiling_info->gfx9.max_compressed_frags =
3836                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3837                 tiling_info->gfx9.num_rb_per_se =
3838                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3839                 tiling_info->gfx9.swizzle =
3840                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3841                 tiling_info->gfx9.shaderEnable = 1;
3842
3843 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3844                 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3845                     adev->asic_type == CHIP_NAVY_FLOUNDER)
3846                         tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3847 #endif
3848                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3849                                                 plane_size, tiling_info,
3850                                                 tiling_flags, dcc, address,
3851                                                 force_disable_dcc);
3852                 if (ret)
3853                         return ret;
3854         }
3855
3856         return 0;
3857 }
3858
3859 static void
3860 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3861                                bool *per_pixel_alpha, bool *global_alpha,
3862                                int *global_alpha_value)
3863 {
3864         *per_pixel_alpha = false;
3865         *global_alpha = false;
3866         *global_alpha_value = 0xff;
3867
3868         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3869                 return;
3870
3871         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3872                 static const uint32_t alpha_formats[] = {
3873                         DRM_FORMAT_ARGB8888,
3874                         DRM_FORMAT_RGBA8888,
3875                         DRM_FORMAT_ABGR8888,
3876                 };
3877                 uint32_t format = plane_state->fb->format->format;
3878                 unsigned int i;
3879
3880                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3881                         if (format == alpha_formats[i]) {
3882                                 *per_pixel_alpha = true;
3883                                 break;
3884                         }
3885                 }
3886         }
3887
3888         if (plane_state->alpha < 0xffff) {
3889                 *global_alpha = true;
3890                 *global_alpha_value = plane_state->alpha >> 8;
3891         }
3892 }
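/*
 * plane_state->alpha is a 16-bit DRM property where 0xffff is opaque; the
 * >> 8 above folds it into the 8-bit range DC expects, e.g. a 50% alpha
 * of 0x8000 becomes a global_alpha_value of 0x80.
 */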
3893
3894 static int
3895 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3896                             const enum surface_pixel_format format,
3897                             enum dc_color_space *color_space)
3898 {
3899         bool full_range;
3900
3901         *color_space = COLOR_SPACE_SRGB;
3902
3903         /* DRM color properties only affect non-RGB formats. */
3904         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3905                 return 0;
3906
3907         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3908
3909         switch (plane_state->color_encoding) {
3910         case DRM_COLOR_YCBCR_BT601:
3911                 if (full_range)
3912                         *color_space = COLOR_SPACE_YCBCR601;
3913                 else
3914                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3915                 break;
3916
3917         case DRM_COLOR_YCBCR_BT709:
3918                 if (full_range)
3919                         *color_space = COLOR_SPACE_YCBCR709;
3920                 else
3921                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3922                 break;
3923
3924         case DRM_COLOR_YCBCR_BT2020:
3925                 if (full_range)
3926                         *color_space = COLOR_SPACE_2020_YCBCR;
3927                 else
3928                         return -EINVAL;
3929                 break;
3930
3931         default:
3932                 return -EINVAL;
3933         }
3934
3935         return 0;
3936 }
3937
3938 static int
3939 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3940                             const struct drm_plane_state *plane_state,
3941                             const uint64_t tiling_flags,
3942                             struct dc_plane_info *plane_info,
3943                             struct dc_plane_address *address,
3944                             bool tmz_surface,
3945                             bool force_disable_dcc)
3946 {
3947         const struct drm_framebuffer *fb = plane_state->fb;
3948         const struct amdgpu_framebuffer *afb =
3949                 to_amdgpu_framebuffer(plane_state->fb);
3950         struct drm_format_name_buf format_name;
3951         int ret;
3952
3953         memset(plane_info, 0, sizeof(*plane_info));
3954
3955         switch (fb->format->format) {
3956         case DRM_FORMAT_C8:
3957                 plane_info->format =
3958                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3959                 break;
3960         case DRM_FORMAT_RGB565:
3961                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3962                 break;
3963         case DRM_FORMAT_XRGB8888:
3964         case DRM_FORMAT_ARGB8888:
3965                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3966                 break;
3967         case DRM_FORMAT_XRGB2101010:
3968         case DRM_FORMAT_ARGB2101010:
3969                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3970                 break;
3971         case DRM_FORMAT_XBGR2101010:
3972         case DRM_FORMAT_ABGR2101010:
3973                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3974                 break;
3975         case DRM_FORMAT_XBGR8888:
3976         case DRM_FORMAT_ABGR8888:
3977                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3978                 break;
3979         case DRM_FORMAT_NV21:
3980                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3981                 break;
3982         case DRM_FORMAT_NV12:
3983                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3984                 break;
3985         case DRM_FORMAT_P010:
3986                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3987                 break;
3988         case DRM_FORMAT_XRGB16161616F:
3989         case DRM_FORMAT_ARGB16161616F:
3990                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3991                 break;
3992         case DRM_FORMAT_XBGR16161616F:
3993         case DRM_FORMAT_ABGR16161616F:
3994                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3995                 break;
3996         default:
3997                 DRM_ERROR(
3998                         "Unsupported screen format %s\n",
3999                         drm_get_format_name(fb->format->format, &format_name));
4000                 return -EINVAL;
4001         }
4002
4003         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4004         case DRM_MODE_ROTATE_0:
4005                 plane_info->rotation = ROTATION_ANGLE_0;
4006                 break;
4007         case DRM_MODE_ROTATE_90:
4008                 plane_info->rotation = ROTATION_ANGLE_90;
4009                 break;
4010         case DRM_MODE_ROTATE_180:
4011                 plane_info->rotation = ROTATION_ANGLE_180;
4012                 break;
4013         case DRM_MODE_ROTATE_270:
4014                 plane_info->rotation = ROTATION_ANGLE_270;
4015                 break;
4016         default:
4017                 plane_info->rotation = ROTATION_ANGLE_0;
4018                 break;
4019         }
4020
4021         plane_info->visible = true;
4022         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4023
4024         plane_info->layer_index = 0;
4025
4026         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4027                                           &plane_info->color_space);
4028         if (ret)
4029                 return ret;
4030
4031         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4032                                            plane_info->rotation, tiling_flags,
4033                                            &plane_info->tiling_info,
4034                                            &plane_info->plane_size,
4035                                            &plane_info->dcc, address, tmz_surface,
4036                                            force_disable_dcc);
4037         if (ret)
4038                 return ret;
4039
4040         fill_blending_from_plane_state(
4041                 plane_state, &plane_info->per_pixel_alpha,
4042                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4043
4044         return 0;
4045 }
4046
4047 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4048                                     struct dc_plane_state *dc_plane_state,
4049                                     struct drm_plane_state *plane_state,
4050                                     struct drm_crtc_state *crtc_state)
4051 {
4052         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4053         struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4054         struct dc_scaling_info scaling_info;
4055         struct dc_plane_info plane_info;
4056         int ret;
4057         bool force_disable_dcc = false;
4058
4059         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4060         if (ret)
4061                 return ret;
4062
4063         dc_plane_state->src_rect = scaling_info.src_rect;
4064         dc_plane_state->dst_rect = scaling_info.dst_rect;
4065         dc_plane_state->clip_rect = scaling_info.clip_rect;
4066         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4067
4068         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4069         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4070                                           dm_plane_state->tiling_flags,
4071                                           &plane_info,
4072                                           &dc_plane_state->address,
4073                                           dm_plane_state->tmz_surface,
4074                                           force_disable_dcc);
4075         if (ret)
4076                 return ret;
4077
4078         dc_plane_state->format = plane_info.format;
4079         dc_plane_state->color_space = plane_info.color_space;
4081         dc_plane_state->plane_size = plane_info.plane_size;
4082         dc_plane_state->rotation = plane_info.rotation;
4083         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4084         dc_plane_state->stereo_format = plane_info.stereo_format;
4085         dc_plane_state->tiling_info = plane_info.tiling_info;
4086         dc_plane_state->visible = plane_info.visible;
4087         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4088         dc_plane_state->global_alpha = plane_info.global_alpha;
4089         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4090         dc_plane_state->dcc = plane_info.dcc;
4091         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4092
4093         /*
4094          * Always set input transfer function, since plane state is refreshed
4095          * every time.
4096          */
4097         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4098         if (ret)
4099                 return ret;
4100
4101         return 0;
4102 }
4103
4104 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4105                                            const struct dm_connector_state *dm_state,
4106                                            struct dc_stream_state *stream)
4107 {
4108         enum amdgpu_rmx_type rmx_type;
4109
4110         struct rect src = { 0 }; /* viewport in composition space */
4111         struct rect dst = { 0 }; /* stream addressable area */
4112
4113         /* no mode. nothing to be done */
4114         if (!mode)
4115                 return;
4116
4117         /* Full screen scaling by default */
4118         src.width = mode->hdisplay;
4119         src.height = mode->vdisplay;
4120         dst.width = stream->timing.h_addressable;
4121         dst.height = stream->timing.v_addressable;
4122
4123         if (dm_state) {
4124                 rmx_type = dm_state->scaling;
4125                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4126                         if (src.width * dst.height <
4127                                         src.height * dst.width) {
4128                                 /* height needs less upscaling/more downscaling */
4129                                 dst.width = src.width *
4130                                                 dst.height / src.height;
4131                         } else {
4132                                 /* width needs less upscaling/more downscaling */
4133                                 dst.height = src.height *
4134                                                 dst.width / src.width;
4135                         }
4136                 } else if (rmx_type == RMX_CENTER) {
4137                         dst = src;
4138                 }
4139
4140                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4141                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4142
4143                 if (dm_state->underscan_enable) {
4144                         dst.x += dm_state->underscan_hborder / 2;
4145                         dst.y += dm_state->underscan_vborder / 2;
4146                         dst.width -= dm_state->underscan_hborder;
4147                         dst.height -= dm_state->underscan_vborder;
4148                 }
4149         }
4150
4151         stream->src = src;
4152         stream->dst = dst;
4153
4154         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4155                         dst.x, dst.y, dst.width, dst.height);
4156
4157 }
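/*
 * Worked RMX_ASPECT example with hypothetical sizes: for a 1280x1024 mode
 * on a 1920x1080 stream, src.width * dst.height (1382400) is less than
 * src.height * dst.width (1966080), so dst.width shrinks to
 * 1280 * 1080 / 1024 = 1350 and is centered at x = (1920 - 1350) / 2 = 285,
 * pillarboxing the image instead of stretching it.
 */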
4158
4159 static enum dc_color_depth
4160 convert_color_depth_from_display_info(const struct drm_connector *connector,
4161                                       bool is_y420, int requested_bpc)
4162 {
4163         uint8_t bpc;
4164
4165         if (is_y420) {
4166                 bpc = 8;
4167
4168                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4169                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4170                         bpc = 16;
4171                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4172                         bpc = 12;
4173                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4174                         bpc = 10;
4175         } else {
4176                 bpc = (uint8_t)connector->display_info.bpc;
4177                 /* Assume 8 bpc by default if no bpc is specified. */
4178                 bpc = bpc ? bpc : 8;
4179         }
4180
4181         if (requested_bpc > 0) {
4182                 /*
4183                  * Cap display bpc based on the user requested value.
4184                  *
4185                  * The value for state->max_bpc may not be correctly updated
4186                  * depending on when the connector gets added to the state
4187                  * or if this was called outside of atomic check, so it
4188                  * can't be used directly.
4189                  */
4190                 bpc = min_t(u8, bpc, requested_bpc);
4191
4192                 /* Round down to the nearest even number. */
4193                 bpc = bpc - (bpc & 1);
4194         }
4195
4196         switch (bpc) {
4197         case 0:
4198                 /*
4199                  * Temporary workaround: DRM doesn't parse color depth for
4200                  * EDID revisions before 1.4.
4201                  * TODO: Fix edid parsing
4202                  */
4203                 return COLOR_DEPTH_888;
4204         case 6:
4205                 return COLOR_DEPTH_666;
4206         case 8:
4207                 return COLOR_DEPTH_888;
4208         case 10:
4209                 return COLOR_DEPTH_101010;
4210         case 12:
4211                 return COLOR_DEPTH_121212;
4212         case 14:
4213                 return COLOR_DEPTH_141414;
4214         case 16:
4215                 return COLOR_DEPTH_161616;
4216         default:
4217                 return COLOR_DEPTH_UNDEFINED;
4218         }
4219 }
4220
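/*
 * Worked example for the capping above (values hypothetical): a panel
 * reporting 12 bpc with requested_bpc == 11 gives min(12, 11) == 11, and
 * the round-down-to-even step (bpc - (bpc & 1)) yields 10, i.e.
 * COLOR_DEPTH_101010. With requested_bpc <= 0 the EDID-reported value is
 * used unchanged.
 */
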
4221 static enum dc_aspect_ratio
4222 get_aspect_ratio(const struct drm_display_mode *mode_in)
4223 {
4224         /* 1-1 mapping, since both enums follow the HDMI spec. */
4225         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4226 }
4227
4228 static enum dc_color_space
4229 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4230 {
4231         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4232
4233         switch (dc_crtc_timing->pixel_encoding) {
4234         case PIXEL_ENCODING_YCBCR422:
4235         case PIXEL_ENCODING_YCBCR444:
4236         case PIXEL_ENCODING_YCBCR420:
4237         {
4238                 /*
4239                  * 27030 kHz is the separation point between HDTV and SDTV
4240                  * according to the HDMI spec; we use YCbCr709 and YCbCr601
4241                  * respectively.
4242                  */
4243                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4244                         if (dc_crtc_timing->flags.Y_ONLY)
4245                                 color_space =
4246                                         COLOR_SPACE_YCBCR709_LIMITED;
4247                         else
4248                                 color_space = COLOR_SPACE_YCBCR709;
4249                 } else {
4250                         if (dc_crtc_timing->flags.Y_ONLY)
4251                                 color_space =
4252                                         COLOR_SPACE_YCBCR601_LIMITED;
4253                         else
4254                                 color_space = COLOR_SPACE_YCBCR601;
4255                 }
4256
4257         }
4258         break;
4259         case PIXEL_ENCODING_RGB:
4260                 color_space = COLOR_SPACE_SRGB;
4261                 break;
4262
4263         default:
4264                 WARN_ON(1);
4265                 break;
4266         }
4267
4268         return color_space;
4269 }
4270
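/*
 * Note on units for the threshold above: pix_clk_100hz is in 100 Hz units,
 * so 270300 corresponds to 27.03 MHz. E.g. a 480p YCbCr timing at 27.000
 * MHz (270000) selects YCbCr601, while 720p60 at 74.25 MHz (742500)
 * selects YCbCr709.
 */
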
4271 static bool adjust_colour_depth_from_display_info(
4272         struct dc_crtc_timing *timing_out,
4273         const struct drm_display_info *info)
4274 {
4275         enum dc_color_depth depth = timing_out->display_color_depth;
4276         int normalized_clk;
4277         do {
4278                 normalized_clk = timing_out->pix_clk_100hz / 10;
4279                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4280                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4281                         normalized_clk /= 2;
4282                 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
4283                 switch (depth) {
4284                 case COLOR_DEPTH_888:
4285                         break;
4286                 case COLOR_DEPTH_101010:
4287                         normalized_clk = (normalized_clk * 30) / 24;
4288                         break;
4289                 case COLOR_DEPTH_121212:
4290                         normalized_clk = (normalized_clk * 36) / 24;
4291                         break;
4292                 case COLOR_DEPTH_161616:
4293                         normalized_clk = (normalized_clk * 48) / 24;
4294                         break;
4295                 default:
4296                         /* The above depths are the only ones valid for HDMI. */
4297                         return false;
4298                 }
4299                 if (normalized_clk <= info->max_tmds_clock) {
4300                         timing_out->display_color_depth = depth;
4301                         return true;
4302                 }
4303         } while (--depth > COLOR_DEPTH_666);
4304         return false;
4305 }
4306
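/*
 * Worked example for the loop above (hypothetical sink): with
 * pix_clk_100hz == 3000000 (300 MHz), normalized_clk starts at 300000 kHz.
 * At COLOR_DEPTH_101010 it becomes 300000 * 30 / 24 == 375000 kHz; if
 * info->max_tmds_clock is 340000 kHz that fails, the depth is decremented,
 * and the 8-bit pass (300000 <= 340000) succeeds, so the stream falls back
 * to COLOR_DEPTH_888.
 */
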
4307 static void fill_stream_properties_from_drm_display_mode(
4308         struct dc_stream_state *stream,
4309         const struct drm_display_mode *mode_in,
4310         const struct drm_connector *connector,
4311         const struct drm_connector_state *connector_state,
4312         const struct dc_stream_state *old_stream,
4313         int requested_bpc)
4314 {
4315         struct dc_crtc_timing *timing_out = &stream->timing;
4316         const struct drm_display_info *info = &connector->display_info;
4317         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4318         struct hdmi_vendor_infoframe hv_frame;
4319         struct hdmi_avi_infoframe avi_frame;
4320
4321         memset(&hv_frame, 0, sizeof(hv_frame));
4322         memset(&avi_frame, 0, sizeof(avi_frame));
4323
4324         timing_out->h_border_left = 0;
4325         timing_out->h_border_right = 0;
4326         timing_out->v_border_top = 0;
4327         timing_out->v_border_bottom = 0;
4328         /* TODO: un-hardcode */
4329         if (drm_mode_is_420_only(info, mode_in)
4330                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4331                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4332         else if (drm_mode_is_420_also(info, mode_in)
4333                         && aconnector->force_yuv420_output)
4334                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4335         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4336                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4337                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4338         else
4339                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4340
4341         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4342         timing_out->display_color_depth = convert_color_depth_from_display_info(
4343                 connector,
4344                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4345                 requested_bpc);
4346         timing_out->scan_type = SCANNING_TYPE_NODATA;
4347         timing_out->hdmi_vic = 0;
4348
4349         if (old_stream) {
4350                 timing_out->vic = old_stream->timing.vic;
4351                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4352                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4353         } else {
4354                 timing_out->vic = drm_match_cea_mode(mode_in);
4355                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4356                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4357                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4358                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4359         }
4360
4361         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4362                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4363                 timing_out->vic = avi_frame.video_code;
4364                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4365                 timing_out->hdmi_vic = hv_frame.vic;
4366         }
4367
4368         timing_out->h_addressable = mode_in->crtc_hdisplay;
4369         timing_out->h_total = mode_in->crtc_htotal;
4370         timing_out->h_sync_width =
4371                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4372         timing_out->h_front_porch =
4373                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4374         timing_out->v_total = mode_in->crtc_vtotal;
4375         timing_out->v_addressable = mode_in->crtc_vdisplay;
4376         timing_out->v_front_porch =
4377                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4378         timing_out->v_sync_width =
4379                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4380         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4381         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4382
4383         stream->output_color_space = get_output_color_space(timing_out);
4384
4385         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4386         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4387         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4388                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4389                     drm_mode_is_420_also(info, mode_in) &&
4390                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4391                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4392                         adjust_colour_depth_from_display_info(timing_out, info);
4393                 }
4394         }
4395 }
4396
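/*
 * Worked example for the porch/sync maths above, using the standard
 * CEA-861 1080p60 timing: crtc_hdisplay 1920, crtc_hsync_start 2008,
 * crtc_hsync_end 2052 and crtc_htotal 2200 give h_front_porch 88 and
 * h_sync_width 44; crtc_clock 148500 kHz gives pix_clk_100hz 1485000.
 */
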
4397 static void fill_audio_info(struct audio_info *audio_info,
4398                             const struct drm_connector *drm_connector,
4399                             const struct dc_sink *dc_sink)
4400 {
4401         int i = 0;
4402         int cea_revision = 0;
4403         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4404
4405         audio_info->manufacture_id = edid_caps->manufacturer_id;
4406         audio_info->product_id = edid_caps->product_id;
4407
4408         cea_revision = drm_connector->display_info.cea_rev;
4409
4410         strscpy(audio_info->display_name,
4411                 edid_caps->display_name,
4412                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4413
4414         if (cea_revision >= 3) {
4415                 audio_info->mode_count = edid_caps->audio_mode_count;
4416
4417                 for (i = 0; i < audio_info->mode_count; ++i) {
4418                         audio_info->modes[i].format_code =
4419                                         (enum audio_format_code)
4420                                         (edid_caps->audio_modes[i].format_code);
4421                         audio_info->modes[i].channel_count =
4422                                         edid_caps->audio_modes[i].channel_count;
4423                         audio_info->modes[i].sample_rates.all =
4424                                         edid_caps->audio_modes[i].sample_rate;
4425                         audio_info->modes[i].sample_size =
4426                                         edid_caps->audio_modes[i].sample_size;
4427                 }
4428         }
4429
4430         audio_info->flags.all = edid_caps->speaker_flags;
4431
4432         /* TODO: We only check for progressive mode; check for interlaced mode too */
4433         if (drm_connector->latency_present[0]) {
4434                 audio_info->video_latency = drm_connector->video_latency[0];
4435                 audio_info->audio_latency = drm_connector->audio_latency[0];
4436         }
4437
4438         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4439
4440 }
4441
4442 static void
4443 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4444                                       struct drm_display_mode *dst_mode)
4445 {
4446         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4447         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4448         dst_mode->crtc_clock = src_mode->crtc_clock;
4449         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4450         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4451         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4452         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4453         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4454         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4455         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4456         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4457         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4458         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4459         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4460 }
4461
4462 static void
4463 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4464                                         const struct drm_display_mode *native_mode,
4465                                         bool scale_enabled)
4466 {
4467         if (scale_enabled) {
4468                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4469         } else if (native_mode->clock == drm_mode->clock &&
4470                         native_mode->htotal == drm_mode->htotal &&
4471                         native_mode->vtotal == drm_mode->vtotal) {
4472                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4473         } else {
4474                 /* no scaling and no amdgpu-inserted timing; nothing to patch */
4475         }
4476 }
4477
4478 static struct dc_sink *
4479 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4480 {
4481         struct dc_sink_init_data sink_init_data = { 0 };
4482         struct dc_sink *sink = NULL;
4483         sink_init_data.link = aconnector->dc_link;
4484         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4485
4486         sink = dc_sink_create(&sink_init_data);
4487         if (!sink) {
4488                 DRM_ERROR("Failed to create sink!\n");
4489                 return NULL;
4490         }
4491         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4492
4493         return sink;
4494 }
4495
4496 static void set_multisync_trigger_params(
4497                 struct dc_stream_state *stream)
4498 {
4499         if (stream->triggered_crtc_reset.enabled) {
4500                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4501                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4502         }
4503 }
4504
4505 static void set_master_stream(struct dc_stream_state *stream_set[],
4506                               int stream_count)
4507 {
4508         int j, highest_rfr = 0, master_stream = 0;
4509
4510         for (j = 0;  j < stream_count; j++) {
4511                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4512                         int refresh_rate = 0;
4513
4514                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4515                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4516                         if (refresh_rate > highest_rfr) {
4517                                 highest_rfr = refresh_rate;
4518                                 master_stream = j;
4519                         }
4520                 }
4521         }
4522         for (j = 0;  j < stream_count; j++) {
4523                 if (stream_set[j])
4524                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4525         }
4526 }
4527
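/*
 * The refresh-rate expression above works in 100 Hz units: for the 1080p60
 * timing (pix_clk_100hz 1485000, h_total 2200, v_total 1125) it yields
 * 1485000 * 100 / (2200 * 1125) == 60, so that stream would win the master
 * election against any lower-rate stream.
 */
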
4528 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4529 {
4530         int i = 0;
4531
4532         if (context->stream_count < 2)
4533                 return;
4534         for (i = 0; i < context->stream_count; i++) {
4535                 if (!context->streams[i])
4536                         continue;
4537                 /*
4538                  * TODO: add a function to read AMD VSDB bits and set
4539                  * crtc_sync_master.multi_sync_enabled flag
4540                  * For now it's set to false
4541                  */
4542                 set_multisync_trigger_params(context->streams[i]);
4543         }
4544         set_master_stream(context->streams, context->stream_count);
4545 }
4546
4547 static struct dc_stream_state *
4548 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4549                        const struct drm_display_mode *drm_mode,
4550                        const struct dm_connector_state *dm_state,
4551                        const struct dc_stream_state *old_stream,
4552                        int requested_bpc)
4553 {
4554         struct drm_display_mode *preferred_mode = NULL;
4555         struct drm_connector *drm_connector;
4556         const struct drm_connector_state *con_state =
4557                 dm_state ? &dm_state->base : NULL;
4558         struct dc_stream_state *stream = NULL;
4559         struct drm_display_mode mode = *drm_mode;
4560         bool native_mode_found = false;
4561         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4562         int mode_refresh;
4563         int preferred_refresh = 0;
4564 #if defined(CONFIG_DRM_AMD_DC_DCN)
4565         struct dsc_dec_dpcd_caps dsc_caps;
4566 #endif
4567         uint32_t link_bandwidth_kbps;
4568         struct dc_sink *sink = NULL;
4569 
4570         if (aconnector == NULL) {
4571                 DRM_ERROR("aconnector is NULL!\n");
4572                 return stream;
4573         }
4574
4575         drm_connector = &aconnector->base;
4576
4577         if (!aconnector->dc_sink) {
4578                 sink = create_fake_sink(aconnector);
4579                 if (!sink)
4580                         return stream;
4581         } else {
4582                 sink = aconnector->dc_sink;
4583                 dc_sink_retain(sink);
4584         }
4585
4586         stream = dc_create_stream_for_sink(sink);
4587
4588         if (stream == NULL) {
4589                 DRM_ERROR("Failed to create stream for sink!\n");
4590                 goto finish;
4591         }
4592
4593         stream->dm_stream_context = aconnector;
4594
4595         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4596                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4597
4598         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4599                 /* Search for preferred mode */
4600                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4601                         native_mode_found = true;
4602                         break;
4603                 }
4604         }
4605         if (!native_mode_found)
4606                 preferred_mode = list_first_entry_or_null(
4607                                 &aconnector->base.modes,
4608                                 struct drm_display_mode,
4609                                 head);
4610
4611         mode_refresh = drm_mode_vrefresh(&mode);
4612
4613         if (preferred_mode == NULL) {
4614                 /*
4615                  * This may not be an error: the use case is a hotplug with no
4616                  * usermode call to reset and set the mode. In that case we call
4617                  * set mode ourselves to restore the previous mode, and the mode
4618                  * list may not be filled in yet.
4619                  */
4620                 DRM_DEBUG_DRIVER("No preferred mode found\n");
4621         } else {
4622                 decide_crtc_timing_for_drm_display_mode(
4623                                 &mode, preferred_mode,
4624                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4625                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4626         }
4627
4628         if (!dm_state)
4629                 drm_mode_set_crtcinfo(&mode, 0);
4630
4631         /*
4632          * If scaling is enabled and the refresh rate didn't change,
4633          * we copy the vic and polarities of the old timings.
4634          */
4635         if (!scale || mode_refresh != preferred_refresh)
4636                 fill_stream_properties_from_drm_display_mode(stream,
4637                         &mode, &aconnector->base, con_state, NULL, requested_bpc);
4638         else
4639                 fill_stream_properties_from_drm_display_mode(stream,
4640                         &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4641
4642         stream->timing.flags.DSC = 0;
4643
4644         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4645 #if defined(CONFIG_DRM_AMD_DC_DCN)
4646                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4647                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4648                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4649                                       &dsc_caps);
4650 #endif
4651                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4652                                                              dc_link_get_link_cap(aconnector->dc_link));
4653
4654 #if defined(CONFIG_DRM_AMD_DC_DCN)
4655                 if (dsc_caps.is_dsc_supported) {
4656                         /* Set DSC policy according to dsc_clock_en */
4657                         dc_dsc_policy_set_enable_dsc_when_not_needed(aconnector->dsc_settings.dsc_clock_en);
4658
4659                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4660                                                   &dsc_caps,
4661                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4662                                                   link_bandwidth_kbps,
4663                                                   &stream->timing,
4664                                                   &stream->timing.dsc_cfg))
4665                                 stream->timing.flags.DSC = 1;
4666                         /* Overwrite the stream flag if DSC is enabled through debugfs */
4667                         if (aconnector->dsc_settings.dsc_clock_en)
4668                                 stream->timing.flags.DSC = 1;
4669
4670                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
4671                                 stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
4672                                                                         aconnector->dsc_settings.dsc_slice_width);
4673
4674                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
4675                                 stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
4676                                                                         aconnector->dsc_settings.dsc_slice_height);
4677
4678                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4679                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4680                 }
4681 #endif
4682         }
4683
4684         update_stream_scaling_settings(&mode, dm_state, stream);
4685
4686         fill_audio_info(
4687                 &stream->audio_info,
4688                 drm_connector,
4689                 sink);
4690
4691         update_stream_signal(stream, sink);
4692
4693         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4694                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4695
4696         if (stream->link->psr_settings.psr_feature_enabled) {
4697                 //
4698                 // Decide whether the stream supports VSC SDP colorimetry
4699                 // before building the VSC info packet.
4700                 //
4701                 stream->use_vsc_sdp_for_colorimetry = false;
4702                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4703                         stream->use_vsc_sdp_for_colorimetry =
4704                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4705                 } else {
4706                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4707                                 stream->use_vsc_sdp_for_colorimetry = true;
4708                 }
4709                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4710         }
4711 finish:
4712         dc_sink_release(sink);
4713
4714         return stream;
4715 }
4716
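/*
 * Example of the debugfs slice overrides above (hypothetical values):
 * forcing dsc_slice_width to 1920 on a 3840-wide timing gives
 * num_slices_h = DIV_ROUND_UP(3840, 1920) == 2, i.e. two horizontal DSC
 * slices per line.
 */
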
4717 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4718 {
4719         drm_crtc_cleanup(crtc);
4720         kfree(crtc);
4721 }
4722
4723 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4724                                   struct drm_crtc_state *state)
4725 {
4726         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4727
4728         /* TODO: Destroy dc_stream objects once the stream object is flattened */
4729         if (cur->stream)
4730                 dc_stream_release(cur->stream);
4731 
4732         __drm_atomic_helper_crtc_destroy_state(state);
4733 
4736         kfree(state);
4737 }
4738
4739 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4740 {
4741         struct dm_crtc_state *state;
4742
4743         if (crtc->state)
4744                 dm_crtc_destroy_state(crtc, crtc->state);
4745
4746         state = kzalloc(sizeof(*state), GFP_KERNEL);
4747         if (WARN_ON(!state))
4748                 return;
4749
4750         crtc->state = &state->base;
4751         crtc->state->crtc = crtc;
4752
4753 }
4754
4755 static struct drm_crtc_state *
4756 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4757 {
4758         struct dm_crtc_state *state, *cur;
4759
4760         if (WARN_ON(!crtc->state))
4761                 return NULL;
4762 
4763         cur = to_dm_crtc_state(crtc->state);
4764
4765         state = kzalloc(sizeof(*state), GFP_KERNEL);
4766         if (!state)
4767                 return NULL;
4768
4769         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4770
4771         if (cur->stream) {
4772                 state->stream = cur->stream;
4773                 dc_stream_retain(state->stream);
4774         }
4775
4776         state->active_planes = cur->active_planes;
4777         state->vrr_params = cur->vrr_params;
4778         state->vrr_infopacket = cur->vrr_infopacket;
4779         state->abm_level = cur->abm_level;
4780         state->vrr_supported = cur->vrr_supported;
4781         state->freesync_config = cur->freesync_config;
4782         state->crc_src = cur->crc_src;
4783         state->cm_has_degamma = cur->cm_has_degamma;
4784         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4785
4786         /* TODO: Duplicate dc_stream once the stream object is flattened */
4787
4788         return &state->base;
4789 }
4790
4791 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4792 {
4793         enum dc_irq_source irq_source;
4794         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4795         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4796         int rc;
4797
4798         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4799
4800         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4801
4802         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4803                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4804         return rc;
4805 }
4806
4807 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4808 {
4809         enum dc_irq_source irq_source;
4810         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4811         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4812         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4813         int rc = 0;
4814
4815         if (enable) {
4816                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4817                 if (amdgpu_dm_vrr_active(acrtc_state))
4818                         rc = dm_set_vupdate_irq(crtc, true);
4819         } else {
4820                 /* vblank irq off -> vupdate irq off */
4821                 rc = dm_set_vupdate_irq(crtc, false);
4822         }
4823
4824         if (rc)
4825                 return rc;
4826
4827         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4828         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4829 }
4830
4831 static int dm_enable_vblank(struct drm_crtc *crtc)
4832 {
4833         return dm_set_vblank(crtc, true);
4834 }
4835
4836 static void dm_disable_vblank(struct drm_crtc *crtc)
4837 {
4838         dm_set_vblank(crtc, false);
4839 }
4840
4841 /* Only the options currently available to the driver are implemented */
4842 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4843         .reset = dm_crtc_reset_state,
4844         .destroy = amdgpu_dm_crtc_destroy,
4845         .gamma_set = drm_atomic_helper_legacy_gamma_set,
4846         .set_config = drm_atomic_helper_set_config,
4847         .page_flip = drm_atomic_helper_page_flip,
4848         .atomic_duplicate_state = dm_crtc_duplicate_state,
4849         .atomic_destroy_state = dm_crtc_destroy_state,
4850         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4851         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4852         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4853         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4854         .enable_vblank = dm_enable_vblank,
4855         .disable_vblank = dm_disable_vblank,
4856         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4857 };
4858
4859 static enum drm_connector_status
4860 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4861 {
4862         bool connected;
4863         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4864
4865         /*
4866          * Notes:
4867          * 1. This interface is NOT called in context of HPD irq.
4868          * 2. This interface *is called* in the context of a user-mode ioctl,
4869          *    which makes it a bad place for *any* MST-related activity.
4870          */
4871
4872         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4873             !aconnector->fake_enable)
4874                 connected = (aconnector->dc_sink != NULL);
4875         else
4876                 connected = (aconnector->base.force == DRM_FORCE_ON);
4877
4878         return (connected ? connector_status_connected :
4879                         connector_status_disconnected);
4880 }
4881
4882 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4883                                             struct drm_connector_state *connector_state,
4884                                             struct drm_property *property,
4885                                             uint64_t val)
4886 {
4887         struct drm_device *dev = connector->dev;
4888         struct amdgpu_device *adev = drm_to_adev(dev);
4889         struct dm_connector_state *dm_old_state =
4890                 to_dm_connector_state(connector->state);
4891         struct dm_connector_state *dm_new_state =
4892                 to_dm_connector_state(connector_state);
4893
4894         int ret = -EINVAL;
4895
4896         if (property == dev->mode_config.scaling_mode_property) {
4897                 enum amdgpu_rmx_type rmx_type;
4898
4899                 switch (val) {
4900                 case DRM_MODE_SCALE_CENTER:
4901                         rmx_type = RMX_CENTER;
4902                         break;
4903                 case DRM_MODE_SCALE_ASPECT:
4904                         rmx_type = RMX_ASPECT;
4905                         break;
4906                 case DRM_MODE_SCALE_FULLSCREEN:
4907                         rmx_type = RMX_FULL;
4908                         break;
4909                 case DRM_MODE_SCALE_NONE:
4910                 default:
4911                         rmx_type = RMX_OFF;
4912                         break;
4913                 }
4914
4915                 if (dm_old_state->scaling == rmx_type)
4916                         return 0;
4917
4918                 dm_new_state->scaling = rmx_type;
4919                 ret = 0;
4920         } else if (property == adev->mode_info.underscan_hborder_property) {
4921                 dm_new_state->underscan_hborder = val;
4922                 ret = 0;
4923         } else if (property == adev->mode_info.underscan_vborder_property) {
4924                 dm_new_state->underscan_vborder = val;
4925                 ret = 0;
4926         } else if (property == adev->mode_info.underscan_property) {
4927                 dm_new_state->underscan_enable = val;
4928                 ret = 0;
4929         } else if (property == adev->mode_info.abm_level_property) {
4930                 dm_new_state->abm_level = val;
4931                 ret = 0;
4932         }
4933
4934         return ret;
4935 }
4936
4937 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4938                                             const struct drm_connector_state *state,
4939                                             struct drm_property *property,
4940                                             uint64_t *val)
4941 {
4942         struct drm_device *dev = connector->dev;
4943         struct amdgpu_device *adev = drm_to_adev(dev);
4944         struct dm_connector_state *dm_state =
4945                 to_dm_connector_state(state);
4946         int ret = -EINVAL;
4947
4948         if (property == dev->mode_config.scaling_mode_property) {
4949                 switch (dm_state->scaling) {
4950                 case RMX_CENTER:
4951                         *val = DRM_MODE_SCALE_CENTER;
4952                         break;
4953                 case RMX_ASPECT:
4954                         *val = DRM_MODE_SCALE_ASPECT;
4955                         break;
4956                 case RMX_FULL:
4957                         *val = DRM_MODE_SCALE_FULLSCREEN;
4958                         break;
4959                 case RMX_OFF:
4960                 default:
4961                         *val = DRM_MODE_SCALE_NONE;
4962                         break;
4963                 }
4964                 ret = 0;
4965         } else if (property == adev->mode_info.underscan_hborder_property) {
4966                 *val = dm_state->underscan_hborder;
4967                 ret = 0;
4968         } else if (property == adev->mode_info.underscan_vborder_property) {
4969                 *val = dm_state->underscan_vborder;
4970                 ret = 0;
4971         } else if (property == adev->mode_info.underscan_property) {
4972                 *val = dm_state->underscan_enable;
4973                 ret = 0;
4974         } else if (property == adev->mode_info.abm_level_property) {
4975                 *val = dm_state->abm_level;
4976                 ret = 0;
4977         }
4978
4979         return ret;
4980 }
4981
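/*
 * Sketch of how userspace reaches the two property handlers above via
 * libdrm (illustrative only; connector_id and scaling_mode_prop_id are
 * assumed to have been looked up with drmModeGetConnector() and
 * drmModeGetProperty() beforehand):
 *
 *	drmModeObjectSetProperty(fd, connector_id,
 *				 DRM_MODE_OBJECT_CONNECTOR,
 *				 scaling_mode_prop_id,
 *				 DRM_MODE_SCALE_ASPECT);
 *
 * The atomic commit then lands in
 * amdgpu_dm_connector_atomic_set_property(), which translates
 * DRM_MODE_SCALE_ASPECT into RMX_ASPECT.
 */
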
4982 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4983 {
4984         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4985
4986         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4987 }
4988
4989 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4990 {
4991         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4992         const struct dc_link *link = aconnector->dc_link;
4993         struct amdgpu_device *adev = drm_to_adev(connector->dev);
4994         struct amdgpu_display_manager *dm = &adev->dm;
4995
4996         drm_atomic_private_obj_fini(&aconnector->mst_mgr.base);
4997 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4998         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4999
5000         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5001             link->type != dc_connection_none &&
5002             dm->backlight_dev) {
5003                 backlight_device_unregister(dm->backlight_dev);
5004                 dm->backlight_dev = NULL;
5005         }
5006 #endif
5007
5008         if (aconnector->dc_em_sink)
5009                 dc_sink_release(aconnector->dc_em_sink);
5010         aconnector->dc_em_sink = NULL;
5011         if (aconnector->dc_sink)
5012                 dc_sink_release(aconnector->dc_sink);
5013         aconnector->dc_sink = NULL;
5014
5015         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5016         drm_connector_unregister(connector);
5017         drm_connector_cleanup(connector);
5018         if (aconnector->i2c) {
5019                 i2c_del_adapter(&aconnector->i2c->base);
5020                 kfree(aconnector->i2c);
5021         }
5022         kfree(aconnector->dm_dp_aux.aux.name);
5023
5024         kfree(connector);
5025 }
5026
5027 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5028 {
5029         struct dm_connector_state *state =
5030                 to_dm_connector_state(connector->state);
5031
5032         if (connector->state)
5033                 __drm_atomic_helper_connector_destroy_state(connector->state);
5034
5035         kfree(state);
5036
5037         state = kzalloc(sizeof(*state), GFP_KERNEL);
5038
5039         if (state) {
5040                 state->scaling = RMX_OFF;
5041                 state->underscan_enable = false;
5042                 state->underscan_hborder = 0;
5043                 state->underscan_vborder = 0;
5044                 state->base.max_requested_bpc = 8;
5045                 state->vcpi_slots = 0;
5046                 state->pbn = 0;
5047                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5048                         state->abm_level = amdgpu_dm_abm_level;
5049
5050                 __drm_atomic_helper_connector_reset(connector, &state->base);
5051         }
5052 }
5053
5054 struct drm_connector_state *
5055 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5056 {
5057         struct dm_connector_state *state =
5058                 to_dm_connector_state(connector->state);
5059
5060         struct dm_connector_state *new_state =
5061                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5062
5063         if (!new_state)
5064                 return NULL;
5065
5066         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5067
5068         new_state->freesync_capable = state->freesync_capable;
5069         new_state->abm_level = state->abm_level;
5070         new_state->scaling = state->scaling;
5071         new_state->underscan_enable = state->underscan_enable;
5072         new_state->underscan_hborder = state->underscan_hborder;
5073         new_state->underscan_vborder = state->underscan_vborder;
5074         new_state->vcpi_slots = state->vcpi_slots;
5075         new_state->pbn = state->pbn;
5076         return &new_state->base;
5077 }
5078
5079 static int
5080 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5081 {
5082         struct amdgpu_dm_connector *amdgpu_dm_connector =
5083                 to_amdgpu_dm_connector(connector);
5084         int r;
5085
5086         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5087             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5088                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5089                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5090                 if (r)
5091                         return r;
5092         }
5093
5094 #if defined(CONFIG_DEBUG_FS)
5095         connector_debugfs_init(amdgpu_dm_connector);
5096 #endif
5097
5098         return 0;
5099 }
5100
5101 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5102         .reset = amdgpu_dm_connector_funcs_reset,
5103         .detect = amdgpu_dm_connector_detect,
5104         .fill_modes = drm_helper_probe_single_connector_modes,
5105         .destroy = amdgpu_dm_connector_destroy,
5106         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5107         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5108         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5109         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5110         .late_register = amdgpu_dm_connector_late_register,
5111         .early_unregister = amdgpu_dm_connector_unregister
5112 };
5113
5114 static int get_modes(struct drm_connector *connector)
5115 {
5116         return amdgpu_dm_connector_get_modes(connector);
5117 }
5118
5119 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5120 {
5121         struct dc_sink_init_data init_params = {
5122                         .link = aconnector->dc_link,
5123                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5124         };
5125         struct edid *edid;
5126
5127         if (!aconnector->base.edid_blob_ptr) {
5128                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5129                                 aconnector->base.name);
5130
5131                 aconnector->base.force = DRM_FORCE_OFF;
5132                 aconnector->base.override_edid = false;
5133                 return;
5134         }
5135
5136         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5137
5138         aconnector->edid = edid;
5139
5140         aconnector->dc_em_sink = dc_link_add_remote_sink(
5141                 aconnector->dc_link,
5142                 (uint8_t *)edid,
5143                 (edid->extensions + 1) * EDID_LENGTH,
5144                 &init_params);
5145
5146         if (aconnector->base.force == DRM_FORCE_ON) {
5147                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5148                 aconnector->dc_link->local_sink :
5149                 aconnector->dc_em_sink;
5150                 dc_sink_retain(aconnector->dc_sink);
5151         }
5152 }
5153
5154 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5155 {
5156         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5157
5158         /*
5159          * In case of a headless boot with force-on for a DP managed connector,
5160          * these settings have to be != 0 to get an initial modeset.
5161          */
5162         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5163                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5164                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5165         }
5166 
5168         aconnector->base.override_edid = true;
5169         create_eml_sink(aconnector);
5170 }
5171
5172 static struct dc_stream_state *
5173 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5174                                 const struct drm_display_mode *drm_mode,
5175                                 const struct dm_connector_state *dm_state,
5176                                 const struct dc_stream_state *old_stream)
5177 {
5178         struct drm_connector *connector = &aconnector->base;
5179         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5180         struct dc_stream_state *stream;
5181         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5182         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5183         enum dc_status dc_result = DC_OK;
5184
5185         do {
5186                 stream = create_stream_for_sink(aconnector, drm_mode,
5187                                                 dm_state, old_stream,
5188                                                 requested_bpc);
5189                 if (stream == NULL) {
5190                         DRM_ERROR("Failed to create stream for sink!\n");
5191                         break;
5192                 }
5193
5194                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5195
5196                 if (dc_result != DC_OK) {
5197                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5198                                       drm_mode->hdisplay,
5199                                       drm_mode->vdisplay,
5200                                       drm_mode->clock,
5201                                       dc_result,
5202                                       dc_status_to_str(dc_result));
5203
5204                         dc_stream_release(stream);
5205                         stream = NULL;
5206                         requested_bpc -= 2; /* lower bpc to retry validation */
5207                 }
5208
5209         } while (stream == NULL && requested_bpc >= 6);
5210
5211         return stream;
5212 }
5213
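/*
 * Example of the retry loop above: with max_requested_bpc == 10, the first
 * attempt validates a 10 bpc stream; if DC rejects it (e.g. the link lacks
 * bandwidth), the loop retries at 8 and then 6 bpc before giving up and
 * returning NULL.
 */
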
5214 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5215                                    struct drm_display_mode *mode)
5216 {
5217         int result = MODE_ERROR;
5218         struct dc_sink *dc_sink;
5219         /* TODO: Unhardcode stream count */
5220         struct dc_stream_state *stream;
5221         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5222
5223         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5224                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5225                 return result;
5226
5227         /*
5228          * Only run this the first time mode_valid is called to initialize
5229          * EDID mgmt
5230          */
5231         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5232                 !aconnector->dc_em_sink)
5233                 handle_edid_mgmt(aconnector);
5234
5235         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5236
5237         if (dc_sink == NULL) {
5238                 DRM_ERROR("dc_sink is NULL!\n");
5239                 goto fail;
5240         }
5241
5242         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5243         if (stream) {
5244                 dc_stream_release(stream);
5245                 result = MODE_OK;
5246         }
5247
5248 fail:
5249         /* TODO: error handling */
5250         return result;
5251 }
5252
5253 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5254                                 struct dc_info_packet *out)
5255 {
5256         struct hdmi_drm_infoframe frame;
5257         unsigned char buf[30]; /* 26 + 4 */
5258         ssize_t len;
5259         int ret, i;
5260
5261         memset(out, 0, sizeof(*out));
5262
5263         if (!state->hdr_output_metadata)
5264                 return 0;
5265
5266         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5267         if (ret)
5268                 return ret;
5269
5270         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5271         if (len < 0)
5272                 return (int)len;
5273
5274         /* Static metadata is a fixed 26 bytes + 4 byte header. */
5275         if (len != 30)
5276                 return -EINVAL;
5277
5278         /* Prepare the infopacket for DC. */
5279         switch (state->connector->connector_type) {
5280         case DRM_MODE_CONNECTOR_HDMIA:
5281                 out->hb0 = 0x87; /* type */
5282                 out->hb1 = 0x01; /* version */
5283                 out->hb2 = 0x1A; /* length */
5284                 out->sb[0] = buf[3]; /* checksum */
5285                 i = 1;
5286                 break;
5287
5288         case DRM_MODE_CONNECTOR_DisplayPort:
5289         case DRM_MODE_CONNECTOR_eDP:
5290                 out->hb0 = 0x00; /* sdp id, zero */
5291                 out->hb1 = 0x87; /* type */
5292                 out->hb2 = 0x1D; /* payload len - 1 */
5293                 out->hb3 = (0x13 << 2); /* sdp version */
5294                 out->sb[0] = 0x01; /* version */
5295                 out->sb[1] = 0x1A; /* length */
5296                 i = 2;
5297                 break;
5298
5299         default:
5300                 return -EINVAL;
5301         }
5302
5303         memcpy(&out->sb[i], &buf[4], 26);
5304         out->valid = true;
5305
5306         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5307                        sizeof(out->sb), false);
5308
5309         return 0;
5310 }
5311
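/*
 * Layout of the packed buffer consumed above, as produced by
 * hdmi_drm_infoframe_pack_only(): buf[0..2] hold the infoframe header
 * (type 0x87, version, length 26), buf[3] the checksum, and buf[4..29]
 * the 26 payload bytes that are copied into out->sb for DC.
 */
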
5312 static bool
5313 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5314                           const struct drm_connector_state *new_state)
5315 {
5316         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5317         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5318
5319         if (old_blob != new_blob) {
5320                 if (old_blob && new_blob &&
5321                     old_blob->length == new_blob->length)
5322                         return memcmp(old_blob->data, new_blob->data,
5323                                       old_blob->length);
5324
5325                 return true;
5326         }
5327
5328         return false;
5329 }
5330
5331 static int
5332 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5333                                  struct drm_atomic_state *state)
5334 {
5335         struct drm_connector_state *new_con_state =
5336                 drm_atomic_get_new_connector_state(state, conn);
5337         struct drm_connector_state *old_con_state =
5338                 drm_atomic_get_old_connector_state(state, conn);
5339         struct drm_crtc *crtc = new_con_state->crtc;
5340         struct drm_crtc_state *new_crtc_state;
5341         int ret;
5342
5343         if (!crtc)
5344                 return 0;
5345
5346         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5347                 struct dc_info_packet hdr_infopacket;
5348
5349                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5350                 if (ret)
5351                         return ret;
5352
5353                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5354                 if (IS_ERR(new_crtc_state))
5355                         return PTR_ERR(new_crtc_state);
5356
5357                 /*
5358                  * DC considers the stream backends changed if the
5359                  * static metadata changes. Forcing the modeset also
5360                  * gives a simple way for userspace to switch from
5361                  * 8bpc to 10bpc when setting the metadata to enter
5362                  * or exit HDR.
5363                  *
5364                  * Changing the static metadata after it's been
5365                  * set is permissible, however. So only force a
5366                  * modeset if we're entering or exiting HDR.
5367                  */
5368                 new_crtc_state->mode_changed =
5369                         !old_con_state->hdr_output_metadata ||
5370                         !new_con_state->hdr_output_metadata;
5371         }
5372
5373         return 0;
5374 }
5375
5376 static const struct drm_connector_helper_funcs
5377 amdgpu_dm_connector_helper_funcs = {
5378         /*
5379          * If hotplugging a second, bigger display in FB console mode, the
5380          * bigger-resolution modes will be filtered out by drm_mode_validate_size()
5381          * and will be missing after the user starts lightdm. So we need to renew
5382          * the mode list in the get_modes callback, not just return the mode count.
5383          */
5384         .get_modes = get_modes,
5385         .mode_valid = amdgpu_dm_connector_mode_valid,
5386         .atomic_check = amdgpu_dm_connector_atomic_check,
5387 };
5388
5389 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5390 {
5391 }
5392
5393 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5394 {
5395         struct drm_device *dev = new_crtc_state->crtc->dev;
5396         struct drm_plane *plane;
5397
5398         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5399                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5400                         return true;
5401         }
5402
5403         return false;
5404 }
5405
5406 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5407 {
5408         struct drm_atomic_state *state = new_crtc_state->state;
5409         struct drm_plane *plane;
5410         int num_active = 0;
5411
5412         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5413                 struct drm_plane_state *new_plane_state;
5414
5415                 /* Cursor planes are "fake". */
5416                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5417                         continue;
5418
5419                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5420
5421                 if (!new_plane_state) {
5422                         /*
5423                          * The plane is enabled on the CRTC and hasn't changed
5424                          * state. This means that it previously passed
5425                          * validation and is therefore enabled.
5426                          */
5427                         num_active += 1;
5428                         continue;
5429                 }
5430
5431                 /* We need a framebuffer to be considered enabled. */
5432                 num_active += (new_plane_state->fb != NULL);
5433         }
5434
5435         return num_active;
5436 }
5437
5438 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5439                                          struct drm_crtc_state *new_crtc_state)
5440 {
5441         struct dm_crtc_state *dm_new_crtc_state =
5442                 to_dm_crtc_state(new_crtc_state);
5443
5444         dm_new_crtc_state->active_planes = 0;
5445
5446         if (!dm_new_crtc_state->stream)
5447                 return;
5448
5449         dm_new_crtc_state->active_planes =
5450                 count_crtc_active_planes(new_crtc_state);
5451 }
5452
5453 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5454                                        struct drm_crtc_state *state)
5455 {
5456         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5457         struct dc *dc = adev->dm.dc;
5458         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5459         int ret = -EINVAL;
5460
5461         dm_update_crtc_active_planes(crtc, state);
5462
5463         if (unlikely(!dm_crtc_state->stream &&
5464                      modeset_required(state, NULL, dm_crtc_state->stream))) {
5465                 WARN_ON(1);
5466                 return ret;
5467         }
5468
5469         /* In some use cases, like reset, no stream is attached */
5470         if (!dm_crtc_state->stream)
5471                 return 0;
5472
5473         /*
5474          * We want at least one hardware plane enabled to use
5475          * the stream with a cursor enabled.
5476          */
5477         if (state->enable && state->active &&
5478             does_crtc_have_active_cursor(state) &&
5479             dm_crtc_state->active_planes == 0)
5480                 return -EINVAL;
5481
5482         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5483                 return 0;
5484
5485         return ret;
5486 }
5487
5488 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5489                                       const struct drm_display_mode *mode,
5490                                       struct drm_display_mode *adjusted_mode)
5491 {
5492         return true;
5493 }
5494
5495 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5496         .disable = dm_crtc_helper_disable,
5497         .atomic_check = dm_crtc_helper_atomic_check,
5498         .mode_fixup = dm_crtc_helper_mode_fixup,
5499         .get_scanout_position = amdgpu_crtc_get_scanout_position,
5500 };
5501
5502 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5503 {
5504
5505 }
5506
5507 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5508 {
5509         switch (display_color_depth) {
5510         case COLOR_DEPTH_666:
5511                 return 6;
5512         case COLOR_DEPTH_888:
5513                 return 8;
5514         case COLOR_DEPTH_101010:
5515                 return 10;
5516         case COLOR_DEPTH_121212:
5517                 return 12;
5518         case COLOR_DEPTH_141414:
5519                 return 14;
5520         case COLOR_DEPTH_161616:
5521                 return 16;
5522         default:
5523                 break;
5524         }
5525         return 0;
5526 }
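/*
 * Illustrative example (not driver logic): the MST encoder atomic check
 * below derives a stream bpp from the per-component depth by multiplying
 * the bpc by the three color components, e.g.
 *
 *     bpp = convert_dc_color_depth_into_bpc(COLOR_DEPTH_101010) * 3; // 30
 *
 * An unrecognized depth yields 0 bpc and therefore a 0 bpp stream.
 */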
5527
5528 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5529                                           struct drm_crtc_state *crtc_state,
5530                                           struct drm_connector_state *conn_state)
5531 {
5532         struct drm_atomic_state *state = crtc_state->state;
5533         struct drm_connector *connector = conn_state->connector;
5534         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5535         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5536         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5537         struct drm_dp_mst_topology_mgr *mst_mgr;
5538         struct drm_dp_mst_port *mst_port;
5539         enum dc_color_depth color_depth;
5540         int clock, bpp = 0;
5541         bool is_y420 = false;
5542
5543         if (!aconnector->port || !aconnector->dc_sink)
5544                 return 0;
5545
5546         mst_port = aconnector->port;
5547         mst_mgr = &aconnector->mst_port->mst_mgr;
5548
5549         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5550                 return 0;
5551
5552         if (!state->duplicated) {
5553                 int max_bpc = conn_state->max_requested_bpc;
5554                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5555                                 aconnector->force_yuv420_output;
5556                 color_depth = convert_color_depth_from_display_info(connector,
5557                                                                     is_y420,
5558                                                                     max_bpc);
5559                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5560                 clock = adjusted_mode->clock;
5561                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5562         }
5563         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5564                                                                            mst_mgr,
5565                                                                            mst_port,
5566                                                                            dm_new_connector_state->pbn,
5567                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
5568         if (dm_new_connector_state->vcpi_slots < 0) {
5569                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5570                 return dm_new_connector_state->vcpi_slots;
5571         }
5572         return 0;
5573 }
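/*
 * Rough sketch of the MST bandwidth math above, assuming the DP 1.2
 * formula implemented by drm_dp_calc_pbn_mode() (64/54 link-symbol
 * scaling plus a 0.6% margin):
 *
 *     PBN ~= clock_khz * (bpp / 8) * (64 / 54) * 1.006 / 1000
 *
 * e.g. a 1920x1080@60 stream (148500 kHz) at 24 bpp works out to
 * roughly 530 PBN, which drm_dp_atomic_find_vcpi_slots() then converts
 * into a slot count using the link's PBN-per-slot divider.
 */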
5574
5575 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5576         .disable = dm_encoder_helper_disable,
5577         .atomic_check = dm_encoder_helper_atomic_check
5578 };
5579
5580 #if defined(CONFIG_DRM_AMD_DC_DCN)
5581 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5582                                             struct dc_state *dc_state)
5583 {
5584         struct dc_stream_state *stream = NULL;
5585         struct drm_connector *connector;
5586         struct drm_connector_state *new_con_state, *old_con_state;
5587         struct amdgpu_dm_connector *aconnector;
5588         struct dm_connector_state *dm_conn_state;
5589         int i, j, clock, bpp;
5590         int vcpi, pbn_div, pbn = 0;
5591
5592         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5593
5594                 aconnector = to_amdgpu_dm_connector(connector);
5595
5596                 if (!aconnector->port)
5597                         continue;
5598
5599                 if (!new_con_state || !new_con_state->crtc)
5600                         continue;
5601
5602                 dm_conn_state = to_dm_connector_state(new_con_state);
5603
5604                 for (j = 0; j < dc_state->stream_count; j++) {
5605                         stream = dc_state->streams[j];
5606                         if (!stream)
5607                                 continue;
5608
5609                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5610                                 break;
5611
5612                         stream = NULL;
5613                 }
5614
5615                 if (!stream)
5616                         continue;
5617
5618                 if (stream->timing.flags.DSC != 1) {
5619                         drm_dp_mst_atomic_enable_dsc(state,
5620                                                      aconnector->port,
5621                                                      dm_conn_state->pbn,
5622                                                      0,
5623                                                      false);
5624                         continue;
5625                 }
5626
5627                 pbn_div = dm_mst_get_pbn_divider(stream->link);
5628                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5629                 clock = stream->timing.pix_clk_100hz / 10;
5630                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5631                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5632                                                     aconnector->port,
5633                                                     pbn, pbn_div,
5634                                                     true);
5635                 if (vcpi < 0)
5636                         return vcpi;
5637
5638                 dm_conn_state->pbn = pbn;
5639                 dm_conn_state->vcpi_slots = vcpi;
5640         }
5641         return 0;
5642 }
5643 #endif
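/*
 * Note on units in the DSC path above: dsc_cfg.bits_per_pixel is a
 * fixed-point value in 1/16ths of a bit per pixel, matching what
 * drm_dp_calc_pbn_mode() expects when its dsc argument is true; a
 * 12 bpp target, for example, is carried as 192.
 */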
5644
5645 static void dm_drm_plane_reset(struct drm_plane *plane)
5646 {
5647         struct dm_plane_state *amdgpu_state = NULL;
5648
5649         if (plane->state)
5650                 plane->funcs->atomic_destroy_state(plane, plane->state);
5651
5652         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5653         WARN_ON(amdgpu_state == NULL);
5654
5655         if (amdgpu_state)
5656                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5657 }
5658
5659 static struct drm_plane_state *
5660 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5661 {
5662         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5663
5664         old_dm_plane_state = to_dm_plane_state(plane->state);
5665         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5666         if (!dm_plane_state)
5667                 return NULL;
5668
5669         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5670
5671         if (old_dm_plane_state->dc_state) {
5672                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5673                 dc_plane_state_retain(dm_plane_state->dc_state);
5674         }
5675
5676         /* Framebuffer hasn't been updated yet, so retain old flags. */
5677         dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5678         dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5679
5680         return &dm_plane_state->base;
5681 }
5682
5683 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5684                                 struct drm_plane_state *state)
5685 {
5686         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5687
5688         if (dm_plane_state->dc_state)
5689                 dc_plane_state_release(dm_plane_state->dc_state);
5690
5691         drm_atomic_helper_plane_destroy_state(plane, state);
5692 }
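/*
 * The duplicate/destroy pair above keeps the dc_plane_state refcount
 * balanced: every duplicated DRM plane state that shares a dc_state
 * takes a dc_plane_state_retain(), and dm_drm_plane_destroy_state()
 * drops it again with dc_plane_state_release(). Schematically:
 *
 *     duplicate: new->dc_state = old->dc_state; retain(dc_state);
 *     destroy:   if (state->dc_state) release(dc_state);
 */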
5693
5694 static const struct drm_plane_funcs dm_plane_funcs = {
5695         .update_plane   = drm_atomic_helper_update_plane,
5696         .disable_plane  = drm_atomic_helper_disable_plane,
5697         .destroy        = drm_primary_helper_destroy,
5698         .reset = dm_drm_plane_reset,
5699         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5700         .atomic_destroy_state = dm_drm_plane_destroy_state,
5701 };
5702
5703 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5704                                       struct drm_plane_state *new_state)
5705 {
5706         struct amdgpu_framebuffer *afb;
5707         struct drm_gem_object *obj;
5708         struct amdgpu_device *adev;
5709         struct amdgpu_bo *rbo;
5710         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5711         struct list_head list;
5712         struct ttm_validate_buffer tv;
5713         struct ww_acquire_ctx ticket;
5714         uint32_t domain;
5715         int r;
5716
5717         if (!new_state->fb) {
5718                 DRM_DEBUG_DRIVER("No FB bound\n");
5719                 return 0;
5720         }
5721
5722         afb = to_amdgpu_framebuffer(new_state->fb);
5723         obj = new_state->fb->obj[0];
5724         rbo = gem_to_amdgpu_bo(obj);
5725         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5726         INIT_LIST_HEAD(&list);
5727
5728         tv.bo = &rbo->tbo;
5729         tv.num_shared = 1;
5730         list_add(&tv.head, &list);
5731
5732         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5733         if (r) {
5734                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5735                 return r;
5736         }
5737
5738         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5739                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5740         else
5741                 domain = AMDGPU_GEM_DOMAIN_VRAM;
5742
5743         r = amdgpu_bo_pin(rbo, domain);
5744         if (unlikely(r != 0)) {
5745                 if (r != -ERESTARTSYS)
5746                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5747                 ttm_eu_backoff_reservation(&ticket, &list);
5748                 return r;
5749         }
5750
5751         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5752         if (unlikely(r != 0)) {
5753                 amdgpu_bo_unpin(rbo);
5754                 ttm_eu_backoff_reservation(&ticket, &list);
5755                 DRM_ERROR("%p bind failed\n", rbo);
5756                 return r;
5757         }
5758
5759         ttm_eu_backoff_reservation(&ticket, &list);
5760
5761         afb->address = amdgpu_bo_gpu_offset(rbo);
5762
5763         amdgpu_bo_ref(rbo);
5764
5765         /*
5766          * We don't do surface updates on planes that have been newly created,
5767          * but we also don't have the afb->address during atomic check.
5768          *
5769          * Fill in buffer attributes depending on the address here, but only on
5770          * newly created planes since they're not being used by DC yet and this
5771          * won't modify global state.
5772          */
5773         dm_plane_state_old = to_dm_plane_state(plane->state);
5774         dm_plane_state_new = to_dm_plane_state(new_state);
5775
5776         if (dm_plane_state_new->dc_state &&
5777             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5778                 struct dc_plane_state *plane_state =
5779                         dm_plane_state_new->dc_state;
5780                 bool force_disable_dcc = !plane_state->dcc.enable;
5781
5782                 fill_plane_buffer_attributes(
5783                         adev, afb, plane_state->format, plane_state->rotation,
5784                         dm_plane_state_new->tiling_flags,
5785                         &plane_state->tiling_info, &plane_state->plane_size,
5786                         &plane_state->dcc, &plane_state->address,
5787                         dm_plane_state_new->tmz_surface, force_disable_dcc);
5788         }
5789
5790         return 0;
5791 }
5792
5793 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5794                                        struct drm_plane_state *old_state)
5795 {
5796         struct amdgpu_bo *rbo;
5797         int r;
5798
5799         if (!old_state->fb)
5800                 return;
5801
5802         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5803         r = amdgpu_bo_reserve(rbo, false);
5804         if (unlikely(r)) {
5805                 DRM_ERROR("failed to reserve rbo before unpin\n");
5806                 return;
5807         }
5808
5809         amdgpu_bo_unpin(rbo);
5810         amdgpu_bo_unreserve(rbo);
5811         amdgpu_bo_unref(&rbo);
5812 }
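/*
 * cleanup_fb undoes prepare_fb: the reserve/unpin/unreserve sequence
 * drops the pin taken by amdgpu_bo_pin() in dm_plane_helper_prepare_fb(),
 * and amdgpu_bo_unref() balances the amdgpu_bo_ref() taken there, so a
 * framebuffer's BO stays pinned exactly as long as a plane scans it out.
 */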
5813
5814 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5815                                        struct drm_crtc_state *new_crtc_state)
5816 {
5817         int max_downscale = 0;
5818         int max_upscale = INT_MAX;
5819
5820         /* TODO: These should be checked against DC plane caps */
5821         return drm_atomic_helper_check_plane_state(
5822                 state, new_crtc_state, max_downscale, max_upscale, true, true);
5823 }
5824
5825 static int dm_plane_atomic_check(struct drm_plane *plane,
5826                                  struct drm_plane_state *state)
5827 {
5828         struct amdgpu_device *adev = drm_to_adev(plane->dev);
5829         struct dc *dc = adev->dm.dc;
5830         struct dm_plane_state *dm_plane_state;
5831         struct dc_scaling_info scaling_info;
5832         struct drm_crtc_state *new_crtc_state;
5833         int ret;
5834
5835         dm_plane_state = to_dm_plane_state(state);
5836
5837         if (!dm_plane_state->dc_state)
5838                 return 0;
5839
5840         new_crtc_state =
5841                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5842         if (!new_crtc_state)
5843                 return -EINVAL;
5844
5845         ret = dm_plane_helper_check_state(state, new_crtc_state);
5846         if (ret)
5847                 return ret;
5848
5849         ret = fill_dc_scaling_info(state, &scaling_info);
5850         if (ret)
5851                 return ret;
5852
5853         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5854                 return 0;
5855
5856         return -EINVAL;
5857 }
5858
5859 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5860                                        struct drm_plane_state *new_plane_state)
5861 {
5862         /* Only support async updates on cursor planes. */
5863         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5864                 return -EINVAL;
5865
5866         return 0;
5867 }
5868
5869 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5870                                          struct drm_plane_state *new_state)
5871 {
5872         struct drm_plane_state *old_state =
5873                 drm_atomic_get_old_plane_state(new_state->state, plane);
5874
5875         swap(plane->state->fb, new_state->fb);
5876
5877         plane->state->src_x = new_state->src_x;
5878         plane->state->src_y = new_state->src_y;
5879         plane->state->src_w = new_state->src_w;
5880         plane->state->src_h = new_state->src_h;
5881         plane->state->crtc_x = new_state->crtc_x;
5882         plane->state->crtc_y = new_state->crtc_y;
5883         plane->state->crtc_w = new_state->crtc_w;
5884         plane->state->crtc_h = new_state->crtc_h;
5885
5886         handle_cursor_update(plane, old_state);
5887 }
5888
5889 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5890         .prepare_fb = dm_plane_helper_prepare_fb,
5891         .cleanup_fb = dm_plane_helper_cleanup_fb,
5892         .atomic_check = dm_plane_atomic_check,
5893         .atomic_async_check = dm_plane_atomic_async_check,
5894         .atomic_async_update = dm_plane_atomic_async_update
5895 };
5896
5897 /*
5898  * TODO: these are currently initialized to rgb formats only.
5899  * For future use cases we should either initialize them dynamically based on
5900  * plane capabilities, or initialize this array to all formats, so the internal
5901  * drm check will succeed, and let DC implement the proper check.
5902  */
5903 static const uint32_t rgb_formats[] = {
5904         DRM_FORMAT_XRGB8888,
5905         DRM_FORMAT_ARGB8888,
5906         DRM_FORMAT_RGBA8888,
5907         DRM_FORMAT_XRGB2101010,
5908         DRM_FORMAT_XBGR2101010,
5909         DRM_FORMAT_ARGB2101010,
5910         DRM_FORMAT_ABGR2101010,
5911         DRM_FORMAT_XBGR8888,
5912         DRM_FORMAT_ABGR8888,
5913         DRM_FORMAT_RGB565,
5914 };
5915
5916 static const uint32_t overlay_formats[] = {
5917         DRM_FORMAT_XRGB8888,
5918         DRM_FORMAT_ARGB8888,
5919         DRM_FORMAT_RGBA8888,
5920         DRM_FORMAT_XBGR8888,
5921         DRM_FORMAT_ABGR8888,
5922         DRM_FORMAT_RGB565
5923 };
5924
5925 static const u32 cursor_formats[] = {
5926         DRM_FORMAT_ARGB8888
5927 };
5928
5929 static int get_plane_formats(const struct drm_plane *plane,
5930                              const struct dc_plane_cap *plane_cap,
5931                              uint32_t *formats, int max_formats)
5932 {
5933         int i, num_formats = 0;
5934
5935         /*
5936          * TODO: Query support for each group of formats directly from
5937          * DC plane caps. This will require adding more formats to the
5938          * caps list.
5939          */
5940
5941         switch (plane->type) {
5942         case DRM_PLANE_TYPE_PRIMARY:
5943                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5944                         if (num_formats >= max_formats)
5945                                 break;
5946
5947                         formats[num_formats++] = rgb_formats[i];
5948                 }
5949
5950                 if (plane_cap && plane_cap->pixel_format_support.nv12)
5951                         formats[num_formats++] = DRM_FORMAT_NV12;
5952                 if (plane_cap && plane_cap->pixel_format_support.p010)
5953                         formats[num_formats++] = DRM_FORMAT_P010;
5954                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5955                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5956                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5957                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5958                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5959                 }
5960                 break;
5961
5962         case DRM_PLANE_TYPE_OVERLAY:
5963                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5964                         if (num_formats >= max_formats)
5965                                 break;
5966
5967                         formats[num_formats++] = overlay_formats[i];
5968                 }
5969                 break;
5970
5971         case DRM_PLANE_TYPE_CURSOR:
5972                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5973                         if (num_formats >= max_formats)
5974                                 break;
5975
5976                         formats[num_formats++] = cursor_formats[i];
5977                 }
5978                 break;
5979         }
5980
5981         return num_formats;
5982 }
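/*
 * Sizing note for the caller below: a primary plane can currently report
 * at most the 10 rgb_formats entries plus NV12, P010 and the four FP16
 * formats (16 formats total), comfortably within the 32-entry buffer
 * that amdgpu_dm_plane_init() passes in as max_formats.
 */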
5983
5984 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5985                                 struct drm_plane *plane,
5986                                 unsigned long possible_crtcs,
5987                                 const struct dc_plane_cap *plane_cap)
5988 {
5989         uint32_t formats[32];
5990         int num_formats;
5991         int res = -EPERM;
5992         unsigned int supported_rotations;
5993
5994         num_formats = get_plane_formats(plane, plane_cap, formats,
5995                                         ARRAY_SIZE(formats));
5996
5997         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
5998                                        &dm_plane_funcs, formats, num_formats,
5999                                        NULL, plane->type, NULL);
6000         if (res)
6001                 return res;
6002
6003         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6004             plane_cap && plane_cap->per_pixel_alpha) {
6005                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6006                                           BIT(DRM_MODE_BLEND_PREMULTI);
6007
6008                 drm_plane_create_alpha_property(plane);
6009                 drm_plane_create_blend_mode_property(plane, blend_caps);
6010         }
6011
6012         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6013             plane_cap &&
6014             (plane_cap->pixel_format_support.nv12 ||
6015              plane_cap->pixel_format_support.p010)) {
6016                 /* This only affects YUV formats. */
6017                 drm_plane_create_color_properties(
6018                         plane,
6019                         BIT(DRM_COLOR_YCBCR_BT601) |
6020                         BIT(DRM_COLOR_YCBCR_BT709) |
6021                         BIT(DRM_COLOR_YCBCR_BT2020),
6022                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6023                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6024                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6025         }
6026
6027         supported_rotations =
6028                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6029                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6030
6031         if (dm->adev->asic_type >= CHIP_BONAIRE)
6032                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6033                                                    supported_rotations);
6034
6035         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6036
6037         /* Create (reset) the plane state */
6038         if (plane->funcs->reset)
6039                 plane->funcs->reset(plane);
6040
6041         return 0;
6042 }
6043
6044 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6045                                struct drm_plane *plane,
6046                                uint32_t crtc_index)
6047 {
6048         struct amdgpu_crtc *acrtc = NULL;
6049         struct drm_plane *cursor_plane;
6050
6051         int res = -ENOMEM;
6052
6053         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6054         if (!cursor_plane)
6055                 goto fail;
6056
6057         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6058         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
6059
6060         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6061         if (!acrtc)
6062                 goto fail;
6063
6064         res = drm_crtc_init_with_planes(
6065                         dm->ddev,
6066                         &acrtc->base,
6067                         plane,
6068                         cursor_plane,
6069                         &amdgpu_dm_crtc_funcs, NULL);
6070
6071         if (res)
6072                 goto fail;
6073
6074         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6075
6076         /* Create (reset) the crtc state */
6077         if (acrtc->base.funcs->reset)
6078                 acrtc->base.funcs->reset(&acrtc->base);
6079
6080         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6081         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6082
6083         acrtc->crtc_id = crtc_index;
6084         acrtc->base.enabled = false;
6085         acrtc->otg_inst = -1;
6086
6087         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6088         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6089                                    true, MAX_COLOR_LUT_ENTRIES);
6090         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6091
6092         return 0;
6093
6094 fail:
6095         kfree(acrtc);
6096         kfree(cursor_plane);
6097         return res;
6098 }
6099
6101 static int to_drm_connector_type(enum signal_type st)
6102 {
6103         switch (st) {
6104         case SIGNAL_TYPE_HDMI_TYPE_A:
6105                 return DRM_MODE_CONNECTOR_HDMIA;
6106         case SIGNAL_TYPE_EDP:
6107                 return DRM_MODE_CONNECTOR_eDP;
6108         case SIGNAL_TYPE_LVDS:
6109                 return DRM_MODE_CONNECTOR_LVDS;
6110         case SIGNAL_TYPE_RGB:
6111                 return DRM_MODE_CONNECTOR_VGA;
6112         case SIGNAL_TYPE_DISPLAY_PORT:
6113         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6114                 return DRM_MODE_CONNECTOR_DisplayPort;
6115         case SIGNAL_TYPE_DVI_DUAL_LINK:
6116         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6117                 return DRM_MODE_CONNECTOR_DVID;
6118         case SIGNAL_TYPE_VIRTUAL:
6119                 return DRM_MODE_CONNECTOR_VIRTUAL;
6120
6121         default:
6122                 return DRM_MODE_CONNECTOR_Unknown;
6123         }
6124 }
6125
6126 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6127 {
6128         struct drm_encoder *encoder;
6129
6130         /* There is only one encoder per connector */
6131         drm_connector_for_each_possible_encoder(connector, encoder)
6132                 return encoder;
6133
6134         return NULL;
6135 }
6136
6137 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6138 {
6139         struct drm_encoder *encoder;
6140         struct amdgpu_encoder *amdgpu_encoder;
6141
6142         encoder = amdgpu_dm_connector_to_encoder(connector);
6143
6144         if (encoder == NULL)
6145                 return;
6146
6147         amdgpu_encoder = to_amdgpu_encoder(encoder);
6148
6149         amdgpu_encoder->native_mode.clock = 0;
6150
6151         if (!list_empty(&connector->probed_modes)) {
6152                 struct drm_display_mode *preferred_mode = NULL;
6153
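                /* The probed list was sorted by amdgpu_dm_connector_ddc_get_modes()
                 * so preferred, highest-resolution modes come first; only the
                 * first entry is examined, and the unconditional break below
                 * is intentional.
                 */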
6154                 list_for_each_entry(preferred_mode,
6155                                     &connector->probed_modes,
6156                                     head) {
6157                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6158                                 amdgpu_encoder->native_mode = *preferred_mode;
6159
6160                         break;
6161                 }
6162
6163         }
6164 }
6165
6166 static struct drm_display_mode *
6167 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6168                              char *name,
6169                              int hdisplay, int vdisplay)
6170 {
6171         struct drm_device *dev = encoder->dev;
6172         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6173         struct drm_display_mode *mode = NULL;
6174         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6175
6176         mode = drm_mode_duplicate(dev, native_mode);
6177
6178         if (mode == NULL)
6179                 return NULL;
6180
6181         mode->hdisplay = hdisplay;
6182         mode->vdisplay = vdisplay;
6183         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6184         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6185
6186         return mode;
6187
6188 }
6189
6190 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6191                                                  struct drm_connector *connector)
6192 {
6193         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6194         struct drm_display_mode *mode = NULL;
6195         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6196         struct amdgpu_dm_connector *amdgpu_dm_connector =
6197                                 to_amdgpu_dm_connector(connector);
6198         int i;
6199         int n;
6200         struct mode_size {
6201                 char name[DRM_DISPLAY_MODE_LEN];
6202                 int w;
6203                 int h;
6204         } common_modes[] = {
6205                 {  "640x480",  640,  480},
6206                 {  "800x600",  800,  600},
6207                 { "1024x768", 1024,  768},
6208                 { "1280x720", 1280,  720},
6209                 { "1280x800", 1280,  800},
6210                 {"1280x1024", 1280, 1024},
6211                 { "1440x900", 1440,  900},
6212                 {"1680x1050", 1680, 1050},
6213                 {"1600x1200", 1600, 1200},
6214                 {"1920x1080", 1920, 1080},
6215                 {"1920x1200", 1920, 1200}
6216         };
6217
6218         n = ARRAY_SIZE(common_modes);
6219
6220         for (i = 0; i < n; i++) {
6221                 struct drm_display_mode *curmode = NULL;
6222                 bool mode_existed = false;
6223
6224                 if (common_modes[i].w > native_mode->hdisplay ||
6225                     common_modes[i].h > native_mode->vdisplay ||
6226                    (common_modes[i].w == native_mode->hdisplay &&
6227                     common_modes[i].h == native_mode->vdisplay))
6228                         continue;
6229
6230                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6231                         if (common_modes[i].w == curmode->hdisplay &&
6232                             common_modes[i].h == curmode->vdisplay) {
6233                                 mode_existed = true;
6234                                 break;
6235                         }
6236                 }
6237
6238                 if (mode_existed)
6239                         continue;
6240
6241                 mode = amdgpu_dm_create_common_mode(encoder,
6242                                 common_modes[i].name, common_modes[i].w,
6243                                 common_modes[i].h);
                     if (!mode)
                             continue;
6244                 drm_mode_probed_add(connector, mode);
6245                 amdgpu_dm_connector->num_modes++;
6246         }
6247 }
6248
6249 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6250                                               struct edid *edid)
6251 {
6252         struct amdgpu_dm_connector *amdgpu_dm_connector =
6253                         to_amdgpu_dm_connector(connector);
6254
6255         if (edid) {
6256                 /* empty probed_modes */
6257                 INIT_LIST_HEAD(&connector->probed_modes);
6258                 amdgpu_dm_connector->num_modes =
6259                                 drm_add_edid_modes(connector, edid);
6260
6261                 /* Sort the probed modes before calling
6262                  * amdgpu_dm_get_native_mode(), since an EDID can have
6263                  * more than one preferred mode. A mode that appears
6264                  * later in the probed list may be a preferred mode of
6265                  * higher resolution: for example, a 3840x2160 preferred
6266                  * timing in the base EDID and a 4096x2160 preferred
6267                  * resolution in a later DisplayID extension block.
6268                  */
6269                 drm_mode_sort(&connector->probed_modes);
6270                 amdgpu_dm_get_native_mode(connector);
6271         } else {
6272                 amdgpu_dm_connector->num_modes = 0;
6273         }
6274 }
6275
6276 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6277 {
6278         struct amdgpu_dm_connector *amdgpu_dm_connector =
6279                         to_amdgpu_dm_connector(connector);
6280         struct drm_encoder *encoder;
6281         struct edid *edid = amdgpu_dm_connector->edid;
6282
6283         encoder = amdgpu_dm_connector_to_encoder(connector);
6284
6285         if (!edid || !drm_edid_is_valid(edid)) {
6286                 amdgpu_dm_connector->num_modes =
6287                                 drm_add_modes_noedid(connector, 640, 480);
6288         } else {
6289                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6290                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6291         }
6292         amdgpu_dm_fbc_init(connector);
6293
6294         return amdgpu_dm_connector->num_modes;
6295 }
6296
6297 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6298                                      struct amdgpu_dm_connector *aconnector,
6299                                      int connector_type,
6300                                      struct dc_link *link,
6301                                      int link_index)
6302 {
6303         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6304
6305         /*
6306          * Some of the properties below require access to state, like bpc.
6307          * Allocate some default initial connector state with our reset helper.
6308          */
6309         if (aconnector->base.funcs->reset)
6310                 aconnector->base.funcs->reset(&aconnector->base);
6311
6312         aconnector->connector_id = link_index;
6313         aconnector->dc_link = link;
6314         aconnector->base.interlace_allowed = false;
6315         aconnector->base.doublescan_allowed = false;
6316         aconnector->base.stereo_allowed = false;
6317         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6318         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6319         aconnector->audio_inst = -1;
6320         mutex_init(&aconnector->hpd_lock);
6321
6322         /*
6323          * Configure HPD hot-plug support: connector->polled defaults to 0,
6324          * which means HPD hot plug is not supported.
6325          */
6326         switch (connector_type) {
6327         case DRM_MODE_CONNECTOR_HDMIA:
6328                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6329                 aconnector->base.ycbcr_420_allowed =
6330                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6331                 break;
6332         case DRM_MODE_CONNECTOR_DisplayPort:
6333                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6334                 aconnector->base.ycbcr_420_allowed =
6335                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
6336                 break;
6337         case DRM_MODE_CONNECTOR_DVID:
6338                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6339                 break;
6340         default:
6341                 break;
6342         }
6343
6344         drm_object_attach_property(&aconnector->base.base,
6345                                 dm->ddev->mode_config.scaling_mode_property,
6346                                 DRM_MODE_SCALE_NONE);
6347
6348         drm_object_attach_property(&aconnector->base.base,
6349                                 adev->mode_info.underscan_property,
6350                                 UNDERSCAN_OFF);
6351         drm_object_attach_property(&aconnector->base.base,
6352                                 adev->mode_info.underscan_hborder_property,
6353                                 0);
6354         drm_object_attach_property(&aconnector->base.base,
6355                                 adev->mode_info.underscan_vborder_property,
6356                                 0);
6357
6358         if (!aconnector->mst_port)
6359                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6360
6361         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6362         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6363         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6364
6365         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6366             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6367                 drm_object_attach_property(&aconnector->base.base,
6368                                 adev->mode_info.abm_level_property, 0);
6369         }
6370
6371         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6372             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6373             connector_type == DRM_MODE_CONNECTOR_eDP) {
6374                 drm_object_attach_property(
6375                         &aconnector->base.base,
6376                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
6377
6378                 if (!aconnector->mst_port)
6379                         drm_connector_attach_vrr_capable_property(&aconnector->base);
6380
6381 #ifdef CONFIG_DRM_AMD_DC_HDCP
6382                 if (adev->dm.hdcp_workqueue)
6383                         drm_connector_attach_content_protection_property(&aconnector->base, true);
6384 #endif
6385         }
6386 }
6387
6388 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6389                               struct i2c_msg *msgs, int num)
6390 {
6391         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6392         struct ddc_service *ddc_service = i2c->ddc_service;
6393         struct i2c_command cmd;
6394         int i;
6395         int result = -EIO;
6396
6397         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6398
6399         if (!cmd.payloads)
6400                 return result;
6401
6402         cmd.number_of_payloads = num;
6403         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6404         cmd.speed = 100;
6405
6406         for (i = 0; i < num; i++) {
6407                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6408                 cmd.payloads[i].address = msgs[i].addr;
6409                 cmd.payloads[i].length = msgs[i].len;
6410                 cmd.payloads[i].data = msgs[i].buf;
6411         }
6412
6413         if (dc_submit_i2c(
6414                         ddc_service->ctx->dc,
6415                         ddc_service->ddc_pin->hw_info.ddc_channel,
6416                         &cmd))
6417                 result = num;
6418
6419         kfree(cmd.payloads);
6420         return result;
6421 }
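/*
 * Sketch of the msg-to-payload translation above: a typical EDID read,
 * e.g. (offsets and lengths purely illustrative)
 *
 *     struct i2c_msg msgs[] = {
 *             { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &off },
 *             { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf  },
 *     };
 *
 * becomes two i2c_payloads, the first with write = true and the second
 * with write = false, submitted to DC as a single dc_submit_i2c() command.
 */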
6422
6423 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6424 {
6425         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6426 }
6427
6428 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6429         .master_xfer = amdgpu_dm_i2c_xfer,
6430         .functionality = amdgpu_dm_i2c_func,
6431 };
6432
6433 static struct amdgpu_i2c_adapter *
6434 create_i2c(struct ddc_service *ddc_service,
6435            int link_index,
6436            int *res)
6437 {
6438         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6439         struct amdgpu_i2c_adapter *i2c;
6440
6441         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6442         if (!i2c)
6443                 return NULL;
6444         i2c->base.owner = THIS_MODULE;
6445         i2c->base.class = I2C_CLASS_DDC;
6446         i2c->base.dev.parent = &adev->pdev->dev;
6447         i2c->base.algo = &amdgpu_dm_i2c_algo;
6448         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6449         i2c_set_adapdata(&i2c->base, i2c);
6450         i2c->ddc_service = ddc_service;
6451         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6452
6453         return i2c;
6454 }
6455
6457 /*
6458  * Note: this function assumes that dc_link_detect() was called for the
6459  * dc_link which will be represented by this aconnector.
6460  */
6461 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6462                                     struct amdgpu_dm_connector *aconnector,
6463                                     uint32_t link_index,
6464                                     struct amdgpu_encoder *aencoder)
6465 {
6466         int res = 0;
6467         int connector_type;
6468         struct dc *dc = dm->dc;
6469         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6470         struct amdgpu_i2c_adapter *i2c;
6471
6472         link->priv = aconnector;
6473
6474         DRM_DEBUG_DRIVER("%s()\n", __func__);
6475
6476         i2c = create_i2c(link->ddc, link->link_index, &res);
6477         if (!i2c) {
6478                 DRM_ERROR("Failed to create i2c adapter data\n");
6479                 return -ENOMEM;
6480         }
6481
6482         aconnector->i2c = i2c;
6483         res = i2c_add_adapter(&i2c->base);
6484
6485         if (res) {
6486                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6487                 goto out_free;
6488         }
6489
6490         connector_type = to_drm_connector_type(link->connector_signal);
6491
6492         res = drm_connector_init_with_ddc(
6493                         dm->ddev,
6494                         &aconnector->base,
6495                         &amdgpu_dm_connector_funcs,
6496                         connector_type,
6497                         &i2c->base);
6498
6499         if (res) {
6500                 DRM_ERROR("connector_init failed\n");
6501                 aconnector->connector_id = -1;
6502                 goto out_free;
6503         }
6504
6505         drm_connector_helper_add(
6506                         &aconnector->base,
6507                         &amdgpu_dm_connector_helper_funcs);
6508
6509         amdgpu_dm_connector_init_helper(
6510                 dm,
6511                 aconnector,
6512                 connector_type,
6513                 link,
6514                 link_index);
6515
6516         drm_connector_attach_encoder(
6517                 &aconnector->base, &aencoder->base);
6518
6519         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6520             connector_type == DRM_MODE_CONNECTOR_eDP)
6521                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6522
6523 out_free:
6524         if (res) {
6525                 kfree(i2c);
6526                 aconnector->i2c = NULL;
6527         }
6528         return res;
6529 }
6530
6531 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6532 {
6533         switch (adev->mode_info.num_crtc) {
6534         case 1:
6535                 return 0x1;
6536         case 2:
6537                 return 0x3;
6538         case 3:
6539                 return 0x7;
6540         case 4:
6541                 return 0xf;
6542         case 5:
6543                 return 0x1f;
6544         case 6:
6545         default:
6546                 return 0x3f;
6547         }
6548 }
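/*
 * The switch above is just a clamped form of the usual mask identity:
 * for n CRTCs the encoder's possible_crtcs mask is (1 << n) - 1, capped
 * at six CRTCs. num_crtc == 4, for instance, yields 0xf (CRTCs 0-3).
 */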
6549
6550 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6551                                   struct amdgpu_encoder *aencoder,
6552                                   uint32_t link_index)
6553 {
6554         struct amdgpu_device *adev = drm_to_adev(dev);
6555
6556         int res = drm_encoder_init(dev,
6557                                    &aencoder->base,
6558                                    &amdgpu_dm_encoder_funcs,
6559                                    DRM_MODE_ENCODER_TMDS,
6560                                    NULL);
6561
6562         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6563
6564         if (!res)
6565                 aencoder->encoder_id = link_index;
6566         else
6567                 aencoder->encoder_id = -1;
6568
6569         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6570
6571         return res;
6572 }
6573
6574 static void manage_dm_interrupts(struct amdgpu_device *adev,
6575                                  struct amdgpu_crtc *acrtc,
6576                                  bool enable)
6577 {
6578         /*
6579          * We have no guarantee that the frontend index maps to the same
6580          * backend index - some even map to more than one.
6581          *
6582          * TODO: Use a different interrupt or check DC itself for the mapping.
6583          */
6584         int irq_type =
6585                 amdgpu_display_crtc_idx_to_irq_type(
6586                         adev,
6587                         acrtc->crtc_id);
6588
6589         if (enable) {
6590                 drm_crtc_vblank_on(&acrtc->base);
6591                 amdgpu_irq_get(
6592                         adev,
6593                         &adev->pageflip_irq,
6594                         irq_type);
6595         } else {
6596
6597                 amdgpu_irq_put(
6598                         adev,
6599                         &adev->pageflip_irq,
6600                         irq_type);
6601                 drm_crtc_vblank_off(&acrtc->base);
6602         }
6603 }
6604
6605 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6606                                       struct amdgpu_crtc *acrtc)
6607 {
6608         int irq_type =
6609                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6610
6611         /*
6612          * This reads the current IRQ state and forcibly reapplies the
6613          * setting to the hardware.
6614          */
6615         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6616 }
6617
6618 static bool
6619 is_scaling_state_different(const struct dm_connector_state *dm_state,
6620                            const struct dm_connector_state *old_dm_state)
6621 {
6622         if (dm_state->scaling != old_dm_state->scaling)
6623                 return true;
6624         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6625                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6626                         return true;
6627         } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6628                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6629                         return true;
6630         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6631                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6632                 return true;
6633         return false;
6634 }
6635
6636 #ifdef CONFIG_DRM_AMD_DC_HDCP
6637 static bool is_content_protection_different(struct drm_connector_state *state,
6638                                             const struct drm_connector_state *old_state,
6639                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6640 {
6641         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6642
6643         if (old_state->hdcp_content_type != state->hdcp_content_type &&
6644             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6645                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6646                 return true;
6647         }
6648
6649         /* CP is being re-enabled, ignore this */
6650         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6651             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6652                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6653                 return false;
6654         }
6655
6656         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6657         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6658             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6659                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6660
6661         /* Check that something is connected/enabled (hot-plug, headless s3,
6662          * dpms); otherwise we would start hdcp with nothing connected/enabled.
6663          */
6664         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6665             aconnector->dc_sink != NULL)
6666                 return true;
6667
6668         if (old_state->content_protection == state->content_protection)
6669                 return false;
6670
6671         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6672                 return true;
6673
6674         return false;
6675 }
6676
6677 #endif
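/*
 * Rough summary of the transitions handled above (property values are
 * the DRM "content protection" enum):
 *
 *     hdcp_content_type changed   : force DESIRED, update hdcp
 *     ENABLED -> DESIRED          : CP being re-enabled, keep ENABLED, no update
 *     UNDESIRED -> ENABLED        : S3 resume, downgrade to DESIRED, fall through
 *     DESIRED + dpms on + dc_sink : enable hdcp
 *     unchanged                   : no update
 *     -> UNDESIRED                : disable hdcp
 */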
6678 static void remove_stream(struct amdgpu_device *adev,
6679                           struct amdgpu_crtc *acrtc,
6680                           struct dc_stream_state *stream)
6681 {
6682         /* this is the update mode case */
6683
6684         acrtc->otg_inst = -1;
6685         acrtc->enabled = false;
6686 }
6687
6688 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6689                                struct dc_cursor_position *position)
6690 {
6691         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6692         int x, y;
6693         int xorigin = 0, yorigin = 0;
6694
6695         position->enable = false;
6696         position->x = 0;
6697         position->y = 0;
6698
6699         if (!crtc || !plane->state->fb)
6700                 return 0;
6701
6702         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6703             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6704                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6705                           __func__,
6706                           plane->state->crtc_w,
6707                           plane->state->crtc_h);
6708                 return -EINVAL;
6709         }
6710
6711         x = plane->state->crtc_x;
6712         y = plane->state->crtc_y;
6713
6714         if (x <= -amdgpu_crtc->max_cursor_width ||
6715             y <= -amdgpu_crtc->max_cursor_height)
6716                 return 0;
6717
6718         if (x < 0) {
6719                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6720                 x = 0;
6721         }
6722         if (y < 0) {
6723                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6724                 y = 0;
6725         }
6726         position->enable = true;
6727         position->translate_by_source = true;
6728         position->x = x;
6729         position->y = y;
6730         position->x_hotspot = xorigin;
6731         position->y_hotspot = yorigin;
6732
6733         return 0;
6734 }
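/*
 * Worked example of the hotspot handling above: with a 256x256 cursor
 * size limit, a cursor at crtc_x = -10 is programmed at x = 0 with
 * x_hotspot = 10 so the visible part of the image still lines up, while
 * a cursor at or beyond x <= -max_cursor_width stays disabled entirely.
 */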
6735
6736 static void handle_cursor_update(struct drm_plane *plane,
6737                                  struct drm_plane_state *old_plane_state)
6738 {
6739         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6740         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6741         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6742         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6743         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6744         uint64_t address = afb ? afb->address : 0;
6745         struct dc_cursor_position position;
6746         struct dc_cursor_attributes attributes;
6747         int ret;
6748
6749         if (!plane->state->fb && !old_plane_state->fb)
6750                 return;
6751
6752         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
6753                          __func__,
6754                          amdgpu_crtc->crtc_id,
6755                          plane->state->crtc_w,
6756                          plane->state->crtc_h);
6757
6758         ret = get_cursor_position(plane, crtc, &position);
6759         if (ret)
6760                 return;
6761
6762         if (!position.enable) {
6763                 /* turn off cursor */
6764                 if (crtc_state && crtc_state->stream) {
6765                         mutex_lock(&adev->dm.dc_lock);
6766                         dc_stream_set_cursor_position(crtc_state->stream,
6767                                                       &position);
6768                         mutex_unlock(&adev->dm.dc_lock);
6769                 }
6770                 return;
6771         }
6772
6773         amdgpu_crtc->cursor_width = plane->state->crtc_w;
6774         amdgpu_crtc->cursor_height = plane->state->crtc_h;
6775
6776         memset(&attributes, 0, sizeof(attributes));
6777         attributes.address.high_part = upper_32_bits(address);
6778         attributes.address.low_part  = lower_32_bits(address);
6779         attributes.width             = plane->state->crtc_w;
6780         attributes.height            = plane->state->crtc_h;
6781         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6782         attributes.rotation_angle    = 0;
6783         attributes.attribute_flags.value = 0;
6784
6785         attributes.pitch = attributes.width;
6786
6787         if (crtc_state->stream) {
6788                 mutex_lock(&adev->dm.dc_lock);
6789                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6790                                                          &attributes))
6791                         DRM_ERROR("DC failed to set cursor attributes\n");
6792
6793                 if (!dc_stream_set_cursor_position(crtc_state->stream,
6794                                                    &position))
6795                         DRM_ERROR("DC failed to set cursor position\n");
6796                 mutex_unlock(&adev->dm.dc_lock);
6797         }
6798 }
6799
6800 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6801 {
6802
6803         assert_spin_locked(&acrtc->base.dev->event_lock);
6804         WARN_ON(acrtc->event);
6805
6806         acrtc->event = acrtc->base.state->event;
6807
6808         /* Set the flip status */
6809         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6810
6811         /* Mark this event as consumed */
6812         acrtc->base.state->event = NULL;
6813
6814         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6815                                                  acrtc->crtc_id);
6816 }
6817
6818 static void update_freesync_state_on_stream(
6819         struct amdgpu_display_manager *dm,
6820         struct dm_crtc_state *new_crtc_state,
6821         struct dc_stream_state *new_stream,
6822         struct dc_plane_state *surface,
6823         u32 flip_timestamp_in_us)
6824 {
6825         struct mod_vrr_params vrr_params;
6826         struct dc_info_packet vrr_infopacket = {0};
6827         struct amdgpu_device *adev = dm->adev;
6828         unsigned long flags;
6829
6830         if (!new_stream)
6831                 return;
6832
6833         /*
6834          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6835          * For now it's sufficient to just guard against these conditions.
6836          */
6837
6838         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6839                 return;
6840
6841         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6842         vrr_params = new_crtc_state->vrr_params;
6843
6844         if (surface) {
6845                 mod_freesync_handle_preflip(
6846                         dm->freesync_module,
6847                         surface,
6848                         new_stream,
6849                         flip_timestamp_in_us,
6850                         &vrr_params);
6851
6852                 if (adev->family < AMDGPU_FAMILY_AI &&
6853                     amdgpu_dm_vrr_active(new_crtc_state)) {
6854                         mod_freesync_handle_v_update(dm->freesync_module,
6855                                                      new_stream, &vrr_params);
6856
6857                         /* Need to call this before the frame ends. */
6858                         dc_stream_adjust_vmin_vmax(dm->dc,
6859                                                    new_crtc_state->stream,
6860                                                    &vrr_params.adjust);
6861                 }
6862         }
6863
6864         mod_freesync_build_vrr_infopacket(
6865                 dm->freesync_module,
6866                 new_stream,
6867                 &vrr_params,
6868                 PACKET_TYPE_VRR,
6869                 TRANSFER_FUNC_UNKNOWN,
6870                 &vrr_infopacket);
6871
6872         new_crtc_state->freesync_timing_changed |=
6873                 (memcmp(&new_crtc_state->vrr_params.adjust,
6874                         &vrr_params.adjust,
6875                         sizeof(vrr_params.adjust)) != 0);
6876
6877         new_crtc_state->freesync_vrr_info_changed |=
6878                 (memcmp(&new_crtc_state->vrr_infopacket,
6879                         &vrr_infopacket,
6880                         sizeof(vrr_infopacket)) != 0);
6881
6882         new_crtc_state->vrr_params = vrr_params;
6883         new_crtc_state->vrr_infopacket = vrr_infopacket;
6884
6885         new_stream->adjust = new_crtc_state->vrr_params.adjust;
6886         new_stream->vrr_infopacket = vrr_infopacket;
6887
6888         if (new_crtc_state->freesync_vrr_info_changed)
6889                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6890                               new_crtc_state->base.crtc->base.id,
6891                               (int)new_crtc_state->base.vrr_enabled,
6892                               (int)vrr_params.state);
6893
6894         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6895 }
6896
6897 static void pre_update_freesync_state_on_stream(
6898         struct amdgpu_display_manager *dm,
6899         struct dm_crtc_state *new_crtc_state)
6900 {
6901         struct dc_stream_state *new_stream = new_crtc_state->stream;
6902         struct mod_vrr_params vrr_params;
6903         struct mod_freesync_config config = new_crtc_state->freesync_config;
6904         struct amdgpu_device *adev = dm->adev;
6905         unsigned long flags;
6906
6907         if (!new_stream)
6908                 return;
6909
6910         /*
6911          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6912          * For now it's sufficient to just guard against these conditions.
6913          */
6914         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6915                 return;
6916
6917         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6918         vrr_params = new_crtc_state->vrr_params;
6919
6920         if (new_crtc_state->vrr_supported &&
6921             config.min_refresh_in_uhz &&
6922             config.max_refresh_in_uhz) {
6923                 config.state = new_crtc_state->base.vrr_enabled ?
6924                         VRR_STATE_ACTIVE_VARIABLE :
6925                         VRR_STATE_INACTIVE;
6926         } else {
6927                 config.state = VRR_STATE_UNSUPPORTED;
6928         }
6929
6930         mod_freesync_build_vrr_params(dm->freesync_module,
6931                                       new_stream,
6932                                       &config, &vrr_params);
6933
6934         new_crtc_state->freesync_timing_changed |=
6935                 (memcmp(&new_crtc_state->vrr_params.adjust,
6936                         &vrr_params.adjust,
6937                         sizeof(vrr_params.adjust)) != 0);
6938
6939         new_crtc_state->vrr_params = vrr_params;
6940         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6941 }
6942
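/*
 * Grab or drop the vblank reference and the vupdate irq across VRR
 * on/off transitions; see the inline comments below for why the vblank
 * irq must stay enabled while VRR is active.
 */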
6943 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6944                                             struct dm_crtc_state *new_state)
6945 {
6946         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6947         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6948
6949         if (!old_vrr_active && new_vrr_active) {
6950                 /* Transition VRR inactive -> active:
6951                  * While VRR is active, we must not disable vblank irq, as a
6952                  * re-enable after a disable would compute bogus vblank/pflip
6953                  * timestamps if the disable happened inside the display front porch.
6954                  *
6955                  * We also need vupdate irq for the actual core vblank handling
6956                  * at end of vblank.
6957                  */
6958                 dm_set_vupdate_irq(new_state->base.crtc, true);
6959                 drm_crtc_vblank_get(new_state->base.crtc);
6960                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6961                                  __func__, new_state->base.crtc->base.id);
6962         } else if (old_vrr_active && !new_vrr_active) {
6963                 /* Transition VRR active -> inactive:
6964                  * Allow vblank irq disable again for fixed refresh rate.
6965                  */
6966                 dm_set_vupdate_irq(new_state->base.crtc, false);
6967                 drm_crtc_vblank_put(new_state->base.crtc);
6968                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6969                                  __func__, new_state->base.crtc->base.id);
6970         }
6971 }
6972
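/*
 * Flush cursor updates for every cursor plane in the atomic state. Called
 * both when all planes are being disabled and after the planes have been
 * programmed, see amdgpu_dm_commit_planes().
 */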
6973 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6974 {
6975         struct drm_plane *plane;
6976         struct drm_plane_state *old_plane_state, *new_plane_state;
6977         int i;
6978
6979         /*
6980          * TODO: Make this per-stream so we don't issue redundant updates for
6981          * commits with multiple streams.
6982          */
6983         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6984                                        new_plane_state, i)
6985                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6986                         handle_cursor_update(plane, old_plane_state);
6987 }
6988
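/*
 * Program all plane updates for one CRTC: build the dc_surface_update
 * bundle, wait for fences and for the target vblank, arm the pageflip
 * event, and hand everything to DC in a single stream commit.
 */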
6989 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6990                                     struct dc_state *dc_state,
6991                                     struct drm_device *dev,
6992                                     struct amdgpu_display_manager *dm,
6993                                     struct drm_crtc *pcrtc,
6994                                     bool wait_for_vblank)
6995 {
6996         uint32_t i;
6997         uint64_t timestamp_ns;
6998         struct drm_plane *plane;
6999         struct drm_plane_state *old_plane_state, *new_plane_state;
7000         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7001         struct drm_crtc_state *new_pcrtc_state =
7002                         drm_atomic_get_new_crtc_state(state, pcrtc);
7003         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7004         struct dm_crtc_state *dm_old_crtc_state =
7005                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7006         int planes_count = 0, vpos, hpos;
7007         long r;
7008         unsigned long flags;
7009         struct amdgpu_bo *abo;
7010         uint32_t target_vblank, last_flip_vblank;
7011         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7012         bool pflip_present = false;
7013         struct {
7014                 struct dc_surface_update surface_updates[MAX_SURFACES];
7015                 struct dc_plane_info plane_infos[MAX_SURFACES];
7016                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7017                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7018                 struct dc_stream_update stream_update;
7019         } *bundle;
7020
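        /*
         * The bundle holds several MAX_SURFACES-sized arrays; it is
         * heap-allocated, presumably because it is too large for the
         * kernel stack.
         */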
7021         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7022
7023         if (!bundle) {
7024                 dm_error("Failed to allocate update bundle\n");
7025                 goto cleanup;
7026         }
7027
7028         /*
7029          * Disable the cursor first if we're disabling all the planes.
7030          * It'll remain on the screen after the planes are re-enabled
7031          * if we don't.
7032          */
7033         if (acrtc_state->active_planes == 0)
7034                 amdgpu_dm_commit_cursors(state);
7035
7036         /* update planes when needed */
7037         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7038                 struct drm_crtc *crtc = new_plane_state->crtc;
7039                 struct drm_crtc_state *new_crtc_state;
7040                 struct drm_framebuffer *fb = new_plane_state->fb;
7041                 bool plane_needs_flip;
7042                 struct dc_plane_state *dc_plane;
7043                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7044
7045                 /* Cursor plane is handled after stream updates */
7046                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7047                         continue;
7048
7049                 if (!fb || !crtc || pcrtc != crtc)
7050                         continue;
7051
7052                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7053                 if (!new_crtc_state->active)
7054                         continue;
7055
7056                 dc_plane = dm_new_plane_state->dc_state;
7057
7058                 bundle->surface_updates[planes_count].surface = dc_plane;
7059                 if (new_pcrtc_state->color_mgmt_changed) {
7060                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7061                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7062                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7063                 }
7064
7065                 fill_dc_scaling_info(new_plane_state,
7066                                      &bundle->scaling_infos[planes_count]);
7067
7068                 bundle->surface_updates[planes_count].scaling_info =
7069                         &bundle->scaling_infos[planes_count];
7070
7071                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7072
7073                 pflip_present = pflip_present || plane_needs_flip;
7074
7075                 if (!plane_needs_flip) {
7076                         planes_count += 1;
7077                         continue;
7078                 }
7079
7080                 abo = gem_to_amdgpu_bo(fb->obj[0]);
7081
7082                 /*
7083                  * Wait for all fences on this FB. Do a bounded wait to avoid
7084                  * deadlocking during GPU reset, when the fence may never signal
7085                  * while we hold the reservation lock on the BO.
7086                  */
7087                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7088                                                         false,
7089                                                         msecs_to_jiffies(5000));
7090                 if (unlikely(r <= 0))
7091                         DRM_ERROR("Waiting for fences timed out!\n");
7092
7093                 fill_dc_plane_info_and_addr(
7094                         dm->adev, new_plane_state,
7095                         dm_new_plane_state->tiling_flags,
7096                         &bundle->plane_infos[planes_count],
7097                         &bundle->flip_addrs[planes_count].address,
7098                         dm_new_plane_state->tmz_surface, false);
7099
7100                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7101                                  new_plane_state->plane->index,
7102                                  bundle->plane_infos[planes_count].dcc.enable);
7103
7104                 bundle->surface_updates[planes_count].plane_info =
7105                         &bundle->plane_infos[planes_count];
7106
7107                 /*
7108                  * Only allow immediate flips for fast updates that don't
7109                  * change FB pitch, DCC state, rotation or mirroring.
7110                  */
7111                 bundle->flip_addrs[planes_count].flip_immediate =
7112                         crtc->state->async_flip &&
7113                         acrtc_state->update_type == UPDATE_TYPE_FAST;
7114
7115                 timestamp_ns = ktime_get_ns();
7116                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7117                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7118                 bundle->surface_updates[planes_count].surface = dc_plane;
7119
7120                 if (!bundle->surface_updates[planes_count].surface) {
7121                         DRM_ERROR("No surface for CRTC: id=%d\n",
7122                                         acrtc_attach->crtc_id);
7123                         continue;
7124                 }
7125
7126                 if (plane == pcrtc->primary)
7127                         update_freesync_state_on_stream(
7128                                 dm,
7129                                 acrtc_state,
7130                                 acrtc_state->stream,
7131                                 dc_plane,
7132                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7133
7134                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7135                                  __func__,
7136                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7137                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7138
7139                 planes_count += 1;
7140
7141         }
7142
7143         if (pflip_present) {
7144                 if (!vrr_active) {
7145                         /* Use old throttling in non-vrr fixed refresh rate mode
7146                          * to keep flip scheduling based on target vblank counts
7147                          * working in a backwards compatible way, e.g., for
7148                          * clients using the GLX_OML_sync_control extension or
7149                          * DRI3/Present extension with defined target_msc.
7150                          */
7151                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7152                 } else {
7154                         /* For variable refresh rate mode only:
7155                          * Get vblank of last completed flip to avoid > 1 vrr
7156                          * flips per video frame by use of throttling, but allow
7157                          * flip programming anywhere in the possibly large
7158                          * variable vrr vblank interval for fine-grained flip
7159                          * timing control and more opportunity to avoid stutter
7160                          * on late submission of flips.
7161                          */
7162                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7163                         last_flip_vblank = acrtc_attach->last_flip_vblank;
7164                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7165                 }
7166
7167                 target_vblank = last_flip_vblank + wait_for_vblank;
7168
7169                 /*
7170                  * Wait until we're out of the vertical blank period before the one
7171                  * targeted by the flip
7172                  */
7173                 while ((acrtc_attach->enabled &&
7174                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7175                                                             0, &vpos, &hpos, NULL,
7176                                                             NULL, &pcrtc->hwmode)
7177                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7178                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7179                         (int)(target_vblank -
7180                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7181                         usleep_range(1000, 1100);
7182                 }
7183
7184                 /*
7185                  * Prepare the flip event for the pageflip interrupt to handle.
7186                  *
7187                  * This only works in the case where we've already turned on the
7188                  * appropriate hardware blocks (e.g. HUBP), so in the transition case
7189                  * from 0 -> n planes we have to skip the hardware-generated event
7190                  * and rely on sending it from software.
7191                  */
7192                 if (acrtc_attach->base.state->event &&
7193                     acrtc_state->active_planes > 0) {
7194                         drm_crtc_vblank_get(pcrtc);
7195
7196                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7197
7198                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7199                         prepare_flip_isr(acrtc_attach);
7200
7201                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7202                 }
7203
7204                 if (acrtc_state->stream) {
7205                         if (acrtc_state->freesync_vrr_info_changed)
7206                                 bundle->stream_update.vrr_infopacket =
7207                                         &acrtc_state->stream->vrr_infopacket;
7208                 }
7209         }
7210
7211         /* Update the planes if changed or disable if we don't have any. */
7212         if ((planes_count || acrtc_state->active_planes == 0) &&
7213                 acrtc_state->stream) {
7214                 bundle->stream_update.stream = acrtc_state->stream;
7215                 if (new_pcrtc_state->mode_changed) {
7216                         bundle->stream_update.src = acrtc_state->stream->src;
7217                         bundle->stream_update.dst = acrtc_state->stream->dst;
7218                 }
7219
7220                 if (new_pcrtc_state->color_mgmt_changed) {
7221                         /*
7222                          * TODO: This isn't fully correct since we've actually
7223                          * already modified the stream in place.
7224                          */
7225                         bundle->stream_update.gamut_remap =
7226                                 &acrtc_state->stream->gamut_remap_matrix;
7227                         bundle->stream_update.output_csc_transform =
7228                                 &acrtc_state->stream->csc_color_matrix;
7229                         bundle->stream_update.out_transfer_func =
7230                                 acrtc_state->stream->out_transfer_func;
7231                 }
7232
7233                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7234                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7235                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7236
7237                 /*
7238                  * If FreeSync state on the stream has changed then we need to
7239                  * re-adjust the min/max bounds now that DC doesn't handle this
7240                  * as part of commit.
7241                  */
7242                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7243                     amdgpu_dm_vrr_active(acrtc_state)) {
7244                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7245                         dc_stream_adjust_vmin_vmax(
7246                                 dm->dc, acrtc_state->stream,
7247                                 &acrtc_state->vrr_params.adjust);
7248                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7249                 }
7250                 mutex_lock(&dm->dc_lock);
7251                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7252                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7253                         amdgpu_dm_psr_disable(acrtc_state->stream);
7254
7255                 dc_commit_updates_for_stream(dm->dc,
7256                                                      bundle->surface_updates,
7257                                                      planes_count,
7258                                                      acrtc_state->stream,
7259                                                      &bundle->stream_update,
7260                                                      dc_state);
7261
7262                 /*
7263                  * Enable or disable the interrupts on the backend.
7264                  *
7265                  * Most pipes are put into power gating when unused.
7266                  *
7267                  * When a pipe has been power gated, its interrupt
7268                  * enablement state is lost once power gating is disabled.
7269                  *
7270                  * So we need to update the IRQ control state in hardware
7271                  * whenever the pipe turns on (since it could be previously
7272                  * power gated) or off (since some pipes can't be power gated
7273                  * on some ASICs).
7274                  */
7275                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7276                         dm_update_pflip_irq_state(drm_to_adev(dev),
7277                                                   acrtc_attach);
7278
7279                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7280                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7281                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7282                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7283                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7284                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7285                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7286                         amdgpu_dm_psr_enable(acrtc_state->stream);
7287                 }
7288
7289                 mutex_unlock(&dm->dc_lock);
7290         }
7291
7292         /*
7293          * Update cursor state *after* programming all the planes.
7294          * This avoids redundant programming in the case where we're
7295          * disabling a single plane, since those pipes are being disabled anyway.
7296          */
7297         if (acrtc_state->active_planes)
7298                 amdgpu_dm_commit_cursors(state);
7299
7300 cleanup:
7301         kfree(bundle);
7302 }
7303
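/*
 * Walk the connector states and notify the audio component (via ELD
 * notifications) about audio endpoints removed or added by this commit.
 */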
7304 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7305                                    struct drm_atomic_state *state)
7306 {
7307         struct amdgpu_device *adev = drm_to_adev(dev);
7308         struct amdgpu_dm_connector *aconnector;
7309         struct drm_connector *connector;
7310         struct drm_connector_state *old_con_state, *new_con_state;
7311         struct drm_crtc_state *new_crtc_state;
7312         struct dm_crtc_state *new_dm_crtc_state;
7313         const struct dc_stream_status *status;
7314         int i, inst;
7315
7316         /* Notify audio device removals. */
7317         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7318                 if (old_con_state->crtc != new_con_state->crtc) {
7319                         /* CRTC changes require notification. */
7320                         goto notify;
7321                 }
7322
7323                 if (!new_con_state->crtc)
7324                         continue;
7325
7326                 new_crtc_state = drm_atomic_get_new_crtc_state(
7327                         state, new_con_state->crtc);
7328
7329                 if (!new_crtc_state)
7330                         continue;
7331
7332                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7333                         continue;
7334
7335         notify:
7336                 aconnector = to_amdgpu_dm_connector(connector);
7337
7338                 mutex_lock(&adev->dm.audio_lock);
7339                 inst = aconnector->audio_inst;
7340                 aconnector->audio_inst = -1;
7341                 mutex_unlock(&adev->dm.audio_lock);
7342
7343                 amdgpu_dm_audio_eld_notify(adev, inst);
7344         }
7345
7346         /* Notify audio device additions. */
7347         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7348                 if (!new_con_state->crtc)
7349                         continue;
7350
7351                 new_crtc_state = drm_atomic_get_new_crtc_state(
7352                         state, new_con_state->crtc);
7353
7354                 if (!new_crtc_state)
7355                         continue;
7356
7357                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7358                         continue;
7359
7360                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7361                 if (!new_dm_crtc_state->stream)
7362                         continue;
7363
7364                 status = dc_stream_get_status(new_dm_crtc_state->stream);
7365                 if (!status)
7366                         continue;
7367
7368                 aconnector = to_amdgpu_dm_connector(connector);
7369
7370                 mutex_lock(&adev->dm.audio_lock);
7371                 inst = status->audio_inst;
7372                 aconnector->audio_inst = inst;
7373                 mutex_unlock(&adev->dm.audio_lock);
7374
7375                 amdgpu_dm_audio_eld_notify(adev, inst);
7376         }
7377 }
7378
7379 /*
7380  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7381  * @crtc_state: the DRM CRTC state
7382  * @stream_state: the DC stream state.
7383  *
7384  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7385  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7386  */
7387 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7388                                                 struct dc_stream_state *stream_state)
7389 {
7390         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7391 }
7392
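/*
 * Entry point for atomic commits: quiesce vblank/pflip interrupts on
 * CRTCs that are being disabled or modeset (see the comment below), then
 * defer to drm_atomic_helper_commit().
 */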
7393 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7394                                    struct drm_atomic_state *state,
7395                                    bool nonblock)
7396 {
7397         struct drm_crtc *crtc;
7398         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7399         struct amdgpu_device *adev = drm_to_adev(dev);
7400         int i;
7401
7402         /*
7403          * We disable vblank and pflip interrupts on CRTCs that are undergoing
7404          * a modeset, are being disabled, or have no active planes.
7405          *
7406          * It's done in atomic commit rather than commit tail for now since
7407          * some of these interrupt handlers access the current CRTC state and
7408          * potentially the stream pointer itself.
7409          *
7410          * Since the atomic state is swapped within atomic commit and not within
7411  * commit tail, this would lead to the new state (that hasn't been committed yet)
7412  * being accessed from within the handlers.
7413          *
7414          * TODO: Fix this so we can do this in commit tail and not have to block
7415          * in atomic check.
7416          */
7417         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7418                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7419
7420                 if (old_crtc_state->active &&
7421                     (!new_crtc_state->active ||
7422                      drm_atomic_crtc_needs_modeset(new_crtc_state)))
7423                         manage_dm_interrupts(adev, acrtc, false);
7424         }
7425         /*
7426          * Add check here for SoCs that support hardware cursor plane, to
7427          * unset legacy_cursor_update
7428          */
7429
7430         return drm_atomic_helper_commit(dev, state, nonblock);
7431
7432         /*TODO Handle EINTR, reenable IRQ*/
7433 }
7434
7435 /**
7436  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7437  * @state: The atomic state to commit
7438  *
7439  * This will tell DC to commit the constructed DC state from atomic_check,
7440  * programming the hardware. Any failure here implies a hardware failure, since
7441  * atomic check should have filtered anything non-kosher.
7442  */
7443 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7444 {
7445         struct drm_device *dev = state->dev;
7446         struct amdgpu_device *adev = drm_to_adev(dev);
7447         struct amdgpu_display_manager *dm = &adev->dm;
7448         struct dm_atomic_state *dm_state;
7449         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7450         uint32_t i, j;
7451         struct drm_crtc *crtc;
7452         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7453         unsigned long flags;
7454         bool wait_for_vblank = true;
7455         struct drm_connector *connector;
7456         struct drm_connector_state *old_con_state, *new_con_state;
7457         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7458         int crtc_disable_count = 0;
7459         bool mode_set_reset_required = false;
7460
7461         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7462
7463         dm_state = dm_atomic_get_new_state(state);
7464         if (dm_state && dm_state->context) {
7465                 dc_state = dm_state->context;
7466         } else {
7467                 /* No state changes, retain current state. */
7468                 dc_state_temp = dc_create_state(dm->dc);
7469                 ASSERT(dc_state_temp);
7470                 dc_state = dc_state_temp;
7471                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7472         }
7473
7474         /* update changed items */
7475         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7476                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7477
7478                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7479                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7480
7481                 DRM_DEBUG_DRIVER(
7482                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7483                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
7484                         "connectors_changed:%d\n",
7485                         acrtc->crtc_id,
7486                         new_crtc_state->enable,
7487                         new_crtc_state->active,
7488                         new_crtc_state->planes_changed,
7489                         new_crtc_state->mode_changed,
7490                         new_crtc_state->active_changed,
7491                         new_crtc_state->connectors_changed);
7492
7493                 /* Copy all transient state flags into dc state */
7494                 if (dm_new_crtc_state->stream) {
7495                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7496                                                             dm_new_crtc_state->stream);
7497                 }
7498
7499                 /* handles headless hotplug case, updating new_state and
7500                  * aconnector as needed
7501                  */
7502
7503                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7504
7505                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7506
7507                         if (!dm_new_crtc_state->stream) {
7508                                 /*
7509                                  * This could happen because of issues with
7510                                  * userspace notification delivery: userspace
7511                                  * tries to set a mode on a display which is
7512                                  * in fact disconnected, so dc_sink is NULL
7513                                  * on the aconnector. We expect a mode reset
7514                                  * to come soon.
7515                                  *
7516                                  * This can also happen when an unplug occurs
7517                                  * while the resume sequence is still running.
7518                                  *
7519                                  * In this case, we want to pretend we still
7520                                  * have a sink to keep the pipe running so that
7521                                  * hw state is consistent with the sw state.
7522                                  */
7523                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7524                                                 __func__, acrtc->base.base.id);
7525                                 continue;
7526                         }
7527
7528                         if (dm_old_crtc_state->stream)
7529                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7530
7531                         pm_runtime_get_noresume(dev->dev);
7532
7533                         acrtc->enabled = true;
7534                         acrtc->hw_mode = new_crtc_state->mode;
7535                         crtc->hwmode = new_crtc_state->mode;
7536                         mode_set_reset_required = true;
7537                 } else if (modereset_required(new_crtc_state)) {
7538                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7539                         /* i.e. reset mode */
7540                         if (dm_old_crtc_state->stream)
7541                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7542                         mode_set_reset_required = true;
7543                 }
7544         } /* for_each_crtc_in_state() */
7545
7546         if (dc_state) {
7547                 /* if there was a mode set or reset, disable eDP PSR */
7548                 if (mode_set_reset_required)
7549                         amdgpu_dm_psr_disable_all(dm);
7550
7551                 dm_enable_per_frame_crtc_master_sync(dc_state);
7552                 mutex_lock(&dm->dc_lock);
7553                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7554                 mutex_unlock(&dm->dc_lock);
7555         }
7556
7557         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7558                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7559
7560                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7561
7562                 if (dm_new_crtc_state->stream != NULL) {
7563                         const struct dc_stream_status *status =
7564                                         dc_stream_get_status(dm_new_crtc_state->stream);
7565
7566                         if (!status)
7567                                 status = dc_stream_get_status_from_state(dc_state,
7568                                                                          dm_new_crtc_state->stream);
7569
7570                         if (!status)
7571                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7572                         else
7573                                 acrtc->otg_inst = status->primary_otg_inst;
7574                 }
7575         }
7576 #ifdef CONFIG_DRM_AMD_DC_HDCP
7577         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7578                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7579                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7580                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7581
7582                 new_crtc_state = NULL;
7583
7584                 if (acrtc)
7585                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7586
7587                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7588
7589                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7590                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7591                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7592                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7593                         continue;
7594                 }
7595
7596                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7597                         hdcp_update_display(
7598                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7599                                 new_con_state->hdcp_content_type,
7600                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7601                                                                                                          : false);
7602         }
7603 #endif
7604
7605         /* Handle connector state changes */
7606         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7607                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7608                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7609                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7610                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7611                 struct dc_stream_update stream_update;
7612                 struct dc_info_packet hdr_packet;
7613                 struct dc_stream_status *status = NULL;
7614                 bool abm_changed, hdr_changed, scaling_changed;
7615
7616                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7617                 memset(&stream_update, 0, sizeof(stream_update));
7618
7619                 if (acrtc) {
7620                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7621                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7622                 }
7623
7624                 /* Skip any modesets/resets */
7625                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7626                         continue;
7627
7628                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7629                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7630
7631                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7632                                                              dm_old_con_state);
7633
7634                 abm_changed = dm_new_crtc_state->abm_level !=
7635                               dm_old_crtc_state->abm_level;
7636
7637                 hdr_changed =
7638                         is_hdr_metadata_different(old_con_state, new_con_state);
7639
7640                 if (!scaling_changed && !abm_changed && !hdr_changed)
7641                         continue;
7642
7643                 stream_update.stream = dm_new_crtc_state->stream;
7644                 if (scaling_changed) {
7645                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7646                                         dm_new_con_state, dm_new_crtc_state->stream);
7647
7648                         stream_update.src = dm_new_crtc_state->stream->src;
7649                         stream_update.dst = dm_new_crtc_state->stream->dst;
7650                 }
7651
7652                 if (abm_changed) {
7653                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7654
7655                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7656                 }
7657
7658                 if (hdr_changed) {
7659                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7660                         stream_update.hdr_static_metadata = &hdr_packet;
7661                 }
7662
7663                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7664                 WARN_ON(!status);
7665                 WARN_ON(!status->plane_count);
7666
7667                 /*
7668                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7669                  * Here we create an empty update on each plane.
7670                  * To fix this, DC should permit updating only stream properties.
7671                  */
7672                 for (j = 0; j < status->plane_count; j++)
7673                         dummy_updates[j].surface = status->plane_states[0];
7674
7675
7676                 mutex_lock(&dm->dc_lock);
7677                 dc_commit_updates_for_stream(dm->dc,
7678                                                      dummy_updates,
7679                                                      status->plane_count,
7680                                                      dm_new_crtc_state->stream,
7681                                                      &stream_update,
7682                                                      dc_state);
7683                 mutex_unlock(&dm->dc_lock);
7684         }
7685
7686         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7687         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7688                                       new_crtc_state, i) {
7689                 if (old_crtc_state->active && !new_crtc_state->active)
7690                         crtc_disable_count++;
7691
7692                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7693                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7694
7695                 /* Update freesync active state. */
7696                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7697
7698                 /* Handle vrr on->off / off->on transitions */
7699                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7700                                                 dm_new_crtc_state);
7701         }
7702
7703         /*
7704          * Enable interrupts for CRTCs that are newly enabled or went through
7705          * a modeset. This is intentionally deferred until after the front-end
7706          * state has been modified, so that the OTG is already on and the IRQ
7707          * handlers don't access stale or invalid state.
7708          */
7709         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7710                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7711
7712                 if (new_crtc_state->active &&
7713                     (!old_crtc_state->active ||
7714                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7715                         manage_dm_interrupts(adev, acrtc, true);
7716 #ifdef CONFIG_DEBUG_FS
7717                         /*
7718                          * Frontend may have changed so reapply the CRC capture
7719                          * settings for the stream.
7720                          */
7721                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7722
7723                         if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7724                                 amdgpu_dm_crtc_configure_crc_source(
7725                                         crtc, dm_new_crtc_state,
7726                                         dm_new_crtc_state->crc_src);
7727                         }
7728 #endif
7729                 }
7730         }
7731
7732         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7733                 if (new_crtc_state->async_flip)
7734                         wait_for_vblank = false;
7735
7736         /* update planes when needed, per CRTC */
7737         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7738                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7739
7740                 if (dm_new_crtc_state->stream)
7741                         amdgpu_dm_commit_planes(state, dc_state, dev,
7742                                                 dm, crtc, wait_for_vblank);
7743         }
7744
7745         /* Update audio instances for each connector. */
7746         amdgpu_dm_commit_audio(dev, state);
7747
7748         /*
7749          * Send a vblank event for every event not already handled by the flip
7750          * path, and mark each one consumed for drm_atomic_helper_commit_hw_done().
7751          */
7752         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7753         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7754
7755                 if (new_crtc_state->event)
7756                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7757
7758                 new_crtc_state->event = NULL;
7759         }
7760         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7761
7762         /* Signal HW programming completion */
7763         drm_atomic_helper_commit_hw_done(state);
7764
7765         if (wait_for_vblank)
7766                 drm_atomic_helper_wait_for_flip_done(dev, state);
7767
7768         drm_atomic_helper_cleanup_planes(dev, state);
7769
7770         /*
7771          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7772          * so we can put the GPU into runtime suspend if we're not driving any
7773          * displays anymore
7774          */
7775         for (i = 0; i < crtc_disable_count; i++)
7776                 pm_runtime_put_autosuspend(dev->dev);
7777         pm_runtime_mark_last_busy(dev->dev);
7778
7779         if (dc_state_temp)
7780                 dc_release_state(dc_state_temp);
7781 }
7782
7783
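/*
 * Build and commit a minimal atomic state that forces a modeset on the
 * CRTC currently driving @connector; used to restore a display when no
 * userspace compositor is around to do it.
 */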
7784 static int dm_force_atomic_commit(struct drm_connector *connector)
7785 {
7786         int ret = 0;
7787         struct drm_device *ddev = connector->dev;
7788         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7789         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7790         struct drm_plane *plane = disconnected_acrtc->base.primary;
7791         struct drm_connector_state *conn_state;
7792         struct drm_crtc_state *crtc_state;
7793         struct drm_plane_state *plane_state;
7794
7795         if (!state)
7796                 return -ENOMEM;
7797
7798         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7799
7800         /* Construct an atomic state to restore previous display setting */
7801
7802         /*
7803          * Attach connectors to drm_atomic_state
7804          */
7805         conn_state = drm_atomic_get_connector_state(state, connector);
7806
7807         ret = PTR_ERR_OR_ZERO(conn_state);
7808         if (ret)
7809                 goto err;
7810
7811         /* Attach crtc to drm_atomic_state*/
7812         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7813
7814         ret = PTR_ERR_OR_ZERO(crtc_state);
7815         if (ret)
7816                 goto err;
7817
7818         /* force a restore */
7819         crtc_state->mode_changed = true;
7820
7821         /* Attach plane to drm_atomic_state */
7822         plane_state = drm_atomic_get_plane_state(state, plane);
7823
7824         ret = PTR_ERR_OR_ZERO(plane_state);
7825         if (ret)
7826                 goto err;
7827
7828
7829         /* Call commit internally with the state we just constructed */
7830         ret = drm_atomic_commit(state);
7831         if (!ret)
7832                 return 0;
7833
7834 err:
7835         DRM_ERROR("Restoring old state failed with %i\n", ret);
7836         drm_atomic_state_put(state);
7837
7838         return ret;
7839 }
7840
7841 /*
7842  * This function handles all cases where a set mode does not come after a
7843  * hotplug. This includes when a display is unplugged and then plugged back
7844  * into the same port, and when running without usermode desktop manager support.
7845  */
7846 void dm_restore_drm_connector_state(struct drm_device *dev,
7847                                     struct drm_connector *connector)
7848 {
7849         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7850         struct amdgpu_crtc *disconnected_acrtc;
7851         struct dm_crtc_state *acrtc_state;
7852
7853         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7854                 return;
7855
7856         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7857         if (!disconnected_acrtc)
7858                 return;
7859
7860         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7861         if (!acrtc_state->stream)
7862                 return;
7863
7864         /*
7865          * If the previous sink has not been released and is different from the
7866          * current one, we deduce that we cannot rely on a usermode call to turn
7867          * the display on, so we do it here.
7868          */
7869         if (acrtc_state->stream->sink != aconnector->dc_sink)
7870                 dm_force_atomic_commit(&aconnector->base);
7871 }
7872
7873 /*
7874  * Grabs all modesetting locks to serialize against any blocking commits,
7875  * and waits for completion of all non-blocking commits.
7876  */
7877 static int do_aquire_global_lock(struct drm_device *dev,
7878                                  struct drm_atomic_state *state)
7879 {
7880         struct drm_crtc *crtc;
7881         struct drm_crtc_commit *commit;
7882         long ret;
7883
7884         /*
7885          * Adding all modeset locks to acquire_ctx will
7886          * ensure that when the framework releases it, the
7887          * extra locks we are taking here will get released too.
7888          */
7889         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7890         if (ret)
7891                 return ret;
7892
7893         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7894                 spin_lock(&crtc->commit_lock);
7895                 commit = list_first_entry_or_null(&crtc->commit_list,
7896                                 struct drm_crtc_commit, commit_entry);
7897                 if (commit)
7898                         drm_crtc_commit_get(commit);
7899                 spin_unlock(&crtc->commit_lock);
7900
7901                 if (!commit)
7902                         continue;
7903
7904                 /*
7905                  * Make sure all pending HW programming has completed and
7906                  * all page flips are done.
7907                  */
7908                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7909
7910                 if (ret > 0)
7911                         ret = wait_for_completion_interruptible_timeout(
7912                                         &commit->flip_done, 10*HZ);
7913
7914                 if (ret == 0)
7915                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7916                                   "timed out\n", crtc->base.id, crtc->name);
7917
7918                 drm_crtc_commit_put(commit);
7919         }
7920
7921         return ret < 0 ? ret : 0;
7922 }
7923
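/*
 * Derive the freesync_config for a CRTC from the connector's reported
 * refresh range: VRR is only supported when the current mode's vrefresh
 * falls within [min_vfreq, max_vfreq].
 */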
7924 static void get_freesync_config_for_crtc(
7925         struct dm_crtc_state *new_crtc_state,
7926         struct dm_connector_state *new_con_state)
7927 {
7928         struct mod_freesync_config config = {0};
7929         struct amdgpu_dm_connector *aconnector =
7930                         to_amdgpu_dm_connector(new_con_state->base.connector);
7931         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7932         int vrefresh = drm_mode_vrefresh(mode);
7933
7934         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7935                                         vrefresh >= aconnector->min_vfreq &&
7936                                         vrefresh <= aconnector->max_vfreq;
7937
7938         if (new_crtc_state->vrr_supported) {
7939                 new_crtc_state->stream->ignore_msa_timing_param = true;
7940                 config.state = new_crtc_state->base.vrr_enabled ?
7941                                 VRR_STATE_ACTIVE_VARIABLE :
7942                                 VRR_STATE_INACTIVE;
7943                 config.min_refresh_in_uhz =
7944                                 aconnector->min_vfreq * 1000000;
7945                 config.max_refresh_in_uhz =
7946                                 aconnector->max_vfreq * 1000000;
7947                 config.vsif_supported = true;
7948                 config.btr = true;
7949         }
7950
7951         new_crtc_state->freesync_config = config;
7952 }
7953
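/* Drop all cached VRR state, e.g. when the CRTC loses its stream. */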
7954 static void reset_freesync_config_for_crtc(
7955         struct dm_crtc_state *new_crtc_state)
7956 {
7957         new_crtc_state->vrr_supported = false;
7958
7959         memset(&new_crtc_state->vrr_params, 0,
7960                sizeof(new_crtc_state->vrr_params));
7961         memset(&new_crtc_state->vrr_infopacket, 0,
7962                sizeof(new_crtc_state->vrr_infopacket));
7963 }
7964
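/*
 * Atomic-check helper for one CRTC: create and validate a new dc_stream
 * for CRTCs being enabled, remove streams from CRTCs being disabled, and
 * flag when full DC state validation will be needed.
 */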
7965 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7966                                 struct drm_atomic_state *state,
7967                                 struct drm_crtc *crtc,
7968                                 struct drm_crtc_state *old_crtc_state,
7969                                 struct drm_crtc_state *new_crtc_state,
7970                                 bool enable,
7971                                 bool *lock_and_validation_needed)
7972 {
7973         struct dm_atomic_state *dm_state = NULL;
7974         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7975         struct dc_stream_state *new_stream;
7976         int ret = 0;
7977
7978         /*
7979          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7980          * update changed items
7981          */
7982         struct amdgpu_crtc *acrtc = NULL;
7983         struct amdgpu_dm_connector *aconnector = NULL;
7984         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7985         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7986
7987         new_stream = NULL;
7988
7989         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7990         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7991         acrtc = to_amdgpu_crtc(crtc);
7992         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7993
7994         /* TODO This hack should go away */
7995         if (aconnector && enable) {
7996                 /* Make sure fake sink is created in plug-in scenario */
7997                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7998                                                             &aconnector->base);
7999                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8000                                                             &aconnector->base);
8001
8002                 if (IS_ERR(drm_new_conn_state)) {
8003                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8004                         goto fail;
8005                 }
8006
8007                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8008                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8009
8010                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8011                         goto skip_modeset;
8012
8013                 new_stream = create_validate_stream_for_sink(aconnector,
8014                                                              &new_crtc_state->mode,
8015                                                              dm_new_conn_state,
8016                                                              dm_old_crtc_state->stream);
8017
8018                 /*
8019                  * We can have no stream on ACTION_SET if a display
8020                  * was disconnected during S3; in this case it is not an
8021                  * error, the OS will be updated after detection and
8022                  * will do the right thing on the next atomic commit.
8023                  */
8024
8025                 if (!new_stream) {
8026                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8027                                         __func__, acrtc->base.base.id);
8028                         ret = -ENOMEM;
8029                         goto fail;
8030                 }
8031
8032                 /*
8033                  * TODO: Check VSDB bits to decide whether this should
8034                  * be enabled or not.
8035                  */
8036                 new_stream->triggered_crtc_reset.enabled =
8037                         dm->force_timing_sync;
8038
8039                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8040
8041                 ret = fill_hdr_info_packet(drm_new_conn_state,
8042                                            &new_stream->hdr_static_metadata);
8043                 if (ret)
8044                         goto fail;
8045
8046                 /*
8047                  * If we already removed the old stream from the context
8048                  * (and set the new stream to NULL) then we can't reuse
8049                  * the old stream even if the stream and scaling are unchanged.
8050                  * We'd hit the BUG_ON and end up with a black screen.
8051                  *
8052                  * TODO: Refactor this function to allow this check to work
8053                  * in all conditions.
8054                  */
8055                 if (dm_new_crtc_state->stream &&
8056                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8057                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8058                         new_crtc_state->mode_changed = false;
8059                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
8060                                          new_crtc_state->mode_changed);
8061                 }
8062         }
8063
8064         /* mode_changed flag may get updated above, need to check again */
8065         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8066                 goto skip_modeset;
8067
8068         DRM_DEBUG_DRIVER(
8069                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8070                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8071                 "connectors_changed:%d\n",
8072                 acrtc->crtc_id,
8073                 new_crtc_state->enable,
8074                 new_crtc_state->active,
8075                 new_crtc_state->planes_changed,
8076                 new_crtc_state->mode_changed,
8077                 new_crtc_state->active_changed,
8078                 new_crtc_state->connectors_changed);
8079
8080         /* Remove stream for any changed/disabled CRTC */
8081         if (!enable) {
8083                 if (!dm_old_crtc_state->stream)
8084                         goto skip_modeset;
8085
8086                 ret = dm_atomic_get_state(state, &dm_state);
8087                 if (ret)
8088                         goto fail;
8089
8090                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8091                                 crtc->base.id);
8092
8093                 /* i.e. reset mode */
8094                 if (dc_remove_stream_from_ctx(
8095                                 dm->dc,
8096                                 dm_state->context,
8097                                 dm_old_crtc_state->stream) != DC_OK) {
8098                         ret = -EINVAL;
8099                         goto fail;
8100                 }
8101
8102                 dc_stream_release(dm_old_crtc_state->stream);
8103                 dm_new_crtc_state->stream = NULL;
8104
8105                 reset_freesync_config_for_crtc(dm_new_crtc_state);
8106
8107                 *lock_and_validation_needed = true;
8108
8109         } else {/* Add stream for any updated/enabled CRTC */
8110                 /*
8111                  * Quick fix to prevent a NULL pointer dereference on new_stream
8112                  * when added MST connectors are not found in the existing
8113                  * crtc_state in chained mode. TODO: dig out the root cause.
8114                  */
8115                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8116                         goto skip_modeset;
8117
8118                 if (modereset_required(new_crtc_state))
8119                         goto skip_modeset;
8120
8121                 if (modeset_required(new_crtc_state, new_stream,
8122                                      dm_old_crtc_state->stream)) {
8124                         WARN_ON(dm_new_crtc_state->stream);
8125
8126                         ret = dm_atomic_get_state(state, &dm_state);
8127                         if (ret)
8128                                 goto fail;
8129
8130                         dm_new_crtc_state->stream = new_stream;
8131
8132                         dc_stream_retain(new_stream);
8133
8134                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8135                                                 crtc->base.id);
8136
8137                         if (dc_add_stream_to_ctx(
8138                                         dm->dc,
8139                                         dm_state->context,
8140                                         dm_new_crtc_state->stream) != DC_OK) {
8141                                 ret = -EINVAL;
8142                                 goto fail;
8143                         }
8144
8145                         *lock_and_validation_needed = true;
8146                 }
8147         }
8148
8149 skip_modeset:
8150         /* Release extra reference */
8151         if (new_stream)
8152                 dc_stream_release(new_stream);
8153
8154         /*
8155          * We want to do dc stream updates that do not require a
8156          * full modeset below.
8157          */
8158         if (!(enable && aconnector && new_crtc_state->active))
8159                 return 0;
8160         /*
8161          * Given the above conditions, the dc state cannot be NULL because:
8162          * 1. We're in the process of enabling CRTCs (the stream has just been
8163          *    added to the dc context, or is already in the context),
8164          * 2. Has a valid connector attached, and
8165          * 3. Is currently active and enabled.
8166          * => The dc stream state currently exists.
8167          */
8168         BUG_ON(dm_new_crtc_state->stream == NULL);
8169
8170         /* Scaling or underscan settings */
8171         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8172                 update_stream_scaling_settings(
8173                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8174
8175         /* ABM settings */
8176         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8177
8178         /*
8179          * Color management settings. We also update color properties
8180          * when a modeset is needed, to ensure it gets reprogrammed.
8181          */
8182         if (dm_new_crtc_state->base.color_mgmt_changed ||
8183             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8184                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8185                 if (ret)
8186                         goto fail;
8187         }
8188
8189         /* Update Freesync settings. */
8190         get_freesync_config_for_crtc(dm_new_crtc_state,
8191                                      dm_new_conn_state);
8192
8193         return ret;
8194
8195 fail:
8196         if (new_stream)
8197                 dc_stream_release(new_stream);
8198         return ret;
8199 }
8200
8201 static bool should_reset_plane(struct drm_atomic_state *state,
8202                                struct drm_plane *plane,
8203                                struct drm_plane_state *old_plane_state,
8204                                struct drm_plane_state *new_plane_state)
8205 {
8206         struct drm_plane *other;
8207         struct drm_plane_state *old_other_state, *new_other_state;
8208         struct drm_crtc_state *new_crtc_state;
8209         int i;
8210
8211         /*
8212          * TODO: Remove this hack once the checks below are sufficient
8213          * to determine when we need to reset all the planes on
8214          * the stream.
8215          */
8216         if (state->allow_modeset)
8217                 return true;
8218
8219         /* Exit early if we know that we're adding or removing the plane. */
8220         if (old_plane_state->crtc != new_plane_state->crtc)
8221                 return true;
8222
8223         /* old crtc == new_crtc == NULL, plane not in context. */
8224         if (!new_plane_state->crtc)
8225                 return false;
8226
8227         new_crtc_state =
8228                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8229
8230         if (!new_crtc_state)
8231                 return true;
8232
8233         /* CRTC Degamma changes currently require us to recreate planes. */
8234         if (new_crtc_state->color_mgmt_changed)
8235                 return true;
8236
8237         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8238                 return true;
8239
8240         /*
8241          * If there are any new primary or overlay planes being added or
8242          * removed then the z-order can potentially change. To ensure
8243          * correct z-order and pipe acquisition the current DC architecture
8244          * requires us to remove and recreate all existing planes.
8245          *
8246          * TODO: Come up with a more elegant solution for this.
8247          */
8248         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8249                 struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8250
8251                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8252                         continue;
8253
8254                 if (old_other_state->crtc != new_plane_state->crtc &&
8255                     new_other_state->crtc != new_plane_state->crtc)
8256                         continue;
8257
8258                 if (old_other_state->crtc != new_other_state->crtc)
8259                         return true;
8260
8261                 /* Src/dst size and scaling updates. */
8262                 if (old_other_state->src_w != new_other_state->src_w ||
8263                     old_other_state->src_h != new_other_state->src_h ||
8264                     old_other_state->crtc_w != new_other_state->crtc_w ||
8265                     old_other_state->crtc_h != new_other_state->crtc_h)
8266                         return true;
8267
8268                 /* Rotation / mirroring updates. */
8269                 if (old_other_state->rotation != new_other_state->rotation)
8270                         return true;
8271
8272                 /* Blending updates. */
8273                 if (old_other_state->pixel_blend_mode !=
8274                     new_other_state->pixel_blend_mode)
8275                         return true;
8276
8277                 /* Alpha updates. */
8278                 if (old_other_state->alpha != new_other_state->alpha)
8279                         return true;
8280
8281                 /* Colorspace changes. */
8282                 if (old_other_state->color_range != new_other_state->color_range ||
8283                     old_other_state->color_encoding != new_other_state->color_encoding)
8284                         return true;
8285
8286                 /* Framebuffer checks fall at the end. */
8287                 if (!old_other_state->fb || !new_other_state->fb)
8288                         continue;
8289
8290                 /* Pixel format changes can require bandwidth updates. */
8291                 if (old_other_state->fb->format != new_other_state->fb->format)
8292                         return true;
8293
8294                 old_dm_plane_state = to_dm_plane_state(old_other_state);
8295                 new_dm_plane_state = to_dm_plane_state(new_other_state);
8296
8297                 /* Tiling and DCC changes also require bandwidth updates. */
8298                 if (old_dm_plane_state->tiling_flags !=
8299                     new_dm_plane_state->tiling_flags)
8300                         return true;
8301         }
8302
8303         return false;
8304 }
8305
8306 static int dm_update_plane_state(struct dc *dc,
8307                                  struct drm_atomic_state *state,
8308                                  struct drm_plane *plane,
8309                                  struct drm_plane_state *old_plane_state,
8310                                  struct drm_plane_state *new_plane_state,
8311                                  bool enable,
8312                                  bool *lock_and_validation_needed)
8313 {
8315         struct dm_atomic_state *dm_state = NULL;
8316         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8317         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8318         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8319         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8320         struct amdgpu_crtc *new_acrtc;
8321         bool needs_reset;
8322         int ret = 0;
8323
8325         new_plane_crtc = new_plane_state->crtc;
8326         old_plane_crtc = old_plane_state->crtc;
8327         dm_new_plane_state = to_dm_plane_state(new_plane_state);
8328         dm_old_plane_state = to_dm_plane_state(old_plane_state);
8329
8330         /* TODO: Implement a better atomic check for the cursor plane */
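        /* In this driver the cursor is not handled as a DC plane, so only a
         * minimal check (cursor size against the hardware limits) is done
         * here rather than the full plane validation below.
         */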
8331         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8332                 if (!enable || !new_plane_crtc ||
8333                         drm_atomic_plane_disabling(plane->state, new_plane_state))
8334                         return 0;
8335
8336                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8337
8338                 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8339                         (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8340                         DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8341                                                          new_plane_state->crtc_w, new_plane_state->crtc_h);
8342                         return -EINVAL;
8343                 }
8344
8345                 return 0;
8346         }
8347
8348         needs_reset = should_reset_plane(state, plane, old_plane_state,
8349                                          new_plane_state);
8350
8351         /* Remove any changed/removed planes */
8352         if (!enable) {
8353                 if (!needs_reset)
8354                         return 0;
8355
8356                 if (!old_plane_crtc)
8357                         return 0;
8358
8359                 old_crtc_state = drm_atomic_get_old_crtc_state(
8360                                 state, old_plane_crtc);
8361                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8362
8363                 if (!dm_old_crtc_state->stream)
8364                         return 0;
8365
8366                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8367                                 plane->base.id, old_plane_crtc->base.id);
8368
8369                 ret = dm_atomic_get_state(state, &dm_state);
8370                 if (ret)
8371                         return ret;
8372
8373                 if (!dc_remove_plane_from_context(
8374                                 dc,
8375                                 dm_old_crtc_state->stream,
8376                                 dm_old_plane_state->dc_state,
8377                                 dm_state->context)) {
8379                         return -EINVAL;
8380                 }
8381
8383                 dc_plane_state_release(dm_old_plane_state->dc_state);
8384                 dm_new_plane_state->dc_state = NULL;
8385
8386                 *lock_and_validation_needed = true;
8387
8388         } else { /* Add new planes */
8389                 struct dc_plane_state *dc_new_plane_state;
8390
8391                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8392                         return 0;
8393
8394                 if (!new_plane_crtc)
8395                         return 0;
8396
8397                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8398                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8399
8400                 if (!dm_new_crtc_state->stream)
8401                         return 0;
8402
8403                 if (!needs_reset)
8404                         return 0;
8405
8406                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8407                 if (ret)
8408                         return ret;
8409
8410                 WARN_ON(dm_new_plane_state->dc_state);
8411
8412                 dc_new_plane_state = dc_create_plane_state(dc);
8413                 if (!dc_new_plane_state)
8414                         return -ENOMEM;
8415
8416                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8417                                 plane->base.id, new_plane_crtc->base.id);
8418
8419                 ret = fill_dc_plane_attributes(
8420                         drm_to_adev(new_plane_crtc->dev),
8421                         dc_new_plane_state,
8422                         new_plane_state,
8423                         new_crtc_state);
8424                 if (ret) {
8425                         dc_plane_state_release(dc_new_plane_state);
8426                         return ret;
8427                 }
8428
8429                 ret = dm_atomic_get_state(state, &dm_state);
8430                 if (ret) {
8431                         dc_plane_state_release(dc_new_plane_state);
8432                         return ret;
8433                 }
8434
8435                 /*
8436                  * Any atomic check errors that occur after this will
8437                  * not need a release. The plane state will be attached
8438                  * to the stream, and therefore part of the atomic
8439                  * state. It'll be released when the atomic state is
8440                  * cleaned.
8441                  */
8442                 if (!dc_add_plane_to_context(
8443                                 dc,
8444                                 dm_new_crtc_state->stream,
8445                                 dc_new_plane_state,
8446                                 dm_state->context)) {
8448                         dc_plane_state_release(dc_new_plane_state);
8449                         return -EINVAL;
8450                 }
8451
8452                 dm_new_plane_state->dc_state = dc_new_plane_state;
8453
8454                 /* Tell DC to do a full surface update every time there
8455                  * is a plane change. Inefficient, but works for now.
8456                  */
8457                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8458
8459                 *lock_and_validation_needed = true;
8460         }
8461
8463         return ret;
8464 }
8465
8466 #if defined(CONFIG_DRM_AMD_DC_DCN)
8467 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8468 {
8469         struct drm_connector *connector;
8470         struct drm_connector_state *conn_state;
8471         struct amdgpu_dm_connector *aconnector = NULL;
8472         int i;
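
        /* Find the MST connector assigned to this CRTC; a DSC recompute on
         * its link can affect sibling CRTCs sharing the same topology, which
         * the helper called below pulls into the atomic state.
         */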
8473         for_each_new_connector_in_state(state, connector, conn_state, i) {
8474                 if (conn_state->crtc != crtc)
8475                         continue;
8476
8477                 aconnector = to_amdgpu_dm_connector(connector);
8478                 if (!aconnector->port || !aconnector->mst_port)
8479                         aconnector = NULL;
8480                 else
8481                         break;
8482         }
8483
8484         if (!aconnector)
8485                 return 0;
8486
8487         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8488 }
8489 #endif
8490
8491 /**
8492  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8493  * @dev: The DRM device
8494  * @state: The atomic state to commit
8495  *
8496  * Validate that the given atomic state is programmable by DC into hardware.
8497  * This involves constructing a &struct dc_state reflecting the new hardware
8498  * state we wish to commit, then querying DC to see if it is programmable. It's
8499  * important not to modify the existing DC state. Otherwise, atomic_check
8500  * may unexpectedly commit hardware changes.
8501  *
8502  * When validating the DC state, it's important that the right locks are
8503  * acquired. For the full update case, which removes/adds/updates streams on
8504  * one CRTC while flipping on another CRTC, acquiring the global lock
8505  * guarantees that any such full update commit will wait for completion of any
8506  * outstanding flip using DRM's synchronization events.
8507  *
8508  * Note that DM adds the affected connectors for all CRTCs in state, when that
8509  * might not seem necessary. This is because DC stream creation requires the
8510  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8511  * be possible but non-trivial - a possible TODO item.
8512  *
8513  * Return: 0 on success, negative error code if validation failed.
8514  */
8515 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8516                                   struct drm_atomic_state *state)
8517 {
8518         struct amdgpu_device *adev = drm_to_adev(dev);
8519         struct dm_atomic_state *dm_state = NULL;
8520         struct dc *dc = adev->dm.dc;
8521         struct drm_connector *connector;
8522         struct drm_connector_state *old_con_state, *new_con_state;
8523         struct drm_crtc *crtc;
8524         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8525         struct drm_plane *plane;
8526         struct drm_plane_state *old_plane_state, *new_plane_state;
8527         enum dc_status status;
8528         int ret, i;
8529         bool lock_and_validation_needed = false;
8530
8531         ret = drm_atomic_helper_check_modeset(dev, state);
8532         if (ret)
8533                 goto fail;
8534
8535         /* Check connector changes */
8536         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8537                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8538                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8539
8540                 /* Skip connectors that are disabled or part of modeset already. */
8541                 if (!old_con_state->crtc && !new_con_state->crtc)
8542                         continue;
8543
8544                 if (!new_con_state->crtc)
8545                         continue;
8546
8547                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8548                 if (IS_ERR(new_crtc_state)) {
8549                         ret = PTR_ERR(new_crtc_state);
8550                         goto fail;
8551                 }
8552
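
                /* An ABM level change is applied as a stream update; flag the
                 * connectors as changed so this CRTC takes the modeset path
                 * and the new level actually gets programmed.
                 */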
8553                 if (dm_old_con_state->abm_level !=
8554                     dm_new_con_state->abm_level)
8555                         new_crtc_state->connectors_changed = true;
8556         }
8557
8558 #if defined(CONFIG_DRM_AMD_DC_DCN)
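        /* DSC over MST is only present on Navi (DCN2) and newer ASICs. */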
8559         if (adev->asic_type >= CHIP_NAVI10) {
8560                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8561                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8562                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8563                                 if (ret)
8564                                         goto fail;
8565                         }
8566                 }
8567         }
8568 #endif
8569         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8570                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8571                     !new_crtc_state->color_mgmt_changed &&
8572                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8573                         continue;
8574
8575                 if (!new_crtc_state->enable)
8576                         continue;
8577
8578                 ret = drm_atomic_add_affected_connectors(state, crtc);
8579                 if (ret)
8580                         return ret;
8581
8582                 ret = drm_atomic_add_affected_planes(state, crtc);
8583                 if (ret)
8584                         goto fail;
8585         }
8586
8587         /*
8588          * Add all primary and overlay planes on the CRTC to the state
8589          * whenever a plane is enabled to maintain correct z-ordering
8590          * and to enable fast surface updates.
8591          */
8592         drm_for_each_crtc(crtc, dev) {
8593                 bool modified = false;
8594
8595                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8596                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8597                                 continue;
8598
8599                         if (new_plane_state->crtc == crtc ||
8600                             old_plane_state->crtc == crtc) {
8601                                 modified = true;
8602                                 break;
8603                         }
8604                 }
8605
8606                 if (!modified)
8607                         continue;
8608
8609                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8610                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8611                                 continue;
8612
8613                         new_plane_state =
8614                                 drm_atomic_get_plane_state(state, plane);
8615
8616                         if (IS_ERR(new_plane_state)) {
8617                                 ret = PTR_ERR(new_plane_state);
8618                                 goto fail;
8619                         }
8620                 }
8621         }
8622
8623         /* Prepass for updating tiling flags on new planes. */
8624         for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8625                 struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8626                 struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8627
8628                 ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8629                                   &new_dm_plane_state->tmz_surface);
8630                 if (ret)
8631                         goto fail;
8632         }
8633
8634         /* Remove existing planes if they are modified */
8635         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8636                 ret = dm_update_plane_state(dc, state, plane,
8637                                             old_plane_state,
8638                                             new_plane_state,
8639                                             false,
8640                                             &lock_and_validation_needed);
8641                 if (ret)
8642                         goto fail;
8643         }
8644
8645         /* Disable all crtcs which require disable */
8646         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8647                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8648                                            old_crtc_state,
8649                                            new_crtc_state,
8650                                            false,
8651                                            &lock_and_validation_needed);
8652                 if (ret)
8653                         goto fail;
8654         }
8655
8656         /* Enable all crtcs which require enable */
8657         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8658                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8659                                            old_crtc_state,
8660                                            new_crtc_state,
8661                                            true,
8662                                            &lock_and_validation_needed);
8663                 if (ret)
8664                         goto fail;
8665         }
8666
8667         /* Add new/modified planes */
8668         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8669                 ret = dm_update_plane_state(dc, state, plane,
8670                                             old_plane_state,
8671                                             new_plane_state,
8672                                             true,
8673                                             &lock_and_validation_needed);
8674                 if (ret)
8675                         goto fail;
8676         }
8677
8678         /* Run this here since we want to validate the streams we created */
8679         ret = drm_atomic_helper_check_planes(dev, state);
8680         if (ret)
8681                 goto fail;
8682
8683         if (state->legacy_cursor_update) {
8684                 /*
8685                  * This is a fast cursor update coming from the plane update
8686                  * helper, check if it can be done asynchronously for better
8687                  * performance.
8688                  */
8689                 state->async_update =
8690                         !drm_atomic_helper_async_check(dev, state);
8691
8692                 /*
8693                  * Skip the remaining global validation if this is an async
8694                  * update. Cursor updates can be done without affecting
8695                  * state or bandwidth calcs and this avoids the performance
8696                  * penalty of locking the private state object and
8697                  * allocating a new dc_state.
8698                  */
8699                 if (state->async_update)
8700                         return 0;
8701         }
8702
8703         /* Check scaling and underscan changes */
8704         /* TODO: Removed scaling changes validation due to inability to commit
8705          * a new stream into the context without causing a full reset. Need to
8706          * decide how to handle this.
8707          */
8708         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8709                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8710                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8711                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8712
8713                 /* Skip any modesets/resets */
8714                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8715                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8716                         continue;
8717
8718                 /* Skip anything that is not a scaling or underscan change */
8719                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8720                         continue;
8721
8722                 lock_and_validation_needed = true;
8723         }
8724
8725         /*
8726          * Streams and planes are reset when there are changes that affect
8727          * bandwidth. Anything that affects bandwidth needs to go through
8728          * DC global validation to ensure that the configuration can be applied
8729          * to hardware.
8730          *
8731          * We currently have to stall out here in atomic_check for outstanding
8732          * commits to finish in this case because our IRQ handlers reference
8733          * DRM state directly - we can end up disabling interrupts too early
8734          * if we don't.
8735          *
8736          * TODO: Remove this stall and drop DM state private objects.
8737          */
8738         if (lock_and_validation_needed) {
8739                 ret = dm_atomic_get_state(state, &dm_state);
8740                 if (ret)
8741                         goto fail;
8742
8743                 ret = do_aquire_global_lock(dev, state);
8744                 if (ret)
8745                         goto fail;
8746
8747 #if defined(CONFIG_DRM_AMD_DC_DCN)
8748                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8749                         goto fail;
8750
8751                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8752                 if (ret)
8753                         goto fail;
8754 #endif
8755
8756                 /*
8757                  * Perform validation of MST topology in the state:
8758                  * We need to perform MST atomic check before calling
8759                  * dc_validate_global_state(), otherwise there is a chance
8760                  * of getting stuck in an infinite loop and eventually hanging.
8761                  */
8762                 ret = drm_dp_mst_atomic_check(state);
8763                 if (ret)
8764                         goto fail;
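
                /* Run full DC validation here (fast_validate == false) so
                 * clock and bandwidth limits are checked, not just the
                 * plane/stream topology.
                 */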
8765                 status = dc_validate_global_state(dc, dm_state->context, false);
8766                 if (status != DC_OK) {
8767                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
8768                                        dc_status_to_str(status), status);
8769                         ret = -EINVAL;
8770                         goto fail;
8771                 }
8772         } else {
8773                 /*
8774                  * The commit is a fast update. Fast updates shouldn't change
8775                  * the DC context, affect global validation, and can have their
8776                  * commit work done in parallel with other commits not touching
8777                  * the same resource. If we have a new DC context as part of
8778                  * the DM atomic state from validation we need to free it and
8779                  * retain the existing one instead.
8780                  *
8781                  * Furthermore, since the DM atomic state only contains the DC
8782                  * context and can safely be annulled, we can free the state
8783                  * and clear the associated private object now to free
8784                  * some memory and avoid a possible use-after-free later.
8785                  */
8786
8787                 for (i = 0; i < state->num_private_objs; i++) {
8788                         struct drm_private_obj *obj = state->private_objs[i].ptr;
8789
8790                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
8791                                 int j = state->num_private_objs - 1;
8792
8793                                 dm_atomic_destroy_state(obj,
8794                                                 state->private_objs[i].state);
8795
8796                                 /* If i is not at the end of the array then the
8797                                  * last element needs to be moved to where i was
8798                                  * before the array can safely be truncated.
8799                                  */
8800                                 if (i != j)
8801                                         state->private_objs[i] =
8802                                                 state->private_objs[j];
8803
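                                /* Clear the vacated tail slot as well, so the
                                 * stale pointer and state left behind are not
                                 * destroyed a second time when the atomic
                                 * state is cleared.
                                 */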
8804                                 state->private_objs[j].ptr = NULL;
8805                                 state->private_objs[j].state = NULL;
8806                                 state->private_objs[j].old_state = NULL;
8807                                 state->private_objs[j].new_state = NULL;
8808
8809                                 state->num_private_objs = j;
8810                                 break;
8811                         }
8812                 }
8813         }
8814
8815         /* Store the overall update type for use later in atomic check. */
8816         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8817                 struct dm_crtc_state *dm_new_crtc_state =
8818                         to_dm_crtc_state(new_crtc_state);
8819
8820                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
8821                                                          UPDATE_TYPE_FULL :
8822                                                          UPDATE_TYPE_FAST;
8823         }
8824
8825         /* Must have succeeded at this point */
8826         WARN_ON(ret);
8827         return ret;
8828
8829 fail:
8830         if (ret == -EDEADLK)
8831                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8832         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8833                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8834         else
8835                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8836
8837         return ret;
8838 }
8839
8840 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8841                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8842 {
8843         uint8_t dpcd_data;
8844         bool capable = false;
8845
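        /* A sink that sets DP_MSA_TIMING_PAR_IGNORED in the down-stream-port
         * DPCD field can stay in sync while the source changes timing on the
         * fly, which is what FreeSync over DP relies on.
         */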
8846         if (amdgpu_dm_connector->dc_link &&
8847                 dm_helpers_dp_read_dpcd(
8848                                 NULL,
8849                                 amdgpu_dm_connector->dc_link,
8850                                 DP_DOWN_STREAM_PORT_COUNT,
8851                                 &dpcd_data,
8852                                 sizeof(dpcd_data))) {
8853                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
8854         }
8855
8856         return capable;
8857 }
8858 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8859                                         struct edid *edid)
8860 {
8861         int i;
8862         bool edid_check_required;
8863         struct detailed_timing *timing;
8864         struct detailed_non_pixel *data;
8865         struct detailed_data_monitor_range *range;
8866         struct amdgpu_dm_connector *amdgpu_dm_connector =
8867                         to_amdgpu_dm_connector(connector);
8868         struct dm_connector_state *dm_con_state = NULL;
8869
8870         struct drm_device *dev = connector->dev;
8871         struct amdgpu_device *adev = drm_to_adev(dev);
8872         bool freesync_capable = false;
8873
8874         if (!connector->state) {
8875                 DRM_ERROR("%s - Connector has no state\n", __func__);
8876                 goto update;
8877         }
8878
8879         if (!edid) {
8880                 dm_con_state = to_dm_connector_state(connector->state);
8881
8882                 amdgpu_dm_connector->min_vfreq = 0;
8883                 amdgpu_dm_connector->max_vfreq = 0;
8884                 amdgpu_dm_connector->pixel_clock_mhz = 0;
8885
8886                 goto update;
8887         }
8888
8889         dm_con_state = to_dm_connector_state(connector->state);
8890
8891         edid_check_required = false;
8892         if (!amdgpu_dm_connector->dc_sink) {
8893                 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
8894                 goto update;
8895         }
8896         if (!adev->dm.freesync_module)
8897                 goto update;
8898         /*
8899          * If the EDID is non-NULL, restrict FreeSync to DP and eDP only.
8900          */
8901         if (edid) {
8902                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8903                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8904                         edid_check_required = is_dp_capable_without_timing_msa(
8905                                                 adev->dm.dc,
8906                                                 amdgpu_dm_connector);
8907                 }
8908         }
8909         if (edid_check_required && (edid->version > 1 ||
8910            (edid->version == 1 && edid->revision > 1))) {
8911                 for (i = 0; i < 4; i++) {
8913                         timing  = &edid->detailed_timings[i];
8914                         data    = &timing->data.other_data;
8915                         range   = &data->data.range;
8916                         /*
8917                          * Check if monitor has continuous frequency mode
8918                          */
8919                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
8920                                 continue;
8921                         /*
8922                          * Check for flag range limits only. If flag == 1 then
8923                          * no additional timing information is provided.
8924                          * Default GTF, GTF secondary curve and CVT are not
8925                          * supported.
8926                          */
8927                         if (range->flags != 1)
8928                                 continue;
8929
8930                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8931                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8932                         amdgpu_dm_connector->pixel_clock_mhz =
8933                                 range->pixel_clock_mhz * 10;
8934                         break;
8935                 }
8936
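                /* Only advertise FreeSync when the supported refresh range is
                 * wide enough to be useful (greater than 10 Hz).
                 */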
8937                 if (amdgpu_dm_connector->max_vfreq -
8938                     amdgpu_dm_connector->min_vfreq > 10) {
8939
8940                         freesync_capable = true;
8941                 }
8942         }
8943
8944 update:
8945         if (dm_con_state)
8946                 dm_con_state->freesync_capable = freesync_capable;
8947
8948         if (connector->vrr_capable_property)
8949                 drm_connector_set_vrr_capable_property(connector,
8950                                                        freesync_capable);
8951 }
8952
8953 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8954 {
8955         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8956
8957         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8958                 return;
8959         if (link->type == dc_connection_none)
8960                 return;
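
        /* DP_PSR_SUPPORT reports the PSR version implemented by the panel;
         * zero means the panel does not support PSR at all.
         */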
8961         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8962                                         dpcd_data, sizeof(dpcd_data))) {
8963                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8964
8965                 if (dpcd_data[0] == 0) {
8966                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8967                         link->psr_settings.psr_feature_enabled = false;
8968                 } else {
8969                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
8970                         link->psr_settings.psr_feature_enabled = true;
8971                 }
8972
8973                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8974         }
8975 }
8976
8977 /*
8978  * amdgpu_dm_link_setup_psr() - configure psr link
8979  * @stream: stream state
8980  *
8981  * Return: true on success
8982  */
8983 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8984 {
8985         struct dc_link *link = NULL;
8986         struct psr_config psr_config = {0};
8987         struct psr_context psr_context = {0};
8988         bool ret = false;
8989
8990         if (stream == NULL)
8991                 return false;
8992
8993         link = stream->link;
8994
8995         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8996
8997         if (psr_config.psr_version > 0) {
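                /* These are fixed driver-side defaults (link training on PSR
                 * exit, RFB setup time, SDP transmit deadline) rather than
                 * values derived from the panel.
                 */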
8998                 psr_config.psr_exit_link_training_required = 0x1;
8999                 psr_config.psr_frame_capture_indication_req = 0;
9000                 psr_config.psr_rfb_setup_time = 0x37;
9001                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9002                 psr_config.allow_smu_optimizations = 0x0;
9003
9004                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9005
9006         }
9007         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9008
9009         return ret;
9010 }
9011
9012 /*
9013  * amdgpu_dm_psr_enable() - enable psr f/w
9014  * @stream: stream state
9015  *
9016  * Return: true on success
9017  */
9018 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9019 {
9020         struct dc_link *link = stream->link;
9021         unsigned int vsync_rate_hz = 0;
9022         struct dc_static_screen_params params = {0};
9023         /* Calculate number of static frames before generating interrupt to
9024          * enter PSR.
9025          */
9026         /* Initialize with a fail-safe default of 2 static frames */
9027         unsigned int num_frames_static = 2;
9028
9029         DRM_DEBUG_DRIVER("Enabling psr...\n");
9030
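        /* Nominal refresh rate in Hz: the pixel clock is stored in 100 Hz
         * units (hence the extra factor of 100), divided by the total frame
         * size, v_total * h_total.
         */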
9031         vsync_rate_hz = div64_u64(div64_u64((
9032                         stream->timing.pix_clk_100hz * 100),
9033                         stream->timing.v_total),
9034                         stream->timing.h_total);
9035
9036         /* Round up: calculate the number of frames such that at least
9037          * 30 ms of time has passed.
9039          */
9040         if (vsync_rate_hz != 0) {
9041                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9042                 num_frames_static = (30000 / frame_time_microsec) + 1;
9043         }
9044
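        /* Cursor, overlay and surface updates all count as activity and
         * restart the static-frame countdown before PSR entry.
         */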
9045         params.triggers.cursor_update = true;
9046         params.triggers.overlay_update = true;
9047         params.triggers.surface_update = true;
9048         params.num_frames = num_frames_static;
9049
9050         dc_stream_set_static_screen_params(link->ctx->dc,
9051                                            &stream, 1,
9052                                            &params);
9053
9054         return dc_link_set_psr_allow_active(link, true, false);
9055 }
9056
9057 /*
9058  * amdgpu_dm_psr_disable() - disable psr f/w
9059  * @stream:  stream state
9060  *
9061  * Return: true on success
9062  */
9063 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9064 {
9066         DRM_DEBUG_DRIVER("Disabling psr...\n");
9067
9068         return dc_link_set_psr_allow_active(stream->link, false, true);
9069 }
9070
9071 /*
9072  * amdgpu_dm_psr_disable_all() - disable psr f/w
9073  * if psr is enabled on any stream
9074  *
9075  * Return: true on success
9076  */
9077 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9078 {
9079         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9080         return dc_set_psr_allow_active(dm->dc, false);
9081 }
9082
9083 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9084 {
9085         struct amdgpu_device *adev = drm_to_adev(dev);
9086         struct dc *dc = adev->dm.dc;
9087         int i;
9088
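        /* dc->current_state is shared with the commit path; hold the DC lock
         * while flipping per-stream timing sync and retriggering it.
         */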
9089         mutex_lock(&adev->dm.dc_lock);
9090         if (dc->current_state) {
9091                 for (i = 0; i < dc->current_state->stream_count; ++i)
9092                         dc->current_state->streams[i]
9093                                 ->triggered_crtc_reset.enabled =
9094                                 adev->dm.force_timing_sync;
9095
9096                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9097                 dc_trigger_sync(dc, dc->current_state);
9098         }
9099         mutex_unlock(&adev->dm.dc_lock);
9100 }