drm/amd/display: Fix the display corruption issue on Navi10
linux-2.6-microblaze.git: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

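/*
 * Map the dongle type reported in a DP link's DPCD caps to the DRM
 * subconnector type exposed via the "subconnector" property.
 */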
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

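/*
 * Refresh the DP subconnector property from the currently attached sink;
 * only meaningful for DisplayPort connectors.
 */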
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: counter for vertical blanks, or 0 if the CRTC index is out of
 * range or no stream is attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

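/*
 * Read back the current scanout position for a CRTC, packed into the
 * legacy register layout the base driver still expects: vertical position
 * in the low 16 bits of *position and horizontal in the high 16 bits,
 * with vblank start/end packed into *vbl the same way.
 */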
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

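/*
 * Look up the amdgpu_crtc whose OTG (output timing generator) instance
 * matches @otg_inst; the IRQ handlers below derive that instance from
 * the interrupt source.
 */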
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

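/*
 * VRR counts as active in both the variable and the fixed (below the
 * range) refresh states. The _irq variant reads the interrupt-safe copy
 * of the freesync config kept in dm_irq_params.
 */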
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters, used to look up the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED,
                                 amdgpu_crtc->crtc_id,
                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at start of this
         * vblank of pageflip completion, so last_flip_vblank is the forbidden
         * count for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

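/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * In VRR mode, core vblank handling is deferred to this handler, which
 * runs after the end of the front-porch; it also drives BTR processing
 * on pre-DCE12 ASICs.
 */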
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping only gives valid results
                 * now that scanout is past the front-porch. This also delivers
                 * page-flip completion events that were queued to us if a
                 * pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * The following must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}


static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_comressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

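/*
 * Audio component callback: copy out the ELD of the connector whose
 * audio instance matches @port, for the HDA audio driver.
 */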
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                          int pipe, bool *enabled,
                                          unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                       struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

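/*
 * Register with the audio component framework so the HDA driver can
 * query ELDs and receive ELD notifications from DM.
 */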
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

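/*
 * Bring up the DMUB (display microcontroller) hardware: verify ASIC and
 * firmware support, copy the firmware and VBIOS into the reserved
 * framebuffer windows, then hand the window layout to the DMUB service
 * and wait for the firmware to finish loading.
 */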
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

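/*
 * If debugfs forced DSC settings on a connector, mark the attached CRTC
 * state as mode-changed so the forced values get re-validated on the
 * next commit.
 */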
static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
                                                           struct drm_atomic_state *state)
{
        struct drm_connector *connector;
        struct drm_crtc *crtc;
        struct amdgpu_dm_connector *amdgpu_dm_connector;
        struct drm_connector_state *conn_state;
        struct dm_crtc_state *acrtc_state;
        struct drm_crtc_state *crtc_state;
        struct dc_stream_state *stream;
        struct drm_device *dev = adev_to_drm(adev);

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

                amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
                conn_state = connector->state;

                if (!(conn_state && conn_state->crtc))
                        continue;

                crtc = conn_state->crtc;
                acrtc_state = to_dm_crtc_state(crtc->state);

                if (!(acrtc_state && acrtc_state->stream))
                        continue;

                stream = acrtc_state->stream;

                if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
                    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
                    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
                    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
                        conn_state = drm_atomic_get_connector_state(state, connector);
                        crtc_state = drm_atomic_get_crtc_state(state, crtc);
                        crtc_state->mode_changed = true;
                }
        }
}

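/*
 * Translate the GMC framebuffer/AGP apertures and the GART page table
 * location into the physical address space config that DC programs into
 * the memory hub (MMHUB).
 */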
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increases the system aperture high address
                 * (adds 1) to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}

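/*
 * Core DM bring-up: create the DC instance from ASIC and vBIOS
 * parameters, apply feature/debug mask overrides, initialize DMUB,
 * FreeSync, color management and HDCP, then build the DRM CRTCs,
 * encoders and connectors on top.
 */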
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        struct dc_phy_addr_space_config pa_config;
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
#if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
#endif
                break;
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        init_data.soc_bounding_box = adev->dm.soc_bounding_box;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter =
                        !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->asic_type == CHIP_RENOIR) {
                mmhub_read_system_context(adev, &pa_config);

                // Call the DC init_memory func
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize vblank support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        /* Guard against the error path: dc may never have been created. */
        if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

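/*
 * Select and register the DMCU firmware for the ASICs that load it from
 * the kernel (Picasso, Raven2 and Navi12); everything else returns
 * without requesting any firmware.
 */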
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
        case CHIP_DIMGREY_CAVEFISH:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
        case CHIP_VANGOGH:
#endif
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

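/*
 * Register read/write shims that route DMUB service register access
 * through DC's dm_read_reg()/dm_write_reg() helpers.
 */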
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        return dm_write_reg(adev->dm.dc->ctx, address, value);
}

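/*
 * Software-side DMUB setup: pick the per-ASIC firmware, create the DMUB
 * service, size its memory regions from the firmware header, and back
 * them with a VRAM allocation.
 */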
1339 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1340 {
1341         struct dmub_srv_create_params create_params;
1342         struct dmub_srv_region_params region_params;
1343         struct dmub_srv_region_info region_info;
1344         struct dmub_srv_fb_params fb_params;
1345         struct dmub_srv_fb_info *fb_info;
1346         struct dmub_srv *dmub_srv;
1347         const struct dmcub_firmware_header_v1_0 *hdr;
1348         const char *fw_name_dmub;
1349         enum dmub_asic dmub_asic;
1350         enum dmub_status status;
1351         int r;
1352
1353         switch (adev->asic_type) {
1354         case CHIP_RENOIR:
1355                 dmub_asic = DMUB_ASIC_DCN21;
1356                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1357 #if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
1358                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1359                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1360 #endif
1361                 break;
1362 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1363         case CHIP_SIENNA_CICHLID:
1364                 dmub_asic = DMUB_ASIC_DCN30;
1365                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1366                 break;
1367         case CHIP_NAVY_FLOUNDER:
1368                 dmub_asic = DMUB_ASIC_DCN30;
1369                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1370                 break;
1371 #endif
1372 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
1373         case CHIP_VANGOGH:
1374                 dmub_asic = DMUB_ASIC_DCN301;
1375                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1376                 break;
1377 #endif
1378 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
1379         case CHIP_DIMGREY_CAVEFISH:
1380                 dmub_asic = DMUB_ASIC_DCN302;
1381                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1382                 break;
1383 #endif
1384
1385         default:
1386                 /* ASIC doesn't support DMUB. */
1387                 return 0;
1388         }
1389
1390         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1391         if (r) {
1392                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1393                 return 0;
1394         }
1395
1396         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1397         if (r) {
1398                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1399                 return 0;
1400         }
1401
1402         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1403
1404         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1405                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1406                         AMDGPU_UCODE_ID_DMCUB;
1407                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1408                         adev->dm.dmub_fw;
1409                 adev->firmware.fw_size +=
1410                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1411
1412                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1413                          adev->dm.dmcub_fw_version);
1414         }
1415
1418         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1419         dmub_srv = adev->dm.dmub_srv;
1420
1421         if (!dmub_srv) {
1422                 DRM_ERROR("Failed to allocate DMUB service!\n");
1423                 return -ENOMEM;
1424         }
1425
1426         memset(&create_params, 0, sizeof(create_params));
1427         create_params.user_ctx = adev;
1428         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1429         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1430         create_params.asic = dmub_asic;
1431
1432         /* Create the DMUB service. */
1433         status = dmub_srv_create(dmub_srv, &create_params);
1434         if (status != DMUB_STATUS_OK) {
1435                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1436                 return -EINVAL;
1437         }
1438
1439         /* Calculate the size of all the regions for the DMUB service. */
1440         memset(&region_params, 0, sizeof(region_params));
1441
1442         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1443                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1444         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1445         region_params.vbios_size = adev->bios_size;
1446         region_params.fw_bss_data = region_params.bss_data_size ?
1447                 adev->dm.dmub_fw->data +
1448                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1449                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1450         region_params.fw_inst_const =
1451                 adev->dm.dmub_fw->data +
1452                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1453                 PSP_HEADER_BYTES;
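        /*
         * The offsets above assume the DMCUB blob layout:
         *   [fw headers | PSP header | inst const | PSP footer | bss/data],
         * where hdr->inst_const_bytes includes the PSP header and footer.
         */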
1454
1455         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1456                                            &region_info);
1457
1458         if (status != DMUB_STATUS_OK) {
1459                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1460                 return -EINVAL;
1461         }
1462
1463         /*
1464          * Allocate a framebuffer based on the total size of all the regions.
1465          * TODO: Move this into GART.
1466          */
1467         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1468                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1469                                     &adev->dm.dmub_bo_gpu_addr,
1470                                     &adev->dm.dmub_bo_cpu_addr);
1471         if (r)
1472                 return r;
1473
1474         /* Rebase the regions on the framebuffer address. */
1475         memset(&fb_params, 0, sizeof(fb_params));
1476         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1477         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1478         fb_params.region_info = &region_info;
1479
1480         adev->dm.dmub_fb_info =
1481                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1482         fb_info = adev->dm.dmub_fb_info;
1483
1484         if (!fb_info) {
1485                 DRM_ERROR(
1486                         "Failed to allocate framebuffer info for DMUB service!\n");
1487                 return -ENOMEM;
1488         }
1489
1490         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1491         if (status != DMUB_STATUS_OK) {
1492                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1493                 return -EINVAL;
1494         }
1495
1496         return 0;
1497 }
1498
1499 static int dm_sw_init(void *handle)
1500 {
1501         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1502         int r;
1503
1504         r = dm_dmub_sw_init(adev);
1505         if (r)
1506                 return r;
1507
1508         return load_dmcu_fw(adev);
1509 }
1510
1511 static int dm_sw_fini(void *handle)
1512 {
1513         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1514
1515         kfree(adev->dm.dmub_fb_info);
1516         adev->dm.dmub_fb_info = NULL;
1517
1518         if (adev->dm.dmub_srv) {
1519                 dmub_srv_destroy(adev->dm.dmub_srv);
1520                 adev->dm.dmub_srv = NULL;
1521         }
1522
1523         release_firmware(adev->dm.dmub_fw);
1524         adev->dm.dmub_fw = NULL;
1525
1526         release_firmware(adev->dm.fw_dmcu);
1527         adev->dm.fw_dmcu = NULL;
1528
1529         return 0;
1530 }
1531
1532 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1533 {
1534         struct amdgpu_dm_connector *aconnector;
1535         struct drm_connector *connector;
1536         struct drm_connector_list_iter iter;
1537         int ret = 0;
1538
1539         drm_connector_list_iter_begin(dev, &iter);
1540         drm_for_each_connector_iter(connector, &iter) {
1541                 aconnector = to_amdgpu_dm_connector(connector);
1542                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1543                     aconnector->mst_mgr.aux) {
1544                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1545                                          aconnector,
1546                                          aconnector->base.base.id);
1547
1548                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1549                         if (ret < 0) {
1550                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1551                                 aconnector->dc_link->type =
1552                                         dc_connection_single;
1553                                 break;
1554                         }
1555                 }
1556         }
1557         drm_connector_list_iter_end(&iter);
1558
1559         return ret;
1560 }
1561
1562 static int dm_late_init(void *handle)
1563 {
1564         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1565
1566         struct dmcu_iram_parameters params;
1567         unsigned int linear_lut[16];
1568         int i;
1569         struct dmcu *dmcu = NULL;
1570         bool ret = true;
1571
1572         dmcu = adev->dm.dc->res_pool->dmcu;
1573
1574         for (i = 0; i < 16; i++)
1575                 linear_lut[i] = 0xFFFF * i / 15;
1576
1577         params.set = 0;
1578         params.backlight_ramping_start = 0xCCCC;
1579         params.backlight_ramping_reduction = 0xCCCCCCCC;
1580         params.backlight_lut_array_size = 16;
1581         params.backlight_lut_array = linear_lut;
1582
1583         /* Min backlight level after ABM reduction; don't allow below 1%:
1584          * 0xFFFF * 0.01 = 0x28F
1585          */
1586         params.min_abm_backlight = 0x28F;
1587
1588         /* In the case where ABM is implemented in DMCUB firmware,
1589          * the dmcu object will be NULL.
1590          * ABM 2.4 and up are implemented in DMCUB.
1591          */
1592         if (dmcu)
1593                 ret = dmcu_load_iram(dmcu, params);
1594         else if (adev->dm.dc->ctx->dmub_srv)
1595                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1596
1597         if (!ret)
1598                 return -EINVAL;
1599
1600         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1601 }
1602
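/*
 * s3_handle_mst() - Suspend or resume MST topology managers.
 *
 * On suspend, quiesce each root MST manager. On resume, restart them; any
 * manager that fails to resume has its topology torn down and a hotplug
 * event is sent so userspace can re-probe.
 */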
1603 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1604 {
1605         struct amdgpu_dm_connector *aconnector;
1606         struct drm_connector *connector;
1607         struct drm_connector_list_iter iter;
1608         struct drm_dp_mst_topology_mgr *mgr;
1609         int ret;
1610         bool need_hotplug = false;
1611
1612         drm_connector_list_iter_begin(dev, &iter);
1613         drm_for_each_connector_iter(connector, &iter) {
1614                 aconnector = to_amdgpu_dm_connector(connector);
1615                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1616                     aconnector->mst_port)
1617                         continue;
1618
1619                 mgr = &aconnector->mst_mgr;
1620
1621                 if (suspend) {
1622                         drm_dp_mst_topology_mgr_suspend(mgr);
1623                 } else {
1624                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1625                         if (ret < 0) {
1626                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1627                                 need_hotplug = true;
1628                         }
1629                 }
1630         }
1631         drm_connector_list_iter_end(&iter);
1632
1633         if (need_hotplug)
1634                 drm_kms_helper_hotplug_event(dev);
1635 }
1636
1637 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1638 {
1639         struct smu_context *smu = &adev->smu;
1640         int ret = 0;
1641
1642         if (!is_support_sw_smu(adev))
1643                 return 0;
1644
1645         /* This interface is only for dGPU Navi1x. The Linux dc-pplib
1646          * interface depends on the Windows driver's DC implementation.
1647          * For Navi1x, the clock settings of the DCN watermarks are fixed and
1648          * must be passed to SMU during boot-up and on resume from S3.
1649          * Boot-up: DC calculates the DCN watermark clock settings within
1650          * dc_create / dcn20_resource_construct,
1651          * then calls the pplib functions below to pass the settings to SMU:
1652          * smu_set_watermarks_for_clock_ranges
1653          * smu_set_watermarks_table
1654          * navi10_set_watermarks_table
1655          * smu_write_watermarks_table
1656          *
1657          * For Renoir, the clock settings of the DCN watermarks are also
1658          * fixed values. DC implements a different flow for the Windows driver:
1659          * dc_hardware_init / dc_set_power_state
1660          * dcn10_init_hw
1661          * notify_wm_ranges
1662          * set_wm_ranges
1663          * -- Linux
1664          * smu_set_watermarks_for_clock_ranges
1665          * renoir_set_watermarks_table
1666          * smu_write_watermarks_table
1667          *
1668          * For Linux,
1669          * dc_hardware_init -> amdgpu_dm_init
1670          * dc_set_power_state --> dm_resume
1671          *
1672          * Therefore, this function applies to Navi10/12/14 but not to
1673          * Renoir.
1674          */
1675         switch (adev->asic_type) {
1676         case CHIP_NAVI10:
1677         case CHIP_NAVI14:
1678         case CHIP_NAVI12:
1679                 break;
1680         default:
1681                 return 0;
1682         }
1683
1684         ret = smu_write_watermarks_table(smu);
1685         if (ret) {
1686                 DRM_ERROR("Failed to update WMTABLE!\n");
1687                 return ret;
1688         }
1689
1690         return 0;
1691 }
1692
1693 /**
1694  * dm_hw_init() - Initialize DC device
1695  * @handle: The base driver device containing the amdgpu_dm device.
1696  *
1697  * Initialize the &struct amdgpu_display_manager device. This involves calling
1698  * the initializers of each DM component, then populating the struct with them.
1699  *
1700  * Although the function implies hardware initialization, both hardware and
1701  * software are initialized here. Splitting them out to their relevant init
1702  * hooks is a future TODO item.
1703  *
1704  * Some notable things that are initialized here:
1705  *
1706  * - Display Core, both software and hardware
1707  * - DC modules that we need (freesync and color management)
1708  * - DRM software states
1709  * - Interrupt sources and handlers
1710  * - Vblank support
1711  * - Debug FS entries, if enabled
1712  */
1713 static int dm_hw_init(void *handle)
1714 {
1715         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1716         /* Create DAL display manager */
1717         amdgpu_dm_init(adev);
1718         amdgpu_dm_hpd_init(adev);
1719
1720         return 0;
1721 }
1722
1723 /**
1724  * dm_hw_fini() - Teardown DC device
1725  * @handle: The base driver device containing the amdgpu_dm device.
1726  *
1727  * Teardown components within &struct amdgpu_display_manager that require
1728  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1729  * were loaded. Also flush IRQ workqueues and disable them.
1730  */
1731 static int dm_hw_fini(void *handle)
1732 {
1733         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1734
1735         amdgpu_dm_hpd_fini(adev);
1736
1737         amdgpu_dm_irq_fini(adev);
1738         amdgpu_dm_fini(adev);
1739         return 0;
1740 }
1741
1743 static int dm_enable_vblank(struct drm_crtc *crtc);
1744 static void dm_disable_vblank(struct drm_crtc *crtc);
1745
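/*
 * Enable or disable the pageflip and vblank interrupts for every active
 * stream in @state. Used to quiesce display interrupts across a GPU reset
 * and to re-arm them afterwards.
 */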
1746 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1747                                  struct dc_state *state, bool enable)
1748 {
1749         enum dc_irq_source irq_source;
1750         struct amdgpu_crtc *acrtc;
1751         int rc = -EBUSY;
1752         int i = 0;
1753
1754         for (i = 0; i < state->stream_count; i++) {
1755                 acrtc = get_crtc_by_otg_inst(
1756                                 adev, state->stream_status[i].primary_otg_inst);
1757
1758                 if (acrtc && state->stream_status[i].plane_count != 0) {
1759                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1760                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1761                         DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1762                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1763                         if (rc)
1764                                 DRM_WARN("Failed to %s pflip interrupts\n",
1765                                          enable ? "enable" : "disable");
1766
1767                         if (enable) {
1768                                 rc = dm_enable_vblank(&acrtc->base);
1769                                 if (rc)
1770                                         DRM_WARN("Failed to enable vblank interrupts\n");
1771                         } else {
1772                                 dm_disable_vblank(&acrtc->base);
1773                         }
1774
1775                 }
1776         }
1777
1778 }
1779
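/*
 * Commit a copy of the current DC state with all planes and streams
 * removed, leaving the display hardware idle. Used on the GPU reset path
 * before the ASIC goes down.
 */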
1780 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1781 {
1782         struct dc_state *context = NULL;
1783         enum dc_status res = DC_ERROR_UNEXPECTED;
1784         int i;
1785         struct dc_stream_state *del_streams[MAX_PIPES];
1786         int del_streams_count = 0;
1787
1788         memset(del_streams, 0, sizeof(del_streams));
1789
1790         context = dc_create_state(dc);
1791         if (context == NULL)
1792                 goto context_alloc_fail;
1793
1794         dc_resource_state_copy_construct_current(dc, context);
1795
1796         /* First remove from context all streams */
1797         for (i = 0; i < context->stream_count; i++) {
1798                 struct dc_stream_state *stream = context->streams[i];
1799
1800                 del_streams[del_streams_count++] = stream;
1801         }
1802
1803         /* Remove all planes for removed streams and then remove the streams */
1804         for (i = 0; i < del_streams_count; i++) {
1805                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1806                         res = DC_FAIL_DETACH_SURFACES;
1807                         goto fail;
1808                 }
1809
1810                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1811                 if (res != DC_OK)
1812                         goto fail;
1813         }
1814
1816         res = dc_validate_global_state(dc, context, false);
1817
1818         if (res != DC_OK) {
1819                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1820                 goto fail;
1821         }
1822
1823         res = dc_commit_state(dc, context);
1824
1825 fail:
1826         dc_release_state(context);
1827
1828 context_alloc_fail:
1829         return res;
1830 }
1831
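/**
 * dm_suspend() - Quiesce the display for S3 or GPU reset
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * On the GPU reset path, cache the current DC state, disable display
 * interrupts and commit a zero-stream state while holding dc_lock; the
 * lock is held until dm_resume() releases it. On the normal S3 path,
 * save the atomic state, suspend MST and IRQs, and put DC into D3.
 */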
1832 static int dm_suspend(void *handle)
1833 {
1834         struct amdgpu_device *adev = handle;
1835         struct amdgpu_display_manager *dm = &adev->dm;
1836         int ret = 0;
1837
1838         if (amdgpu_in_reset(adev)) {
1839                 mutex_lock(&dm->dc_lock);
1840                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1841
1842                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1843
1844                 amdgpu_dm_commit_zero_streams(dm->dc);
1845
1846                 amdgpu_dm_irq_suspend(adev);
1847
1848                 return ret;
1849         }
1850
1851         WARN_ON(adev->dm.cached_state);
1852         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1853
1854         s3_handle_mst(adev_to_drm(adev), true);
1855
1856         amdgpu_dm_irq_suspend(adev);
1857
1859         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1860
1861         return 0;
1862 }
1863
1864 static struct amdgpu_dm_connector *
1865 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1866                                              struct drm_crtc *crtc)
1867 {
1868         uint32_t i;
1869         struct drm_connector_state *new_con_state;
1870         struct drm_connector *connector;
1871         struct drm_crtc *crtc_from_state;
1872
1873         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1874                 crtc_from_state = new_con_state->crtc;
1875
1876                 if (crtc_from_state == crtc)
1877                         return to_amdgpu_dm_connector(connector);
1878         }
1879
1880         return NULL;
1881 }
1882
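/*
 * Fabricate a sink for a forced connector when no physical sink is
 * detected: derive the DDC transaction type and signal from the connector
 * type, create a dc_sink and try to read a locally stored EDID.
 */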
1883 static void emulated_link_detect(struct dc_link *link)
1884 {
1885         struct dc_sink_init_data sink_init_data = { 0 };
1886         struct display_sink_capability sink_caps = { 0 };
1887         enum dc_edid_status edid_status;
1888         struct dc_context *dc_ctx = link->ctx;
1889         struct dc_sink *sink = NULL;
1890         struct dc_sink *prev_sink = NULL;
1891
1892         link->type = dc_connection_none;
1893         prev_sink = link->local_sink;
1894
1895         if (prev_sink != NULL)
1896                 dc_sink_retain(prev_sink);
1897
1898         switch (link->connector_signal) {
1899         case SIGNAL_TYPE_HDMI_TYPE_A: {
1900                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1901                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1902                 break;
1903         }
1904
1905         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1906                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1907                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1908                 break;
1909         }
1910
1911         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1912                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1913                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1914                 break;
1915         }
1916
1917         case SIGNAL_TYPE_LVDS: {
1918                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1919                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1920                 break;
1921         }
1922
1923         case SIGNAL_TYPE_EDP: {
1924                 sink_caps.transaction_type =
1925                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1926                 sink_caps.signal = SIGNAL_TYPE_EDP;
1927                 break;
1928         }
1929
1930         case SIGNAL_TYPE_DISPLAY_PORT: {
1931                 sink_caps.transaction_type =
1932                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1933                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1934                 break;
1935         }
1936
1937         default:
1938                 DC_ERROR("Invalid connector type! signal:%d\n",
1939                         link->connector_signal);
1940                 return;
1941         }
1942
1943         sink_init_data.link = link;
1944         sink_init_data.sink_signal = sink_caps.signal;
1945
1946         sink = dc_sink_create(&sink_init_data);
1947         if (!sink) {
1948                 DC_ERROR("Failed to create sink!\n");
1949                 return;
1950         }
1951
1952         /* dc_sink_create returns a new reference */
1953         link->local_sink = sink;
1954
1955         edid_status = dm_helpers_read_local_edid(
1956                         link->ctx,
1957                         link,
1958                         sink);
1959
1960         if (edid_status != EDID_OK)
1961                 DC_ERROR("Failed to read EDID\n");
1962
1963 }
1964
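/*
 * Replay a cached DC state after GPU reset by issuing a full surface
 * update for every plane of every stream, forcing DC to reprogram the
 * hardware from scratch.
 */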
1965 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1966                                      struct amdgpu_display_manager *dm)
1967 {
1968         struct {
1969                 struct dc_surface_update surface_updates[MAX_SURFACES];
1970                 struct dc_plane_info plane_infos[MAX_SURFACES];
1971                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1972                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1973                 struct dc_stream_update stream_update;
1974         } *bundle;
1975         int k, m;
1976
1977         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1978
1979         if (!bundle) {
1980                 dm_error("Failed to allocate update bundle\n");
1981                 goto cleanup;
1982         }
1983
1984         for (k = 0; k < dc_state->stream_count; k++) {
1985                 bundle->stream_update.stream = dc_state->streams[k];
1986
1987                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1988                         bundle->surface_updates[m].surface =
1989                                 dc_state->stream_status[k].plane_states[m];
1990                         bundle->surface_updates[m].surface->force_full_update =
1991                                 true;
1992                 }
1993                 dc_commit_updates_for_stream(
1994                         dm->dc, bundle->surface_updates,
1995                         dc_state->stream_status[k].plane_count,
1996                         dc_state->streams[k], &bundle->stream_update, dc_state);
1997         }
1998
1999 cleanup:
2000         kfree(bundle);
2003 }
2004
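/**
 * dm_resume() - Bring the display back up after S3 or GPU reset
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * On the GPU reset path, re-init DMUB, replay the DC state cached by
 * dm_suspend() and release dc_lock. On the normal S3 path, rebuild the
 * dc_state, re-run sink detection on every connector and restore the
 * cached atomic state.
 */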
2005 static int dm_resume(void *handle)
2006 {
2007         struct amdgpu_device *adev = handle;
2008         struct drm_device *ddev = adev_to_drm(adev);
2009         struct amdgpu_display_manager *dm = &adev->dm;
2010         struct amdgpu_dm_connector *aconnector;
2011         struct drm_connector *connector;
2012         struct drm_connector_list_iter iter;
2013         struct drm_crtc *crtc;
2014         struct drm_crtc_state *new_crtc_state;
2015         struct dm_crtc_state *dm_new_crtc_state;
2016         struct drm_plane *plane;
2017         struct drm_plane_state *new_plane_state;
2018         struct dm_plane_state *dm_new_plane_state;
2019         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2020         enum dc_connection_type new_connection_type = dc_connection_none;
2021         struct dc_state *dc_state;
2022         int i, r, j;
2023
2024         if (amdgpu_in_reset(adev)) {
2025                 dc_state = dm->cached_dc_state;
2026
2027                 r = dm_dmub_hw_init(adev);
2028                 if (r)
2029                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2030
2031                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2032                 dc_resume(dm->dc);
2033
2034                 amdgpu_dm_irq_resume_early(adev);
2035
2036                 for (i = 0; i < dc_state->stream_count; i++) {
2037                         dc_state->streams[i]->mode_changed = true;
2038                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2039                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2040                                         = 0xffffffff;
2041                         }
2042                 }
2043
2044                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2045
2046                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2047
2048                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2049
2050                 dc_release_state(dm->cached_dc_state);
2051                 dm->cached_dc_state = NULL;
2052
2053                 amdgpu_dm_irq_resume_late(adev);
2054
2055                 mutex_unlock(&dm->dc_lock);
2056
2057                 return 0;
2058         }
2059         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2060         dc_release_state(dm_state->context);
2061         dm_state->context = dc_create_state(dm->dc);
2062         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2063         dc_resource_state_construct(dm->dc, dm_state->context);
2064
2065         /* Before powering on DC we need to re-initialize DMUB. */
2066         r = dm_dmub_hw_init(adev);
2067         if (r)
2068                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2069
2070         /* power on hardware */
2071         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2072
2073         /* program HPD filter */
2074         dc_resume(dm->dc);
2075
2076         /*
2077          * early enable HPD Rx IRQ, should be done before set mode as short
2078          * pulse interrupts are used for MST
2079          */
2080         amdgpu_dm_irq_resume_early(adev);
2081
2082         /* On resume we need to rewrite the MSTM control bits to enable MST */
2083         s3_handle_mst(ddev, false);
2084
2085         /* Do detection */
2086         drm_connector_list_iter_begin(ddev, &iter);
2087         drm_for_each_connector_iter(connector, &iter) {
2088                 aconnector = to_amdgpu_dm_connector(connector);
2089
2090                 /*
2091                  * This is the case of traversing an already-created MST
2092                  * connector; it should be skipped.
2093                  */
2094                 if (aconnector->mst_port)
2095                         continue;
2096
2097                 mutex_lock(&aconnector->hpd_lock);
2098                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2099                         DRM_ERROR("KMS: Failed to detect connector\n");
2100
2101                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2102                         emulated_link_detect(aconnector->dc_link);
2103                 else
2104                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2105
2106                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2107                         aconnector->fake_enable = false;
2108
2109                 if (aconnector->dc_sink)
2110                         dc_sink_release(aconnector->dc_sink);
2111                 aconnector->dc_sink = NULL;
2112                 amdgpu_dm_update_connector_after_detect(aconnector);
2113                 mutex_unlock(&aconnector->hpd_lock);
2114         }
2115         drm_connector_list_iter_end(&iter);
2116
2117         /* Force mode set in atomic commit */
2118         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2119                 new_crtc_state->active_changed = true;
2120
2121         /*
2122          * atomic_check is expected to create the dc states. We need to release
2123          * them here, since they were duplicated as part of the suspend
2124          * procedure.
2125          */
2126         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2127                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2128                 if (dm_new_crtc_state->stream) {
2129                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2130                         dc_stream_release(dm_new_crtc_state->stream);
2131                         dm_new_crtc_state->stream = NULL;
2132                 }
2133         }
2134
2135         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2136                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2137                 if (dm_new_plane_state->dc_state) {
2138                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2139                         dc_plane_state_release(dm_new_plane_state->dc_state);
2140                         dm_new_plane_state->dc_state = NULL;
2141                 }
2142         }
2143
2144         drm_atomic_helper_resume(ddev, dm->cached_state);
2145
2146         dm->cached_state = NULL;
2147
2148         amdgpu_dm_irq_resume_late(adev);
2149
2150         amdgpu_dm_smu_write_watermarks_table(adev);
2151
2152         return 0;
2153 }
2154
2155 /**
2156  * DOC: DM Lifecycle
2157  *
2158  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2159  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2160  * the base driver's device list to be initialized and torn down accordingly.
2161  *
2162  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2163  */
2164
2165 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2166         .name = "dm",
2167         .early_init = dm_early_init,
2168         .late_init = dm_late_init,
2169         .sw_init = dm_sw_init,
2170         .sw_fini = dm_sw_fini,
2171         .hw_init = dm_hw_init,
2172         .hw_fini = dm_hw_fini,
2173         .suspend = dm_suspend,
2174         .resume = dm_resume,
2175         .is_idle = dm_is_idle,
2176         .wait_for_idle = dm_wait_for_idle,
2177         .check_soft_reset = dm_check_soft_reset,
2178         .soft_reset = dm_soft_reset,
2179         .set_clockgating_state = dm_set_clockgating_state,
2180         .set_powergating_state = dm_set_powergating_state,
2181 };
2182
2183 const struct amdgpu_ip_block_version dm_ip_block =
2184 {
2185         .type = AMD_IP_BLOCK_TYPE_DCE,
2186         .major = 1,
2187         .minor = 0,
2188         .rev = 0,
2189         .funcs = &amdgpu_dm_funcs,
2190 };
2191
2193 /**
2194  * DOC: atomic
2195  *
2196  * *WIP*
2197  */
2198
2199 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2200         .fb_create = amdgpu_display_user_framebuffer_create,
2201         .output_poll_changed = drm_fb_helper_output_poll_changed,
2202         .atomic_check = amdgpu_dm_atomic_check,
2203         .atomic_commit = amdgpu_dm_atomic_commit,
2204 };
2205
2206 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2207         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2208 };
2209
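/*
 * Derive eDP backlight capabilities from the sink's DPCD extended caps
 * and HDR metadata: decide whether AUX backlight control is available and
 * convert the max/min content light levels into luminance values.
 */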
2210 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2211 {
2212         u32 max_cll, min_cll, max, min, q, r;
2213         struct amdgpu_dm_backlight_caps *caps;
2214         struct amdgpu_display_manager *dm;
2215         struct drm_connector *conn_base;
2216         struct amdgpu_device *adev;
2217         struct dc_link *link = NULL;
2218         static const u8 pre_computed_values[] = {
2219                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2220                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2221
2222         if (!aconnector || !aconnector->dc_link)
2223                 return;
2224
2225         link = aconnector->dc_link;
2226         if (link->connector_signal != SIGNAL_TYPE_EDP)
2227                 return;
2228
2229         conn_base = &aconnector->base;
2230         adev = drm_to_adev(conn_base->dev);
2231         dm = &adev->dm;
2232         caps = &dm->backlight_caps;
2233         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2234         caps->aux_support = false;
2235         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2236         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2237
2238         if (caps->ext_caps->bits.oled == 1 ||
2239             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2240             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2241                 caps->aux_support = true;
2242
2243         /* From the specification (CTA-861-G), the maximum luminance is
2244          * calculated as:
2245          *      Luminance = 50 * 2**(CV/32)
2246          * where CV is a one-byte value.
2247          * Evaluating this expression directly would need floating-point
2248          * precision; to avoid that complexity, we exploit the fact that CV
2249          * is divided by a constant. By Euclid's division algorithm, CV can
2250          * be written as CV = 32*q + r. Substituting CV in the luminance
2251          * expression gives 50*(2**q)*(2**(r/32)), so only the 32 values of
2252          * 50*2**(r/32) need to be pre-computed. They were generated with
2253          * the following Ruby line:
2254          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2255          * The results can be verified against the pre_computed_values
2256          * table above.
2257          */
2258         q = max_cll >> 5;
2259         r = max_cll % 32;
2260         max = (1 << q) * pre_computed_values[r];
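        /*
         * Worked example with hypothetical values: max_cll = 100 gives
         * q = 3, r = 4, so max = (1 << 3) * pre_computed_values[4]
         * = 8 * 55 = 440, close to the exact 50*2**(100/32) ~= 436.
         */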
2261
2262         /* min luminance: maxLum * (CV/255)^2 / 100 */
2263         q = DIV_ROUND_CLOSEST(min_cll, 255);
2264         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2265
2266         caps->aux_max_input_signal = max;
2267         caps->aux_min_input_signal = min;
2268 }
2269
2270 void amdgpu_dm_update_connector_after_detect(
2271                 struct amdgpu_dm_connector *aconnector)
2272 {
2273         struct drm_connector *connector = &aconnector->base;
2274         struct drm_device *dev = connector->dev;
2275         struct dc_sink *sink;
2276
2277         /* MST handled by drm_mst framework */
2278         if (aconnector->mst_mgr.mst_state)
2279                 return;
2280
2281         sink = aconnector->dc_link->local_sink;
2282         if (sink)
2283                 dc_sink_retain(sink);
2284
2285         /*
2286          * An EDID-managed connector gets its first update only in mode_valid;
2287          * the sink is then set to a fake or physical sink depending on link
2288          * status. Skip if this was already done during boot.
2289          */
2290         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2291                         && aconnector->dc_em_sink) {
2292
2293                 /*
2294                  * For headless S3 resume, use dc_em_sink to fake a stream,
2295                  * because connector->sink is set to NULL on resume.
2296                  */
2297                 mutex_lock(&dev->mode_config.mutex);
2298
2299                 if (sink) {
2300                         if (aconnector->dc_sink) {
2301                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2302                                 /*
2303                                  * The retain and release below bump the sink's
2304                                  * refcount: the link no longer points to it after
2305                                  * disconnect, so on the next CRTC-to-connector
2306                                  * reshuffle by UMD we would otherwise hit an
2307                                  * unwanted dc_sink release.
2308                                  */
2308                                 dc_sink_release(aconnector->dc_sink);
2309                         }
2310                         aconnector->dc_sink = sink;
2311                         dc_sink_retain(aconnector->dc_sink);
2312                         amdgpu_dm_update_freesync_caps(connector,
2313                                         aconnector->edid);
2314                 } else {
2315                         amdgpu_dm_update_freesync_caps(connector, NULL);
2316                         if (!aconnector->dc_sink) {
2317                                 aconnector->dc_sink = aconnector->dc_em_sink;
2318                                 dc_sink_retain(aconnector->dc_sink);
2319                         }
2320                 }
2321
2322                 mutex_unlock(&dev->mode_config.mutex);
2323
2324                 if (sink)
2325                         dc_sink_release(sink);
2326                 return;
2327         }
2328
2329         /*
2330          * TODO: temporary guard until a proper fix is found.
2331          * If this sink is an MST sink, we should not do anything.
2332          */
2333         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2334                 dc_sink_release(sink);
2335                 return;
2336         }
2337
2338         if (aconnector->dc_sink == sink) {
2339                 /*
2340                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2341                  * Do nothing!!
2342                  */
2343                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2344                                 aconnector->connector_id);
2345                 if (sink)
2346                         dc_sink_release(sink);
2347                 return;
2348         }
2349
2350         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2351                 aconnector->connector_id, aconnector->dc_sink, sink);
2352
2353         mutex_lock(&dev->mode_config.mutex);
2354
2355         /*
2356          * 1. Update status of the drm connector
2357          * 2. Send an event and let userspace tell us what to do
2358          */
2359         if (sink) {
2360                 /*
2361                  * TODO: check if we still need the S3 mode update workaround.
2362                  * If yes, put it here.
2363                  */
2364                 if (aconnector->dc_sink)
2365                         amdgpu_dm_update_freesync_caps(connector, NULL);
2366
2367                 aconnector->dc_sink = sink;
2368                 dc_sink_retain(aconnector->dc_sink);
2369                 if (sink->dc_edid.length == 0) {
2370                         aconnector->edid = NULL;
2371                         if (aconnector->dc_link->aux_mode) {
2372                                 drm_dp_cec_unset_edid(
2373                                         &aconnector->dm_dp_aux.aux);
2374                         }
2375                 } else {
2376                         aconnector->edid =
2377                                 (struct edid *)sink->dc_edid.raw_edid;
2378
2379                         drm_connector_update_edid_property(connector,
2380                                                            aconnector->edid);
2381                         drm_add_edid_modes(connector, aconnector->edid);
2382
2383                         if (aconnector->dc_link->aux_mode)
2384                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2385                                                     aconnector->edid);
2386                 }
2387
2388                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2389                 update_connector_ext_caps(aconnector);
2390         } else {
2391                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2392                 amdgpu_dm_update_freesync_caps(connector, NULL);
2393                 drm_connector_update_edid_property(connector, NULL);
2394                 aconnector->num_modes = 0;
2395                 dc_sink_release(aconnector->dc_sink);
2396                 aconnector->dc_sink = NULL;
2397                 aconnector->edid = NULL;
2398 #ifdef CONFIG_DRM_AMD_DC_HDCP
2399                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2400                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2401                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2402 #endif
2403         }
2404
2405         mutex_unlock(&dev->mode_config.mutex);
2406
2407         update_subconnector_property(aconnector);
2408
2409         if (sink)
2410                 dc_sink_release(sink);
2411 }
2412
2413 static void handle_hpd_irq(void *param)
2414 {
2415         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2416         struct drm_connector *connector = &aconnector->base;
2417         struct drm_device *dev = connector->dev;
2418         enum dc_connection_type new_connection_type = dc_connection_none;
2419 #ifdef CONFIG_DRM_AMD_DC_HDCP
2420         struct amdgpu_device *adev = drm_to_adev(dev);
2421 #endif
2422
2423         /*
2424          * On failure, or for MST, don't update the connector status or
2425          * notify the OS; the MST code does this in its own context.
2426          */
2427         mutex_lock(&aconnector->hpd_lock);
2428
2429 #ifdef CONFIG_DRM_AMD_DC_HDCP
2430         if (adev->dm.hdcp_workqueue)
2431                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2432 #endif
2433         if (aconnector->fake_enable)
2434                 aconnector->fake_enable = false;
2435
2436         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2437                 DRM_ERROR("KMS: Failed to detect connector\n");
2438
2439         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2440                 emulated_link_detect(aconnector->dc_link);
2441
2443                 drm_modeset_lock_all(dev);
2444                 dm_restore_drm_connector_state(dev, connector);
2445                 drm_modeset_unlock_all(dev);
2446
2447                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2448                         drm_kms_helper_hotplug_event(dev);
2449
2450         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2451                 amdgpu_dm_update_connector_after_detect(aconnector);
2452
2454                 drm_modeset_lock_all(dev);
2455                 dm_restore_drm_connector_state(dev, connector);
2456                 drm_modeset_unlock_all(dev);
2457
2458                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2459                         drm_kms_helper_hotplug_event(dev);
2460         }
2461         mutex_unlock(&aconnector->hpd_lock);
2462
2463 }
2464
2465 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2466 {
2467         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2468         uint8_t dret;
2469         bool new_irq_handled = false;
2470         int dpcd_addr;
2471         int dpcd_bytes_to_read;
2472
2473         const int max_process_count = 30;
2474         int process_count = 0;
2475
2476         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2477
2478         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2479                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2480                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2481                 dpcd_addr = DP_SINK_COUNT;
2482         } else {
2483                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2484                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2485                 dpcd_addr = DP_SINK_COUNT_ESI;
2486         }
2487
2488         dret = drm_dp_dpcd_read(
2489                 &aconnector->dm_dp_aux.aux,
2490                 dpcd_addr,
2491                 esi,
2492                 dpcd_bytes_to_read);
2493
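        /*
         * Keep servicing the ESI as long as the sink reports MST sideband
         * IRQs, ACKing each one, bounded by max_process_count iterations.
         */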
2494         while (dret == dpcd_bytes_to_read &&
2495                 process_count < max_process_count) {
2496                 uint8_t retry;
2497                 dret = 0;
2498
2499                 process_count++;
2500
2501                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2502                 /* handle HPD short pulse irq */
2503                 if (aconnector->mst_mgr.mst_state)
2504                         drm_dp_mst_hpd_irq(
2505                                 &aconnector->mst_mgr,
2506                                 esi,
2507                                 &new_irq_handled);
2508
2509                 if (new_irq_handled) {
2510                         /* ACK at DPCD to notify downstream */
2511                         const int ack_dpcd_bytes_to_write =
2512                                 dpcd_bytes_to_read - 1;
2513
2514                         for (retry = 0; retry < 3; retry++) {
2515                                 uint8_t wret;
2516
2517                                 wret = drm_dp_dpcd_write(
2518                                         &aconnector->dm_dp_aux.aux,
2519                                         dpcd_addr + 1,
2520                                         &esi[1],
2521                                         ack_dpcd_bytes_to_write);
2522                                 if (wret == ack_dpcd_bytes_to_write)
2523                                         break;
2524                         }
2525
2526                         /* check if there is new irq to be handled */
2527                         dret = drm_dp_dpcd_read(
2528                                 &aconnector->dm_dp_aux.aux,
2529                                 dpcd_addr,
2530                                 esi,
2531                                 dpcd_bytes_to_read);
2532
2533                         new_irq_handled = false;
2534                 } else {
2535                         break;
2536                 }
2537         }
2538
2539         if (process_count == max_process_count)
2540                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2541 }
2542
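/*
 * DP HPD "short pulse" handler: services RX interrupts such as link loss,
 * MST sideband messages and (with HDCP enabled) CP_IRQ notifications,
 * re-running sink detection when the downstream port status has changed.
 */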
2543 static void handle_hpd_rx_irq(void *param)
2544 {
2545         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2546         struct drm_connector *connector = &aconnector->base;
2547         struct drm_device *dev = connector->dev;
2548         struct dc_link *dc_link = aconnector->dc_link;
2549         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2550         enum dc_connection_type new_connection_type = dc_connection_none;
2551 #ifdef CONFIG_DRM_AMD_DC_HDCP
2552         union hpd_irq_data hpd_irq_data;
2553         struct amdgpu_device *adev = drm_to_adev(dev);
2554
2555         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2556 #endif
2557
2558         /*
2559          * TODO: Temporarily add a mutex so the HPD interrupt doesn't hit a
2560          * GPIO conflict; once an i2c helper is implemented, this mutex
2561          * should be retired.
2562          */
2563         if (dc_link->type != dc_connection_mst_branch)
2564                 mutex_lock(&aconnector->hpd_lock);
2565
2567 #ifdef CONFIG_DRM_AMD_DC_HDCP
2568         if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2569 #else
2570         if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2571 #endif
2572                         !is_mst_root_connector) {
2573                 /* Downstream Port status changed. */
2574                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2575                         DRM_ERROR("KMS: Failed to detect connector\n");
2576
2577                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2578                         emulated_link_detect(dc_link);
2579
2580                         if (aconnector->fake_enable)
2581                                 aconnector->fake_enable = false;
2582
2583                         amdgpu_dm_update_connector_after_detect(aconnector);
2584
2586                         drm_modeset_lock_all(dev);
2587                         dm_restore_drm_connector_state(dev, connector);
2588                         drm_modeset_unlock_all(dev);
2589
2590                         drm_kms_helper_hotplug_event(dev);
2591                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2592
2593                         if (aconnector->fake_enable)
2594                                 aconnector->fake_enable = false;
2595
2596                         amdgpu_dm_update_connector_after_detect(aconnector);
2597
2599                         drm_modeset_lock_all(dev);
2600                         dm_restore_drm_connector_state(dev, connector);
2601                         drm_modeset_unlock_all(dev);
2602
2603                         drm_kms_helper_hotplug_event(dev);
2604                 }
2605         }
2606 #ifdef CONFIG_DRM_AMD_DC_HDCP
2607         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2608                 if (adev->dm.hdcp_workqueue)
2609                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2610         }
2611 #endif
2612         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2613             (dc_link->type == dc_connection_mst_branch))
2614                 dm_handle_hpd_rx_irq(aconnector);
2615
2616         if (dc_link->type != dc_connection_mst_branch) {
2617                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2618                 mutex_unlock(&aconnector->hpd_lock);
2619         }
2620 }
2621
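/*
 * Walk the connector list and hook each link's HPD and HPD RX (DP short
 * pulse) interrupt sources up to their DM handlers.
 */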
2622 static void register_hpd_handlers(struct amdgpu_device *adev)
2623 {
2624         struct drm_device *dev = adev_to_drm(adev);
2625         struct drm_connector *connector;
2626         struct amdgpu_dm_connector *aconnector;
2627         const struct dc_link *dc_link;
2628         struct dc_interrupt_params int_params = {0};
2629
2630         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2631         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2632
2633         list_for_each_entry(connector,
2634                         &dev->mode_config.connector_list, head) {
2635
2636                 aconnector = to_amdgpu_dm_connector(connector);
2637                 dc_link = aconnector->dc_link;
2638
2639                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2640                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2641                         int_params.irq_source = dc_link->irq_source_hpd;
2642
2643                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2644                                         handle_hpd_irq,
2645                                         (void *) aconnector);
2646                 }
2647
2648                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2650                         /* Also register for DP short pulse (hpd_rx). */
2651                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2652                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2653
2654                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2655                                         handle_hpd_rx_irq,
2656                                         (void *) aconnector);
2657                 }
2658         }
2659 }
2660
2661 #if defined(CONFIG_DRM_AMD_DC_SI)
2662 /* Register IRQ sources and initialize IRQ callbacks */
2663 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2664 {
2665         struct dc *dc = adev->dm.dc;
2666         struct common_irq_params *c_irq_params;
2667         struct dc_interrupt_params int_params = {0};
2668         int r;
2669         int i;
2670         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2671
2672         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2673         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2674
2675         /*
2676          * Actions of amdgpu_irq_add_id():
2677          * 1. Register a set() function with base driver.
2678          *    Base driver will call set() function to enable/disable an
2679          *    interrupt in DC hardware.
2680          * 2. Register amdgpu_dm_irq_handler().
2681          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2682          *    coming from DC hardware.
2683          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2684          *    for acknowledging and handling.
2685          */

2686         /* Use VBLANK interrupt */
2687         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2688                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2689                 if (r) {
2690                         DRM_ERROR("Failed to add crtc irq id!\n");
2691                         return r;
2692                 }
2693
2694                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2695                 int_params.irq_source =
2696                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2697
2698                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2699
2700                 c_irq_params->adev = adev;
2701                 c_irq_params->irq_src = int_params.irq_source;
2702
2703                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2704                                 dm_crtc_high_irq, c_irq_params);
2705         }
2706
2707         /* Use GRPH_PFLIP interrupt */
2708         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2709                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2710                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2711                 if (r) {
2712                         DRM_ERROR("Failed to add page flip irq id!\n");
2713                         return r;
2714                 }
2715
2716                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2717                 int_params.irq_source =
2718                         dc_interrupt_to_irq_source(dc, i, 0);
2719
2720                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2721
2722                 c_irq_params->adev = adev;
2723                 c_irq_params->irq_src = int_params.irq_source;
2724
2725                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2726                                 dm_pflip_high_irq, c_irq_params);
2727
2728         }
2729
2730         /* HPD */
2731         r = amdgpu_irq_add_id(adev, client_id,
2732                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2733         if (r) {
2734                 DRM_ERROR("Failed to add hpd irq id!\n");
2735                 return r;
2736         }
2737
2738         register_hpd_handlers(adev);
2739
2740         return 0;
2741 }
2742 #endif
2743
2744 /* Register IRQ sources and initialize IRQ callbacks */
2745 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2746 {
2747         struct dc *dc = adev->dm.dc;
2748         struct common_irq_params *c_irq_params;
2749         struct dc_interrupt_params int_params = {0};
2750         int r;
2751         int i;
2752         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2753
2754         if (adev->asic_type >= CHIP_VEGA10)
2755                 client_id = SOC15_IH_CLIENTID_DCE;
2756
2757         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2758         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2759
2760         /*
2761          * Actions of amdgpu_irq_add_id():
2762          * 1. Register a set() function with base driver.
2763          *    Base driver will call set() function to enable/disable an
2764          *    interrupt in DC hardware.
2765          * 2. Register amdgpu_dm_irq_handler().
2766          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2767          *    coming from DC hardware.
2768          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2769          *    for acknowledging and handling.
2770          */

2771         /* Use VBLANK interrupt */
2772         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2773                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2774                 if (r) {
2775                         DRM_ERROR("Failed to add crtc irq id!\n");
2776                         return r;
2777                 }
2778
2779                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2780                 int_params.irq_source =
2781                         dc_interrupt_to_irq_source(dc, i, 0);
2782
2783                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2784
2785                 c_irq_params->adev = adev;
2786                 c_irq_params->irq_src = int_params.irq_source;
2787
2788                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2789                                 dm_crtc_high_irq, c_irq_params);
2790         }
2791
2792         /* Use VUPDATE interrupt */
2793         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2794                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2795                 if (r) {
2796                         DRM_ERROR("Failed to add vupdate irq id!\n");
2797                         return r;
2798                 }
2799
2800                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2801                 int_params.irq_source =
2802                         dc_interrupt_to_irq_source(dc, i, 0);
2803
2804                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2805
2806                 c_irq_params->adev = adev;
2807                 c_irq_params->irq_src = int_params.irq_source;
2808
2809                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2810                                 dm_vupdate_high_irq, c_irq_params);
2811         }
2812
2813         /* Use GRPH_PFLIP interrupt */
2814         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2815                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2816                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2817                 if (r) {
2818                         DRM_ERROR("Failed to add page flip irq id!\n");
2819                         return r;
2820                 }
2821
2822                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2823                 int_params.irq_source =
2824                         dc_interrupt_to_irq_source(dc, i, 0);
2825
2826                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2827
2828                 c_irq_params->adev = adev;
2829                 c_irq_params->irq_src = int_params.irq_source;
2830
2831                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2832                                 dm_pflip_high_irq, c_irq_params);
2833
2834         }
2835
2836         /* HPD */
2837         r = amdgpu_irq_add_id(adev, client_id,
2838                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2839         if (r) {
2840                 DRM_ERROR("Failed to add hpd irq id!\n");
2841                 return r;
2842         }
2843
2844         register_hpd_handlers(adev);
2845
2846         return 0;
2847 }
2848
2849 #if defined(CONFIG_DRM_AMD_DC_DCN)
2850 /* Register IRQ sources and initialize IRQ callbacks */
2851 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2852 {
2853         struct dc *dc = adev->dm.dc;
2854         struct common_irq_params *c_irq_params;
2855         struct dc_interrupt_params int_params = {0};
2856         int r;
2857         int i;
2858
2859         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2860         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2861
2862         /*
2863          * Actions of amdgpu_irq_add_id():
2864          * 1. Register a set() function with base driver.
2865          *    Base driver will call set() function to enable/disable an
2866          *    interrupt in DC hardware.
2867          * 2. Register amdgpu_dm_irq_handler().
2868          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2869          *    coming from DC hardware.
2870          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2871          *    for acknowledging and handling.
2872          */
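        /*
         * Illustrative dispatch chain (a sketch, not driver code) implied by
         * the comment above, assuming the usual amdgpu IRQ plumbing:
         *
         *   hardware interrupt
         *     -> base driver IRQ handler (amdgpu)
         *       -> amdgpu_dm_irq_handler()   registered by amdgpu_irq_add_id()
         *         -> DC acknowledges the interrupt
         *         -> dm_crtc_high_irq() etc. registered below via
         *                                    amdgpu_dm_irq_register_interrupt()
         */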
2873
2874         /* Use VSTARTUP interrupt */
2875         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2876                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2877                         i++) {
2878                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2879
2880                 if (r) {
2881                         DRM_ERROR("Failed to add crtc irq id!\n");
2882                         return r;
2883                 }
2884
2885                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2886                 int_params.irq_source =
2887                         dc_interrupt_to_irq_source(dc, i, 0);
2888
2889                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2890
2891                 c_irq_params->adev = adev;
2892                 c_irq_params->irq_src = int_params.irq_source;
2893
2894                 amdgpu_dm_irq_register_interrupt(
2895                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2896         }
2897
2898         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2899          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2900          * to trigger at the end of each vblank, regardless of the state of the lock,
2901          * matching DCE behaviour.
2902          */
2903         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2904              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2905              i++) {
2906                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2907
2908                 if (r) {
2909                         DRM_ERROR("Failed to add vupdate irq id!\n");
2910                         return r;
2911                 }
2912
2913                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2914                 int_params.irq_source =
2915                         dc_interrupt_to_irq_source(dc, i, 0);
2916
2917                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2918
2919                 c_irq_params->adev = adev;
2920                 c_irq_params->irq_src = int_params.irq_source;
2921
2922                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2923                                 dm_vupdate_high_irq, c_irq_params);
2924         }
2925
2926         /* Use GRPH_PFLIP interrupt */
2927         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2928                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2929                         i++) {
2930                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2931                 if (r) {
2932                         DRM_ERROR("Failed to add page flip irq id!\n");
2933                         return r;
2934                 }
2935
2936                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2937                 int_params.irq_source =
2938                         dc_interrupt_to_irq_source(dc, i, 0);
2939
2940                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2941
2942                 c_irq_params->adev = adev;
2943                 c_irq_params->irq_src = int_params.irq_source;
2944
2945                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2946                                 dm_pflip_high_irq, c_irq_params);
2947
2948         }
2949
2950         /* HPD */
2951         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2952                         &adev->hpd_irq);
2953         if (r) {
2954                 DRM_ERROR("Failed to add hpd irq id!\n");
2955                 return r;
2956         }
2957
2958         register_hpd_handlers(adev);
2959
2960         return 0;
2961 }
2962 #endif
2963
2964 /*
2965  * Acquires the lock for the atomic state object and returns
2966  * the new atomic state.
2967  *
2968  * This should only be called during atomic check.
2969  */
2970 static int dm_atomic_get_state(struct drm_atomic_state *state,
2971                                struct dm_atomic_state **dm_state)
2972 {
2973         struct drm_device *dev = state->dev;
2974         struct amdgpu_device *adev = drm_to_adev(dev);
2975         struct amdgpu_display_manager *dm = &adev->dm;
2976         struct drm_private_state *priv_state;
2977
2978         if (*dm_state)
2979                 return 0;
2980
2981         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2982         if (IS_ERR(priv_state))
2983                 return PTR_ERR(priv_state);
2984
2985         *dm_state = to_dm_atomic_state(priv_state);
2986
2987         return 0;
2988 }
2989
2990 static struct dm_atomic_state *
2991 dm_atomic_get_new_state(struct drm_atomic_state *state)
2992 {
2993         struct drm_device *dev = state->dev;
2994         struct amdgpu_device *adev = drm_to_adev(dev);
2995         struct amdgpu_display_manager *dm = &adev->dm;
2996         struct drm_private_obj *obj;
2997         struct drm_private_state *new_obj_state;
2998         int i;
2999
3000         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3001                 if (obj->funcs == dm->atomic_obj.funcs)
3002                         return to_dm_atomic_state(new_obj_state);
3003         }
3004
3005         return NULL;
3006 }
3007
3008 static struct drm_private_state *
3009 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3010 {
3011         struct dm_atomic_state *old_state, *new_state;
3012
3013         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3014         if (!new_state)
3015                 return NULL;
3016
3017         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3018
3019         old_state = to_dm_atomic_state(obj->state);
3020
3021         if (old_state && old_state->context)
3022                 new_state->context = dc_copy_state(old_state->context);
3023
3024         if (!new_state->context) {
3025                 kfree(new_state);
3026                 return NULL;
3027         }
3028
3029         return &new_state->base;
3030 }
3031
3032 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3033                                     struct drm_private_state *state)
3034 {
3035         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3036
3037         if (dm_state && dm_state->context)
3038                 dc_release_state(dm_state->context);
3039
3040         kfree(dm_state);
3041 }
3042
3043 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3044         .atomic_duplicate_state = dm_atomic_duplicate_state,
3045         .atomic_destroy_state = dm_atomic_destroy_state,
3046 };
3047
3048 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3049 {
3050         struct dm_atomic_state *state;
3051         int r;
3052
3053         adev->mode_info.mode_config_initialized = true;
3054
3055         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3056         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3057
3058         adev_to_drm(adev)->mode_config.max_width = 16384;
3059         adev_to_drm(adev)->mode_config.max_height = 16384;
3060
3061         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3062         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3063         /* indicates support for immediate flip */
3064         adev_to_drm(adev)->mode_config.async_page_flip = true;
3065
3066         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3067
3068         state = kzalloc(sizeof(*state), GFP_KERNEL);
3069         if (!state)
3070                 return -ENOMEM;
3071
3072         state->context = dc_create_state(adev->dm.dc);
3073         if (!state->context) {
3074                 kfree(state);
3075                 return -ENOMEM;
3076         }
3077
3078         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3079
3080         drm_atomic_private_obj_init(adev_to_drm(adev),
3081                                     &adev->dm.atomic_obj,
3082                                     &state->base,
3083                                     &dm_atomic_state_funcs);
3084
3085         r = amdgpu_display_modeset_create_props(adev);
3086         if (r) {
3087                 dc_release_state(state->context);
3088                 kfree(state);
3089                 return r;
3090         }
3091
3092         r = amdgpu_dm_audio_init(adev);
3093         if (r) {
3094                 dc_release_state(state->context);
3095                 kfree(state);
3096                 return r;
3097         }
3098
3099         return 0;
3100 }
3101
3102 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3103 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3104 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3105
3106 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3107         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3108
3109 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3110 {
3111 #if defined(CONFIG_ACPI)
3112         struct amdgpu_dm_backlight_caps caps;
3113
3114         memset(&caps, 0, sizeof(caps));
3115
3116         if (dm->backlight_caps.caps_valid)
3117                 return;
3118
3119         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3120         if (caps.caps_valid) {
3121                 dm->backlight_caps.caps_valid = true;
3122                 if (caps.aux_support)
3123                         return;
3124                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3125                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3126         } else {
3127                 dm->backlight_caps.min_input_signal =
3128                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3129                 dm->backlight_caps.max_input_signal =
3130                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3131         }
3132 #else
3133         if (dm->backlight_caps.aux_support)
3134                 return;
3135
3136         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3137         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3138 #endif
3139 }
3140
3141 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3142 {
3143         bool rc;
3144
3145         if (!link)
3146                 return 1;
3147
3148         rc = dc_link_set_backlight_level_nits(link, true, brightness,
3149                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3150
3151         return rc ? 0 : 1;
3152 }
3153
3154 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3155                                 unsigned *min, unsigned *max)
3156 {
3157         if (!caps)
3158                 return 0;
3159
3160         if (caps->aux_support) {
3161                 // Firmware limits are in nits, DC API wants millinits.
3162                 *max = 1000 * caps->aux_max_input_signal;
3163                 *min = 1000 * caps->aux_min_input_signal;
3164         } else {
3165                 // Firmware limits are 8-bit, PWM control is 16-bit.
3166                 *max = 0x101 * caps->max_input_signal;
3167                 *min = 0x101 * caps->min_input_signal;
3168         }
3169         return 1;
3170 }
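
/*
 * Worked example (illustrative only): with the PWM defaults above,
 * caps->min_input_signal = 12 and caps->max_input_signal = 255 give
 *
 *   *min = 0x101 * 12  = 3084
 *   *max = 0x101 * 255 = 65535 (0xFFFF)
 *
 * i.e. the 0x101 multiplier maps the 8-bit firmware maximum 0xFF onto the
 * full 16-bit PWM range, since 0xFF * 0x101 = 0xFFFF. In the AUX case the
 * firmware limits in nits are simply multiplied by 1000 to get millinits.
 */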
3171
3172 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3173                                         uint32_t brightness)
3174 {
3175         unsigned min, max;
3176
3177         if (!get_brightness_range(caps, &min, &max))
3178                 return brightness;
3179
3180         // Rescale 0..255 to min..max
3181         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3182                                        AMDGPU_MAX_BL_LEVEL);
3183 }
3184
3185 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3186                                       uint32_t brightness)
3187 {
3188         unsigned min, max;
3189
3190         if (!get_brightness_range(caps, &min, &max))
3191                 return brightness;
3192
3193         if (brightness < min)
3194                 return 0;
3195         // Rescale min..max to 0..255
3196         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3197                                  max - min);
3198 }
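
/*
 * Worked example (illustrative only): with min = 3084 and max = 65535 as
 * derived above, a user brightness of 128 (out of AMDGPU_MAX_BL_LEVEL = 255)
 * becomes
 *
 *   3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432
 *
 * and convert_brightness_to_user() maps 34432 back to 128, so the two
 * conversions round-trip for this value.
 */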
3199
3200 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3201 {
3202         struct amdgpu_display_manager *dm = bl_get_data(bd);
3203         struct amdgpu_dm_backlight_caps caps;
3204         struct dc_link *link = NULL;
3205         u32 brightness;
3206         bool rc;
3207
3208         amdgpu_dm_update_backlight_caps(dm);
3209         caps = dm->backlight_caps;
3210
3211         link = (struct dc_link *)dm->backlight_link;
3212
3213         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3214         // Change brightness based on AUX property
3215         if (caps.aux_support)
3216                 return set_backlight_via_aux(link, brightness);
3217
3218         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3219
3220         return rc ? 0 : 1;
3221 }
3222
3223 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3224 {
3225         struct amdgpu_display_manager *dm = bl_get_data(bd);
3226         int ret = dc_link_get_backlight_level(dm->backlight_link);
3227
3228         if (ret == DC_ERROR_UNEXPECTED)
3229                 return bd->props.brightness;
3230         return convert_brightness_to_user(&dm->backlight_caps, ret);
3231 }
3232
3233 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3234         .options = BL_CORE_SUSPENDRESUME,
3235         .get_brightness = amdgpu_dm_backlight_get_brightness,
3236         .update_status  = amdgpu_dm_backlight_update_status,
3237 };
3238
3239 static void
3240 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3241 {
3242         char bl_name[16];
3243         struct backlight_properties props = { 0 };
3244
3245         amdgpu_dm_update_backlight_caps(dm);
3246
3247         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3248         props.brightness = AMDGPU_MAX_BL_LEVEL;
3249         props.type = BACKLIGHT_RAW;
3250
3251         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3252                  adev_to_drm(dm->adev)->primary->index);
3253
3254         dm->backlight_dev = backlight_device_register(bl_name,
3255                                                       adev_to_drm(dm->adev)->dev,
3256                                                       dm,
3257                                                       &amdgpu_dm_backlight_ops,
3258                                                       &props);
3259
3260         if (IS_ERR(dm->backlight_dev))
3261                 DRM_ERROR("DM: Backlight registration failed!\n");
3262         else
3263                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3264 }
3265
3266 #endif
3267
3268 static int initialize_plane(struct amdgpu_display_manager *dm,
3269                             struct amdgpu_mode_info *mode_info, int plane_id,
3270                             enum drm_plane_type plane_type,
3271                             const struct dc_plane_cap *plane_cap)
3272 {
3273         struct drm_plane *plane;
3274         unsigned long possible_crtcs;
3275         int ret = 0;
3276
3277         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3278         if (!plane) {
3279                 DRM_ERROR("KMS: Failed to allocate plane\n");
3280                 return -ENOMEM;
3281         }
3282         plane->type = plane_type;
3283
3284         /*
3285          * HACK: IGT tests expect that each primary plane is bound to
3286          * exactly one possible CRTC. Only expose support for any CRTC on
3287          * planes that will not be used as the primary plane for a CRTC,
3288          * such as overlay or underlay planes.
3289          */
3290         possible_crtcs = 1 << plane_id;
3291         if (plane_id >= dm->dc->caps.max_streams)
3292                 possible_crtcs = 0xff;
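
        /*
         * Example (illustrative only): possible_crtcs is a bitmask of CRTC
         * indexes, so a primary plane with plane_id 2 gets 1 << 2 = 0x4 and
         * can only be bound to CRTC 2, while overlay/underlay planes get
         * 0xff and may be placed on any of the first eight CRTCs.
         */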
3293
3294         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3295
3296         if (ret) {
3297                 DRM_ERROR("KMS: Failed to initialize plane\n");
3298                 kfree(plane);
3299                 return ret;
3300         }
3301
3302         if (mode_info)
3303                 mode_info->planes[plane_id] = plane;
3304
3305         return ret;
3306 }
3307
3308
3309 static void register_backlight_device(struct amdgpu_display_manager *dm,
3310                                       struct dc_link *link)
3311 {
3312 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3313         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3314
3315         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3316             link->type != dc_connection_none) {
3317                 /*
3318                  * Even if registration fails, we should continue with
3319                  * DM initialization because not having a backlight control
3320                  * is better than a black screen.
3321                  */
3322                 amdgpu_dm_register_backlight_device(dm);
3323
3324                 if (dm->backlight_dev)
3325                         dm->backlight_link = link;
3326         }
3327 #endif
3328 }
3329
3330
3331 /*
3332  * In this architecture, the association
3333  * connector -> encoder -> crtc
3334  * is not really required. The crtc and connector will hold the
3335  * display_index as an abstraction to use with the DAL component.
3336  *
3337  * Returns 0 on success
3338  */
3339 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3340 {
3341         struct amdgpu_display_manager *dm = &adev->dm;
3342         int32_t i;
3343         struct amdgpu_dm_connector *aconnector = NULL;
3344         struct amdgpu_encoder *aencoder = NULL;
3345         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3346         uint32_t link_cnt;
3347         int32_t primary_planes;
3348         enum dc_connection_type new_connection_type = dc_connection_none;
3349         const struct dc_plane_cap *plane;
3350
3351         link_cnt = dm->dc->caps.max_links;
3352         if (amdgpu_dm_mode_config_init(dm->adev)) {
3353                 DRM_ERROR("DM: Failed to initialize mode config\n");
3354                 return -EINVAL;
3355         }
3356
3357         /* There is one primary plane per CRTC */
3358         primary_planes = dm->dc->caps.max_streams;
3359         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3360
3361         /*
3362          * Initialize primary planes, the implicit planes used by legacy IOCTLs.
3363          * Order is reversed to match iteration order in atomic check.
3364          */
3365         for (i = (primary_planes - 1); i >= 0; i--) {
3366                 plane = &dm->dc->caps.planes[i];
3367
3368                 if (initialize_plane(dm, mode_info, i,
3369                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3370                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3371                         goto fail;
3372                 }
3373         }
3374
3375         /*
3376          * Initialize overlay planes, index starting after primary planes.
3377          * These planes have a higher DRM index than the primary planes since
3378          * they should be considered as having a higher z-order.
3379          * Order is reversed to match iteration order in atomic check.
3380          *
3381          * Only support DCN for now, and only expose one so we don't encourage
3382          * userspace to use up all the pipes.
3383          */
3384         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3385                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3386
3387                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3388                         continue;
3389
3390                 if (!plane->blends_with_above || !plane->blends_with_below)
3391                         continue;
3392
3393                 if (!plane->pixel_format_support.argb8888)
3394                         continue;
3395
3396                 if (initialize_plane(dm, NULL, primary_planes + i,
3397                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3398                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3399                         goto fail;
3400                 }
3401
3402                 /* Only create one overlay plane. */
3403                 break;
3404         }
3405
3406         for (i = 0; i < dm->dc->caps.max_streams; i++)
3407                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3408                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3409                         goto fail;
3410                 }
3411
3412         dm->display_indexes_num = dm->dc->caps.max_streams;
3413
3414         /* loops over all connectors on the board */
3415         for (i = 0; i < link_cnt; i++) {
3416                 struct dc_link *link = NULL;
3417
3418                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3419                         DRM_ERROR(
3420                                 "KMS: Cannot support more than %d display indexes\n",
3421                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3422                         continue;
3423                 }
3424
3425                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3426                 if (!aconnector)
3427                         goto fail;
3428
3429                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3430                 if (!aencoder)
3431                         goto fail;
3432
3433                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3434                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3435                         goto fail;
3436                 }
3437
3438                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3439                         DRM_ERROR("KMS: Failed to initialize connector\n");
3440                         goto fail;
3441                 }
3442
3443                 link = dc_get_link_at_index(dm->dc, i);
3444
3445                 if (!dc_link_detect_sink(link, &new_connection_type))
3446                         DRM_ERROR("KMS: Failed to detect connector\n");
3447
3448                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3449                         emulated_link_detect(link);
3450                         amdgpu_dm_update_connector_after_detect(aconnector);
3451
3452                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3453                         amdgpu_dm_update_connector_after_detect(aconnector);
3454                         register_backlight_device(dm, link);
3455                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3456                                 amdgpu_dm_set_psr_caps(link);
3457                 }
3458
3459
3460         }
3461
3462         /* Software is initialized. Now we can register interrupt handlers. */
3463         switch (adev->asic_type) {
3464 #if defined(CONFIG_DRM_AMD_DC_SI)
3465         case CHIP_TAHITI:
3466         case CHIP_PITCAIRN:
3467         case CHIP_VERDE:
3468         case CHIP_OLAND:
3469                 if (dce60_register_irq_handlers(dm->adev)) {
3470                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3471                         goto fail;
3472                 }
3473                 break;
3474 #endif
3475         case CHIP_BONAIRE:
3476         case CHIP_HAWAII:
3477         case CHIP_KAVERI:
3478         case CHIP_KABINI:
3479         case CHIP_MULLINS:
3480         case CHIP_TONGA:
3481         case CHIP_FIJI:
3482         case CHIP_CARRIZO:
3483         case CHIP_STONEY:
3484         case CHIP_POLARIS11:
3485         case CHIP_POLARIS10:
3486         case CHIP_POLARIS12:
3487         case CHIP_VEGAM:
3488         case CHIP_VEGA10:
3489         case CHIP_VEGA12:
3490         case CHIP_VEGA20:
3491                 if (dce110_register_irq_handlers(dm->adev)) {
3492                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3493                         goto fail;
3494                 }
3495                 break;
3496 #if defined(CONFIG_DRM_AMD_DC_DCN)
3497         case CHIP_RAVEN:
3498         case CHIP_NAVI12:
3499         case CHIP_NAVI10:
3500         case CHIP_NAVI14:
3501         case CHIP_RENOIR:
3502 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3503         case CHIP_SIENNA_CICHLID:
3504         case CHIP_NAVY_FLOUNDER:
3505 #endif
3506 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
3507         case CHIP_DIMGREY_CAVEFISH:
3508 #endif
3509 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3510         case CHIP_VANGOGH:
3511 #endif
3512                 if (dcn10_register_irq_handlers(dm->adev)) {
3513                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3514                         goto fail;
3515                 }
3516                 break;
3517 #endif
3518         default:
3519                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3520                 goto fail;
3521         }
3522
3523         return 0;
3524 fail:
3525         kfree(aencoder);
3526         kfree(aconnector);
3527
3528         return -EINVAL;
3529 }
3530
3531 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3532 {
3533         drm_mode_config_cleanup(dm->ddev);
3534         drm_atomic_private_obj_fini(&dm->atomic_obj);
3535         return;
3536 }
3537
3538 /******************************************************************************
3539  * amdgpu_display_funcs functions
3540  *****************************************************************************/
3541
3542 /*
3543  * dm_bandwidth_update - program display watermarks
3544  *
3545  * @adev: amdgpu_device pointer
3546  *
3547  * Calculate and program the display watermarks and line buffer allocation.
3548  */
3549 static void dm_bandwidth_update(struct amdgpu_device *adev)
3550 {
3551         /* TODO: implement later */
3552 }
3553
3554 static const struct amdgpu_display_funcs dm_display_funcs = {
3555         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3556         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3557         .backlight_set_level = NULL, /* never called for DC */
3558         .backlight_get_level = NULL, /* never called for DC */
3559         .hpd_sense = NULL, /* called unconditionally */
3560         .hpd_set_polarity = NULL, /* called unconditionally */
3561         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3562         .page_flip_get_scanoutpos =
3563                 dm_crtc_get_scanoutpos, /* called unconditionally */
3564         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3565         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3566 };
3567
3568 #if defined(CONFIG_DEBUG_KERNEL_DC)
3569
3570 static ssize_t s3_debug_store(struct device *device,
3571                               struct device_attribute *attr,
3572                               const char *buf,
3573                               size_t count)
3574 {
3575         int ret;
3576         int s3_state;
3577         struct drm_device *drm_dev = dev_get_drvdata(device);
3578         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3579
3580         ret = kstrtoint(buf, 0, &s3_state);
3581
3582         if (ret == 0) {
3583                 if (s3_state) {
3584                         dm_resume(adev);
3585                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3586                 } else
3587                         dm_suspend(adev);
3588         }
3589
3590         return ret == 0 ? count : 0;
3591 }
3592
3593 DEVICE_ATTR_WO(s3_debug);
3594
3595 #endif
3596
3597 static int dm_early_init(void *handle)
3598 {
3599         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3600
3601         switch (adev->asic_type) {
3602 #if defined(CONFIG_DRM_AMD_DC_SI)
3603         case CHIP_TAHITI:
3604         case CHIP_PITCAIRN:
3605         case CHIP_VERDE:
3606                 adev->mode_info.num_crtc = 6;
3607                 adev->mode_info.num_hpd = 6;
3608                 adev->mode_info.num_dig = 6;
3609                 break;
3610         case CHIP_OLAND:
3611                 adev->mode_info.num_crtc = 2;
3612                 adev->mode_info.num_hpd = 2;
3613                 adev->mode_info.num_dig = 2;
3614                 break;
3615 #endif
3616         case CHIP_BONAIRE:
3617         case CHIP_HAWAII:
3618                 adev->mode_info.num_crtc = 6;
3619                 adev->mode_info.num_hpd = 6;
3620                 adev->mode_info.num_dig = 6;
3621                 break;
3622         case CHIP_KAVERI:
3623                 adev->mode_info.num_crtc = 4;
3624                 adev->mode_info.num_hpd = 6;
3625                 adev->mode_info.num_dig = 7;
3626                 break;
3627         case CHIP_KABINI:
3628         case CHIP_MULLINS:
3629                 adev->mode_info.num_crtc = 2;
3630                 adev->mode_info.num_hpd = 6;
3631                 adev->mode_info.num_dig = 6;
3632                 break;
3633         case CHIP_FIJI:
3634         case CHIP_TONGA:
3635                 adev->mode_info.num_crtc = 6;
3636                 adev->mode_info.num_hpd = 6;
3637                 adev->mode_info.num_dig = 7;
3638                 break;
3639         case CHIP_CARRIZO:
3640                 adev->mode_info.num_crtc = 3;
3641                 adev->mode_info.num_hpd = 6;
3642                 adev->mode_info.num_dig = 9;
3643                 break;
3644         case CHIP_STONEY:
3645                 adev->mode_info.num_crtc = 2;
3646                 adev->mode_info.num_hpd = 6;
3647                 adev->mode_info.num_dig = 9;
3648                 break;
3649         case CHIP_POLARIS11:
3650         case CHIP_POLARIS12:
3651                 adev->mode_info.num_crtc = 5;
3652                 adev->mode_info.num_hpd = 5;
3653                 adev->mode_info.num_dig = 5;
3654                 break;
3655         case CHIP_POLARIS10:
3656         case CHIP_VEGAM:
3657                 adev->mode_info.num_crtc = 6;
3658                 adev->mode_info.num_hpd = 6;
3659                 adev->mode_info.num_dig = 6;
3660                 break;
3661         case CHIP_VEGA10:
3662         case CHIP_VEGA12:
3663         case CHIP_VEGA20:
3664                 adev->mode_info.num_crtc = 6;
3665                 adev->mode_info.num_hpd = 6;
3666                 adev->mode_info.num_dig = 6;
3667                 break;
3668 #if defined(CONFIG_DRM_AMD_DC_DCN)
3669         case CHIP_RAVEN:
3670                 adev->mode_info.num_crtc = 4;
3671                 adev->mode_info.num_hpd = 4;
3672                 adev->mode_info.num_dig = 4;
3673                 break;
3674 #endif
3675         case CHIP_NAVI10:
3676         case CHIP_NAVI12:
3677 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3678         case CHIP_SIENNA_CICHLID:
3679         case CHIP_NAVY_FLOUNDER:
3680 #endif
3681                 adev->mode_info.num_crtc = 6;
3682                 adev->mode_info.num_hpd = 6;
3683                 adev->mode_info.num_dig = 6;
3684                 break;
3685 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3686         case CHIP_VANGOGH:
3687                 adev->mode_info.num_crtc = 4;
3688                 adev->mode_info.num_hpd = 4;
3689                 adev->mode_info.num_dig = 4;
3690                 break;
3691 #endif
3692         case CHIP_NAVI14:
3693 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
3694         case CHIP_DIMGREY_CAVEFISH:
3695 #endif
3696                 adev->mode_info.num_crtc = 5;
3697                 adev->mode_info.num_hpd = 5;
3698                 adev->mode_info.num_dig = 5;
3699                 break;
3700         case CHIP_RENOIR:
3701                 adev->mode_info.num_crtc = 4;
3702                 adev->mode_info.num_hpd = 4;
3703                 adev->mode_info.num_dig = 4;
3704                 break;
3705         default:
3706                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3707                 return -EINVAL;
3708         }
3709
3710         amdgpu_dm_set_irq_funcs(adev);
3711
3712         if (adev->mode_info.funcs == NULL)
3713                 adev->mode_info.funcs = &dm_display_funcs;
3714
3715         /*
3716          * Note: Do NOT change adev->audio_endpt_rreg and
3717          * adev->audio_endpt_wreg because they are initialised in
3718          * amdgpu_device_init()
3719          */
3720 #if defined(CONFIG_DEBUG_KERNEL_DC)
3721         device_create_file(
3722                 adev_to_drm(adev)->dev,
3723                 &dev_attr_s3_debug);
3724 #endif
3725
3726         return 0;
3727 }
3728
3729 static bool modeset_required(struct drm_crtc_state *crtc_state,
3730                              struct dc_stream_state *new_stream,
3731                              struct dc_stream_state *old_stream)
3732 {
3733         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3734 }
3735
3736 static bool modereset_required(struct drm_crtc_state *crtc_state)
3737 {
3738         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3739 }
3740
3741 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3742 {
3743         drm_encoder_cleanup(encoder);
3744         kfree(encoder);
3745 }
3746
3747 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3748         .destroy = amdgpu_dm_encoder_destroy,
3749 };
3750
3751
3752 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3753                                 struct dc_scaling_info *scaling_info)
3754 {
3755         int scale_w, scale_h;
3756
3757         memset(scaling_info, 0, sizeof(*scaling_info));
3758
3759         /* Source coordinates are 16.16 fixed point; ignore the fractional bits for now. */
3760         scaling_info->src_rect.x = state->src_x >> 16;
3761         scaling_info->src_rect.y = state->src_y >> 16;
3762
3763         scaling_info->src_rect.width = state->src_w >> 16;
3764         if (scaling_info->src_rect.width == 0)
3765                 return -EINVAL;
3766
3767         scaling_info->src_rect.height = state->src_h >> 16;
3768         if (scaling_info->src_rect.height == 0)
3769                 return -EINVAL;
3770
3771         scaling_info->dst_rect.x = state->crtc_x;
3772         scaling_info->dst_rect.y = state->crtc_y;
3773
3774         if (state->crtc_w == 0)
3775                 return -EINVAL;
3776
3777         scaling_info->dst_rect.width = state->crtc_w;
3778
3779         if (state->crtc_h == 0)
3780                 return -EINVAL;
3781
3782         scaling_info->dst_rect.height = state->crtc_h;
3783
3784         /* DRM doesn't specify clipping on destination output. */
3785         scaling_info->clip_rect = scaling_info->dst_rect;
3786
3787         /* TODO: Validate scaling per-format with DC plane caps */
3788         scale_w = scaling_info->dst_rect.width * 1000 /
3789                   scaling_info->src_rect.width;
3790
3791         if (scale_w < 250 || scale_w > 16000)
3792                 return -EINVAL;
3793
3794         scale_h = scaling_info->dst_rect.height * 1000 /
3795                   scaling_info->src_rect.height;
3796
3797         if (scale_h < 250 || scale_h > 16000)
3798                 return -EINVAL;
3799
3800         /*
3801          * The "scaling_quality" can be ignored for now; with quality = 0, DC
3802          * assumes reasonable defaults based on the format.
3803          */
3804
3805         return 0;
3806 }
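
/*
 * Worked example (illustrative only): for a plane state with
 * src_w = 1920 << 16 and crtc_w = 960, fill_dc_scaling_info() produces
 * src_rect.width = 1920, dst_rect.width = 960 and
 *
 *   scale_w = 960 * 1000 / 1920 = 500
 *
 * i.e. a 0.5x downscale. The accepted range 250..16000 therefore
 * corresponds to scaling factors between 0.25x and 16x.
 */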
3807
3808 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3809                        uint64_t *tiling_flags, bool *tmz_surface)
3810 {
3811         struct amdgpu_bo *rbo;
3812         int r;
3813
3814         if (!amdgpu_fb) {
3815                 *tiling_flags = 0;
3816                 *tmz_surface = false;
3817                 return 0;
3818         }
3819
3820         rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3821         r = amdgpu_bo_reserve(rbo, false);
3822
3823         if (unlikely(r)) {
3824                 /* Don't show error message when returning -ERESTARTSYS */
3825                 if (r != -ERESTARTSYS)
3826                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3827                 return r;
3828         }
3829
3830         if (tiling_flags)
3831                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3832
3833         if (tmz_surface)
3834                 *tmz_surface = amdgpu_bo_encrypted(rbo);
3835
3836         amdgpu_bo_unreserve(rbo);
3837
3838         return r;
3839 }
3840
3841 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3842 {
3843         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3844
3845         return offset ? (address + offset * 256) : 0;
3846 }
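
/*
 * Worked example (illustrative only): DCC_OFFSET_256B encodes the DCC
 * metadata offset in 256-byte units, so for address = 0x100000 and an
 * encoded offset of 4, get_dcc_address() returns
 * 0x100000 + 4 * 256 = 0x100400. An encoded offset of 0 means the surface
 * has no DCC metadata and 0 is returned.
 */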
3847
3848 static int
3849 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3850                           const struct amdgpu_framebuffer *afb,
3851                           const enum surface_pixel_format format,
3852                           const enum dc_rotation_angle rotation,
3853                           const struct plane_size *plane_size,
3854                           const union dc_tiling_info *tiling_info,
3855                           const uint64_t info,
3856                           struct dc_plane_dcc_param *dcc,
3857                           struct dc_plane_address *address,
3858                           bool force_disable_dcc)
3859 {
3860         struct dc *dc = adev->dm.dc;
3861         struct dc_dcc_surface_param input;
3862         struct dc_surface_dcc_cap output;
3863         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3864         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3865         uint64_t dcc_address;
3866
3867         memset(&input, 0, sizeof(input));
3868         memset(&output, 0, sizeof(output));
3869
3870         if (force_disable_dcc)
3871                 return 0;
3872
3873         if (!offset)
3874                 return 0;
3875
3876         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3877                 return 0;
3878
3879         if (!dc->cap_funcs.get_dcc_compression_cap)
3880                 return -EINVAL;
3881
3882         input.format = format;
3883         input.surface_size.width = plane_size->surface_size.width;
3884         input.surface_size.height = plane_size->surface_size.height;
3885         input.swizzle_mode = tiling_info->gfx9.swizzle;
3886
3887         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3888                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3889         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3890                 input.scan = SCAN_DIRECTION_VERTICAL;
3891
3892         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3893                 return -EINVAL;
3894
3895         if (!output.capable)
3896                 return -EINVAL;
3897
3898         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3899                 return -EINVAL;
3900
3901         dcc->enable = 1;
3902         dcc->meta_pitch =
3903                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3904         dcc->independent_64b_blks = i64b;
3905
3906         dcc_address = get_dcc_address(afb->address, info);
3907         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3908         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3909
3910         return 0;
3911 }
3912
3913 static int
3914 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3915                              const struct amdgpu_framebuffer *afb,
3916                              const enum surface_pixel_format format,
3917                              const enum dc_rotation_angle rotation,
3918                              const uint64_t tiling_flags,
3919                              union dc_tiling_info *tiling_info,
3920                              struct plane_size *plane_size,
3921                              struct dc_plane_dcc_param *dcc,
3922                              struct dc_plane_address *address,
3923                              bool tmz_surface,
3924                              bool force_disable_dcc)
3925 {
3926         const struct drm_framebuffer *fb = &afb->base;
3927         int ret;
3928
3929         memset(tiling_info, 0, sizeof(*tiling_info));
3930         memset(plane_size, 0, sizeof(*plane_size));
3931         memset(dcc, 0, sizeof(*dcc));
3932         memset(address, 0, sizeof(*address));
3933
3934         address->tmz_surface = tmz_surface;
3935
3936         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3937                 plane_size->surface_size.x = 0;
3938                 plane_size->surface_size.y = 0;
3939                 plane_size->surface_size.width = fb->width;
3940                 plane_size->surface_size.height = fb->height;
3941                 plane_size->surface_pitch =
3942                         fb->pitches[0] / fb->format->cpp[0];
3943
3944                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3945                 address->grph.addr.low_part = lower_32_bits(afb->address);
3946                 address->grph.addr.high_part = upper_32_bits(afb->address);
3947         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3948                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3949
3950                 plane_size->surface_size.x = 0;
3951                 plane_size->surface_size.y = 0;
3952                 plane_size->surface_size.width = fb->width;
3953                 plane_size->surface_size.height = fb->height;
3954                 plane_size->surface_pitch =
3955                         fb->pitches[0] / fb->format->cpp[0];
3956
3957                 plane_size->chroma_size.x = 0;
3958                 plane_size->chroma_size.y = 0;
3959                 /* TODO: set these based on surface format */
3960                 plane_size->chroma_size.width = fb->width / 2;
3961                 plane_size->chroma_size.height = fb->height / 2;
3962
3963                 plane_size->chroma_pitch =
3964                         fb->pitches[1] / fb->format->cpp[1];
3965
3966                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3967                 address->video_progressive.luma_addr.low_part =
3968                         lower_32_bits(afb->address);
3969                 address->video_progressive.luma_addr.high_part =
3970                         upper_32_bits(afb->address);
3971                 address->video_progressive.chroma_addr.low_part =
3972                         lower_32_bits(chroma_addr);
3973                 address->video_progressive.chroma_addr.high_part =
3974                         upper_32_bits(chroma_addr);
3975         }
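
        /*
         * Illustrative example: for a 1920x1080 NV12 framebuffer (assuming
         * no pitch alignment padding), pitches[0] = 1920 and
         * offsets[1] = 1920 * 1080, so the luma plane starts at afb->address
         * and the interleaved CbCr plane at afb->address + offsets[1], with
         * a 960x540 chroma_size as set above.
         */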
3976
3977         /* Fill GFX8 params */
3978         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3979                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3980
3981                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3982                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3983                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3984                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3985                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3986
3987                 /* XXX fix me for VI */
3988                 tiling_info->gfx8.num_banks = num_banks;
3989                 tiling_info->gfx8.array_mode =
3990                                 DC_ARRAY_2D_TILED_THIN1;
3991                 tiling_info->gfx8.tile_split = tile_split;
3992                 tiling_info->gfx8.bank_width = bankw;
3993                 tiling_info->gfx8.bank_height = bankh;
3994                 tiling_info->gfx8.tile_aspect = mtaspect;
3995                 tiling_info->gfx8.tile_mode =
3996                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3997         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3998                         == DC_ARRAY_1D_TILED_THIN1) {
3999                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4000         }
4001
4002         tiling_info->gfx8.pipe_config =
4003                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4004
4005         if (adev->asic_type == CHIP_VEGA10 ||
4006             adev->asic_type == CHIP_VEGA12 ||
4007             adev->asic_type == CHIP_VEGA20 ||
4008             adev->asic_type == CHIP_NAVI10 ||
4009             adev->asic_type == CHIP_NAVI14 ||
4010             adev->asic_type == CHIP_NAVI12 ||
4011 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
4012                 adev->asic_type == CHIP_SIENNA_CICHLID ||
4013                 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4014 #endif
4015 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
4016                 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4017 #endif
4018 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
4019                 adev->asic_type == CHIP_VANGOGH ||
4020 #endif
4021             adev->asic_type == CHIP_RENOIR ||
4022             adev->asic_type == CHIP_RAVEN) {
4023                 /* Fill GFX9 params */
4024                 tiling_info->gfx9.num_pipes =
4025                         adev->gfx.config.gb_addr_config_fields.num_pipes;
4026                 tiling_info->gfx9.num_banks =
4027                         adev->gfx.config.gb_addr_config_fields.num_banks;
4028                 tiling_info->gfx9.pipe_interleave =
4029                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4030                 tiling_info->gfx9.num_shader_engines =
4031                         adev->gfx.config.gb_addr_config_fields.num_se;
4032                 tiling_info->gfx9.max_compressed_frags =
4033                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4034                 tiling_info->gfx9.num_rb_per_se =
4035                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4036                 tiling_info->gfx9.swizzle =
4037                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
4038                 tiling_info->gfx9.shaderEnable = 1;
4039
4040 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
4041                 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4042                     adev->asic_type == CHIP_NAVY_FLOUNDER ||
4043                     adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4044                     adev->asic_type == CHIP_VANGOGH)
4045                         tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4046 #endif
4047                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4048                                                 plane_size, tiling_info,
4049                                                 tiling_flags, dcc, address,
4050                                                 force_disable_dcc);
4051                 if (ret)
4052                         return ret;
4053         }
4054
4055         return 0;
4056 }
4057
4058 static void
4059 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4060                                bool *per_pixel_alpha, bool *global_alpha,
4061                                int *global_alpha_value)
4062 {
4063         *per_pixel_alpha = false;
4064         *global_alpha = false;
4065         *global_alpha_value = 0xff;
4066
4067         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4068                 return;
4069
4070         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4071                 static const uint32_t alpha_formats[] = {
4072                         DRM_FORMAT_ARGB8888,
4073                         DRM_FORMAT_RGBA8888,
4074                         DRM_FORMAT_ABGR8888,
4075                 };
4076                 uint32_t format = plane_state->fb->format->format;
4077                 unsigned int i;
4078
4079                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4080                         if (format == alpha_formats[i]) {
4081                                 *per_pixel_alpha = true;
4082                                 break;
4083                         }
4084                 }
4085         }
4086
4087         if (plane_state->alpha < 0xffff) {
4088                 *global_alpha = true;
4089                 *global_alpha_value = plane_state->alpha >> 8;
4090         }
4091 }
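
/*
 * Worked example (illustrative only): the DRM "alpha" plane property is
 * 16 bits wide with 0xffff meaning fully opaque. An overlay plane with
 * alpha = 0x8000 therefore sets *global_alpha = true and
 * *global_alpha_value = 0x8000 >> 8 = 0x80, while the default
 * alpha = 0xffff leaves global alpha disabled.
 */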
4092
4093 static int
4094 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4095                             const enum surface_pixel_format format,
4096                             enum dc_color_space *color_space)
4097 {
4098         bool full_range;
4099
4100         *color_space = COLOR_SPACE_SRGB;
4101
4102         /* DRM color properties only affect non-RGB formats. */
4103         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4104                 return 0;
4105
4106         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4107
4108         switch (plane_state->color_encoding) {
4109         case DRM_COLOR_YCBCR_BT601:
4110                 if (full_range)
4111                         *color_space = COLOR_SPACE_YCBCR601;
4112                 else
4113                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4114                 break;
4115
4116         case DRM_COLOR_YCBCR_BT709:
4117                 if (full_range)
4118                         *color_space = COLOR_SPACE_YCBCR709;
4119                 else
4120                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4121                 break;
4122
4123         case DRM_COLOR_YCBCR_BT2020:
4124                 if (full_range)
4125                         *color_space = COLOR_SPACE_2020_YCBCR;
4126                 else
4127                         return -EINVAL;
4128                 break;
4129
4130         default:
4131                 return -EINVAL;
4132         }
4133
4134         return 0;
4135 }
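
/*
 * Example (illustrative only): an NV12 plane with
 * color_encoding = DRM_COLOR_YCBCR_BT709 and
 * color_range = DRM_COLOR_YCBCR_LIMITED_RANGE maps to
 * COLOR_SPACE_YCBCR709_LIMITED, while any RGB format gets COLOR_SPACE_SRGB
 * regardless of the DRM color properties.
 */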
4136
4137 static int
4138 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4139                             const struct drm_plane_state *plane_state,
4140                             const uint64_t tiling_flags,
4141                             struct dc_plane_info *plane_info,
4142                             struct dc_plane_address *address,
4143                             bool tmz_surface,
4144                             bool force_disable_dcc)
4145 {
4146         const struct drm_framebuffer *fb = plane_state->fb;
4147         const struct amdgpu_framebuffer *afb =
4148                 to_amdgpu_framebuffer(plane_state->fb);
4149         struct drm_format_name_buf format_name;
4150         int ret;
4151
4152         memset(plane_info, 0, sizeof(*plane_info));
4153
4154         switch (fb->format->format) {
4155         case DRM_FORMAT_C8:
4156                 plane_info->format =
4157                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4158                 break;
4159         case DRM_FORMAT_RGB565:
4160                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4161                 break;
4162         case DRM_FORMAT_XRGB8888:
4163         case DRM_FORMAT_ARGB8888:
4164                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4165                 break;
4166         case DRM_FORMAT_XRGB2101010:
4167         case DRM_FORMAT_ARGB2101010:
4168                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4169                 break;
4170         case DRM_FORMAT_XBGR2101010:
4171         case DRM_FORMAT_ABGR2101010:
4172                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4173                 break;
4174         case DRM_FORMAT_XBGR8888:
4175         case DRM_FORMAT_ABGR8888:
4176                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4177                 break;
4178         case DRM_FORMAT_NV21:
4179                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4180                 break;
4181         case DRM_FORMAT_NV12:
4182                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4183                 break;
4184         case DRM_FORMAT_P010:
4185                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4186                 break;
4187         case DRM_FORMAT_XRGB16161616F:
4188         case DRM_FORMAT_ARGB16161616F:
4189                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4190                 break;
4191         case DRM_FORMAT_XBGR16161616F:
4192         case DRM_FORMAT_ABGR16161616F:
4193                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4194                 break;
4195         default:
4196                 DRM_ERROR(
4197                         "Unsupported screen format %s\n",
4198                         drm_get_format_name(fb->format->format, &format_name));
4199                 return -EINVAL;
4200         }
4201
4202         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4203         case DRM_MODE_ROTATE_0:
4204                 plane_info->rotation = ROTATION_ANGLE_0;
4205                 break;
4206         case DRM_MODE_ROTATE_90:
4207                 plane_info->rotation = ROTATION_ANGLE_90;
4208                 break;
4209         case DRM_MODE_ROTATE_180:
4210                 plane_info->rotation = ROTATION_ANGLE_180;
4211                 break;
4212         case DRM_MODE_ROTATE_270:
4213                 plane_info->rotation = ROTATION_ANGLE_270;
4214                 break;
4215         default:
4216                 plane_info->rotation = ROTATION_ANGLE_0;
4217                 break;
4218         }
4219
4220         plane_info->visible = true;
4221         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4222
4223         plane_info->layer_index = 0;
4224
4225         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4226                                           &plane_info->color_space);
4227         if (ret)
4228                 return ret;
4229
4230         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4231                                            plane_info->rotation, tiling_flags,
4232                                            &plane_info->tiling_info,
4233                                            &plane_info->plane_size,
4234                                            &plane_info->dcc, address, tmz_surface,
4235                                            force_disable_dcc);
4236         if (ret)
4237                 return ret;
4238
4239         fill_blending_from_plane_state(
4240                 plane_state, &plane_info->per_pixel_alpha,
4241                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4242
4243         return 0;
4244 }
4245
4246 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4247                                     struct dc_plane_state *dc_plane_state,
4248                                     struct drm_plane_state *plane_state,
4249                                     struct drm_crtc_state *crtc_state)
4250 {
4251         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4252         struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4253         struct dc_scaling_info scaling_info;
4254         struct dc_plane_info plane_info;
4255         int ret;
4256         bool force_disable_dcc = false;
4257
4258         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4259         if (ret)
4260                 return ret;
4261
4262         dc_plane_state->src_rect = scaling_info.src_rect;
4263         dc_plane_state->dst_rect = scaling_info.dst_rect;
4264         dc_plane_state->clip_rect = scaling_info.clip_rect;
4265         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4266
4267         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4268         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4269                                           dm_plane_state->tiling_flags,
4270                                           &plane_info,
4271                                           &dc_plane_state->address,
4272                                           dm_plane_state->tmz_surface,
4273                                           force_disable_dcc);
4274         if (ret)
4275                 return ret;
4276
4277         dc_plane_state->format = plane_info.format;
4278         dc_plane_state->color_space = plane_info.color_space;
4280         dc_plane_state->plane_size = plane_info.plane_size;
4281         dc_plane_state->rotation = plane_info.rotation;
4282         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4283         dc_plane_state->stereo_format = plane_info.stereo_format;
4284         dc_plane_state->tiling_info = plane_info.tiling_info;
4285         dc_plane_state->visible = plane_info.visible;
4286         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4287         dc_plane_state->global_alpha = plane_info.global_alpha;
4288         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4289         dc_plane_state->dcc = plane_info.dcc;
4290         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4291
4292         /*
4293          * Always set input transfer function, since plane state is refreshed
4294          * every time.
4295          */
4296         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4297         if (ret)
4298                 return ret;
4299
4300         return 0;
4301 }
4302
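/*
 * Map the connector's RMX scaling mode onto the stream's src/dst rects.
 * For RMX_ASPECT (and RMX_OFF, which takes the same path here) the smaller
 * scale factor wins so the aspect ratio is preserved and the result is
 * centered. As a worked example (values assumed): a 1920x1080 source on a
 * 2560x1600 timing yields a 2560x1440 dst rect at y = 80. Underscan
 * borders, if enabled, are then carved out of dst.
 */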
4303 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4304                                            const struct dm_connector_state *dm_state,
4305                                            struct dc_stream_state *stream)
4306 {
4307         enum amdgpu_rmx_type rmx_type;
4308
4309         struct rect src = { 0 }; /* viewport in composition space */
4310         struct rect dst = { 0 }; /* stream addressable area */
4311
4312         /* no mode. nothing to be done */
4313         if (!mode)
4314                 return;
4315
4316         /* Full screen scaling by default */
4317         src.width = mode->hdisplay;
4318         src.height = mode->vdisplay;
4319         dst.width = stream->timing.h_addressable;
4320         dst.height = stream->timing.v_addressable;
4321
4322         if (dm_state) {
4323                 rmx_type = dm_state->scaling;
4324                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4325                         if (src.width * dst.height <
4326                                         src.height * dst.width) {
4327                                 /* height needs less upscaling/more downscaling */
4328                                 dst.width = src.width *
4329                                                 dst.height / src.height;
4330                         } else {
4331                                 /* width needs less upscaling/more downscaling */
4332                                 dst.height = src.height *
4333                                                 dst.width / src.width;
4334                         }
4335                 } else if (rmx_type == RMX_CENTER) {
4336                         dst = src;
4337                 }
4338
4339                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4340                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4341
4342                 if (dm_state->underscan_enable) {
4343                         dst.x += dm_state->underscan_hborder / 2;
4344                         dst.y += dm_state->underscan_vborder / 2;
4345                         dst.width -= dm_state->underscan_hborder;
4346                         dst.height -= dm_state->underscan_vborder;
4347                 }
4348         }
4349
4350         stream->src = src;
4351         stream->dst = dst;
4352
4353         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4354                         dst.x, dst.y, dst.width, dst.height);
4355
4356 }
4357
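/*
 * Pick a wire color depth: start from the sink's EDID bpc (or the HDMI 2.0
 * YCbCr 4:2:0 deep-color caps for 4:2:0 modes), clamp it to the bpc the
 * user requested via max_bpc, round down to an even value, and map the
 * result onto a DC color depth enum.
 */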
4358 static enum dc_color_depth
4359 convert_color_depth_from_display_info(const struct drm_connector *connector,
4360                                       bool is_y420, int requested_bpc)
4361 {
4362         uint8_t bpc;
4363
4364         if (is_y420) {
4365                 bpc = 8;
4366
4367                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4368                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4369                         bpc = 16;
4370                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4371                         bpc = 12;
4372                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4373                         bpc = 10;
4374         } else {
4375                 bpc = (uint8_t)connector->display_info.bpc;
4376                 /* Assume 8 bpc by default if no bpc is specified. */
4377                 bpc = bpc ? bpc : 8;
4378         }
4379
4380         if (requested_bpc > 0) {
4381                 /*
4382                  * Cap display bpc based on the user requested value.
4383                  *
4384                  * The value for state->max_bpc may not be correctly updated
4385                  * depending on when the connector gets added to the state
4386                  * or if this was called outside of atomic check, so it
4387                  * can't be used directly.
4388                  */
4389                 bpc = min_t(u8, bpc, requested_bpc);
4390
4391                 /* Round down to the nearest even number. */
4392                 bpc = bpc - (bpc & 1);
4393         }
4394
4395         switch (bpc) {
4396         case 0:
4397                 /*
4398                  * Temporary workaround: DRM doesn't parse color depth for
4399                  * EDID revisions before 1.4.
4400                  * TODO: Fix edid parsing
4401                  */
4402                 return COLOR_DEPTH_888;
4403         case 6:
4404                 return COLOR_DEPTH_666;
4405         case 8:
4406                 return COLOR_DEPTH_888;
4407         case 10:
4408                 return COLOR_DEPTH_101010;
4409         case 12:
4410                 return COLOR_DEPTH_121212;
4411         case 14:
4412                 return COLOR_DEPTH_141414;
4413         case 16:
4414                 return COLOR_DEPTH_161616;
4415         default:
4416                 return COLOR_DEPTH_UNDEFINED;
4417         }
4418 }
4419
4420 static enum dc_aspect_ratio
4421 get_aspect_ratio(const struct drm_display_mode *mode_in)
4422 {
4423         /* 1-1 mapping, since both enums follow the HDMI spec. */
4424         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4425 }
4426
4427 static enum dc_color_space
4428 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4429 {
4430         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4431
4432         switch (dc_crtc_timing->pixel_encoding) {
4433         case PIXEL_ENCODING_YCBCR422:
4434         case PIXEL_ENCODING_YCBCR444:
4435         case PIXEL_ENCODING_YCBCR420:
4436         {
4437                 /*
4438                  * 27.03 MHz is the separation point between HDTV and SDTV
4439                  * according to the HDMI spec; use YCbCr709 above it and
4440                  * YCbCr601 below it.
4441                  */
4442                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4443                         if (dc_crtc_timing->flags.Y_ONLY)
4444                                 color_space =
4445                                         COLOR_SPACE_YCBCR709_LIMITED;
4446                         else
4447                                 color_space = COLOR_SPACE_YCBCR709;
4448                 } else {
4449                         if (dc_crtc_timing->flags.Y_ONLY)
4450                                 color_space =
4451                                         COLOR_SPACE_YCBCR601_LIMITED;
4452                         else
4453                                 color_space = COLOR_SPACE_YCBCR601;
4454                 }
4455
4456         }
4457         break;
4458         case PIXEL_ENCODING_RGB:
4459                 color_space = COLOR_SPACE_SRGB;
4460                 break;
4461
4462         default:
4463                 WARN_ON(1);
4464                 break;
4465         }
4466
4467         return color_space;
4468 }
4469
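/*
 * Walk the color depth down until the resulting TMDS character rate fits
 * the sink. The pixel clock is scaled by bits-per-pixel relative to 24bpp
 * (e.g. x30/24 for 10bpc) and halved for YCbCr 4:2:0. As a worked example
 * (values assumed, not from the code): a 297 MHz clock at 12bpc needs
 * 445.5 MHz, too fast for a 340 MHz TMDS limit, as is 371.25 MHz at 10bpc,
 * so the loop settles on 8bpc at 297 MHz.
 */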
4470 static bool adjust_colour_depth_from_display_info(
4471         struct dc_crtc_timing *timing_out,
4472         const struct drm_display_info *info)
4473 {
4474         enum dc_color_depth depth = timing_out->display_color_depth;
4475         int normalized_clk;
4476         do {
4477                 normalized_clk = timing_out->pix_clk_100hz / 10;
4478                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4479                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4480                         normalized_clk /= 2;
4481                 /* Adjust the pixel clock per the HDMI spec for the given colour depth */
4482                 switch (depth) {
4483                 case COLOR_DEPTH_888:
4484                         break;
4485                 case COLOR_DEPTH_101010:
4486                         normalized_clk = (normalized_clk * 30) / 24;
4487                         break;
4488                 case COLOR_DEPTH_121212:
4489                         normalized_clk = (normalized_clk * 36) / 24;
4490                         break;
4491                 case COLOR_DEPTH_161616:
4492                         normalized_clk = (normalized_clk * 48) / 24;
4493                         break;
4494                 default:
4495                         /* The above depths are the only ones valid for HDMI. */
4496                         return false;
4497                 }
4498                 if (normalized_clk <= info->max_tmds_clock) {
4499                         timing_out->display_color_depth = depth;
4500                         return true;
4501                 }
4502         } while (--depth > COLOR_DEPTH_666);
4503         return false;
4504 }
4505
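/*
 * Build dc_crtc_timing from a DRM display mode. Pixel encoding is chosen
 * in priority order: YCbCr 4:2:0 when the mode is 420-only (or the
 * connector forces YUV420), then YCbCr 4:4:4 when the HDMI sink advertises
 * it, and RGB otherwise. VIC and sync polarities are carried over from
 * old_stream when the caller wants to keep the previous timings.
 */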
4506 static void fill_stream_properties_from_drm_display_mode(
4507         struct dc_stream_state *stream,
4508         const struct drm_display_mode *mode_in,
4509         const struct drm_connector *connector,
4510         const struct drm_connector_state *connector_state,
4511         const struct dc_stream_state *old_stream,
4512         int requested_bpc)
4513 {
4514         struct dc_crtc_timing *timing_out = &stream->timing;
4515         const struct drm_display_info *info = &connector->display_info;
4516         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4517         struct hdmi_vendor_infoframe hv_frame;
4518         struct hdmi_avi_infoframe avi_frame;
4519
4520         memset(&hv_frame, 0, sizeof(hv_frame));
4521         memset(&avi_frame, 0, sizeof(avi_frame));
4522
4523         timing_out->h_border_left = 0;
4524         timing_out->h_border_right = 0;
4525         timing_out->v_border_top = 0;
4526         timing_out->v_border_bottom = 0;
4527         /* TODO: un-hardcode */
4528         if (drm_mode_is_420_only(info, mode_in)
4529                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4530                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4531         else if (drm_mode_is_420_also(info, mode_in)
4532                         && aconnector->force_yuv420_output)
4533                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4534         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4535                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4536                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4537         else
4538                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4539
4540         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4541         timing_out->display_color_depth = convert_color_depth_from_display_info(
4542                 connector,
4543                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4544                 requested_bpc);
4545         timing_out->scan_type = SCANNING_TYPE_NODATA;
4546         timing_out->hdmi_vic = 0;
4547
4548         if (old_stream) {
4549                 timing_out->vic = old_stream->timing.vic;
4550                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4551                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4552         } else {
4553                 timing_out->vic = drm_match_cea_mode(mode_in);
4554                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4555                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4556                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4557                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4558         }
4559
4560         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4561                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4562                 timing_out->vic = avi_frame.video_code;
4563                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4564                 timing_out->hdmi_vic = hv_frame.vic;
4565         }
4566
4567         timing_out->h_addressable = mode_in->crtc_hdisplay;
4568         timing_out->h_total = mode_in->crtc_htotal;
4569         timing_out->h_sync_width =
4570                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4571         timing_out->h_front_porch =
4572                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4573         timing_out->v_total = mode_in->crtc_vtotal;
4574         timing_out->v_addressable = mode_in->crtc_vdisplay;
4575         timing_out->v_front_porch =
4576                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4577         timing_out->v_sync_width =
4578                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4579         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4580         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4581
4582         stream->output_color_space = get_output_color_space(timing_out);
4583
4584         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4585         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4586         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4587                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4588                     drm_mode_is_420_also(info, mode_in) &&
4589                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4590                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4591                         adjust_colour_depth_from_display_info(timing_out, info);
4592                 }
4593         }
4594 }
4595
4596 static void fill_audio_info(struct audio_info *audio_info,
4597                             const struct drm_connector *drm_connector,
4598                             const struct dc_sink *dc_sink)
4599 {
4600         int i = 0;
4601         int cea_revision = 0;
4602         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4603
4604         audio_info->manufacture_id = edid_caps->manufacturer_id;
4605         audio_info->product_id = edid_caps->product_id;
4606
4607         cea_revision = drm_connector->display_info.cea_rev;
4608
4609         strscpy(audio_info->display_name,
4610                 edid_caps->display_name,
4611                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4612
4613         if (cea_revision >= 3) {
4614                 audio_info->mode_count = edid_caps->audio_mode_count;
4615
4616                 for (i = 0; i < audio_info->mode_count; ++i) {
4617                         audio_info->modes[i].format_code =
4618                                         (enum audio_format_code)
4619                                         (edid_caps->audio_modes[i].format_code);
4620                         audio_info->modes[i].channel_count =
4621                                         edid_caps->audio_modes[i].channel_count;
4622                         audio_info->modes[i].sample_rates.all =
4623                                         edid_caps->audio_modes[i].sample_rate;
4624                         audio_info->modes[i].sample_size =
4625                                         edid_caps->audio_modes[i].sample_size;
4626                 }
4627         }
4628
4629         audio_info->flags.all = edid_caps->speaker_flags;
4630
4631         /* TODO: We only check progressive mode; check interlaced mode too */
4632         if (drm_connector->latency_present[0]) {
4633                 audio_info->video_latency = drm_connector->video_latency[0];
4634                 audio_info->audio_latency = drm_connector->audio_latency[0];
4635         }
4636
4637         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4638
4639 }
4640
4641 static void
4642 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4643                                       struct drm_display_mode *dst_mode)
4644 {
4645         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4646         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4647         dst_mode->crtc_clock = src_mode->crtc_clock;
4648         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4649         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4650         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4651         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4652         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4653         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4654         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4655         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4656         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4657         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4658         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4659 }
4660
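/*
 * Use the native mode's CRTC timing when scaling is enabled, or when the
 * requested mode already matches the native clock and h/v totals;
 * otherwise leave the mode untouched.
 */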
4661 static void
4662 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4663                                         const struct drm_display_mode *native_mode,
4664                                         bool scale_enabled)
4665 {
4666         if (scale_enabled) {
4667                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4668         } else if (native_mode->clock == drm_mode->clock &&
4669                         native_mode->htotal == drm_mode->htotal &&
4670                         native_mode->vtotal == drm_mode->vtotal) {
4671                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4672         } else {
4673                 /* neither scaling nor an amdgpu-inserted mode; nothing to patch */
4674         }
4675 }
4676
4677 static struct dc_sink *
4678 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4679 {
4680         struct dc_sink_init_data sink_init_data = { 0 };
4681         struct dc_sink *sink = NULL;
4682         sink_init_data.link = aconnector->dc_link;
4683         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4684
4685         sink = dc_sink_create(&sink_init_data);
4686         if (!sink) {
4687                 DRM_ERROR("Failed to create sink!\n");
4688                 return NULL;
4689         }
4690         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4691
4692         return sink;
4693 }
4694
4695 static void set_multisync_trigger_params(
4696                 struct dc_stream_state *stream)
4697 {
4698         if (stream->triggered_crtc_reset.enabled) {
4699                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4700                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4701         }
4702 }
4703
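/*
 * Pick the multisync master: the refresh rate in Hz is recovered as
 * pix_clk_100hz * 100 / (h_total * v_total), and the stream with the
 * highest rate becomes the reset event source for every synced stream.
 */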
4704 static void set_master_stream(struct dc_stream_state *stream_set[],
4705                               int stream_count)
4706 {
4707         int j, highest_rfr = 0, master_stream = 0;
4708
4709         for (j = 0;  j < stream_count; j++) {
4710                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4711                         int refresh_rate = 0;
4712
4713                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4714                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4715                         if (refresh_rate > highest_rfr) {
4716                                 highest_rfr = refresh_rate;
4717                                 master_stream = j;
4718                         }
4719                 }
4720         }
4721         for (j = 0;  j < stream_count; j++) {
4722                 if (stream_set[j])
4723                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4724         }
4725 }
4726
4727 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4728 {
4729         int i = 0;
4730
4731         if (context->stream_count < 2)
4732                 return;
4733         for (i = 0; i < context->stream_count ; i++) {
4734                 if (!context->streams[i])
4735                         continue;
4736                 /*
4737                  * TODO: add a function to read AMD VSDB bits and set
4738                  * crtc_sync_master.multi_sync_enabled flag
4739                  * For now it's set to false
4740                  */
4741                 set_multisync_trigger_params(context->streams[i]);
4742         }
4743         set_master_stream(context->streams, context->stream_count);
4744 }
4745
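/*
 * Create and fill a dc_stream_state for the given connector/mode. A fake
 * (virtual) sink stands in when no real sink is attached, the preferred
 * mode supplies CRTC timings where needed, and DSC is enabled for DP sinks
 * when the DPCD caps and link bandwidth allow it (or debugfs forces it).
 */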
4746 static struct dc_stream_state *
4747 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4748                        const struct drm_display_mode *drm_mode,
4749                        const struct dm_connector_state *dm_state,
4750                        const struct dc_stream_state *old_stream,
4751                        int requested_bpc)
4752 {
4753         struct drm_display_mode *preferred_mode = NULL;
4754         struct drm_connector *drm_connector;
4755         const struct drm_connector_state *con_state =
4756                 dm_state ? &dm_state->base : NULL;
4757         struct dc_stream_state *stream = NULL;
4758         struct drm_display_mode mode = *drm_mode;
4759         bool native_mode_found = false;
4760         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4761         int mode_refresh;
4762         int preferred_refresh = 0;
4763 #if defined(CONFIG_DRM_AMD_DC_DCN)
4764         struct dsc_dec_dpcd_caps dsc_caps;
4765 #endif
4766         uint32_t link_bandwidth_kbps;
4767
4768         struct dc_sink *sink = NULL;
4769         if (aconnector == NULL) {
4770                 DRM_ERROR("aconnector is NULL!\n");
4771                 return stream;
4772         }
4773
4774         drm_connector = &aconnector->base;
4775
4776         if (!aconnector->dc_sink) {
4777                 sink = create_fake_sink(aconnector);
4778                 if (!sink)
4779                         return stream;
4780         } else {
4781                 sink = aconnector->dc_sink;
4782                 dc_sink_retain(sink);
4783         }
4784
4785         stream = dc_create_stream_for_sink(sink);
4786
4787         if (stream == NULL) {
4788                 DRM_ERROR("Failed to create stream for sink!\n");
4789                 goto finish;
4790         }
4791
4792         stream->dm_stream_context = aconnector;
4793
4794         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4795                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4796
4797         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4798                 /* Search for preferred mode */
4799                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4800                         native_mode_found = true;
4801                         break;
4802                 }
4803         }
4804         if (!native_mode_found)
4805                 preferred_mode = list_first_entry_or_null(
4806                                 &aconnector->base.modes,
4807                                 struct drm_display_mode,
4808                                 head);
4809
4810         mode_refresh = drm_mode_vrefresh(&mode);
4811
4812         if (preferred_mode == NULL) {
4813                 /*
4814                  * This may not be an error, the use case is when we have no
4815                  * usermode calls to reset and set mode upon hotplug. In this
4816                  * case, we call set mode ourselves to restore the previous mode
4817                  * and the mode list may not yet be filled in.
4818                  */
4819                 DRM_DEBUG_DRIVER("No preferred mode found\n");
4820         } else {
4821                 decide_crtc_timing_for_drm_display_mode(
4822                                 &mode, preferred_mode,
4823                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4824                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4825         }
4826
4827         if (!dm_state)
4828                 drm_mode_set_crtcinfo(&mode, 0);
4829
4830         /*
4831          * If scaling is enabled and the refresh rate didn't change,
4832          * copy the VIC and sync polarities from the old timings.
4833          */
4834         if (!scale || mode_refresh != preferred_refresh)
4835                 fill_stream_properties_from_drm_display_mode(stream,
4836                         &mode, &aconnector->base, con_state, NULL, requested_bpc);
4837         else
4838                 fill_stream_properties_from_drm_display_mode(stream,
4839                         &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4840
4841         stream->timing.flags.DSC = 0;
4842
4843         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4844 #if defined(CONFIG_DRM_AMD_DC_DCN)
4845                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4846                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4847                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4848                                       &dsc_caps);
4849 #endif
4850                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4851                                                              dc_link_get_link_cap(aconnector->dc_link));
4852
4853 #if defined(CONFIG_DRM_AMD_DC_DCN)
4854                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4855                         /* Set DSC policy according to dsc_clock_en */
4856                         dc_dsc_policy_set_enable_dsc_when_not_needed(
4857                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4858
4859                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4860                                                   &dsc_caps,
4861                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4862                                                   link_bandwidth_kbps,
4863                                                   &stream->timing,
4864                                                   &stream->timing.dsc_cfg))
4865                                 stream->timing.flags.DSC = 1;
4866                         /* Overwrite the stream flag if DSC is enabled through debugfs */
4867                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4868                                 stream->timing.flags.DSC = 1;
4869
4870                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4871                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4872
4873                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4874                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4875
4876                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4877                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4878                 }
4879 #endif
4880         }
4881
4882         update_stream_scaling_settings(&mode, dm_state, stream);
4883
4884         fill_audio_info(
4885                 &stream->audio_info,
4886                 drm_connector,
4887                 sink);
4888
4889         update_stream_signal(stream, sink);
4890
4891         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4892                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4893
4894         if (stream->link->psr_settings.psr_feature_enabled) {
4895                 /*
4896                  * Decide whether the stream supports VSC SDP colorimetry
4897                  * before building the VSC info packet.
4898                  */
4899                 stream->use_vsc_sdp_for_colorimetry = false;
4900                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4901                         stream->use_vsc_sdp_for_colorimetry =
4902                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4903                 } else {
4904                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4905                                 stream->use_vsc_sdp_for_colorimetry = true;
4906                 }
4907                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4908         }
4909 finish:
4910         dc_sink_release(sink);
4911
4912         return stream;
4913 }
4914
4915 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4916 {
4917         drm_crtc_cleanup(crtc);
4918         kfree(crtc);
4919 }
4920
4921 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4922                                   struct drm_crtc_state *state)
4923 {
4924         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4925
4926         /* TODO: Destroy dc_stream objects once the stream object is flattened */
4927         if (cur->stream)
4928                 dc_stream_release(cur->stream);
4929
4931         __drm_atomic_helper_crtc_destroy_state(state);
4932
4934         kfree(state);
4935 }
4936
4937 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4938 {
4939         struct dm_crtc_state *state;
4940
4941         if (crtc->state)
4942                 dm_crtc_destroy_state(crtc, crtc->state);
4943
4944         state = kzalloc(sizeof(*state), GFP_KERNEL);
4945         if (WARN_ON(!state))
4946                 return;
4947
4948         __drm_atomic_helper_crtc_reset(crtc, &state->base);
4949 }
4950
4951 static struct drm_crtc_state *
4952 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4953 {
4954         struct dm_crtc_state *state, *cur;
4955
4956         if (WARN_ON(!crtc->state))
4957                 return NULL;
4958
4959         cur = to_dm_crtc_state(crtc->state);
4960
4961         state = kzalloc(sizeof(*state), GFP_KERNEL);
4962         if (!state)
4963                 return NULL;
4964
4965         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4966
4967         if (cur->stream) {
4968                 state->stream = cur->stream;
4969                 dc_stream_retain(state->stream);
4970         }
4971
4972         state->active_planes = cur->active_planes;
4973         state->vrr_infopacket = cur->vrr_infopacket;
4974         state->abm_level = cur->abm_level;
4975         state->vrr_supported = cur->vrr_supported;
4976         state->freesync_config = cur->freesync_config;
4977         state->crc_src = cur->crc_src;
4978         state->cm_has_degamma = cur->cm_has_degamma;
4979         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4980
4981         /* TODO: Duplicate dc_stream once the stream object is flattened */
4982
4983         return &state->base;
4984 }
4985
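/*
 * VUPDATE interrupts are per-OTG, so the dc_irq_source is computed by
 * offsetting IRQ_TYPE_VUPDATE with the CRTC's OTG instance.
 */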
4986 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4987 {
4988         enum dc_irq_source irq_source;
4989         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4990         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4991         int rc;
4992
4993         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4994
4995         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4996
4997         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4998                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4999         return rc;
5000 }
5001
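/*
 * Keep the VUPDATE interrupt in lockstep with VBLANK: it is only enabled
 * alongside VBLANK while VRR is active, and is always disabled with it.
 */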
5002 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5003 {
5004         enum dc_irq_source irq_source;
5005         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5006         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5007         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5008         int rc = 0;
5009
5010         if (enable) {
5011                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5012                 if (amdgpu_dm_vrr_active(acrtc_state))
5013                         rc = dm_set_vupdate_irq(crtc, true);
5014         } else {
5015                 /* vblank irq off -> vupdate irq off */
5016                 rc = dm_set_vupdate_irq(crtc, false);
5017         }
5018
5019         if (rc)
5020                 return rc;
5021
5022         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5023         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5024 }
5025
5026 static int dm_enable_vblank(struct drm_crtc *crtc)
5027 {
5028         return dm_set_vblank(crtc, true);
5029 }
5030
5031 static void dm_disable_vblank(struct drm_crtc *crtc)
5032 {
5033         dm_set_vblank(crtc, false);
5034 }
5035
5036 /* Only the options currently available to the driver are implemented */
5037 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5038         .reset = dm_crtc_reset_state,
5039         .destroy = amdgpu_dm_crtc_destroy,
5040         .gamma_set = drm_atomic_helper_legacy_gamma_set,
5041         .set_config = drm_atomic_helper_set_config,
5042         .page_flip = drm_atomic_helper_page_flip,
5043         .atomic_duplicate_state = dm_crtc_duplicate_state,
5044         .atomic_destroy_state = dm_crtc_destroy_state,
5045         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5046         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5047         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5048         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5049         .enable_vblank = dm_enable_vblank,
5050         .disable_vblank = dm_disable_vblank,
5051         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5052 };
5053
5054 static enum drm_connector_status
5055 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5056 {
5057         bool connected;
5058         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5059
5060         /*
5061          * Notes:
5062          * 1. This interface is NOT called in context of HPD irq.
5063          * 2. This interface *is called* in the context of a user-mode ioctl,
5064          * which makes it a bad place for *any* MST-related activity.
5065          */
5066
5067         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5068             !aconnector->fake_enable)
5069                 connected = (aconnector->dc_sink != NULL);
5070         else
5071                 connected = (aconnector->base.force == DRM_FORCE_ON);
5072
5073         update_subconnector_property(aconnector);
5074
5075         return (connected ? connector_status_connected :
5076                         connector_status_disconnected);
5077 }
5078
5079 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5080                                             struct drm_connector_state *connector_state,
5081                                             struct drm_property *property,
5082                                             uint64_t val)
5083 {
5084         struct drm_device *dev = connector->dev;
5085         struct amdgpu_device *adev = drm_to_adev(dev);
5086         struct dm_connector_state *dm_old_state =
5087                 to_dm_connector_state(connector->state);
5088         struct dm_connector_state *dm_new_state =
5089                 to_dm_connector_state(connector_state);
5090
5091         int ret = -EINVAL;
5092
5093         if (property == dev->mode_config.scaling_mode_property) {
5094                 enum amdgpu_rmx_type rmx_type;
5095
5096                 switch (val) {
5097                 case DRM_MODE_SCALE_CENTER:
5098                         rmx_type = RMX_CENTER;
5099                         break;
5100                 case DRM_MODE_SCALE_ASPECT:
5101                         rmx_type = RMX_ASPECT;
5102                         break;
5103                 case DRM_MODE_SCALE_FULLSCREEN:
5104                         rmx_type = RMX_FULL;
5105                         break;
5106                 case DRM_MODE_SCALE_NONE:
5107                 default:
5108                         rmx_type = RMX_OFF;
5109                         break;
5110                 }
5111
5112                 if (dm_old_state->scaling == rmx_type)
5113                         return 0;
5114
5115                 dm_new_state->scaling = rmx_type;
5116                 ret = 0;
5117         } else if (property == adev->mode_info.underscan_hborder_property) {
5118                 dm_new_state->underscan_hborder = val;
5119                 ret = 0;
5120         } else if (property == adev->mode_info.underscan_vborder_property) {
5121                 dm_new_state->underscan_vborder = val;
5122                 ret = 0;
5123         } else if (property == adev->mode_info.underscan_property) {
5124                 dm_new_state->underscan_enable = val;
5125                 ret = 0;
5126         } else if (property == adev->mode_info.abm_level_property) {
5127                 dm_new_state->abm_level = val;
5128                 ret = 0;
5129         }
5130
5131         return ret;
5132 }
5133
5134 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5135                                             const struct drm_connector_state *state,
5136                                             struct drm_property *property,
5137                                             uint64_t *val)
5138 {
5139         struct drm_device *dev = connector->dev;
5140         struct amdgpu_device *adev = drm_to_adev(dev);
5141         struct dm_connector_state *dm_state =
5142                 to_dm_connector_state(state);
5143         int ret = -EINVAL;
5144
5145         if (property == dev->mode_config.scaling_mode_property) {
5146                 switch (dm_state->scaling) {
5147                 case RMX_CENTER:
5148                         *val = DRM_MODE_SCALE_CENTER;
5149                         break;
5150                 case RMX_ASPECT:
5151                         *val = DRM_MODE_SCALE_ASPECT;
5152                         break;
5153                 case RMX_FULL:
5154                         *val = DRM_MODE_SCALE_FULLSCREEN;
5155                         break;
5156                 case RMX_OFF:
5157                 default:
5158                         *val = DRM_MODE_SCALE_NONE;
5159                         break;
5160                 }
5161                 ret = 0;
5162         } else if (property == adev->mode_info.underscan_hborder_property) {
5163                 *val = dm_state->underscan_hborder;
5164                 ret = 0;
5165         } else if (property == adev->mode_info.underscan_vborder_property) {
5166                 *val = dm_state->underscan_vborder;
5167                 ret = 0;
5168         } else if (property == adev->mode_info.underscan_property) {
5169                 *val = dm_state->underscan_enable;
5170                 ret = 0;
5171         } else if (property == adev->mode_info.abm_level_property) {
5172                 *val = dm_state->abm_level;
5173                 ret = 0;
5174         }
5175
5176         return ret;
5177 }
5178
5179 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5180 {
5181         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5182
5183         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5184 }
5185
5186 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5187 {
5188         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5189         const struct dc_link *link = aconnector->dc_link;
5190         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5191         struct amdgpu_display_manager *dm = &adev->dm;
5192
5193         /*
5194          * Only call this if mst_mgr was initialized earlier, since that is
5195          * not done for all connector types.
5196          */
5197         if (aconnector->mst_mgr.dev)
5198                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5199
5200 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5201         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5202
5203         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5204             link->type != dc_connection_none &&
5205             dm->backlight_dev) {
5206                 backlight_device_unregister(dm->backlight_dev);
5207                 dm->backlight_dev = NULL;
5208         }
5209 #endif
5210
5211         if (aconnector->dc_em_sink)
5212                 dc_sink_release(aconnector->dc_em_sink);
5213         aconnector->dc_em_sink = NULL;
5214         if (aconnector->dc_sink)
5215                 dc_sink_release(aconnector->dc_sink);
5216         aconnector->dc_sink = NULL;
5217
5218         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5219         drm_connector_unregister(connector);
5220         drm_connector_cleanup(connector);
5221         if (aconnector->i2c) {
5222                 i2c_del_adapter(&aconnector->i2c->base);
5223                 kfree(aconnector->i2c);
5224         }
5225         kfree(aconnector->dm_dp_aux.aux.name);
5226
5227         kfree(connector);
5228 }
5229
5230 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5231 {
5232         struct dm_connector_state *state =
5233                 to_dm_connector_state(connector->state);
5234
5235         if (connector->state)
5236                 __drm_atomic_helper_connector_destroy_state(connector->state);
5237
5238         kfree(state);
5239
5240         state = kzalloc(sizeof(*state), GFP_KERNEL);
5241
5242         if (state) {
5243                 state->scaling = RMX_OFF;
5244                 state->underscan_enable = false;
5245                 state->underscan_hborder = 0;
5246                 state->underscan_vborder = 0;
5247                 state->base.max_requested_bpc = 8;
5248                 state->vcpi_slots = 0;
5249                 state->pbn = 0;
5250                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5251                         state->abm_level = amdgpu_dm_abm_level;
5252
5253                 __drm_atomic_helper_connector_reset(connector, &state->base);
5254         }
5255 }
5256
5257 struct drm_connector_state *
5258 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5259 {
5260         struct dm_connector_state *state =
5261                 to_dm_connector_state(connector->state);
5262
5263         struct dm_connector_state *new_state =
5264                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5265
5266         if (!new_state)
5267                 return NULL;
5268
5269         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5270
5271         new_state->freesync_capable = state->freesync_capable;
5272         new_state->abm_level = state->abm_level;
5273         new_state->scaling = state->scaling;
5274         new_state->underscan_enable = state->underscan_enable;
5275         new_state->underscan_hborder = state->underscan_hborder;
5276         new_state->underscan_vborder = state->underscan_vborder;
5277         new_state->vcpi_slots = state->vcpi_slots;
5278         new_state->pbn = state->pbn;
5279         return &new_state->base;
5280 }
5281
5282 static int
5283 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5284 {
5285         struct amdgpu_dm_connector *amdgpu_dm_connector =
5286                 to_amdgpu_dm_connector(connector);
5287         int r;
5288
5289         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5290             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5291                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5292                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5293                 if (r)
5294                         return r;
5295         }
5296
5297 #if defined(CONFIG_DEBUG_FS)
5298         connector_debugfs_init(amdgpu_dm_connector);
5299 #endif
5300
5301         return 0;
5302 }
5303
5304 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5305         .reset = amdgpu_dm_connector_funcs_reset,
5306         .detect = amdgpu_dm_connector_detect,
5307         .fill_modes = drm_helper_probe_single_connector_modes,
5308         .destroy = amdgpu_dm_connector_destroy,
5309         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5310         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5311         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5312         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5313         .late_register = amdgpu_dm_connector_late_register,
5314         .early_unregister = amdgpu_dm_connector_unregister
5315 };
5316
5317 static int get_modes(struct drm_connector *connector)
5318 {
5319         return amdgpu_dm_connector_get_modes(connector);
5320 }
5321
5322 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5323 {
5324         struct dc_sink_init_data init_params = {
5325                         .link = aconnector->dc_link,
5326                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5327         };
5328         struct edid *edid;
5329
5330         if (!aconnector->base.edid_blob_ptr) {
5331                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5332                                 aconnector->base.name);
5333
5334                 aconnector->base.force = DRM_FORCE_OFF;
5335                 aconnector->base.override_edid = false;
5336                 return;
5337         }
5338
5339         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5340
5341         aconnector->edid = edid;
5342
5343         aconnector->dc_em_sink = dc_link_add_remote_sink(
5344                 aconnector->dc_link,
5345                 (uint8_t *)edid,
5346                 (edid->extensions + 1) * EDID_LENGTH,
5347                 &init_params);
5348
5349         if (aconnector->base.force == DRM_FORCE_ON) {
5350                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5351                 aconnector->dc_link->local_sink :
5352                 aconnector->dc_em_sink;
5353                 dc_sink_retain(aconnector->dc_sink);
5354         }
5355 }
5356
5357 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5358 {
5359         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5360
5361         /*
5362          * In case of a headless boot with force-on for a DP-managed connector,
5363          * these settings have to be != 0 to get an initial modeset.
5364          */
5365         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5366                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5367                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5368         }
5369
5371         aconnector->base.override_edid = true;
5372         create_eml_sink(aconnector);
5373 }
5374
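/*
 * Wrap create_stream_for_sink() in a DC validation retry loop: if DC
 * rejects the stream, drop the requested bpc by 2 (down to a floor of 6)
 * and try again, so modes that fail at 10 or 8 bpc can still light up at
 * a lower depth.
 */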
5375 static struct dc_stream_state *
5376 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5377                                 const struct drm_display_mode *drm_mode,
5378                                 const struct dm_connector_state *dm_state,
5379                                 const struct dc_stream_state *old_stream)
5380 {
5381         struct drm_connector *connector = &aconnector->base;
5382         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5383         struct dc_stream_state *stream;
5384         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5385         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5386         enum dc_status dc_result = DC_OK;
5387
5388         do {
5389                 stream = create_stream_for_sink(aconnector, drm_mode,
5390                                                 dm_state, old_stream,
5391                                                 requested_bpc);
5392                 if (stream == NULL) {
5393                         DRM_ERROR("Failed to create stream for sink!\n");
5394                         break;
5395                 }
5396
5397                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5398
5399                 if (dc_result != DC_OK) {
5400                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5401                                       drm_mode->hdisplay,
5402                                       drm_mode->vdisplay,
5403                                       drm_mode->clock,
5404                                       dc_result,
5405                                       dc_status_to_str(dc_result));
5406
5407                         dc_stream_release(stream);
5408                         stream = NULL;
5409                         requested_bpc -= 2; /* lower bpc to retry validation */
5410                 }
5411
5412         } while (stream == NULL && requested_bpc >= 6);
5413
5414         return stream;
5415 }
5416
5417 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5418                                    struct drm_display_mode *mode)
5419 {
5420         int result = MODE_ERROR;
5421         struct dc_sink *dc_sink;
5422         /* TODO: Unhardcode stream count */
5423         struct dc_stream_state *stream;
5424         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5425
5426         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5427                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5428                 return result;
5429
5430         /*
5431          * Only run this the first time mode_valid is called, to initialize
5432          * EDID mgmt
5433          */
5434         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5435                 !aconnector->dc_em_sink)
5436                 handle_edid_mgmt(aconnector);
5437
5438         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5439
5440         if (dc_sink == NULL) {
5441                 DRM_ERROR("dc_sink is NULL!\n");
5442                 goto fail;
5443         }
5444
5445         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5446         if (stream) {
5447                 dc_stream_release(stream);
5448                 result = MODE_OK;
5449         }
5450
5451 fail:
5452         /* TODO: error handling */
5453         return result;
5454 }
5455
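/*
 * Pack the connector's HDR static metadata into a DC info packet. The DRM
 * infoframe is a fixed 4-byte header plus 26-byte payload; only the header
 * layout differs between HDMI (type 0x87, version 1, length 0x1A) and the
 * DP/eDP SDP variant.
 */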
5456 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5457                                 struct dc_info_packet *out)
5458 {
5459         struct hdmi_drm_infoframe frame;
5460         unsigned char buf[30]; /* 26 + 4 */
5461         ssize_t len;
5462         int ret, i;
5463
5464         memset(out, 0, sizeof(*out));
5465
5466         if (!state->hdr_output_metadata)
5467                 return 0;
5468
5469         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5470         if (ret)
5471                 return ret;
5472
5473         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5474         if (len < 0)
5475                 return (int)len;
5476
5477         /* Static metadata is a fixed 26 bytes + 4 byte header. */
5478         if (len != 30)
5479                 return -EINVAL;
5480
5481         /* Prepare the infopacket for DC. */
5482         switch (state->connector->connector_type) {
5483         case DRM_MODE_CONNECTOR_HDMIA:
5484                 out->hb0 = 0x87; /* type */
5485                 out->hb1 = 0x01; /* version */
5486                 out->hb2 = 0x1A; /* length */
5487                 out->sb[0] = buf[3]; /* checksum */
5488                 i = 1;
5489                 break;
5490
5491         case DRM_MODE_CONNECTOR_DisplayPort:
5492         case DRM_MODE_CONNECTOR_eDP:
5493                 out->hb0 = 0x00; /* sdp id, zero */
5494                 out->hb1 = 0x87; /* type */
5495                 out->hb2 = 0x1D; /* payload len - 1 */
5496                 out->hb3 = (0x13 << 2); /* sdp version */
5497                 out->sb[0] = 0x01; /* version */
5498                 out->sb[1] = 0x1A; /* length */
5499                 i = 2;
5500                 break;
5501
5502         default:
5503                 return -EINVAL;
5504         }
5505
5506         memcpy(&out->sb[i], &buf[4], 26);
5507         out->valid = true;
5508
5509         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5510                        sizeof(out->sb), false);
5511
5512         return 0;
5513 }
5514
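/*
 * Two connector states carry different HDR metadata if exactly one of
 * them has a blob, or if both have blobs whose contents differ. Note
 * that memcmp() is returned directly, so any nonzero result reads as
 * "different".
 */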
5515 static bool
5516 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5517                           const struct drm_connector_state *new_state)
5518 {
5519         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5520         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5521
5522         if (old_blob != new_blob) {
5523                 if (old_blob && new_blob &&
5524                     old_blob->length == new_blob->length)
5525                         return memcmp(old_blob->data, new_blob->data,
5526                                       old_blob->length);
5527
5528                 return true;
5529         }
5530
5531         return false;
5532 }
5533
5534 static int
5535 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5536                                  struct drm_atomic_state *state)
5537 {
5538         struct drm_connector_state *new_con_state =
5539                 drm_atomic_get_new_connector_state(state, conn);
5540         struct drm_connector_state *old_con_state =
5541                 drm_atomic_get_old_connector_state(state, conn);
5542         struct drm_crtc *crtc = new_con_state->crtc;
5543         struct drm_crtc_state *new_crtc_state;
5544         int ret;
5545
5546         if (!crtc)
5547                 return 0;
5548
5549         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5550                 struct dc_info_packet hdr_infopacket;
5551
5552                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5553                 if (ret)
5554                         return ret;
5555
5556                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5557                 if (IS_ERR(new_crtc_state))
5558                         return PTR_ERR(new_crtc_state);
5559
5560                 /*
5561                  * DC considers the stream backends changed if the
5562                  * static metadata changes. Forcing the modeset also
5563                  * gives a simple way for userspace to switch from
5564                  * 8bpc to 10bpc when setting the metadata to enter
5565                  * or exit HDR.
5566                  *
5567                  * Changing the static metadata after it's been
5568                  * set is permissible, however. So only force a
5569                  * modeset if we're entering or exiting HDR.
5570                  */
5571                 new_crtc_state->mode_changed =
5572                         !old_con_state->hdr_output_metadata ||
5573                         !new_con_state->hdr_output_metadata;
5574         }
5575
5576         return 0;
5577 }
5578
5579 static const struct drm_connector_helper_funcs
5580 amdgpu_dm_connector_helper_funcs = {
5581         /*
5582          * If a second, larger display is hotplugged while in fbcon mode, its
5583          * higher resolution modes are filtered out by drm_mode_validate_size()
5584          * and are missing after the user starts lightdm. So we need to renew
5585          * the modes list in the get_modes callback, not just return the count.
5586          */
5587         .get_modes = get_modes,
5588         .mode_valid = amdgpu_dm_connector_mode_valid,
5589         .atomic_check = amdgpu_dm_connector_atomic_check,
5590 };
5591
5592 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5593 {
5594 }
5595
5596 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5597 {
5598         struct drm_atomic_state *state = new_crtc_state->state;
5599         struct drm_plane *plane;
5600         int num_active = 0;
5601
5602         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5603                 struct drm_plane_state *new_plane_state;
5604
5605                 /* Cursor planes are "fake". */
5606                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5607                         continue;
5608
5609                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5610
5611                 if (!new_plane_state) {
5612                         /*
5613                          * The plane is enabled on the CRTC and hasn't changed
5614                          * state. This means that it previously passed
5615                          * validation and is therefore enabled.
5616                          */
5617                         num_active += 1;
5618                         continue;
5619                 }
5620
5621                 /* We need a framebuffer to be considered enabled. */
5622                 num_active += (new_plane_state->fb != NULL);
5623         }
5624
5625         return num_active;
5626 }
5627
5628 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5629                                          struct drm_crtc_state *new_crtc_state)
5630 {
5631         struct dm_crtc_state *dm_new_crtc_state =
5632                 to_dm_crtc_state(new_crtc_state);
5633
5634         dm_new_crtc_state->active_planes = 0;
5635
5636         if (!dm_new_crtc_state->stream)
5637                 return;
5638
5639         dm_new_crtc_state->active_planes =
5640                 count_crtc_active_planes(new_crtc_state);
5641 }
5642
5643 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5644                                        struct drm_crtc_state *state)
5645 {
5646         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5647         struct dc *dc = adev->dm.dc;
5648         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5649         int ret = -EINVAL;
5650
5651         dm_update_crtc_active_planes(crtc, state);
5652
5653         if (unlikely(!dm_crtc_state->stream &&
5654                      modeset_required(state, NULL, dm_crtc_state->stream))) {
5655                 WARN_ON(1);
5656                 return ret;
5657         }
5658
5659         /*
5660          * We require the primary plane to be enabled whenever the CRTC is, otherwise
5661          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5662          * planes are disabled, which is not supported by the hardware. And there is legacy
5663          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5664          */
5665         if (state->enable &&
5666             !(state->plane_mask & drm_plane_mask(crtc->primary)))
5667                 return -EINVAL;
5668
5669         /* In some use cases, like reset, no stream is attached */
5670         if (!dm_crtc_state->stream)
5671                 return 0;
5672
5673         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5674                 return 0;
5675
5676         return ret;
5677 }
5678
5679 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5680                                       const struct drm_display_mode *mode,
5681                                       struct drm_display_mode *adjusted_mode)
5682 {
5683         return true;
5684 }
5685
5686 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5687         .disable = dm_crtc_helper_disable,
5688         .atomic_check = dm_crtc_helper_atomic_check,
5689         .mode_fixup = dm_crtc_helper_mode_fixup,
5690         .get_scanout_position = amdgpu_crtc_get_scanout_position,
5691 };
5692
5693 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5694 {
5695
5696 }
5697
5698 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5699 {
5700         switch (display_color_depth) {
5701         case COLOR_DEPTH_666:
5702                 return 6;
5703         case COLOR_DEPTH_888:
5704                 return 8;
5705         case COLOR_DEPTH_101010:
5706                 return 10;
5707         case COLOR_DEPTH_121212:
5708                 return 12;
5709         case COLOR_DEPTH_141414:
5710                 return 14;
5711         case COLOR_DEPTH_161616:
5712                 return 16;
5713         default:
5714                 break;
5715         }
5716         return 0;
5717 }
5718
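/*
 * For MST connectors, translate the validated mode into a DP payload
 * bandwidth number (PBN) and reserve link time slots for it: bpp is
 * derived from the negotiated color depth (three components), PBN from
 * the pixel clock and bpp, and the VCPI slot count is then reserved
 * atomically against the topology using the link's PBN divider.
 */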
5719 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5720                                           struct drm_crtc_state *crtc_state,
5721                                           struct drm_connector_state *conn_state)
5722 {
5723         struct drm_atomic_state *state = crtc_state->state;
5724         struct drm_connector *connector = conn_state->connector;
5725         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5726         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5727         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5728         struct drm_dp_mst_topology_mgr *mst_mgr;
5729         struct drm_dp_mst_port *mst_port;
5730         enum dc_color_depth color_depth;
5731         int clock, bpp = 0;
5732         bool is_y420 = false;
5733
5734         if (!aconnector->port || !aconnector->dc_sink)
5735                 return 0;
5736
5737         mst_port = aconnector->port;
5738         mst_mgr = &aconnector->mst_port->mst_mgr;
5739
5740         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5741                 return 0;
5742
5743         if (!state->duplicated) {
5744                 int max_bpc = conn_state->max_requested_bpc;
5745                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5746                                 aconnector->force_yuv420_output;
5747                 color_depth = convert_color_depth_from_display_info(connector,
5748                                                                     is_y420,
5749                                                                     max_bpc);
5750                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5751                 clock = adjusted_mode->clock;
5752                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5753         }
5754         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5755                                                                            mst_mgr,
5756                                                                            mst_port,
5757                                                                            dm_new_connector_state->pbn,
5758                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
5759         if (dm_new_connector_state->vcpi_slots < 0) {
5760                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5761                 return dm_new_connector_state->vcpi_slots;
5762         }
5763         return 0;
5764 }
5765
5766 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5767         .disable = dm_encoder_helper_disable,
5768         .atomic_check = dm_encoder_helper_atomic_check
5769 };
5770
5771 #if defined(CONFIG_DRM_AMD_DC_DCN)
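/*
 * When DSC is enabled on an MST stream, the PBN computed during encoder
 * atomic_check (which assumed an uncompressed bpp) is stale. Walk the DC
 * streams, match each back to its connector via dm_stream_context, and
 * recompute PBN/VCPI from the DSC target bits_per_pixel; streams without
 * DSC get drm_dp_mst_atomic_enable_dsc() called with DSC disabled instead.
 */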
5772 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5773                                             struct dc_state *dc_state)
5774 {
5775         struct dc_stream_state *stream = NULL;
5776         struct drm_connector *connector;
5777         struct drm_connector_state *new_con_state, *old_con_state;
5778         struct amdgpu_dm_connector *aconnector;
5779         struct dm_connector_state *dm_conn_state;
5780         int i, j, clock, bpp;
5781         int vcpi, pbn_div, pbn = 0;
5782
5783         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5784
5785                 aconnector = to_amdgpu_dm_connector(connector);
5786
5787                 if (!aconnector->port)
5788                         continue;
5789
5790                 if (!new_con_state || !new_con_state->crtc)
5791                         continue;
5792
5793                 dm_conn_state = to_dm_connector_state(new_con_state);
5794
5795                 for (j = 0; j < dc_state->stream_count; j++) {
5796                         stream = dc_state->streams[j];
5797                         if (!stream)
5798                                 continue;
5799
5800                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5801                                 break;
5802
5803                         stream = NULL;
5804                 }
5805
5806                 if (!stream)
5807                         continue;
5808
5809                 if (stream->timing.flags.DSC != 1) {
5810                         drm_dp_mst_atomic_enable_dsc(state,
5811                                                      aconnector->port,
5812                                                      dm_conn_state->pbn,
5813                                                      0,
5814                                                      false);
5815                         continue;
5816                 }
5817
5818                 pbn_div = dm_mst_get_pbn_divider(stream->link);
5819                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5820                 clock = stream->timing.pix_clk_100hz / 10;
5821                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5822                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5823                                                     aconnector->port,
5824                                                     pbn, pbn_div,
5825                                                     true);
5826                 if (vcpi < 0)
5827                         return vcpi;
5828
5829                 dm_conn_state->pbn = pbn;
5830                 dm_conn_state->vcpi_slots = vcpi;
5831         }
5832         return 0;
5833 }
5834 #endif
5835
5836 static void dm_drm_plane_reset(struct drm_plane *plane)
5837 {
5838         struct dm_plane_state *amdgpu_state = NULL;
5839
5840         if (plane->state)
5841                 plane->funcs->atomic_destroy_state(plane, plane->state);
5842
5843         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5844         WARN_ON(amdgpu_state == NULL);
5845
5846         if (amdgpu_state)
5847                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5848 }
5849
5850 static struct drm_plane_state *
5851 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5852 {
5853         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5854
5855         old_dm_plane_state = to_dm_plane_state(plane->state);
5856         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5857         if (!dm_plane_state)
5858                 return NULL;
5859
5860         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5861
5862         if (old_dm_plane_state->dc_state) {
5863                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5864                 dc_plane_state_retain(dm_plane_state->dc_state);
5865         }
5866
5867         /* Framebuffer hasn't been updated yet, so retain old flags. */
5868         dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5869         dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5870
5871         return &dm_plane_state->base;
5872 }
5873
5874 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5875                                 struct drm_plane_state *state)
5876 {
5877         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5878
5879         if (dm_plane_state->dc_state)
5880                 dc_plane_state_release(dm_plane_state->dc_state);
5881
5882         drm_atomic_helper_plane_destroy_state(plane, state);
5883 }
5884
5885 static const struct drm_plane_funcs dm_plane_funcs = {
5886         .update_plane   = drm_atomic_helper_update_plane,
5887         .disable_plane  = drm_atomic_helper_disable_plane,
5888         .destroy        = drm_primary_helper_destroy,
5889         .reset = dm_drm_plane_reset,
5890         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5891         .atomic_destroy_state = dm_drm_plane_destroy_state,
5892 };
5893
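/*
 * prepare_fb pins the framebuffer's BO in a displayable domain (VRAM, or
 * whatever amdgpu_display_supported_domains() allows for non-cursor
 * planes), binds it into GART so it has a valid GPU address, and takes an
 * extra BO reference that cleanup_fb drops. Buffer attributes are filled
 * in here, rather than at atomic check time, because afb->address is only
 * known once the BO is pinned.
 */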
5894 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5895                                       struct drm_plane_state *new_state)
5896 {
5897         struct amdgpu_framebuffer *afb;
5898         struct drm_gem_object *obj;
5899         struct amdgpu_device *adev;
5900         struct amdgpu_bo *rbo;
5901         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5902         struct list_head list;
5903         struct ttm_validate_buffer tv;
5904         struct ww_acquire_ctx ticket;
5905         uint32_t domain;
5906         int r;
5907
5908         if (!new_state->fb) {
5909                 DRM_DEBUG_DRIVER("No FB bound\n");
5910                 return 0;
5911         }
5912
5913         afb = to_amdgpu_framebuffer(new_state->fb);
5914         obj = new_state->fb->obj[0];
5915         rbo = gem_to_amdgpu_bo(obj);
5916         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5917         INIT_LIST_HEAD(&list);
5918
5919         tv.bo = &rbo->tbo;
5920         tv.num_shared = 1;
5921         list_add(&tv.head, &list);
5922
5923         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5924         if (r) {
5925                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5926                 return r;
5927         }
5928
5929         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5930                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5931         else
5932                 domain = AMDGPU_GEM_DOMAIN_VRAM;
5933
5934         r = amdgpu_bo_pin(rbo, domain);
5935         if (unlikely(r != 0)) {
5936                 if (r != -ERESTARTSYS)
5937                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5938                 ttm_eu_backoff_reservation(&ticket, &list);
5939                 return r;
5940         }
5941
5942         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5943         if (unlikely(r != 0)) {
5944                 amdgpu_bo_unpin(rbo);
5945                 ttm_eu_backoff_reservation(&ticket, &list);
5946                 DRM_ERROR("%p bind failed\n", rbo);
5947                 return r;
5948         }
5949
5950         ttm_eu_backoff_reservation(&ticket, &list);
5951
5952         afb->address = amdgpu_bo_gpu_offset(rbo);
5953
5954         amdgpu_bo_ref(rbo);
5955
5956         /*
5957          * We don't do surface updates on planes that have been newly created,
5958          * but we also don't have the afb->address during atomic check.
5959          *
5960          * Fill in buffer attributes depending on the address here, but only on
5961          * newly created planes since they're not being used by DC yet and this
5962          * won't modify global state.
5963          */
5964         dm_plane_state_old = to_dm_plane_state(plane->state);
5965         dm_plane_state_new = to_dm_plane_state(new_state);
5966
5967         if (dm_plane_state_new->dc_state &&
5968             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5969                 struct dc_plane_state *plane_state =
5970                         dm_plane_state_new->dc_state;
5971                 bool force_disable_dcc = !plane_state->dcc.enable;
5972
5973                 fill_plane_buffer_attributes(
5974                         adev, afb, plane_state->format, plane_state->rotation,
5975                         dm_plane_state_new->tiling_flags,
5976                         &plane_state->tiling_info, &plane_state->plane_size,
5977                         &plane_state->dcc, &plane_state->address,
5978                         dm_plane_state_new->tmz_surface, force_disable_dcc);
5979         }
5980
5981         return 0;
5982 }
5983
5984 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5985                                        struct drm_plane_state *old_state)
5986 {
5987         struct amdgpu_bo *rbo;
5988         int r;
5989
5990         if (!old_state->fb)
5991                 return;
5992
5993         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5994         r = amdgpu_bo_reserve(rbo, false);
5995         if (unlikely(r)) {
5996                 DRM_ERROR("failed to reserve rbo before unpin\n");
5997                 return;
5998         }
5999
6000         amdgpu_bo_unpin(rbo);
6001         amdgpu_bo_unreserve(rbo);
6002         amdgpu_bo_unref(&rbo);
6003 }
6004
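/*
 * max_downscale = 0 and max_upscale = INT_MAX are passed as the
 * min_scale/max_scale limits of drm_atomic_helper_check_plane_state(),
 * i.e. scaling is effectively unrestricted here; the TODO in the body
 * notes that these should eventually come from DC plane caps.
 */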
6005 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6006                                        struct drm_crtc_state *new_crtc_state)
6007 {
6008         int max_downscale = 0;
6009         int max_upscale = INT_MAX;
6010
6011         /* TODO: These should be checked against DC plane caps */
6012         return drm_atomic_helper_check_plane_state(
6013                 state, new_crtc_state, max_downscale, max_upscale, true, true);
6014 }
6015
6016 static int dm_plane_atomic_check(struct drm_plane *plane,
6017                                  struct drm_plane_state *state)
6018 {
6019         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6020         struct dc *dc = adev->dm.dc;
6021         struct dm_plane_state *dm_plane_state;
6022         struct dc_scaling_info scaling_info;
6023         struct drm_crtc_state *new_crtc_state;
6024         int ret;
6025
6026         dm_plane_state = to_dm_plane_state(state);
6027
6028         if (!dm_plane_state->dc_state)
6029                 return 0;
6030
6031         new_crtc_state =
6032                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6033         if (!new_crtc_state)
6034                 return -EINVAL;
6035
6036         ret = dm_plane_helper_check_state(state, new_crtc_state);
6037         if (ret)
6038                 return ret;
6039
6040         ret = fill_dc_scaling_info(state, &scaling_info);
6041         if (ret)
6042                 return ret;
6043
6044         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6045                 return 0;
6046
6047         return -EINVAL;
6048 }
6049
6050 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6051                                        struct drm_plane_state *new_plane_state)
6052 {
6053         /* Only support async updates on cursor planes. */
6054         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6055                 return -EINVAL;
6056
6057         return 0;
6058 }
6059
6060 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6061                                          struct drm_plane_state *new_state)
6062 {
6063         struct drm_plane_state *old_state =
6064                 drm_atomic_get_old_plane_state(new_state->state, plane);
6065
6066         swap(plane->state->fb, new_state->fb);
6067
6068         plane->state->src_x = new_state->src_x;
6069         plane->state->src_y = new_state->src_y;
6070         plane->state->src_w = new_state->src_w;
6071         plane->state->src_h = new_state->src_h;
6072         plane->state->crtc_x = new_state->crtc_x;
6073         plane->state->crtc_y = new_state->crtc_y;
6074         plane->state->crtc_w = new_state->crtc_w;
6075         plane->state->crtc_h = new_state->crtc_h;
6076
6077         handle_cursor_update(plane, old_state);
6078 }
6079
6080 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6081         .prepare_fb = dm_plane_helper_prepare_fb,
6082         .cleanup_fb = dm_plane_helper_cleanup_fb,
6083         .atomic_check = dm_plane_atomic_check,
6084         .atomic_async_check = dm_plane_atomic_async_check,
6085         .atomic_async_update = dm_plane_atomic_async_update
6086 };
6087
6088 /*
6089  * TODO: these are currently initialized to rgb formats only.
6090  * For future use cases we should either initialize them dynamically based on
6091  * plane capabilities, or initialize this array to all formats, so the internal
6092  * DRM check will succeed, and let DC implement the proper checks itself.
6093  */
6094 static const uint32_t rgb_formats[] = {
6095         DRM_FORMAT_XRGB8888,
6096         DRM_FORMAT_ARGB8888,
6097         DRM_FORMAT_RGBA8888,
6098         DRM_FORMAT_XRGB2101010,
6099         DRM_FORMAT_XBGR2101010,
6100         DRM_FORMAT_ARGB2101010,
6101         DRM_FORMAT_ABGR2101010,
6102         DRM_FORMAT_XBGR8888,
6103         DRM_FORMAT_ABGR8888,
6104         DRM_FORMAT_RGB565,
6105 };
6106
6107 static const uint32_t overlay_formats[] = {
6108         DRM_FORMAT_XRGB8888,
6109         DRM_FORMAT_ARGB8888,
6110         DRM_FORMAT_RGBA8888,
6111         DRM_FORMAT_XBGR8888,
6112         DRM_FORMAT_ABGR8888,
6113         DRM_FORMAT_RGB565
6114 };
6115
6116 static const u32 cursor_formats[] = {
6117         DRM_FORMAT_ARGB8888
6118 };
6119
6120 static int get_plane_formats(const struct drm_plane *plane,
6121                              const struct dc_plane_cap *plane_cap,
6122                              uint32_t *formats, int max_formats)
6123 {
6124         int i, num_formats = 0;
6125
6126         /*
6127          * TODO: Query support for each group of formats directly from
6128          * DC plane caps. This will require adding more formats to the
6129          * caps list.
6130          */
6131
6132         switch (plane->type) {
6133         case DRM_PLANE_TYPE_PRIMARY:
6134                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6135                         if (num_formats >= max_formats)
6136                                 break;
6137
6138                         formats[num_formats++] = rgb_formats[i];
6139                 }
6140
6141                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6142                         formats[num_formats++] = DRM_FORMAT_NV12;
6143                 if (plane_cap && plane_cap->pixel_format_support.p010)
6144                         formats[num_formats++] = DRM_FORMAT_P010;
6145                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6146                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6147                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6148                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6149                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6150                 }
6151                 break;
6152
6153         case DRM_PLANE_TYPE_OVERLAY:
6154                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6155                         if (num_formats >= max_formats)
6156                                 break;
6157
6158                         formats[num_formats++] = overlay_formats[i];
6159                 }
6160                 break;
6161
6162         case DRM_PLANE_TYPE_CURSOR:
6163                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6164                         if (num_formats >= max_formats)
6165                                 break;
6166
6167                         formats[num_formats++] = cursor_formats[i];
6168                 }
6169                 break;
6170         }
6171
6172         return num_formats;
6173 }
6174
6175 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6176                                 struct drm_plane *plane,
6177                                 unsigned long possible_crtcs,
6178                                 const struct dc_plane_cap *plane_cap)
6179 {
6180         uint32_t formats[32];
6181         int num_formats;
6182         int res = -EPERM;
6183         unsigned int supported_rotations;
6184
6185         num_formats = get_plane_formats(plane, plane_cap, formats,
6186                                         ARRAY_SIZE(formats));
6187
6188         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6189                                        &dm_plane_funcs, formats, num_formats,
6190                                        NULL, plane->type, NULL);
6191         if (res)
6192                 return res;
6193
6194         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6195             plane_cap && plane_cap->per_pixel_alpha) {
6196                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6197                                           BIT(DRM_MODE_BLEND_PREMULTI);
6198
6199                 drm_plane_create_alpha_property(plane);
6200                 drm_plane_create_blend_mode_property(plane, blend_caps);
6201         }
6202
6203         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6204             plane_cap &&
6205             (plane_cap->pixel_format_support.nv12 ||
6206              plane_cap->pixel_format_support.p010)) {
6207                 /* This only affects YUV formats. */
6208                 drm_plane_create_color_properties(
6209                         plane,
6210                         BIT(DRM_COLOR_YCBCR_BT601) |
6211                         BIT(DRM_COLOR_YCBCR_BT709) |
6212                         BIT(DRM_COLOR_YCBCR_BT2020),
6213                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6214                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6215                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6216         }
6217
6218         supported_rotations =
6219                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6220                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6221
6222         if (dm->adev->asic_type >= CHIP_BONAIRE)
6223                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6224                                                    supported_rotations);
6225
6226         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6227
6228         /* Create (reset) the plane state */
6229         if (plane->funcs->reset)
6230                 plane->funcs->reset(plane);
6231
6232         return 0;
6233 }
6234
6235 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6236                                struct drm_plane *plane,
6237                                uint32_t crtc_index)
6238 {
6239         struct amdgpu_crtc *acrtc = NULL;
6240         struct drm_plane *cursor_plane;
6241
6242         int res = -ENOMEM;
6243
6244         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6245         if (!cursor_plane)
6246                 goto fail;
6247
6248         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6249         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6250
6251         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6252         if (!acrtc)
6253                 goto fail;
6254
6255         res = drm_crtc_init_with_planes(
6256                         dm->ddev,
6257                         &acrtc->base,
6258                         plane,
6259                         cursor_plane,
6260                         &amdgpu_dm_crtc_funcs, NULL);
6261
6262         if (res)
6263                 goto fail;
6264
6265         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6266
6267         /* Create (reset) the CRTC state */
6268         if (acrtc->base.funcs->reset)
6269                 acrtc->base.funcs->reset(&acrtc->base);
6270
6271         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6272         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6273
6274         acrtc->crtc_id = crtc_index;
6275         acrtc->base.enabled = false;
6276         acrtc->otg_inst = -1;
6277
6278         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6279         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6280                                    true, MAX_COLOR_LUT_ENTRIES);
6281         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6282
6283         return 0;
6284
6285 fail:
6286         kfree(acrtc);
6287         kfree(cursor_plane);
6288         return res;
6289 }
6290
6291
6292 static int to_drm_connector_type(enum signal_type st)
6293 {
6294         switch (st) {
6295         case SIGNAL_TYPE_HDMI_TYPE_A:
6296                 return DRM_MODE_CONNECTOR_HDMIA;
6297         case SIGNAL_TYPE_EDP:
6298                 return DRM_MODE_CONNECTOR_eDP;
6299         case SIGNAL_TYPE_LVDS:
6300                 return DRM_MODE_CONNECTOR_LVDS;
6301         case SIGNAL_TYPE_RGB:
6302                 return DRM_MODE_CONNECTOR_VGA;
6303         case SIGNAL_TYPE_DISPLAY_PORT:
6304         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6305                 return DRM_MODE_CONNECTOR_DisplayPort;
6306         case SIGNAL_TYPE_DVI_DUAL_LINK:
6307         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6308                 return DRM_MODE_CONNECTOR_DVID;
6309         case SIGNAL_TYPE_VIRTUAL:
6310                 return DRM_MODE_CONNECTOR_VIRTUAL;
6311
6312         default:
6313                 return DRM_MODE_CONNECTOR_Unknown;
6314         }
6315 }
6316
6317 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6318 {
6319         struct drm_encoder *encoder;
6320
6321         /* There is only one encoder per connector */
6322         drm_connector_for_each_possible_encoder(connector, encoder)
6323                 return encoder;
6324
6325         return NULL;
6326 }
6327
6328 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6329 {
6330         struct drm_encoder *encoder;
6331         struct amdgpu_encoder *amdgpu_encoder;
6332
6333         encoder = amdgpu_dm_connector_to_encoder(connector);
6334
6335         if (encoder == NULL)
6336                 return;
6337
6338         amdgpu_encoder = to_amdgpu_encoder(encoder);
6339
6340         amdgpu_encoder->native_mode.clock = 0;
6341
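        /*
         * probed_modes has already been sorted by drm_mode_sort() in
         * amdgpu_dm_connector_ddc_get_modes(), so only the first (most
         * favored) entry is examined: the loop below breaks unconditionally
         * after one iteration, adopting that mode as native only if it is
         * marked DRM_MODE_TYPE_PREFERRED.
         */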
6342         if (!list_empty(&connector->probed_modes)) {
6343                 struct drm_display_mode *preferred_mode = NULL;
6344
6345                 list_for_each_entry(preferred_mode,
6346                                     &connector->probed_modes,
6347                                     head) {
6348                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6349                                 amdgpu_encoder->native_mode = *preferred_mode;
6350
6351                         break;
6352                 }
6353
6354         }
6355 }
6356
6357 static struct drm_display_mode *
6358 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6359                              char *name,
6360                              int hdisplay, int vdisplay)
6361 {
6362         struct drm_device *dev = encoder->dev;
6363         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6364         struct drm_display_mode *mode = NULL;
6365         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6366
6367         mode = drm_mode_duplicate(dev, native_mode);
6368
6369         if (mode == NULL)
6370                 return NULL;
6371
6372         mode->hdisplay = hdisplay;
6373         mode->vdisplay = vdisplay;
6374         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6375         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6376
6377         return mode;
6378
6379 }
6380
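/*
 * Synthesize common modes by duplicating the encoder's native mode with a
 * smaller hdisplay/vdisplay. Only sizes strictly smaller than the native
 * mode are added, and only when an identically sized mode was not already
 * probed from the EDID, so these presumably get scaled up to the native
 * timing by the scaler.
 */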
6381 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6382                                                  struct drm_connector *connector)
6383 {
6384         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6385         struct drm_display_mode *mode = NULL;
6386         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6387         struct amdgpu_dm_connector *amdgpu_dm_connector =
6388                                 to_amdgpu_dm_connector(connector);
6389         int i;
6390         int n;
6391         struct mode_size {
6392                 char name[DRM_DISPLAY_MODE_LEN];
6393                 int w;
6394                 int h;
6395         } common_modes[] = {
6396                 {  "640x480",  640,  480},
6397                 {  "800x600",  800,  600},
6398                 { "1024x768", 1024,  768},
6399                 { "1280x720", 1280,  720},
6400                 { "1280x800", 1280,  800},
6401                 {"1280x1024", 1280, 1024},
6402                 { "1440x900", 1440,  900},
6403                 {"1680x1050", 1680, 1050},
6404                 {"1600x1200", 1600, 1200},
6405                 {"1920x1080", 1920, 1080},
6406                 {"1920x1200", 1920, 1200}
6407         };
6408
6409         n = ARRAY_SIZE(common_modes);
6410
6411         for (i = 0; i < n; i++) {
6412                 struct drm_display_mode *curmode = NULL;
6413                 bool mode_existed = false;
6414
6415                 if (common_modes[i].w > native_mode->hdisplay ||
6416                     common_modes[i].h > native_mode->vdisplay ||
6417                    (common_modes[i].w == native_mode->hdisplay &&
6418                     common_modes[i].h == native_mode->vdisplay))
6419                         continue;
6420
6421                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6422                         if (common_modes[i].w == curmode->hdisplay &&
6423                             common_modes[i].h == curmode->vdisplay) {
6424                                 mode_existed = true;
6425                                 break;
6426                         }
6427                 }
6428
6429                 if (mode_existed)
6430                         continue;
6431
6432                 mode = amdgpu_dm_create_common_mode(encoder,
6433                                 common_modes[i].name, common_modes[i].w,
6434                                 common_modes[i].h);
6435                 drm_mode_probed_add(connector, mode);
6436                 amdgpu_dm_connector->num_modes++;
6437         }
6438 }
6439
6440 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6441                                               struct edid *edid)
6442 {
6443         struct amdgpu_dm_connector *amdgpu_dm_connector =
6444                         to_amdgpu_dm_connector(connector);
6445
6446         if (edid) {
6447                 /* empty probed_modes */
6448                 INIT_LIST_HEAD(&connector->probed_modes);
6449                 amdgpu_dm_connector->num_modes =
6450                                 drm_add_edid_modes(connector, edid);
6451
6452                 /* Sort the probed modes before calling
6453                  * amdgpu_dm_get_native_mode(), since an EDID can have
6454                  * more than one preferred mode. Modes later in the
6455                  * probed mode list could be of higher, preferred
6456                  * resolution: for example, a 3840x2160 preferred timing
6457                  * in the base EDID and a 4096x2160 preferred resolution
6458                  * in a DID extension block later.
6459                  */
6460                 drm_mode_sort(&connector->probed_modes);
6461                 amdgpu_dm_get_native_mode(connector);
6462         } else {
6463                 amdgpu_dm_connector->num_modes = 0;
6464         }
6465 }
6466
6467 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6468 {
6469         struct amdgpu_dm_connector *amdgpu_dm_connector =
6470                         to_amdgpu_dm_connector(connector);
6471         struct drm_encoder *encoder;
6472         struct edid *edid = amdgpu_dm_connector->edid;
6473
6474         encoder = amdgpu_dm_connector_to_encoder(connector);
6475
6476         if (!edid || !drm_edid_is_valid(edid)) {
6477                 amdgpu_dm_connector->num_modes =
6478                                 drm_add_modes_noedid(connector, 640, 480);
6479         } else {
6480                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6481                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6482         }
6483         amdgpu_dm_fbc_init(connector);
6484
6485         return amdgpu_dm_connector->num_modes;
6486 }
6487
6488 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6489                                      struct amdgpu_dm_connector *aconnector,
6490                                      int connector_type,
6491                                      struct dc_link *link,
6492                                      int link_index)
6493 {
6494         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6495
6496         /*
6497          * Some of the properties below require access to state, like bpc.
6498          * Allocate some default initial connector state with our reset helper.
6499          */
6500         if (aconnector->base.funcs->reset)
6501                 aconnector->base.funcs->reset(&aconnector->base);
6502
6503         aconnector->connector_id = link_index;
6504         aconnector->dc_link = link;
6505         aconnector->base.interlace_allowed = false;
6506         aconnector->base.doublescan_allowed = false;
6507         aconnector->base.stereo_allowed = false;
6508         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6509         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6510         aconnector->audio_inst = -1;
6511         mutex_init(&aconnector->hpd_lock);
6512
6513         /*
6514          * Configure HPD hot plug support: connector->polled defaults to 0,
6515          * which means HPD hot plug is not supported.
6516          */
6517         switch (connector_type) {
6518         case DRM_MODE_CONNECTOR_HDMIA:
6519                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6520                 aconnector->base.ycbcr_420_allowed =
6521                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6522                 break;
6523         case DRM_MODE_CONNECTOR_DisplayPort:
6524                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6525                 aconnector->base.ycbcr_420_allowed =
6526                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
6527                 break;
6528         case DRM_MODE_CONNECTOR_DVID:
6529                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6530                 break;
6531         default:
6532                 break;
6533         }
6534
6535         drm_object_attach_property(&aconnector->base.base,
6536                                 dm->ddev->mode_config.scaling_mode_property,
6537                                 DRM_MODE_SCALE_NONE);
6538
6539         drm_object_attach_property(&aconnector->base.base,
6540                                 adev->mode_info.underscan_property,
6541                                 UNDERSCAN_OFF);
6542         drm_object_attach_property(&aconnector->base.base,
6543                                 adev->mode_info.underscan_hborder_property,
6544                                 0);
6545         drm_object_attach_property(&aconnector->base.base,
6546                                 adev->mode_info.underscan_vborder_property,
6547                                 0);
6548
6549         if (!aconnector->mst_port)
6550                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6551
6552         /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
6553         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6554         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6555
6556         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6557             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6558                 drm_object_attach_property(&aconnector->base.base,
6559                                 adev->mode_info.abm_level_property, 0);
6560         }
6561
6562         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6563             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6564             connector_type == DRM_MODE_CONNECTOR_eDP) {
6565                 drm_object_attach_property(
6566                         &aconnector->base.base,
6567                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
6568
6569                 if (!aconnector->mst_port)
6570                         drm_connector_attach_vrr_capable_property(&aconnector->base);
6571
6572 #ifdef CONFIG_DRM_AMD_DC_HDCP
6573                 if (adev->dm.hdcp_workqueue)
6574                         drm_connector_attach_content_protection_property(&aconnector->base, true);
6575 #endif
6576         }
6577 }
6578
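/*
 * Translate an i2c_msg array into a DC i2c_command and submit it over the
 * link's DDC channel. On success the whole transfer count is returned, as
 * the I2C core expects; any failure collapses to -EIO.
 */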
6579 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6580                               struct i2c_msg *msgs, int num)
6581 {
6582         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6583         struct ddc_service *ddc_service = i2c->ddc_service;
6584         struct i2c_command cmd;
6585         int i;
6586         int result = -EIO;
6587
6588         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6589
6590         if (!cmd.payloads)
6591                 return result;
6592
6593         cmd.number_of_payloads = num;
6594         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6595         cmd.speed = 100;
6596
6597         for (i = 0; i < num; i++) {
6598                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6599                 cmd.payloads[i].address = msgs[i].addr;
6600                 cmd.payloads[i].length = msgs[i].len;
6601                 cmd.payloads[i].data = msgs[i].buf;
6602         }
6603
6604         if (dc_submit_i2c(
6605                         ddc_service->ctx->dc,
6606                         ddc_service->ddc_pin->hw_info.ddc_channel,
6607                         &cmd))
6608                 result = num;
6609
6610         kfree(cmd.payloads);
6611         return result;
6612 }
6613
6614 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6615 {
6616         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6617 }
6618
6619 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6620         .master_xfer = amdgpu_dm_i2c_xfer,
6621         .functionality = amdgpu_dm_i2c_func,
6622 };
6623
6624 static struct amdgpu_i2c_adapter *
6625 create_i2c(struct ddc_service *ddc_service,
6626            int link_index,
6627            int *res)
6628 {
6629         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6630         struct amdgpu_i2c_adapter *i2c;
6631
6632         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6633         if (!i2c)
6634                 return NULL;
6635         i2c->base.owner = THIS_MODULE;
6636         i2c->base.class = I2C_CLASS_DDC;
6637         i2c->base.dev.parent = &adev->pdev->dev;
6638         i2c->base.algo = &amdgpu_dm_i2c_algo;
6639         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6640         i2c_set_adapdata(&i2c->base, i2c);
6641         i2c->ddc_service = ddc_service;
6642         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6643
6644         return i2c;
6645 }
6646
6647
6648 /*
6649  * Note: this function assumes that dc_link_detect() was called for the
6650  * dc_link which will be represented by this aconnector.
6651  */
6652 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6653                                     struct amdgpu_dm_connector *aconnector,
6654                                     uint32_t link_index,
6655                                     struct amdgpu_encoder *aencoder)
6656 {
6657         int res = 0;
6658         int connector_type;
6659         struct dc *dc = dm->dc;
6660         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6661         struct amdgpu_i2c_adapter *i2c;
6662
6663         link->priv = aconnector;
6664
6665         DRM_DEBUG_DRIVER("%s()\n", __func__);
6666
6667         i2c = create_i2c(link->ddc, link->link_index, &res);
6668         if (!i2c) {
6669                 DRM_ERROR("Failed to create i2c adapter data\n");
6670                 return -ENOMEM;
6671         }
6672
6673         aconnector->i2c = i2c;
6674         res = i2c_add_adapter(&i2c->base);
6675
6676         if (res) {
6677                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6678                 goto out_free;
6679         }
6680
6681         connector_type = to_drm_connector_type(link->connector_signal);
6682
6683         res = drm_connector_init_with_ddc(
6684                         dm->ddev,
6685                         &aconnector->base,
6686                         &amdgpu_dm_connector_funcs,
6687                         connector_type,
6688                         &i2c->base);
6689
6690         if (res) {
6691                 DRM_ERROR("connector_init failed\n");
6692                 aconnector->connector_id = -1;
6693                 goto out_free;
6694         }
6695
6696         drm_connector_helper_add(
6697                         &aconnector->base,
6698                         &amdgpu_dm_connector_helper_funcs);
6699
6700         amdgpu_dm_connector_init_helper(
6701                 dm,
6702                 aconnector,
6703                 connector_type,
6704                 link,
6705                 link_index);
6706
6707         drm_connector_attach_encoder(
6708                 &aconnector->base, &aencoder->base);
6709
6710         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6711                 || connector_type == DRM_MODE_CONNECTOR_eDP)
6712                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6713
6714 out_free:
6715         if (res) {
6716                 kfree(i2c);
6717                 aconnector->i2c = NULL;
6718         }
6719         return res;
6720 }
6721
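/*
 * Any encoder can be driven by any CRTC here, so possible_crtcs is simply
 * a mask with one bit set per CRTC exposed by the ASIC, capped at six.
 */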
6722 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6723 {
6724         switch (adev->mode_info.num_crtc) {
6725         case 1:
6726                 return 0x1;
6727         case 2:
6728                 return 0x3;
6729         case 3:
6730                 return 0x7;
6731         case 4:
6732                 return 0xf;
6733         case 5:
6734                 return 0x1f;
6735         case 6:
6736         default:
6737                 return 0x3f;
6738         }
6739 }
6740
6741 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6742                                   struct amdgpu_encoder *aencoder,
6743                                   uint32_t link_index)
6744 {
6745         struct amdgpu_device *adev = drm_to_adev(dev);
6746
6747         int res = drm_encoder_init(dev,
6748                                    &aencoder->base,
6749                                    &amdgpu_dm_encoder_funcs,
6750                                    DRM_MODE_ENCODER_TMDS,
6751                                    NULL);
6752
6753         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6754
6755         if (!res)
6756                 aencoder->encoder_id = link_index;
6757         else
6758                 aencoder->encoder_id = -1;
6759
6760         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6761
6762         return res;
6763 }
6764
6765 static void manage_dm_interrupts(struct amdgpu_device *adev,
6766                                  struct amdgpu_crtc *acrtc,
6767                                  bool enable)
6768 {
6769         /*
6770          * We have no guarantee that the frontend index maps to the same
6771          * backend index - some even map to more than one.
6772          *
6773          * TODO: Use a different interrupt or check DC itself for the mapping.
6774          */
6775         int irq_type =
6776                 amdgpu_display_crtc_idx_to_irq_type(
6777                         adev,
6778                         acrtc->crtc_id);
6779
6780         if (enable) {
6781                 drm_crtc_vblank_on(&acrtc->base);
6782                 amdgpu_irq_get(
6783                         adev,
6784                         &adev->pageflip_irq,
6785                         irq_type);
6786         } else {
6787
6788                 amdgpu_irq_put(
6789                         adev,
6790                         &adev->pageflip_irq,
6791                         irq_type);
6792                 drm_crtc_vblank_off(&acrtc->base);
6793         }
6794 }
6795
6796 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6797                                       struct amdgpu_crtc *acrtc)
6798 {
6799         int irq_type =
6800                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6801
6802         /*
6803          * This reads the current state for the IRQ and forcibly reapplies
6804          * the setting to hardware.
6805          */
6806         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6807 }
6808
6809 static bool
6810 is_scaling_state_different(const struct dm_connector_state *dm_state,
6811                            const struct dm_connector_state *old_dm_state)
6812 {
6813         if (dm_state->scaling != old_dm_state->scaling)
6814                 return true;
6815         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6816                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6817                         return true;
6818         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6819                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6820                         return true;
6821         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6822                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6823                 return true;
6824         return false;
6825 }
6826
6827 #ifdef CONFIG_DRM_AMD_DC_HDCP
6828 static bool is_content_protection_different(struct drm_connector_state *state,
6829                                             const struct drm_connector_state *old_state,
6830                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6831 {
6832         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6833
6834         if (old_state->hdcp_content_type != state->hdcp_content_type &&
6835             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6836                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6837                 return true;
6838         }
6839
6840         /* CP is being re-enabled; ignore this. */
6841         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6842             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6843                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6844                 return false;
6845         }
6846
6847         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6848         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6849             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6850                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6851
6852         /* Check that something is actually connected and enabled; otherwise we would
6853          * start HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6854          */
6855         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6856             aconnector->dc_sink != NULL)
6857                 return true;
6858
6859         if (old_state->content_protection == state->content_protection)
6860                 return false;
6861
6862         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6863                 return true;
6864
6865         return false;
6866 }
6867
6868 #endif
6869 static void remove_stream(struct amdgpu_device *adev,
6870                           struct amdgpu_crtc *acrtc,
6871                           struct dc_stream_state *stream)
6872 {
6873         /* This is the update mode case: the CRTC is detached from its stream. */
6874
6875         acrtc->otg_inst = -1;
6876         acrtc->enabled = false;
6877 }
6878
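/*
 * Convert the cursor plane's CRTC coordinates into a DC cursor position.
 * Negative x/y are clamped to zero with the overhang folded into the
 * hotspot, so the image is cropped rather than wrapped at the top/left
 * edge. A cursor that is entirely off the top-left is reported with
 * position->enable == false; one exceeding the hardware maximum size is
 * rejected with -EINVAL.
 */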
6879 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6880                                struct dc_cursor_position *position)
6881 {
6882         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6883         int x, y;
6884         int xorigin = 0, yorigin = 0;
6885
6886         position->enable = false;
6887         position->x = 0;
6888         position->y = 0;
6889
6890         if (!crtc || !plane->state->fb)
6891                 return 0;
6892
6893         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6894             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6895                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6896                           __func__,
6897                           plane->state->crtc_w,
6898                           plane->state->crtc_h);
6899                 return -EINVAL;
6900         }
6901
6902         x = plane->state->crtc_x;
6903         y = plane->state->crtc_y;
6904
6905         if (x <= -amdgpu_crtc->max_cursor_width ||
6906             y <= -amdgpu_crtc->max_cursor_height)
6907                 return 0;
6908
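        /*
         * For a cursor partially off the top/left edge, fold the offscreen
         * amount into the hotspot and clamp the position to zero so only
         * the visible portion is scanned out.
         */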
6909         if (x < 0) {
6910                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6911                 x = 0;
6912         }
6913         if (y < 0) {
6914                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6915                 y = 0;
6916         }
6917         position->enable = true;
6918         position->translate_by_source = true;
6919         position->x = x;
6920         position->y = y;
6921         position->x_hotspot = xorigin;
6922         position->y_hotspot = yorigin;
6923
6924         return 0;
6925 }
6926
6927 static void handle_cursor_update(struct drm_plane *plane,
6928                                  struct drm_plane_state *old_plane_state)
6929 {
6930         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6931         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6932         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6933         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6934         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6935         uint64_t address = afb ? afb->address : 0;
6936         struct dc_cursor_position position;
6937         struct dc_cursor_attributes attributes;
6938         int ret;
6939
6940         if (!plane->state->fb && !old_plane_state->fb)
6941                 return;
6942
6943         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6944                          __func__,
6945                          amdgpu_crtc->crtc_id,
6946                          plane->state->crtc_w,
6947                          plane->state->crtc_h);
6948
6949         ret = get_cursor_position(plane, crtc, &position);
6950         if (ret)
6951                 return;
6952
6953         if (!position.enable) {
6954                 /* turn off cursor */
6955                 if (crtc_state && crtc_state->stream) {
6956                         mutex_lock(&adev->dm.dc_lock);
6957                         dc_stream_set_cursor_position(crtc_state->stream,
6958                                                       &position);
6959                         mutex_unlock(&adev->dm.dc_lock);
6960                 }
6961                 return;
6962         }
6963
6964         amdgpu_crtc->cursor_width = plane->state->crtc_w;
6965         amdgpu_crtc->cursor_height = plane->state->crtc_h;
6966
6967         memset(&attributes, 0, sizeof(attributes));
6968         attributes.address.high_part = upper_32_bits(address);
6969         attributes.address.low_part  = lower_32_bits(address);
6970         attributes.width             = plane->state->crtc_w;
6971         attributes.height            = plane->state->crtc_h;
6972         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6973         attributes.rotation_angle    = 0;
6974         attributes.attribute_flags.value = 0;
6975
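        /* Cursor surfaces are assumed to be linear, so pitch == width. */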
6976         attributes.pitch = attributes.width;
6977
6978         if (crtc_state->stream) {
6979                 mutex_lock(&adev->dm.dc_lock);
6980                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6981                                                          &attributes))
6982                         DRM_ERROR("DC failed to set cursor attributes\n");
6983
6984                 if (!dc_stream_set_cursor_position(crtc_state->stream,
6985                                                    &position))
6986                         DRM_ERROR("DC failed to set cursor position\n");
6987                 mutex_unlock(&adev->dm.dc_lock);
6988         }
6989 }
6990
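/*
 * Hand the pending DRM event over to the pageflip interrupt handler.
 * Caller must hold the drm device's event_lock.
 */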
6991 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6992 {
6993
6994         assert_spin_locked(&acrtc->base.dev->event_lock);
6995         WARN_ON(acrtc->event);
6996
6997         acrtc->event = acrtc->base.state->event;
6998
6999         /* Set the flip status */
7000         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7001
7002         /* Mark this event as consumed */
7003         acrtc->base.state->event = NULL;
7004
7005         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7006                                                  acrtc->crtc_id);
7007 }
7008
7009 static void update_freesync_state_on_stream(
7010         struct amdgpu_display_manager *dm,
7011         struct dm_crtc_state *new_crtc_state,
7012         struct dc_stream_state *new_stream,
7013         struct dc_plane_state *surface,
7014         u32 flip_timestamp_in_us)
7015 {
7016         struct mod_vrr_params vrr_params;
7017         struct dc_info_packet vrr_infopacket = {0};
7018         struct amdgpu_device *adev = dm->adev;
7019         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7020         unsigned long flags;
7021
7022         if (!new_stream)
7023                 return;
7024
7025         /*
7026          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7027          * For now it's sufficient to just guard against these conditions.
7028          */
7029
7030         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7031                 return;
7032
7033         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7034         vrr_params = acrtc->dm_irq_params.vrr_params;
7035
7036         if (surface) {
7037                 mod_freesync_handle_preflip(
7038                         dm->freesync_module,
7039                         surface,
7040                         new_stream,
7041                         flip_timestamp_in_us,
7042                         &vrr_params);
7043
7044                 if (adev->family < AMDGPU_FAMILY_AI &&
7045                     amdgpu_dm_vrr_active(new_crtc_state)) {
7046                         mod_freesync_handle_v_update(dm->freesync_module,
7047                                                      new_stream, &vrr_params);
7048
7049                         /* Need to call this before the frame ends. */
7050                         dc_stream_adjust_vmin_vmax(dm->dc,
7051                                                    new_crtc_state->stream,
7052                                                    &vrr_params.adjust);
7053                 }
7054         }
7055
7056         mod_freesync_build_vrr_infopacket(
7057                 dm->freesync_module,
7058                 new_stream,
7059                 &vrr_params,
7060                 PACKET_TYPE_VRR,
7061                 TRANSFER_FUNC_UNKNOWN,
7062                 &vrr_infopacket);
7063
7064         new_crtc_state->freesync_timing_changed |=
7065                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7066                         &vrr_params.adjust,
7067                         sizeof(vrr_params.adjust)) != 0);
7068
7069         new_crtc_state->freesync_vrr_info_changed |=
7070                 (memcmp(&new_crtc_state->vrr_infopacket,
7071                         &vrr_infopacket,
7072                         sizeof(vrr_infopacket)) != 0);
7073
7074         acrtc->dm_irq_params.vrr_params = vrr_params;
7075         new_crtc_state->vrr_infopacket = vrr_infopacket;
7076
7077         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7078         new_stream->vrr_infopacket = vrr_infopacket;
7079
7080         if (new_crtc_state->freesync_vrr_info_changed)
7081                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7082                               new_crtc_state->base.crtc->base.id,
7083                               (int)new_crtc_state->base.vrr_enabled,
7084                               (int)vrr_params.state);
7085
7086         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7087 }
7088
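/*
 * Recompute the VRR parameters for a stream from its freesync config and
 * mirror them into dm_irq_params under event_lock so the vblank/vupdate
 * IRQ handlers see a consistent copy.
 */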
7089 static void update_stream_irq_parameters(
7090         struct amdgpu_display_manager *dm,
7091         struct dm_crtc_state *new_crtc_state)
7092 {
7093         struct dc_stream_state *new_stream = new_crtc_state->stream;
7094         struct mod_vrr_params vrr_params;
7095         struct mod_freesync_config config = new_crtc_state->freesync_config;
7096         struct amdgpu_device *adev = dm->adev;
7097         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7098         unsigned long flags;
7099
7100         if (!new_stream)
7101                 return;
7102
7103         /*
7104          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7105          * For now it's sufficient to just guard against these conditions.
7106          */
7107         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7108                 return;
7109
7110         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7111         vrr_params = acrtc->dm_irq_params.vrr_params;
7112
7113         if (new_crtc_state->vrr_supported &&
7114             config.min_refresh_in_uhz &&
7115             config.max_refresh_in_uhz) {
7116                 config.state = new_crtc_state->base.vrr_enabled ?
7117                         VRR_STATE_ACTIVE_VARIABLE :
7118                         VRR_STATE_INACTIVE;
7119         } else {
7120                 config.state = VRR_STATE_UNSUPPORTED;
7121         }
7122
7123         mod_freesync_build_vrr_params(dm->freesync_module,
7124                                       new_stream,
7125                                       &config, &vrr_params);
7126
7127         new_crtc_state->freesync_timing_changed |=
7128                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7129                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7130
7131         new_crtc_state->freesync_config = config;
7132         /* Copy state for access from DM IRQ handler */
7133         acrtc->dm_irq_params.freesync_config = config;
7134         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7135         acrtc->dm_irq_params.vrr_params = vrr_params;
7136         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7137 }
7138
7139 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7140                                             struct dm_crtc_state *new_state)
7141 {
7142         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7143         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7144
7145         if (!old_vrr_active && new_vrr_active) {
7146                 /* Transition VRR inactive -> active:
7147                  * While VRR is active, we must not disable vblank irq, as a
7148                  * re-enable after a disable could compute bogus vblank/pflip
7149                  * timestamps if it happened inside the display front-porch.
7150                  *
7151                  * We also need vupdate irq for the actual core vblank handling
7152                  * at end of vblank.
7153                  */
7154                 dm_set_vupdate_irq(new_state->base.crtc, true);
7155                 drm_crtc_vblank_get(new_state->base.crtc);
7156                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7157                                  __func__, new_state->base.crtc->base.id);
7158         } else if (old_vrr_active && !new_vrr_active) {
7159                 /* Transition VRR active -> inactive:
7160                  * Allow vblank irq disable again for fixed refresh rate.
7161                  */
7162                 dm_set_vupdate_irq(new_state->base.crtc, false);
7163                 drm_crtc_vblank_put(new_state->base.crtc);
7164                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7165                                  __func__, new_state->base.crtc->base.id);
7166         }
7167 }
7168
7169 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7170 {
7171         struct drm_plane *plane;
7172         struct drm_plane_state *old_plane_state, *new_plane_state;
7173         int i;
7174
7175         /*
7176          * TODO: Make this per-stream so we don't issue redundant updates for
7177          * commits with multiple streams.
7178          */
7179         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7180                                        new_plane_state, i)
7181                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7182                         handle_cursor_update(plane, old_plane_state);
7183 }
7184
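/*
 * Program all plane updates for one CRTC. The per-plane updates are
 * gathered into a single heap-allocated bundle, presumably because the
 * MAX_SURFACES-sized arrays would be too large for the kernel stack.
 */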
7185 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7186                                     struct dc_state *dc_state,
7187                                     struct drm_device *dev,
7188                                     struct amdgpu_display_manager *dm,
7189                                     struct drm_crtc *pcrtc,
7190                                     bool wait_for_vblank)
7191 {
7192         uint32_t i;
7193         uint64_t timestamp_ns;
7194         struct drm_plane *plane;
7195         struct drm_plane_state *old_plane_state, *new_plane_state;
7196         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7197         struct drm_crtc_state *new_pcrtc_state =
7198                         drm_atomic_get_new_crtc_state(state, pcrtc);
7199         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7200         struct dm_crtc_state *dm_old_crtc_state =
7201                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7202         int planes_count = 0, vpos, hpos;
7203         long r;
7204         unsigned long flags;
7205         struct amdgpu_bo *abo;
7206         uint32_t target_vblank, last_flip_vblank;
7207         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7208         bool pflip_present = false;
7209         struct {
7210                 struct dc_surface_update surface_updates[MAX_SURFACES];
7211                 struct dc_plane_info plane_infos[MAX_SURFACES];
7212                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7213                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7214                 struct dc_stream_update stream_update;
7215         } *bundle;
7216
7217         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7218
7219         if (!bundle) {
7220                 dm_error("Failed to allocate update bundle\n");
7221                 goto cleanup;
7222         }
7223
7224         /*
7225          * Disable the cursor first if we're disabling all the planes.
7226          * It'll remain on the screen after the planes are re-enabled
7227          * if we don't.
7228          */
7229         if (acrtc_state->active_planes == 0)
7230                 amdgpu_dm_commit_cursors(state);
7231
7232         /* update planes when needed */
7233         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7234                 struct drm_crtc *crtc = new_plane_state->crtc;
7235                 struct drm_crtc_state *new_crtc_state;
7236                 struct drm_framebuffer *fb = new_plane_state->fb;
7237                 bool plane_needs_flip;
7238                 struct dc_plane_state *dc_plane;
7239                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7240
7241                 /* Cursor plane is handled after stream updates */
7242                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7243                         continue;
7244
7245                 if (!fb || !crtc || pcrtc != crtc)
7246                         continue;
7247
7248                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7249                 if (!new_crtc_state->active)
7250                         continue;
7251
7252                 dc_plane = dm_new_plane_state->dc_state;
7253
7254                 bundle->surface_updates[planes_count].surface = dc_plane;
7255                 if (new_pcrtc_state->color_mgmt_changed) {
7256                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7257                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7258                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7259                 }
7260
7261                 fill_dc_scaling_info(new_plane_state,
7262                                      &bundle->scaling_infos[planes_count]);
7263
7264                 bundle->surface_updates[planes_count].scaling_info =
7265                         &bundle->scaling_infos[planes_count];
7266
7267                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7268
7269                 pflip_present = pflip_present || plane_needs_flip;
7270
7271                 if (!plane_needs_flip) {
7272                         planes_count += 1;
7273                         continue;
7274                 }
7275
7276                 abo = gem_to_amdgpu_bo(fb->obj[0]);
7277
7278                 /*
7279                  * Wait for all fences on this FB. Do limited wait to avoid
7280                  * deadlock during GPU reset when this fence will not signal
7281                  * but we hold reservation lock for the BO.
7282                  */
7283                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7284                                                         false,
7285                                                         msecs_to_jiffies(5000));
7286                 if (unlikely(r <= 0))
7287                         DRM_ERROR("Waiting for fences timed out!\n");
7288
7289                 fill_dc_plane_info_and_addr(
7290                         dm->adev, new_plane_state,
7291                         dm_new_plane_state->tiling_flags,
7292                         &bundle->plane_infos[planes_count],
7293                         &bundle->flip_addrs[planes_count].address,
7294                         dm_new_plane_state->tmz_surface, false);
7295
7296                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7297                                  new_plane_state->plane->index,
7298                                  bundle->plane_infos[planes_count].dcc.enable);
7299
7300                 bundle->surface_updates[planes_count].plane_info =
7301                         &bundle->plane_infos[planes_count];
7302
7303                 /*
7304                  * Only allow immediate flips for fast updates that don't
7305                  * change FB pitch, DCC state, rotation or mirroring.
7306                  */
7307                 bundle->flip_addrs[planes_count].flip_immediate =
7308                         crtc->state->async_flip &&
7309                         acrtc_state->update_type == UPDATE_TYPE_FAST;
7310
7311                 timestamp_ns = ktime_get_ns();
7312                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7313                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7314                 bundle->surface_updates[planes_count].surface = dc_plane;
7315
7316                 if (!bundle->surface_updates[planes_count].surface) {
7317                         DRM_ERROR("No surface for CRTC: id=%d\n",
7318                                         acrtc_attach->crtc_id);
7319                         continue;
7320                 }
7321
7322                 if (plane == pcrtc->primary)
7323                         update_freesync_state_on_stream(
7324                                 dm,
7325                                 acrtc_state,
7326                                 acrtc_state->stream,
7327                                 dc_plane,
7328                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7329
7330                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7331                                  __func__,
7332                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7333                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7334
7335                 planes_count += 1;
7336
7337         }
7338
7339         if (pflip_present) {
7340                 if (!vrr_active) {
7341                         /* Use old throttling in non-vrr fixed refresh rate mode
7342                          * to keep flip scheduling based on target vblank counts
7343                          * working in a backwards compatible way, e.g., for
7344                          * clients using the GLX_OML_sync_control extension or
7345                          * DRI3/Present extension with defined target_msc.
7346                          */
7347                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7348                 }
7349                 else {
7350                         /* For variable refresh rate mode only:
7351                          * Get vblank of last completed flip to avoid > 1 vrr
7352                          * flips per video frame by use of throttling, but allow
7353                          * flip programming anywhere in the possibly large
7354                          * variable vrr vblank interval for fine-grained flip
7355                          * timing control and more opportunity to avoid stutter
7356                          * on late submission of flips.
7357                          */
7358                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7359                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7360                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7361                 }
7362
7363                 target_vblank = last_flip_vblank + wait_for_vblank;
7364
7365                 /*
7366                  * Wait until we're out of the vertical blank period before the one
7367                  * targeted by the flip
7368                  */
7369                 while ((acrtc_attach->enabled &&
7370                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7371                                                             0, &vpos, &hpos, NULL,
7372                                                             NULL, &pcrtc->hwmode)
7373                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7374                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7375                         (int)(target_vblank -
7376                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7377                         usleep_range(1000, 1100);
7378                 }
7379
7380                 /**
7381                  * Prepare the flip event for the pageflip interrupt to handle.
7382                  *
7383                  * This only works in the case where we've already turned on the
7384                  * appropriate hardware blocks (e.g. HUBP), so in the transition case
7385                  * from 0 -> n planes we have to skip a hardware generated event
7386                  * and rely on sending it from software.
7387                  */
7388                 if (acrtc_attach->base.state->event &&
7389                     acrtc_state->active_planes > 0) {
7390                         drm_crtc_vblank_get(pcrtc);
7391
7392                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7393
7394                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7395                         prepare_flip_isr(acrtc_attach);
7396
7397                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7398                 }
7399
7400                 if (acrtc_state->stream) {
7401                         if (acrtc_state->freesync_vrr_info_changed)
7402                                 bundle->stream_update.vrr_infopacket =
7403                                         &acrtc_state->stream->vrr_infopacket;
7404                 }
7405         }
7406
7407         /* Update the planes if changed or disable if we don't have any. */
7408         if ((planes_count || acrtc_state->active_planes == 0) &&
7409                 acrtc_state->stream) {
7410                 bundle->stream_update.stream = acrtc_state->stream;
7411                 if (new_pcrtc_state->mode_changed) {
7412                         bundle->stream_update.src = acrtc_state->stream->src;
7413                         bundle->stream_update.dst = acrtc_state->stream->dst;
7414                 }
7415
7416                 if (new_pcrtc_state->color_mgmt_changed) {
7417                         /*
7418                          * TODO: This isn't fully correct since we've actually
7419                          * already modified the stream in place.
7420                          */
7421                         bundle->stream_update.gamut_remap =
7422                                 &acrtc_state->stream->gamut_remap_matrix;
7423                         bundle->stream_update.output_csc_transform =
7424                                 &acrtc_state->stream->csc_color_matrix;
7425                         bundle->stream_update.out_transfer_func =
7426                                 acrtc_state->stream->out_transfer_func;
7427                 }
7428
7429                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7430                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7431                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7432
7433                 /*
7434                  * If FreeSync state on the stream has changed then we need to
7435                  * re-adjust the min/max bounds now that DC doesn't handle this
7436                  * as part of commit.
7437                  */
7438                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7439                     amdgpu_dm_vrr_active(acrtc_state)) {
7440                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7441                         dc_stream_adjust_vmin_vmax(
7442                                 dm->dc, acrtc_state->stream,
7443                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7444                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7445                 }
7446                 mutex_lock(&dm->dc_lock);
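                /* PSR must not be active while DC programs a non-fast update;
                 * it is re-enabled further below once fast updates resume.
                 */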
7447                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7448                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7449                         amdgpu_dm_psr_disable(acrtc_state->stream);
7450
7451                 dc_commit_updates_for_stream(dm->dc,
7452                                                      bundle->surface_updates,
7453                                                      planes_count,
7454                                                      acrtc_state->stream,
7455                                                      &bundle->stream_update,
7456                                                      dc_state);
7457
7458                 /**
7459                  * Enable or disable the interrupts on the backend.
7460                  *
7461                  * Most pipes are put into power gating when unused.
7462                  *
7463                  * While a pipe is power gated we lose its interrupt
7464                  * enablement state, so it is stale once gating is lifted.
7465                  *
7466                  * So we need to update the IRQ control state in hardware
7467                  * whenever the pipe turns on (since it could be previously
7468                  * power gated) or off (since some pipes can't be power gated
7469                  * on some ASICs).
7470                  */
7471                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7472                         dm_update_pflip_irq_state(drm_to_adev(dev),
7473                                                   acrtc_attach);
7474
7475                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7476                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7477                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7478                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7479                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7480                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7481                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7482                         amdgpu_dm_psr_enable(acrtc_state->stream);
7483                 }
7484
7485                 mutex_unlock(&dm->dc_lock);
7486         }
7487
7488         /*
7489          * Update cursor state *after* programming all the planes.
7490          * This avoids redundant programming in the case where we're only
7491          * disabling a single plane - that pipe is being disabled anyway.
7492          */
7493         if (acrtc_state->active_planes)
7494                 amdgpu_dm_commit_cursors(state);
7495
7496 cleanup:
7497         kfree(bundle);
7498 }
7499
7500 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7501                                    struct drm_atomic_state *state)
7502 {
7503         struct amdgpu_device *adev = drm_to_adev(dev);
7504         struct amdgpu_dm_connector *aconnector;
7505         struct drm_connector *connector;
7506         struct drm_connector_state *old_con_state, *new_con_state;
7507         struct drm_crtc_state *new_crtc_state;
7508         struct dm_crtc_state *new_dm_crtc_state;
7509         const struct dc_stream_status *status;
7510         int i, inst;
7511
7512         /* Notify audio device removals. */
7513         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7514                 if (old_con_state->crtc != new_con_state->crtc) {
7515                         /* CRTC changes require notification. */
7516                         goto notify;
7517                 }
7518
7519                 if (!new_con_state->crtc)
7520                         continue;
7521
7522                 new_crtc_state = drm_atomic_get_new_crtc_state(
7523                         state, new_con_state->crtc);
7524
7525                 if (!new_crtc_state)
7526                         continue;
7527
7528                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7529                         continue;
7530
7531         notify:
7532                 aconnector = to_amdgpu_dm_connector(connector);
7533
7534                 mutex_lock(&adev->dm.audio_lock);
7535                 inst = aconnector->audio_inst;
7536                 aconnector->audio_inst = -1;
7537                 mutex_unlock(&adev->dm.audio_lock);
7538
7539                 amdgpu_dm_audio_eld_notify(adev, inst);
7540         }
7541
7542         /* Notify audio device additions. */
7543         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7544                 if (!new_con_state->crtc)
7545                         continue;
7546
7547                 new_crtc_state = drm_atomic_get_new_crtc_state(
7548                         state, new_con_state->crtc);
7549
7550                 if (!new_crtc_state)
7551                         continue;
7552
7553                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7554                         continue;
7555
7556                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7557                 if (!new_dm_crtc_state->stream)
7558                         continue;
7559
7560                 status = dc_stream_get_status(new_dm_crtc_state->stream);
7561                 if (!status)
7562                         continue;
7563
7564                 aconnector = to_amdgpu_dm_connector(connector);
7565
7566                 mutex_lock(&adev->dm.audio_lock);
7567                 inst = status->audio_inst;
7568                 aconnector->audio_inst = inst;
7569                 mutex_unlock(&adev->dm.audio_lock);
7570
7571                 amdgpu_dm_audio_eld_notify(adev, inst);
7572         }
7573 }
7574
7575 /*
7576  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7577  * @crtc_state: the DRM CRTC state
7578  * @stream_state: the DC stream state.
7579  *
7580  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7581  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7582  */
7583 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7584                                                 struct dc_stream_state *stream_state)
7585 {
7586         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7587 }
7588
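/*
 * Thin wrapper around the atomic helper; the DM-specific work happens in
 * amdgpu_dm_atomic_commit_tail().
 */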
7589 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7590                                    struct drm_atomic_state *state,
7591                                    bool nonblock)
7592 {
7593         /*
7594          * Add a check here for SoCs that support a hardware cursor plane, to
7595          * unset legacy_cursor_update.
7596          */
7597
7598         return drm_atomic_helper_commit(dev, state, nonblock);
7599
7600         /* TODO: handle EINTR, re-enable IRQ */
7601 }
7602
7603 /**
7604  * amdgpu_dm_atomic_commit_tail() - AMDGPU DM's commit tail implementation.
7605  * @state: The atomic state to commit
7606  *
7607  * This will tell DC to commit the constructed DC state from atomic_check,
7608  * programming the hardware. Any failure here implies a hardware failure, since
7609  * atomic check should have filtered out anything non-kosher.
7610  */
7611 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7612 {
7613         struct drm_device *dev = state->dev;
7614         struct amdgpu_device *adev = drm_to_adev(dev);
7615         struct amdgpu_display_manager *dm = &adev->dm;
7616         struct dm_atomic_state *dm_state;
7617         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7618         uint32_t i, j;
7619         struct drm_crtc *crtc;
7620         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7621         unsigned long flags;
7622         bool wait_for_vblank = true;
7623         struct drm_connector *connector;
7624         struct drm_connector_state *old_con_state, *new_con_state;
7625         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7626         int crtc_disable_count = 0;
7627         bool mode_set_reset_required = false;
7628
7629         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7630
7631         dm_state = dm_atomic_get_new_state(state);
7632         if (dm_state && dm_state->context) {
7633                 dc_state = dm_state->context;
7634         } else {
7635                 /* No state changes, retain current state. */
7636                 dc_state_temp = dc_create_state(dm->dc);
7637                 ASSERT(dc_state_temp);
7638                 dc_state = dc_state_temp;
7639                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7640         }
7641
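        /* Disable interrupts and release the stream for every CRTC that is
         * being turned off or undergoing a modeset, before the new state is
         * committed to DC below.
         */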
7642         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7643                                        new_crtc_state, i) {
7644                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7645
7646                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7647
7648                 if (old_crtc_state->active &&
7649                     (!new_crtc_state->active ||
7650                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7651                         manage_dm_interrupts(adev, acrtc, false);
7652                         dc_stream_release(dm_old_crtc_state->stream);
7653                 }
7654         }
7655
7656         /* update changed items */
7657         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7658                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7659
7660                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7661                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7662
7663                 DRM_DEBUG_DRIVER(
7664                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7665                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
7666                         "connectors_changed:%d\n",
7667                         acrtc->crtc_id,
7668                         new_crtc_state->enable,
7669                         new_crtc_state->active,
7670                         new_crtc_state->planes_changed,
7671                         new_crtc_state->mode_changed,
7672                         new_crtc_state->active_changed,
7673                         new_crtc_state->connectors_changed);
7674
7675                 /* Copy all transient state flags into dc state */
7676                 if (dm_new_crtc_state->stream) {
7677                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7678                                                             dm_new_crtc_state->stream);
7679                 }
7680
7681                 /* handles headless hotplug case, updating new_state and
7682                  * aconnector as needed
7683                  */
7684
7685                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7686
7687                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7688
7689                         if (!dm_new_crtc_state->stream) {
7690                                 /*
7691                                  * This could happen because of issues with the
7692                                  * delivery of userspace notifications: userspace
7693                                  * tries to set a mode on a display which is in
7694                                  * fact already disconnected.
7695                                  * dc_sink is NULL on the aconnector in this case.
7696                                  * We expect a mode reset to come soon.
7697                                  *
7698                                  * This can also happen when an unplug occurs
7699                                  * while the resume sequence is still completing.
7700                                  *
7701                                  * In either case, pretend we still have a sink
7702                                  * to keep the pipe running so that the hw state
7703                                  * stays consistent with the sw state.
7704                                  */
7705                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7706                                                 __func__, acrtc->base.base.id);
7707                                 continue;
7708                         }
7709
7710                         if (dm_old_crtc_state->stream)
7711                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7712
7713                         pm_runtime_get_noresume(dev->dev);
7714
7715                         acrtc->enabled = true;
7716                         acrtc->hw_mode = new_crtc_state->mode;
7717                         crtc->hwmode = new_crtc_state->mode;
7718                         mode_set_reset_required = true;
7719                 } else if (modereset_required(new_crtc_state)) {
7720                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7721                         /* i.e. reset mode */
7722                         if (dm_old_crtc_state->stream)
7723                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7724                         mode_set_reset_required = true;
7725                 }
7726         } /* for_each_crtc_in_state() */
7727
7728         if (dc_state) {
7729                 /* If there was a mode set or reset, disable eDP PSR. */
7730                 if (mode_set_reset_required)
7731                         amdgpu_dm_psr_disable_all(dm);
7732
7733                 dm_enable_per_frame_crtc_master_sync(dc_state);
7734                 mutex_lock(&dm->dc_lock);
7735                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7736                 mutex_unlock(&dm->dc_lock);
7737         }
7738
7739         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7740                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7741
7742                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7743
7744                 if (dm_new_crtc_state->stream != NULL) {
7745                         const struct dc_stream_status *status =
7746                                         dc_stream_get_status(dm_new_crtc_state->stream);
7747
7748                         if (!status)
7749                                 status = dc_stream_get_status_from_state(dc_state,
7750                                                                          dm_new_crtc_state->stream);
7751                         if (!status)
7752                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7753                         else
7754                                 acrtc->otg_inst = status->primary_otg_inst;
7755                 }
7756         }
7757 #ifdef CONFIG_DRM_AMD_DC_HDCP
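        /* Update HDCP per connector: reset displays whose stream is gone while
         * content protection is still flagged ENABLED, and kick
         * hdcp_update_display() wherever the desired protection state changed.
         */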
7758         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7759                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7760                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7761                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7762
7763                 new_crtc_state = NULL;
7764
7765                 if (acrtc)
7766                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7767
7768                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7769
7770                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7771                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7772                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7773                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7774                         continue;
7775                 }
7776
7777                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7778                         hdcp_update_display(
7779                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7780                                 new_con_state->hdcp_content_type,
7781                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
7783         }
7784 #endif
7785
7786         /* Handle connector state changes */
7787         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7788                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7789                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7790                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7791                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7792                 struct dc_stream_update stream_update;
7793                 struct dc_info_packet hdr_packet;
7794                 struct dc_stream_status *status = NULL;
7795                 bool abm_changed, hdr_changed, scaling_changed;
7796
7797                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7798                 memset(&stream_update, 0, sizeof(stream_update));
7799
7800                 if (acrtc) {
7801                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7802                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7803                 }
7804
7805                 /* Skip any modesets/resets */
7806                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7807                         continue;
7808
7809                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7810                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7811
7812                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7813                                                              dm_old_con_state);
7814
7815                 abm_changed = dm_new_crtc_state->abm_level !=
7816                               dm_old_crtc_state->abm_level;
7817
7818                 hdr_changed =
7819                         is_hdr_metadata_different(old_con_state, new_con_state);
7820
7821                 if (!scaling_changed && !abm_changed && !hdr_changed)
7822                         continue;
7823
7824                 stream_update.stream = dm_new_crtc_state->stream;
7825                 if (scaling_changed) {
7826                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7827                                         dm_new_con_state, dm_new_crtc_state->stream);
7828
7829                         stream_update.src = dm_new_crtc_state->stream->src;
7830                         stream_update.dst = dm_new_crtc_state->stream->dst;
7831                 }
7832
7833                 if (abm_changed) {
7834                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7835
7836                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7837                 }
7838
7839                 if (hdr_changed) {
7840                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7841                         stream_update.hdr_static_metadata = &hdr_packet;
7842                 }
7843
7844                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7845                 WARN_ON(!status);
7846                 WARN_ON(!status->plane_count);
7847
7848                 /*
7849                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7850                  * Here we create an empty update on each plane.
7851                  * To fix this, DC should permit updating only stream properties.
7852                  */
7853                 for (j = 0; j < status->plane_count; j++)
7854                         dummy_updates[j].surface = status->plane_states[0];
7855
7857                 mutex_lock(&dm->dc_lock);
7858                 dc_commit_updates_for_stream(dm->dc,
7859                                                      dummy_updates,
7860                                                      status->plane_count,
7861                                                      dm_new_crtc_state->stream,
7862                                                      &stream_update,
7863                                                      dc_state);
7864                 mutex_unlock(&dm->dc_lock);
7865         }
7866
7867         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7868         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7869                                       new_crtc_state, i) {
7870                 if (old_crtc_state->active && !new_crtc_state->active)
7871                         crtc_disable_count++;
7872
7873                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7874                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7875
7876                 /* For freesync config update on crtc state and params for irq */
7877                 update_stream_irq_parameters(dm, dm_new_crtc_state);
7878
7879                 /* Handle vrr on->off / off->on transitions */
7880                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7881                                                 dm_new_crtc_state);
7882         }
7883
7884         /**
7885          * Enable interrupts for CRTCs that are newly enabled or went through
7886          * a modeset. This is intentionally deferred until after the front-end
7887          * state has been modified, so that the OTG is on before the handlers
7888          * run and they never access stale or invalid state.
7889          */
7890         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7891                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7892
7893                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7894
7895                 if (new_crtc_state->active &&
7896                     (!old_crtc_state->active ||
7897                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7898                         dc_stream_retain(dm_new_crtc_state->stream);
7899                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7900                         manage_dm_interrupts(adev, acrtc, true);
7901
7902 #ifdef CONFIG_DEBUG_FS
7903                         /**
7904                          * Frontend may have changed so reapply the CRC capture
7905                          * settings for the stream.
7906                          */
7907                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7908
7909                         if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7910                                 amdgpu_dm_crtc_configure_crc_source(
7911                                         crtc, dm_new_crtc_state,
7912                                         dm_new_crtc_state->crc_src);
7913                         }
7914 #endif
7915                 }
7916         }
7917
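        /* If any CRTC requested an async flip, don't wait for vblank at the
         * end of the commit.
         */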
7918         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7919                 if (new_crtc_state->async_flip)
7920                         wait_for_vblank = false;
7921
7922         /* update planes when needed per crtc*/
7923         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7924                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7925
7926                 if (dm_new_crtc_state->stream)
7927                         amdgpu_dm_commit_planes(state, dc_state, dev,
7928                                                 dm, crtc, wait_for_vblank);
7929         }
7930
7931         /* Update audio instances for each connector. */
7932         amdgpu_dm_commit_audio(dev, state);
7933
7934         /*
7935          * Send a vblank event for every event not handled in the flip path, and
7936          * mark the event consumed for drm_atomic_helper_commit_hw_done().
7937          */
7938         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7939         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7940
7941                 if (new_crtc_state->event)
7942                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7943
7944                 new_crtc_state->event = NULL;
7945         }
7946         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7947
7948         /* Signal HW programming completion */
7949         drm_atomic_helper_commit_hw_done(state);
7950
7951         if (wait_for_vblank)
7952                 drm_atomic_helper_wait_for_flip_done(dev, state);
7953
7954         drm_atomic_helper_cleanup_planes(dev, state);
7955
7956         /*
7957          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7958          * so we can put the GPU into runtime suspend if we're not driving any
7959          * displays anymore
7960          */
7961         for (i = 0; i < crtc_disable_count; i++)
7962                 pm_runtime_put_autosuspend(dev->dev);
7963         pm_runtime_mark_last_busy(dev->dev);
7964
7965         if (dc_state_temp)
7966                 dc_release_state(dc_state_temp);
7967 }
7968
7969
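/*
 * Build and commit a minimal atomic state (connector, CRTC and primary
 * plane) with mode_changed forced, to restore the previous display
 * configuration without a userspace request.
 */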
7970 static int dm_force_atomic_commit(struct drm_connector *connector)
7971 {
7972         int ret = 0;
7973         struct drm_device *ddev = connector->dev;
7974         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7975         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7976         struct drm_plane *plane = disconnected_acrtc->base.primary;
7977         struct drm_connector_state *conn_state;
7978         struct drm_crtc_state *crtc_state;
7979         struct drm_plane_state *plane_state;
7980
7981         if (!state)
7982                 return -ENOMEM;
7983
7984         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7985
7986         /* Construct an atomic state to restore previous display setting */
7987
7988         /*
7989          * Attach connectors to drm_atomic_state
7990          */
7991         conn_state = drm_atomic_get_connector_state(state, connector);
7992
7993         ret = PTR_ERR_OR_ZERO(conn_state);
7994         if (ret)
7995                 goto err;
7996
7997         /* Attach crtc to drm_atomic_state*/
7998         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7999
8000         ret = PTR_ERR_OR_ZERO(crtc_state);
8001         if (ret)
8002                 goto err;
8003
8004         /* force a restore */
8005         crtc_state->mode_changed = true;
8006
8007         /* Attach plane to drm_atomic_state */
8008         plane_state = drm_atomic_get_plane_state(state, plane);
8009
8010         ret = PTR_ERR_OR_ZERO(plane_state);
8011         if (ret)
8012                 goto err;
8013
8015         /* Call commit internally with the state we just constructed */
8016         ret = drm_atomic_commit(state);
8017         if (!ret)
8018                 return 0;
8019
8020 err:
8021         DRM_ERROR("Restoring old state failed with %i\n", ret);
8022         drm_atomic_state_put(state);
8023
8024         return ret;
8025 }
8026
8027 /*
8028  * This function handles all cases where a set mode does not come upon hotplug.
8029  * This includes when a display is unplugged and then plugged back into the
8030  * same port, and when running without usermode desktop manager support.
8031  */
8032 void dm_restore_drm_connector_state(struct drm_device *dev,
8033                                     struct drm_connector *connector)
8034 {
8035         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8036         struct amdgpu_crtc *disconnected_acrtc;
8037         struct dm_crtc_state *acrtc_state;
8038
8039         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8040                 return;
8041
8042         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8043         if (!disconnected_acrtc)
8044                 return;
8045
8046         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8047         if (!acrtc_state->stream)
8048                 return;
8049
8050         /*
8051          * If the previous sink is not released and different from the current,
8052          * we deduce we are in a state where we cannot rely on a usermode call
8053          * to turn on the display, so we do it here.
8054          */
8055         if (acrtc_state->stream->sink != aconnector->dc_sink)
8056                 dm_force_atomic_commit(&aconnector->base);
8057 }
8058
8059 /*
8060  * Grabs all modesetting locks to serialize against any blocking commits,
8061  * and waits for completion of all non-blocking commits.
8062  */
8063 static int do_aquire_global_lock(struct drm_device *dev,
8064                                  struct drm_atomic_state *state)
8065 {
8066         struct drm_crtc *crtc;
8067         struct drm_crtc_commit *commit;
8068         long ret;
8069
8070         /*
8071          * Adding all modeset locks to acquire_ctx will
8072          * ensure that when the framework releases it, the
8073          * extra locks we are taking here will get released too.
8074          */
8075         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8076         if (ret)
8077                 return ret;
8078
8079         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8080                 spin_lock(&crtc->commit_lock);
8081                 commit = list_first_entry_or_null(&crtc->commit_list,
8082                                 struct drm_crtc_commit, commit_entry);
8083                 if (commit)
8084                         drm_crtc_commit_get(commit);
8085                 spin_unlock(&crtc->commit_lock);
8086
8087                 if (!commit)
8088                         continue;
8089
8090                 /*
8091                  * Make sure all pending HW programming has completed and
8092                  * all page flips are done.
8093                  */
8094                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8095
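                /*
                 * A descriptive note on the standard completion API used here:
                 * wait_for_completion_interruptible_timeout() returns the
                 * remaining jiffies (> 0) on completion, 0 on timeout
                 * (10*HZ here, i.e. 10 seconds) and -ERESTARTSYS when
                 * interrupted by a signal, which is what the checks below
                 * distinguish.
                 */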
8096                 if (ret > 0)
8097                         ret = wait_for_completion_interruptible_timeout(
8098                                         &commit->flip_done, 10*HZ);
8099
8100                 if (ret == 0)
8101                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
8102                                   crtc->base.id, crtc->name);
8103
8104                 drm_crtc_commit_put(commit);
8105         }
8106
8107         return ret < 0 ? ret : 0;
8108 }
8109
8110 static void get_freesync_config_for_crtc(
8111         struct dm_crtc_state *new_crtc_state,
8112         struct dm_connector_state *new_con_state)
8113 {
8114         struct mod_freesync_config config = {0};
8115         struct amdgpu_dm_connector *aconnector =
8116                         to_amdgpu_dm_connector(new_con_state->base.connector);
8117         struct drm_display_mode *mode = &new_crtc_state->base.mode;
8118         int vrefresh = drm_mode_vrefresh(mode);
8119
8120         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8121                                         vrefresh >= aconnector->min_vfreq &&
8122                                         vrefresh <= aconnector->max_vfreq;
8123
8124         if (new_crtc_state->vrr_supported) {
8125                 new_crtc_state->stream->ignore_msa_timing_param = true;
8126                 config.state = new_crtc_state->base.vrr_enabled ?
8127                                 VRR_STATE_ACTIVE_VARIABLE :
8128                                 VRR_STATE_INACTIVE;
8129                 config.min_refresh_in_uhz =
8130                                 aconnector->min_vfreq * 1000000;
8131                 config.max_refresh_in_uhz =
8132                                 aconnector->max_vfreq * 1000000;
8133                 config.vsif_supported = true;
8134                 config.btr = true;
8135         }
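        /*
         * Illustrative example with hypothetical values (not taken from a
         * real panel): a connector reporting a 48-144 Hz FreeSync range
         * gives min_refresh_in_uhz = 48 * 1000000 = 48000000 and
         * max_refresh_in_uhz = 144000000; a 60 Hz mode on that connector
         * then satisfies 48 <= 60 <= 144, so vrr_supported is set above.
         */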
8136
8137         new_crtc_state->freesync_config = config;
8138 }
8139
8140 static void reset_freesync_config_for_crtc(
8141         struct dm_crtc_state *new_crtc_state)
8142 {
8143         new_crtc_state->vrr_supported = false;
8144
8145         memset(&new_crtc_state->vrr_infopacket, 0,
8146                sizeof(new_crtc_state->vrr_infopacket));
8147 }
8148
8149 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8150                                 struct drm_atomic_state *state,
8151                                 struct drm_crtc *crtc,
8152                                 struct drm_crtc_state *old_crtc_state,
8153                                 struct drm_crtc_state *new_crtc_state,
8154                                 bool enable,
8155                                 bool *lock_and_validation_needed)
8156 {
8157         struct dm_atomic_state *dm_state = NULL;
8158         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8159         struct dc_stream_state *new_stream;
8160         int ret = 0;
8161
8162         /*
8163          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
8164          * dc_validation_set, and update changed items there.
8165          */
8166         struct amdgpu_crtc *acrtc = NULL;
8167         struct amdgpu_dm_connector *aconnector = NULL;
8168         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8169         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8170
8171         new_stream = NULL;
8172
8173         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8174         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8175         acrtc = to_amdgpu_crtc(crtc);
8176         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8177
8178         /* TODO This hack should go away */
8179         if (aconnector && enable) {
8180                 /* Make sure fake sink is created in plug-in scenario */
8181                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8182                                                             &aconnector->base);
8183                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8184                                                             &aconnector->base);
8185
8186                 if (IS_ERR(drm_new_conn_state)) {
8187                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8188                         goto fail;
8189                 }
8190
8191                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8192                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8193
8194                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8195                         goto skip_modeset;
8196
8197                 new_stream = create_validate_stream_for_sink(aconnector,
8198                                                              &new_crtc_state->mode,
8199                                                              dm_new_conn_state,
8200                                                              dm_old_crtc_state->stream);
8201
8202                 /*
8203                  * We can have no stream on ACTION_SET if a display
8204                  * was disconnected during S3; in this case it is not an
8205                  * error, the OS will be updated after detection and
8206                  * will do the right thing on the next atomic commit.
8207                  */
8208
8209                 if (!new_stream) {
8210                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8211                                         __func__, acrtc->base.base.id);
8212                         ret = -ENOMEM;
8213                         goto fail;
8214                 }
8215
8216                 /*
8217                  * TODO: Check VSDB bits to decide whether this should
8218                  * be enabled or not.
8219                  */
8220                 new_stream->triggered_crtc_reset.enabled =
8221                         dm->force_timing_sync;
8222
8223                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8224
8225                 ret = fill_hdr_info_packet(drm_new_conn_state,
8226                                            &new_stream->hdr_static_metadata);
8227                 if (ret)
8228                         goto fail;
8229
8230                 /*
8231                  * If we already removed the old stream from the context
8232                  * (and set the new stream to NULL) then we can't reuse
8233                  * the old stream even if the stream and scaling are unchanged.
8234                  * We'll hit the BUG_ON and black screen.
8235                  *
8236                  * TODO: Refactor this function to allow this check to work
8237                  * in all conditions.
8238                  */
8239                 if (dm_new_crtc_state->stream &&
8240                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8241                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8242                         new_crtc_state->mode_changed = false;
8243                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
8244                                          new_crtc_state->mode_changed);
8245                 }
8246         }
8247
8248         /* mode_changed flag may get updated above, need to check again */
8249         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8250                 goto skip_modeset;
8251
8252         DRM_DEBUG_DRIVER(
8253                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8254                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8255                 "connectors_changed:%d\n",
8256                 acrtc->crtc_id,
8257                 new_crtc_state->enable,
8258                 new_crtc_state->active,
8259                 new_crtc_state->planes_changed,
8260                 new_crtc_state->mode_changed,
8261                 new_crtc_state->active_changed,
8262                 new_crtc_state->connectors_changed);
8263
8264         /* Remove stream for any changed/disabled CRTC */
8265         if (!enable) {
8266
8267                 if (!dm_old_crtc_state->stream)
8268                         goto skip_modeset;
8269
8270                 ret = dm_atomic_get_state(state, &dm_state);
8271                 if (ret)
8272                         goto fail;
8273
8274                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8275                                 crtc->base.id);
8276
8277                 /* i.e. reset mode */
8278                 if (dc_remove_stream_from_ctx(
8279                                 dm->dc,
8280                                 dm_state->context,
8281                                 dm_old_crtc_state->stream) != DC_OK) {
8282                         ret = -EINVAL;
8283                         goto fail;
8284                 }
8285
8286                 dc_stream_release(dm_old_crtc_state->stream);
8287                 dm_new_crtc_state->stream = NULL;
8288
8289                 reset_freesync_config_for_crtc(dm_new_crtc_state);
8290
8291                 *lock_and_validation_needed = true;
8292
8293         } else { /* Add stream for any updated/enabled CRTC */
8294                 /*
8295                  * Quick fix to prevent a NULL pointer dereference on new_stream
8296                  * when newly added MST connectors are not found in the existing
8297                  * crtc_state in chained mode. TODO: dig out the root cause of this.
8298                  */
8299                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8300                         goto skip_modeset;
8301
8302                 if (modereset_required(new_crtc_state))
8303                         goto skip_modeset;
8304
8305                 if (modeset_required(new_crtc_state, new_stream,
8306                                      dm_old_crtc_state->stream)) {
8307
8308                         WARN_ON(dm_new_crtc_state->stream);
8309
8310                         ret = dm_atomic_get_state(state, &dm_state);
8311                         if (ret)
8312                                 goto fail;
8313
8314                         dm_new_crtc_state->stream = new_stream;
8315
8316                         dc_stream_retain(new_stream);
8317
8318                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8319                                                 crtc->base.id);
8320
8321                         if (dc_add_stream_to_ctx(
8322                                         dm->dc,
8323                                         dm_state->context,
8324                                         dm_new_crtc_state->stream) != DC_OK) {
8325                                 ret = -EINVAL;
8326                                 goto fail;
8327                         }
8328
8329                         *lock_and_validation_needed = true;
8330                 }
8331         }
8332
8333 skip_modeset:
8334         /* Release extra reference */
8335         if (new_stream)
8336                 dc_stream_release(new_stream);
8337
8338         /*
8339          * We want to do dc stream updates that do not require a
8340          * full modeset below.
8341          */
8342         if (!(enable && aconnector && new_crtc_state->active))
8343                 return 0;
8344         /*
8345          * Given the above conditions, the dc state cannot be NULL because:
8346          * 1. We're in the process of enabling CRTCs (the stream has just
8347          *    been added to the dc context, or is already on it),
8348          * 2. Has a valid connector attached, and
8349          * 3. Is currently active and enabled.
8350          * => The dc stream state currently exists.
8351          */
8352         BUG_ON(dm_new_crtc_state->stream == NULL);
8353
8354         /* Scaling or underscan settings */
8355         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8356                 update_stream_scaling_settings(
8357                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8358
8359         /* ABM settings */
8360         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8361
8362         /*
8363          * Color management settings. We also update color properties
8364          * when a modeset is needed, to ensure it gets reprogrammed.
8365          */
8366         if (dm_new_crtc_state->base.color_mgmt_changed ||
8367             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8368                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8369                 if (ret)
8370                         goto fail;
8371         }
8372
8373         /* Update Freesync settings. */
8374         get_freesync_config_for_crtc(dm_new_crtc_state,
8375                                      dm_new_conn_state);
8376
8377         return ret;
8378
8379 fail:
8380         if (new_stream)
8381                 dc_stream_release(new_stream);
8382         return ret;
8383 }
8384
8385 static bool should_reset_plane(struct drm_atomic_state *state,
8386                                struct drm_plane *plane,
8387                                struct drm_plane_state *old_plane_state,
8388                                struct drm_plane_state *new_plane_state)
8389 {
8390         struct drm_plane *other;
8391         struct drm_plane_state *old_other_state, *new_other_state;
8392         struct drm_crtc_state *new_crtc_state;
8393         int i;
8394
8395         /*
8396          * TODO: Remove this hack once the checks below are sufficient
8397          * to determine when we need to reset all the planes on
8398          * the stream.
8399          */
8400         if (state->allow_modeset)
8401                 return true;
8402
8403         /* Exit early if we know that we're adding or removing the plane. */
8404         if (old_plane_state->crtc != new_plane_state->crtc)
8405                 return true;
8406
8407         /* old crtc == new_crtc == NULL, plane not in context. */
8408         if (!new_plane_state->crtc)
8409                 return false;
8410
8411         new_crtc_state =
8412                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8413
8414         if (!new_crtc_state)
8415                 return true;
8416
8417         /* CRTC Degamma changes currently require us to recreate planes. */
8418         if (new_crtc_state->color_mgmt_changed)
8419                 return true;
8420
8421         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8422                 return true;
8423
8424         /*
8425          * If there are any new primary or overlay planes being added or
8426          * removed then the z-order can potentially change. To ensure
8427          * correct z-order and pipe acquisition the current DC architecture
8428          * requires us to remove and recreate all existing planes.
8429          *
8430          * TODO: Come up with a more elegant solution for this.
8431          */
8432         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8433                 struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8434
8435                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8436                         continue;
8437
8438                 if (old_other_state->crtc != new_plane_state->crtc &&
8439                     new_other_state->crtc != new_plane_state->crtc)
8440                         continue;
8441
8442                 if (old_other_state->crtc != new_other_state->crtc)
8443                         return true;
8444
8445                 /* Src/dst size and scaling updates. */
8446                 if (old_other_state->src_w != new_other_state->src_w ||
8447                     old_other_state->src_h != new_other_state->src_h ||
8448                     old_other_state->crtc_w != new_other_state->crtc_w ||
8449                     old_other_state->crtc_h != new_other_state->crtc_h)
8450                         return true;
8451
8452                 /* Rotation / mirroring updates. */
8453                 if (old_other_state->rotation != new_other_state->rotation)
8454                         return true;
8455
8456                 /* Blending updates. */
8457                 if (old_other_state->pixel_blend_mode !=
8458                     new_other_state->pixel_blend_mode)
8459                         return true;
8460
8461                 /* Alpha updates. */
8462                 if (old_other_state->alpha != new_other_state->alpha)
8463                         return true;
8464
8465                 /* Colorspace changes. */
8466                 if (old_other_state->color_range != new_other_state->color_range ||
8467                     old_other_state->color_encoding != new_other_state->color_encoding)
8468                         return true;
8469
8470                 /* Framebuffer checks fall at the end. */
8471                 if (!old_other_state->fb || !new_other_state->fb)
8472                         continue;
8473
8474                 /* Pixel format changes can require bandwidth updates. */
8475                 if (old_other_state->fb->format != new_other_state->fb->format)
8476                         return true;
8477
8478                 old_dm_plane_state = to_dm_plane_state(old_other_state);
8479                 new_dm_plane_state = to_dm_plane_state(new_other_state);
8480
8481                 /* Tiling and DCC changes also require bandwidth updates. */
8482                 if (old_dm_plane_state->tiling_flags !=
8483                     new_dm_plane_state->tiling_flags)
8484                         return true;
8485         }
8486
8487         return false;
8488 }
8489
8490 static int dm_update_plane_state(struct dc *dc,
8491                                  struct drm_atomic_state *state,
8492                                  struct drm_plane *plane,
8493                                  struct drm_plane_state *old_plane_state,
8494                                  struct drm_plane_state *new_plane_state,
8495                                  bool enable,
8496                                  bool *lock_and_validation_needed)
8497 {
8498
8499         struct dm_atomic_state *dm_state = NULL;
8500         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8501         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8502         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8503         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8504         struct amdgpu_crtc *new_acrtc;
8505         bool needs_reset;
8506         int ret = 0;
8507
8508
8509         new_plane_crtc = new_plane_state->crtc;
8510         old_plane_crtc = old_plane_state->crtc;
8511         dm_new_plane_state = to_dm_plane_state(new_plane_state);
8512         dm_old_plane_state = to_dm_plane_state(old_plane_state);
8513
8514         /* TODO: Implement a better atomic check for the cursor plane */
8515         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8516                 if (!enable || !new_plane_crtc ||
8517                         drm_atomic_plane_disabling(plane->state, new_plane_state))
8518                         return 0;
8519
8520                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8521
8522                 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8523                         (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8524                         DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8525                                                          new_plane_state->crtc_w, new_plane_state->crtc_h);
8526                         return -EINVAL;
8527                 }
8528
8529                 return 0;
8530         }
8531
8532         needs_reset = should_reset_plane(state, plane, old_plane_state,
8533                                          new_plane_state);
8534
8535         /* Remove any changed/removed planes */
8536         if (!enable) {
8537                 if (!needs_reset)
8538                         return 0;
8539
8540                 if (!old_plane_crtc)
8541                         return 0;
8542
8543                 old_crtc_state = drm_atomic_get_old_crtc_state(
8544                                 state, old_plane_crtc);
8545                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8546
8547                 if (!dm_old_crtc_state->stream)
8548                         return 0;
8549
8550                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8551                                 plane->base.id, old_plane_crtc->base.id);
8552
8553                 ret = dm_atomic_get_state(state, &dm_state);
8554                 if (ret)
8555                         return ret;
8556
8557                 if (!dc_remove_plane_from_context(
8558                                 dc,
8559                                 dm_old_crtc_state->stream,
8560                                 dm_old_plane_state->dc_state,
8561                                 dm_state->context)) {
8562
8563                         return -EINVAL;
8564                 }
8565
8566
8567                 dc_plane_state_release(dm_old_plane_state->dc_state);
8568                 dm_new_plane_state->dc_state = NULL;
8569
8570                 *lock_and_validation_needed = true;
8571
8572         } else { /* Add new planes */
8573                 struct dc_plane_state *dc_new_plane_state;
8574
8575                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8576                         return 0;
8577
8578                 if (!new_plane_crtc)
8579                         return 0;
8580
8581                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8582                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8583
8584                 if (!dm_new_crtc_state->stream)
8585                         return 0;
8586
8587                 if (!needs_reset)
8588                         return 0;
8589
8590                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8591                 if (ret)
8592                         return ret;
8593
8594                 WARN_ON(dm_new_plane_state->dc_state);
8595
8596                 dc_new_plane_state = dc_create_plane_state(dc);
8597                 if (!dc_new_plane_state)
8598                         return -ENOMEM;
8599
8600                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8601                                 plane->base.id, new_plane_crtc->base.id);
8602
8603                 ret = fill_dc_plane_attributes(
8604                         drm_to_adev(new_plane_crtc->dev),
8605                         dc_new_plane_state,
8606                         new_plane_state,
8607                         new_crtc_state);
8608                 if (ret) {
8609                         dc_plane_state_release(dc_new_plane_state);
8610                         return ret;
8611                 }
8612
8613                 ret = dm_atomic_get_state(state, &dm_state);
8614                 if (ret) {
8615                         dc_plane_state_release(dc_new_plane_state);
8616                         return ret;
8617                 }
8618
8619                 /*
8620                  * Any atomic check errors that occur after this will
8621                  * not need a release. The plane state will be attached
8622                  * to the stream, and therefore part of the atomic
8623                  * state. It'll be released when the atomic state is
8624                  * cleaned.
8625                  */
8626                 if (!dc_add_plane_to_context(
8627                                 dc,
8628                                 dm_new_crtc_state->stream,
8629                                 dc_new_plane_state,
8630                                 dm_state->context)) {
8631
8632                         dc_plane_state_release(dc_new_plane_state);
8633                         return -EINVAL;
8634                 }
8635
8636                 dm_new_plane_state->dc_state = dc_new_plane_state;
8637
8638                 /* Tell DC to do a full surface update every time there
8639                  * is a plane change. Inefficient, but works for now.
8640                  */
8641                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8642
8643                 *lock_and_validation_needed = true;
8644         }
8645
8646
8647         return ret;
8648 }
8649
8650 #if defined(CONFIG_DRM_AMD_DC_DCN)
8651 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8652 {
8653         struct drm_connector *connector;
8654         struct drm_connector_state *conn_state;
8655         struct amdgpu_dm_connector *aconnector = NULL;
8656         int i;
8657         for_each_new_connector_in_state(state, connector, conn_state, i) {
8658                 if (conn_state->crtc != crtc)
8659                         continue;
8660
8661                 aconnector = to_amdgpu_dm_connector(connector);
8662                 if (!aconnector->port || !aconnector->mst_port)
8663                         aconnector = NULL;
8664                 else
8665                         break;
8666         }
8667
8668         if (!aconnector)
8669                 return 0;
8670
8671         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8672 }
8673 #endif
8674
8675 /**
8676  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8677  * @dev: The DRM device
8678  * @state: The atomic state to commit
8679  *
8680  * Validate that the given atomic state is programmable by DC into hardware.
8681  * This involves constructing a &struct dc_state reflecting the new hardware
8682  * state we wish to commit, then querying DC to see if it is programmable. It's
8683  * important not to modify the existing DC state. Otherwise, atomic_check
8684  * may unexpectedly commit hardware changes.
8685  *
8686  * When validating the DC state, it's important that the right locks are
8687  * acquired. For the full-update case, which removes/adds/updates streams on
8688  * one CRTC while flipping on another CRTC, acquiring the global lock
8689  * guarantees that any such full-update commit will wait for the completion
8690  * of any outstanding flip using DRM's synchronization events.
8691  *
8692  * Note that DM adds the affected connectors for all CRTCs in state, when that
8693  * might not seem necessary. This is because DC stream creation requires the
8694  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8695  * be possible but non-trivial - a possible TODO item.
8696  *
8697  * Return: Negative error code if validation failed, 0 otherwise.
8698  */
8699 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8700                                   struct drm_atomic_state *state)
8701 {
8702         struct amdgpu_device *adev = drm_to_adev(dev);
8703         struct dm_atomic_state *dm_state = NULL;
8704         struct dc *dc = adev->dm.dc;
8705         struct drm_connector *connector;
8706         struct drm_connector_state *old_con_state, *new_con_state;
8707         struct drm_crtc *crtc;
8708         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8709         struct drm_plane *plane;
8710         struct drm_plane_state *old_plane_state, *new_plane_state;
8711         enum dc_status status;
8712         int ret, i;
8713         bool lock_and_validation_needed = false;
8714
8715         amdgpu_check_debugfs_connector_property_change(adev, state);
8716
8717         ret = drm_atomic_helper_check_modeset(dev, state);
8718         if (ret)
8719                 goto fail;
8720
8721         /* Check connector changes */
8722         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8723                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8724                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8725
8726                 /* Skip connectors that are disabled or part of modeset already. */
8727                 if (!old_con_state->crtc && !new_con_state->crtc)
8728                         continue;
8729
8730                 if (!new_con_state->crtc)
8731                         continue;
8732
8733                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8734                 if (IS_ERR(new_crtc_state)) {
8735                         ret = PTR_ERR(new_crtc_state);
8736                         goto fail;
8737                 }
8738
8739                 if (dm_old_con_state->abm_level !=
8740                     dm_new_con_state->abm_level)
8741                         new_crtc_state->connectors_changed = true;
8742         }
8743
8744 #if defined(CONFIG_DRM_AMD_DC_DCN)
8745         if (adev->asic_type >= CHIP_NAVI10) {
8746                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8747                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8748                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8749                                 if (ret)
8750                                         goto fail;
8751                         }
8752                 }
8753         }
8754 #endif
8755         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8756                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8757                     !new_crtc_state->color_mgmt_changed &&
8758                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8759                         continue;
8760
8761                 if (!new_crtc_state->enable)
8762                         continue;
8763
8764                 ret = drm_atomic_add_affected_connectors(state, crtc);
8765                 if (ret)
8766                         goto fail;
8767
8768                 ret = drm_atomic_add_affected_planes(state, crtc);
8769                 if (ret)
8770                         goto fail;
8771         }
8772
8773         /*
8774          * Add all primary and overlay planes on the CRTC to the state
8775          * whenever a plane is enabled to maintain correct z-ordering
8776          * and to enable fast surface updates.
8777          */
8778         drm_for_each_crtc(crtc, dev) {
8779                 bool modified = false;
8780
8781                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8782                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8783                                 continue;
8784
8785                         if (new_plane_state->crtc == crtc ||
8786                             old_plane_state->crtc == crtc) {
8787                                 modified = true;
8788                                 break;
8789                         }
8790                 }
8791
8792                 if (!modified)
8793                         continue;
8794
8795                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8796                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8797                                 continue;
8798
8799                         new_plane_state =
8800                                 drm_atomic_get_plane_state(state, plane);
8801
8802                         if (IS_ERR(new_plane_state)) {
8803                                 ret = PTR_ERR(new_plane_state);
8804                                 goto fail;
8805                         }
8806                 }
8807         }
8808
8809         /* Prepass for updating tiling flags on new planes. */
8810         for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8811                 struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8812                 struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8813
8814                 ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8815                                   &new_dm_plane_state->tmz_surface);
8816                 if (ret)
8817                         goto fail;
8818         }
8819
8820         /* Remove existing planes if they are modified */
8821         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8822                 ret = dm_update_plane_state(dc, state, plane,
8823                                             old_plane_state,
8824                                             new_plane_state,
8825                                             false,
8826                                             &lock_and_validation_needed);
8827                 if (ret)
8828                         goto fail;
8829         }
8830
8831         /* Disable all crtcs which require disable */
8832         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8833                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8834                                            old_crtc_state,
8835                                            new_crtc_state,
8836                                            false,
8837                                            &lock_and_validation_needed);
8838                 if (ret)
8839                         goto fail;
8840         }
8841
8842         /* Enable all crtcs which require enable */
8843         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8844                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8845                                            old_crtc_state,
8846                                            new_crtc_state,
8847                                            true,
8848                                            &lock_and_validation_needed);
8849                 if (ret)
8850                         goto fail;
8851         }
8852
8853         /* Add new/modified planes */
8854         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8855                 ret = dm_update_plane_state(dc, state, plane,
8856                                             old_plane_state,
8857                                             new_plane_state,
8858                                             true,
8859                                             &lock_and_validation_needed);
8860                 if (ret)
8861                         goto fail;
8862         }
8863
8864         /* Run this here since we want to validate the streams we created */
8865         ret = drm_atomic_helper_check_planes(dev, state);
8866         if (ret)
8867                 goto fail;
8868
8869         if (state->legacy_cursor_update) {
8870                 /*
8871                  * This is a fast cursor update coming from the plane update
8872                  * helper, check if it can be done asynchronously for better
8873                  * performance.
8874                  */
8875                 state->async_update =
8876                         !drm_atomic_helper_async_check(dev, state);
8877
8878                 /*
8879                  * Skip the remaining global validation if this is an async
8880                  * update. Cursor updates can be done without affecting
8881                  * state or bandwidth calcs and this avoids the performance
8882                  * penalty of locking the private state object and
8883                  * allocating a new dc_state.
8884                  */
8885                 if (state->async_update)
8886                         return 0;
8887         }
8888
8889         /* Check scaling and underscan changes */
8890         /* TODO: Removed scaling-changes validation due to the inability to commit
8891          * a new stream into the context w/o causing a full reset. Need to
8892          * decide how to handle this.
8893          */
8894         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8895                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8896                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8897                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8898
8899                 /* Skip any modesets/resets */
8900                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8901                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8902                         continue;
8903
8904                 /* Skip anything that is not a scaling or underscan change */
8905                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8906                         continue;
8907
8908                 lock_and_validation_needed = true;
8909         }
8910
8911         /*
8912          * Streams and planes are reset when there are changes that affect
8913          * bandwidth. Anything that affects bandwidth needs to go through
8914          * DC global validation to ensure that the configuration can be applied
8915          * to hardware.
8916          *
8917          * We have to currently stall out here in atomic_check for outstanding
8918          * commits to finish in this case because our IRQ handlers reference
8919          * DRM state directly - we can end up disabling interrupts too early
8920          * if we don't.
8921          *
8922          * TODO: Remove this stall and drop DM state private objects.
8923          */
8924         if (lock_and_validation_needed) {
8925                 ret = dm_atomic_get_state(state, &dm_state);
8926                 if (ret)
8927                         goto fail;
8928
8929                 ret = do_aquire_global_lock(dev, state);
8930                 if (ret)
8931                         goto fail;
8932
8933 #if defined(CONFIG_DRM_AMD_DC_DCN)
8934                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8935                         goto fail;
8936
8937                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8938                 if (ret)
8939                         goto fail;
8940 #endif
8941
8942                 /*
8943                  * Perform validation of MST topology in the state:
8944                  * We need to perform the MST atomic check before calling
8945                  * dc_validate_global_state(), or there is a chance
8946                  * of getting stuck in an infinite loop and hanging eventually.
8947                  */
8948                 ret = drm_dp_mst_atomic_check(state);
8949                 if (ret)
8950                         goto fail;
8951                 status = dc_validate_global_state(dc, dm_state->context, false);
8952                 if (status != DC_OK) {
8953                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
8954                                        dc_status_to_str(status), status);
8955                         ret = -EINVAL;
8956                         goto fail;
8957                 }
8958         } else {
8959                 /*
8960                  * The commit is a fast update. Fast updates shouldn't change
8961                  * the DC context, affect global validation, and can have their
8962                  * commit work done in parallel with other commits not touching
8963                  * the same resource. If we have a new DC context as part of
8964                  * the DM atomic state from validation we need to free it and
8965                  * retain the existing one instead.
8966                  *
8967                  * Furthermore, since the DM atomic state only contains the DC
8968                  * context and can safely be annulled, we can free the state
8969                  * and clear the associated private object now to free
8970                  * some memory and avoid a possible use-after-free later.
8971                  */
8972
8973                 for (i = 0; i < state->num_private_objs; i++) {
8974                         struct drm_private_obj *obj = state->private_objs[i].ptr;
8975
8976                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
8977                                 int j = state->num_private_objs-1;
8978
8979                                 dm_atomic_destroy_state(obj,
8980                                                 state->private_objs[i].state);
8981
8982                                 /* If i is not at the end of the array then the
8983                                  * last element needs to be moved to where i was
8984                                  * before the array can safely be truncated.
8985                                  */
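                                /*
                                 * Illustrative example: with private_objs
                                 * [A, DM, B], i == 1 and j == 2, B is
                                 * copied into slot 1, slot 2 is cleared,
                                 * and num_private_objs drops to 2.
                                 */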
8986                                 if (i != j)
8987                                         state->private_objs[i] =
8988                                                 state->private_objs[j];
8989
8990                                 state->private_objs[j].ptr = NULL;
8991                                 state->private_objs[j].state = NULL;
8992                                 state->private_objs[j].old_state = NULL;
8993                                 state->private_objs[j].new_state = NULL;
8994
8995                                 state->num_private_objs = j;
8996                                 break;
8997                         }
8998                 }
8999         }
9000
9001         /* Store the overall update type for use later in atomic check. */
9002         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9003                 struct dm_crtc_state *dm_new_crtc_state =
9004                         to_dm_crtc_state(new_crtc_state);
9005
9006                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9007                                                          UPDATE_TYPE_FULL :
9008                                                          UPDATE_TYPE_FAST;
9009         }
9010
9011         /* ret must be 0 (success) at this point */
9012         WARN_ON(ret);
9013         return ret;
9014
9015 fail:
9016         if (ret == -EDEADLK)
9017                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9018         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9019                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9020         else
9021                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9022
9023         return ret;
9024 }
9025
9026 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9027                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
9028 {
9029         uint8_t dpcd_data;
9030         bool capable = false;
9031
9032         if (amdgpu_dm_connector->dc_link &&
9033                 dm_helpers_dp_read_dpcd(
9034                                 NULL,
9035                                 amdgpu_dm_connector->dc_link,
9036                                 DP_DOWN_STREAM_PORT_COUNT,
9037                                 &dpcd_data,
9038                                 sizeof(dpcd_data))) {
9039                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9040         }
9041
9042         return capable;
9043 }
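/*
 * Illustrative note, assuming the standard DPCD layout from
 * drm_dp_helper.h: DP_DOWN_STREAM_PORT_COUNT is DPCD address 0x007 and
 * DP_MSA_TIMING_PAR_IGNORED is bit 6 of that register, so a sink whose
 * register 0x007 reads e.g. 0x41 would be reported as capable here,
 * while a value of 0x01 would not.
 */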

9044 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9045                                         struct edid *edid)
9046 {
9047         int i;
9048         bool edid_check_required;
9049         struct detailed_timing *timing;
9050         struct detailed_non_pixel *data;
9051         struct detailed_data_monitor_range *range;
9052         struct amdgpu_dm_connector *amdgpu_dm_connector =
9053                         to_amdgpu_dm_connector(connector);
9054         struct dm_connector_state *dm_con_state = NULL;
9055
9056         struct drm_device *dev = connector->dev;
9057         struct amdgpu_device *adev = drm_to_adev(dev);
9058         bool freesync_capable = false;
9059
9060         if (!connector->state) {
9061                 DRM_ERROR("%s - Connector has no state\n", __func__);
9062                 goto update;
9063         }
9064
9065         if (!edid) {
9066                 dm_con_state = to_dm_connector_state(connector->state);
9067
9068                 amdgpu_dm_connector->min_vfreq = 0;
9069                 amdgpu_dm_connector->max_vfreq = 0;
9070                 amdgpu_dm_connector->pixel_clock_mhz = 0;
9071
9072                 goto update;
9073         }
9074
9075         dm_con_state = to_dm_connector_state(connector->state);
9076
9077         edid_check_required = false;
9078         if (!amdgpu_dm_connector->dc_sink) {
9079                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9080                 goto update;
9081         }
9082         if (!adev->dm.freesync_module)
9083                 goto update;
9084         /*
9085          * If the EDID is non-NULL, restrict FreeSync only to DP and eDP.
9086          */
9087         if (edid) {
9088                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9089                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9090                         edid_check_required = is_dp_capable_without_timing_msa(
9091                                                 adev->dm.dc,
9092                                                 amdgpu_dm_connector);
9093                 }
9094         }
9095         if (edid_check_required && (edid->version > 1 ||
9096            (edid->version == 1 && edid->revision > 1))) {
9097                 for (i = 0; i < 4; i++) {
9098
9099                         timing  = &edid->detailed_timings[i];
9100                         data    = &timing->data.other_data;
9101                         range   = &data->data.range;
9102                         /*
9103                          * Check if the monitor has continuous frequency mode.
9104                          */
9105                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
9106                                 continue;
9107                         /*
9108                          * Check for the range-limits-only flag. If flags == 1 then
9109                          * no additional timing information is provided.
9110                          * Default GTF, GTF secondary curve and CVT are not
9111                          * supported.
9112                          */
9113                         if (range->flags != 1)
9114                                 continue;
9115
9116                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9117                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9118                         amdgpu_dm_connector->pixel_clock_mhz =
9119                                 range->pixel_clock_mhz * 10;
9120                         break;
9121                 }
9122
9123                 if (amdgpu_dm_connector->max_vfreq -
9124                     amdgpu_dm_connector->min_vfreq > 10)
9125                         freesync_capable = true;
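                /*
                 * Illustrative example: an EDID range descriptor advertising
                 * 40-75 Hz gives a 35 Hz window (> 10 Hz), so the sink is
                 * treated as FreeSync-capable above, while a nearly fixed
                 * 59-61 Hz range is not.
                 */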
9128         }
9129
9130 update:
9131         if (dm_con_state)
9132                 dm_con_state->freesync_capable = freesync_capable;
9133
9134         if (connector->vrr_capable_property)
9135                 drm_connector_set_vrr_capable_property(connector,
9136                                                        freesync_capable);
9137 }
9138
9139 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9140 {
9141         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9142
9143         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9144                 return;
9145         if (link->type == dc_connection_none)
9146                 return;
9147         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9148                                         dpcd_data, sizeof(dpcd_data))) {
9149                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9150
9151                 if (dpcd_data[0] == 0) {
9152                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9153                         link->psr_settings.psr_feature_enabled = false;
9154                 } else {
9155                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
9156                         link->psr_settings.psr_feature_enabled = true;
9157                 }
9158
9159                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9160         }
9161 }
9162
9163 /*
9164  * amdgpu_dm_link_setup_psr() - configure psr link
9165  * @stream: stream state
9166  *
9167  * Return: true on success, false otherwise
9168  */
9169 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9170 {
9171         struct dc_link *link = NULL;
9172         struct psr_config psr_config = {0};
9173         struct psr_context psr_context = {0};
9174         bool ret = false;
9175
9176         if (stream == NULL)
9177                 return false;
9178
9179         link = stream->link;
9180
9181         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9182
9183         if (psr_config.psr_version > 0) {
9184                 psr_config.psr_exit_link_training_required = 0x1;
9185                 psr_config.psr_frame_capture_indication_req = 0;
9186                 psr_config.psr_rfb_setup_time = 0x37;
9187                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9188                 psr_config.allow_smu_optimizations = 0x0;
9189
9190                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9191         }
9192 
9193         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9194
9195         return ret;
9196 }
9197
9198 /*
9199  * amdgpu_dm_psr_enable() - enable psr f/w
9200  * @stream: stream state
9201  *
9202  * Return: true on success, false otherwise
9203  */
9204 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9205 {
9206         struct dc_link *link = stream->link;
9207         unsigned int vsync_rate_hz = 0;
9208         struct dc_static_screen_params params = {0};
9209         /* Calculate the number of static frames before generating an
9210          * interrupt to enter PSR.
9211          */
9212         /* Init fail-safe of 2 static frames */
9213         unsigned int num_frames_static = 2;
9214
9215         DRM_DEBUG_DRIVER("Enabling psr...\n");
9216
9217         vsync_rate_hz = div64_u64(div64_u64((
9218                         stream->timing.pix_clk_100hz * 100),
9219                         stream->timing.v_total),
9220                         stream->timing.h_total);
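        /*
         * Illustrative example with the standard 1920x1080@60 CEA timing
         * (values assumed here, not queried from hardware):
         * pix_clk_100hz = 1485000 (148.5 MHz), v_total = 1125 and
         * h_total = 2200, so vsync_rate_hz = 148500000 / 1125 / 2200 = 60.
         */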
9221
9222         /* Round up.
9223          * Calculate the number of frames such that at least 30 ms of time
9224          * has passed.
9225          */
9226         if (vsync_rate_hz != 0) {
9227                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9228                 num_frames_static = (30000 / frame_time_microsec) + 1;
9229         }
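        /*
         * Continuing the illustrative 60 Hz example: frame_time_microsec =
         * 1000000 / 60 = 16666, so num_frames_static = (30000 / 16666) + 1
         * = 2 frames; a 144 Hz stream would instead need
         * (30000 / 6944) + 1 = 5 frames to cover 30 ms.
         */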
9230
9231         params.triggers.cursor_update = true;
9232         params.triggers.overlay_update = true;
9233         params.triggers.surface_update = true;
9234         params.num_frames = num_frames_static;
9235
9236         dc_stream_set_static_screen_params(link->ctx->dc,
9237                                            &stream, 1,
9238                                            &params);
9239
9240         return dc_link_set_psr_allow_active(link, true, false);
9241 }
9242
9243 /*
9244  * amdgpu_dm_psr_disable() - disable psr f/w
9245  * @stream:  stream state
9246  *
9247  * Return: true on success, false otherwise
9248  */
9249 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9250 {
9251
9252         DRM_DEBUG_DRIVER("Disabling psr...\n");
9253
9254         return dc_link_set_psr_allow_active(stream->link, false, true);
9255 }
9256
9257 /*
9258  * amdgpu_dm_psr_disable_all() - disable psr f/w
9259  * if psr is enabled on any stream
9260  *
9261  * Return: true on success, false otherwise
9262  */
9263 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9264 {
9265         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9266         return dc_set_psr_allow_active(dm->dc, false);
9267 }
9268
9269 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9270 {
9271         struct amdgpu_device *adev = drm_to_adev(dev);
9272         struct dc *dc = adev->dm.dc;
9273         int i;
9274
9275         mutex_lock(&adev->dm.dc_lock);
9276         if (dc->current_state) {
9277                 for (i = 0; i < dc->current_state->stream_count; ++i)
9278                         dc->current_state->streams[i]
9279                                 ->triggered_crtc_reset.enabled =
9280                                 adev->dm.force_timing_sync;
9281
9282                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9283                 dc_trigger_sync(dc, dc->current_state);
9284         }
9285         mutex_unlock(&adev->dm.dc_lock);
9286 }